From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 27 Jul 2016 19:19:33
Message-Id: 1469647156.880094e53ecd7eee3c6a893854af3814d41387cd.mpagano@gentoo
commit: 880094e53ecd7eee3c6a893854af3814d41387cd
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 27 19:19:16 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 27 19:19:16 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=880094e5

Linux patch 4.4.16
9
10 0000_README | 4 +
11 1015_linux-4.4.16.patch | 5742 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5746 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 4eca9f9..8de8b32 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -99,6 +99,10 @@ Patch: 1014_linux-4.4.15.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.15
21
22 +Patch: 1015_linux-4.4.16.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.16
25 +
26 Patch: 1013_linux-4.4.14.patch
27 From: http://www.kernel.org
28 Desc: Linux 4.4.14
29
30 diff --git a/1015_linux-4.4.16.patch b/1015_linux-4.4.16.patch
31 new file mode 100644
32 index 0000000..2ac6012
33 --- /dev/null
34 +++ b/1015_linux-4.4.16.patch
35 @@ -0,0 +1,5742 @@
36 +diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
37 +index 6708c5e264aa..33e96f740639 100644
38 +--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
39 ++++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
40 +@@ -1,4 +1,4 @@
41 +-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
42 ++What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
43 + Date: March 2014
44 + KernelVersion: 3.15
45 + Contact: Matt Ranostay <mranostay@×××××.com>
46 +diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
47 +index 8638f61c8c9d..37eca00796ee 100644
48 +--- a/Documentation/scsi/scsi_eh.txt
49 ++++ b/Documentation/scsi/scsi_eh.txt
50 +@@ -263,19 +263,23 @@ scmd->allowed.
51 +
52 + 3. scmd recovered
53 + ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
54 +- - shost->host_failed--
55 + - clear scmd->eh_eflags
56 + - scsi_setup_cmd_retry()
57 + - move from local eh_work_q to local eh_done_q
58 + LOCKING: none
59 ++ CONCURRENCY: at most one thread per separate eh_work_q to
60 ++ keep queue manipulation lockless
61 +
62 + 4. EH completes
63 + ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
64 +- layer of failure.
65 ++ layer of failure. May be called concurrently but must have
66 ++ a no more than one thread per separate eh_work_q to
67 ++ manipulate the queue locklessly
68 + - scmd is removed from eh_done_q and scmd->eh_entry is cleared
69 + - if retry is necessary, scmd is requeued using
70 + scsi_queue_insert()
71 + - otherwise, scsi_finish_command() is invoked for scmd
72 ++ - zero shost->host_failed
73 + LOCKING: queue or finish function performs appropriate locking
74 +
75 +
76 +diff --git a/Makefile b/Makefile
77 +index 979088079338..da7621cadc8e 100644
78 +--- a/Makefile
79 ++++ b/Makefile
80 +@@ -1,6 +1,6 @@
81 + VERSION = 4
82 + PATCHLEVEL = 4
83 +-SUBLEVEL = 15
84 ++SUBLEVEL = 16
85 + EXTRAVERSION =
86 + NAME = Blurry Fish Butt
87 +
88 +diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
89 +index 6312f607932f..2d785f5a3041 100644
90 +--- a/arch/arc/Kconfig
91 ++++ b/arch/arc/Kconfig
92 +@@ -387,7 +387,7 @@ config ARC_HAS_LLSC
93 +
94 + config ARC_STAR_9000923308
95 + bool "Workaround for llock/scond livelock"
96 +- default y
97 ++ default n
98 + depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
99 +
100 + config ARC_HAS_SWAPE
101 +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
102 +index e1b87444ea9a..05131805aa33 100644
103 +--- a/arch/arc/kernel/setup.c
104 ++++ b/arch/arc/kernel/setup.c
105 +@@ -332,10 +332,6 @@ static void arc_chk_core_config(void)
106 + pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
107 + else if (!cpu->extn.fpu_dp && fpu_enabled)
108 + panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
109 +-
110 +- if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
111 +- !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
112 +- panic("llock/scond livelock workaround missing\n");
113 + }
114 +
115 + /*
116 +diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
117 +index 8450944b28e6..22f7a13e20b4 100644
118 +--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
119 ++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
120 +@@ -58,8 +58,8 @@
121 + soc {
122 + ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
123 + MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
124 +- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
125 +- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
126 ++ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
127 ++ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
128 +
129 + internal-regs {
130 +
131 +diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
132 +index 530ab28e9ca2..d21f50ba3172 100644
133 +--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
134 ++++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
135 +@@ -52,7 +52,7 @@
136 +
137 + / {
138 + model = "NextThing C.H.I.P.";
139 +- compatible = "nextthing,chip", "allwinner,sun5i-r8";
140 ++ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
141 +
142 + aliases {
143 + i2c0 = &i2c0;
144 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
145 +index aeddd28b3595..92fd2c8a9af0 100644
146 +--- a/arch/arm/include/asm/pgtable-2level.h
147 ++++ b/arch/arm/include/asm/pgtable-2level.h
148 +@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
149 +
150 + #define pmd_large(pmd) (pmd_val(pmd) & 2)
151 + #define pmd_bad(pmd) (pmd_val(pmd) & 2)
152 ++#define pmd_present(pmd) (pmd_val(pmd))
153 +
154 + #define copy_pmd(pmdpd,pmdps) \
155 + do { \
156 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
157 +index a745a2a53853..fd929b5ded9e 100644
158 +--- a/arch/arm/include/asm/pgtable-3level.h
159 ++++ b/arch/arm/include/asm/pgtable-3level.h
160 +@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
161 + : !!(pmd_val(pmd) & (val)))
162 + #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
163 +
164 ++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
165 + #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
166 + #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
167 + static inline pte_t pte_mkspecial(pte_t pte)
168 +@@ -257,10 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
169 + #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
170 + #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
171 +
172 +-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
173 ++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
174 + static inline pmd_t pmd_mknotpresent(pmd_t pmd)
175 + {
176 +- return __pmd(0);
177 ++ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
178 + }
179 +
180 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
181 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
182 +index 348caabb7625..d62204060cbe 100644
183 +--- a/arch/arm/include/asm/pgtable.h
184 ++++ b/arch/arm/include/asm/pgtable.h
185 +@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
186 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
187 +
188 + #define pmd_none(pmd) (!pmd_val(pmd))
189 +-#define pmd_present(pmd) (pmd_val(pmd))
190 +
191 + static inline pte_t *pmd_page_vaddr(pmd_t pmd)
192 + {
193 +diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
194 +index acaf7056efa5..e08d02667c81 100644
195 +--- a/arch/arm/mach-imx/mach-imx6ul.c
196 ++++ b/arch/arm/mach-imx/mach-imx6ul.c
197 +@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
198 + static void __init imx6ul_enet_phy_init(void)
199 + {
200 + if (IS_BUILTIN(CONFIG_PHYLIB))
201 +- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
202 ++ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
203 + ksz8081_phy_fixup);
204 + }
205 +
206 +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
207 +index 55348ee5a352..feed36b32ff6 100644
208 +--- a/arch/arm/mach-mvebu/coherency.c
209 ++++ b/arch/arm/mach-mvebu/coherency.c
210 +@@ -162,22 +162,16 @@ exit:
211 + }
212 +
213 + /*
214 +- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
215 +- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
216 +- * is needed as a workaround for a deadlock issue between the PCIe
217 +- * interface and the cache controller.
218 ++ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
219 ++ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
220 ++ * needed for the HW I/O coherency mechanism to work properly without
221 ++ * deadlock.
222 + */
223 + static void __iomem *
224 +-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
225 +- unsigned int mtype, void *caller)
226 ++armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
227 ++ unsigned int mtype, void *caller)
228 + {
229 +- struct resource pcie_mem;
230 +-
231 +- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
232 +-
233 +- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
234 +- mtype = MT_UNCACHED;
235 +-
236 ++ mtype = MT_UNCACHED;
237 + return __arm_ioremap_caller(phys_addr, size, mtype, caller);
238 + }
239 +
240 +@@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
241 + struct device_node *cache_dn;
242 +
243 + coherency_cpu_base = of_iomap(np, 0);
244 +- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
245 ++ arch_ioremap_caller = armada_wa_ioremap_caller;
246 +
247 + /*
248 + * We should switch the PL310 to I/O coherency mode only if
249 +diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
250 +index e9e5467e0bf4..a307eb6e7fa8 100644
251 +--- a/arch/arm64/include/asm/ptrace.h
252 ++++ b/arch/arm64/include/asm/ptrace.h
253 +@@ -58,6 +58,7 @@
254 + #define COMPAT_PSR_Z_BIT 0x40000000
255 + #define COMPAT_PSR_N_BIT 0x80000000
256 + #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
257 ++#define COMPAT_PSR_GE_MASK 0x000f0000
258 +
259 + #ifdef CONFIG_CPU_BIG_ENDIAN
260 + #define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
261 +@@ -151,35 +152,9 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
262 + return regs->regs[0];
263 + }
264 +
265 +-/*
266 +- * Are the current registers suitable for user mode? (used to maintain
267 +- * security in signal handlers)
268 +- */
269 +-static inline int valid_user_regs(struct user_pt_regs *regs)
270 +-{
271 +- if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) {
272 +- regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT);
273 +-
274 +- /* The T bit is reserved for AArch64 */
275 +- if (!(regs->pstate & PSR_MODE32_BIT))
276 +- regs->pstate &= ~COMPAT_PSR_T_BIT;
277 +-
278 +- return 1;
279 +- }
280 +-
281 +- /*
282 +- * Force PSR to something logical...
283 +- */
284 +- regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \
285 +- COMPAT_PSR_T_BIT | PSR_MODE32_BIT;
286 +-
287 +- if (!(regs->pstate & PSR_MODE32_BIT)) {
288 +- regs->pstate &= ~COMPAT_PSR_T_BIT;
289 +- regs->pstate |= PSR_MODE_EL0t;
290 +- }
291 +-
292 +- return 0;
293 +-}
294 ++/* We must avoid circular header include via sched.h */
295 ++struct task_struct;
296 ++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
297 +
298 + #define instruction_pointer(regs) ((unsigned long)(regs)->pc)
299 +
300 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
301 +index ff7f13239515..fc779ec6f051 100644
302 +--- a/arch/arm64/kernel/ptrace.c
303 ++++ b/arch/arm64/kernel/ptrace.c
304 +@@ -39,6 +39,7 @@
305 + #include <linux/elf.h>
306 +
307 + #include <asm/compat.h>
308 ++#include <asm/cpufeature.h>
309 + #include <asm/debug-monitors.h>
310 + #include <asm/pgtable.h>
311 + #include <asm/syscall.h>
312 +@@ -500,7 +501,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
313 + if (ret)
314 + return ret;
315 +
316 +- if (!valid_user_regs(&newregs))
317 ++ if (!valid_user_regs(&newregs, target))
318 + return -EINVAL;
319 +
320 + task_pt_regs(target)->user_regs = newregs;
321 +@@ -770,7 +771,7 @@ static int compat_gpr_set(struct task_struct *target,
322 +
323 + }
324 +
325 +- if (valid_user_regs(&newregs.user_regs))
326 ++ if (valid_user_regs(&newregs.user_regs, target))
327 + *task_pt_regs(target) = newregs;
328 + else
329 + ret = -EINVAL;
330 +@@ -1272,3 +1273,79 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
331 + if (test_thread_flag(TIF_SYSCALL_TRACE))
332 + tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
333 + }
334 ++
335 ++/*
336 ++ * Bits which are always architecturally RES0 per ARM DDI 0487A.h
337 ++ * Userspace cannot use these until they have an architectural meaning.
338 ++ * We also reserve IL for the kernel; SS is handled dynamically.
339 ++ */
340 ++#define SPSR_EL1_AARCH64_RES0_BITS \
341 ++ (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
342 ++ GENMASK_ULL(5, 5))
343 ++#define SPSR_EL1_AARCH32_RES0_BITS \
344 ++ (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
345 ++
346 ++static int valid_compat_regs(struct user_pt_regs *regs)
347 ++{
348 ++ regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
349 ++
350 ++ if (!system_supports_mixed_endian_el0()) {
351 ++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
352 ++ regs->pstate |= COMPAT_PSR_E_BIT;
353 ++ else
354 ++ regs->pstate &= ~COMPAT_PSR_E_BIT;
355 ++ }
356 ++
357 ++ if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
358 ++ (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
359 ++ (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
360 ++ (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
361 ++ return 1;
362 ++ }
363 ++
364 ++ /*
365 ++ * Force PSR to a valid 32-bit EL0t, preserving the same bits as
366 ++ * arch/arm.
367 ++ */
368 ++ regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
369 ++ COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
370 ++ COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
371 ++ COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
372 ++ COMPAT_PSR_T_BIT;
373 ++ regs->pstate |= PSR_MODE32_BIT;
374 ++
375 ++ return 0;
376 ++}
377 ++
378 ++static int valid_native_regs(struct user_pt_regs *regs)
379 ++{
380 ++ regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
381 ++
382 ++ if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
383 ++ (regs->pstate & PSR_D_BIT) == 0 &&
384 ++ (regs->pstate & PSR_A_BIT) == 0 &&
385 ++ (regs->pstate & PSR_I_BIT) == 0 &&
386 ++ (regs->pstate & PSR_F_BIT) == 0) {
387 ++ return 1;
388 ++ }
389 ++
390 ++ /* Force PSR to a valid 64-bit EL0t */
391 ++ regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
392 ++
393 ++ return 0;
394 ++}
395 ++
396 ++/*
397 ++ * Are the current registers suitable for user mode? (used to maintain
398 ++ * security in signal handlers)
399 ++ */
400 ++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
401 ++{
402 ++ if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
403 ++ regs->pstate &= ~DBG_SPSR_SS;
404 ++
405 ++ if (is_compat_thread(task_thread_info(task)))
406 ++ return valid_compat_regs(regs);
407 ++ else
408 ++ return valid_native_regs(regs);
409 ++}
410 +diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
411 +index e18c48cb6db1..a8eafdbc7cb8 100644
412 +--- a/arch/arm64/kernel/signal.c
413 ++++ b/arch/arm64/kernel/signal.c
414 +@@ -115,7 +115,7 @@ static int restore_sigframe(struct pt_regs *regs,
415 + */
416 + regs->syscallno = ~0UL;
417 +
418 +- err |= !valid_user_regs(&regs->user_regs);
419 ++ err |= !valid_user_regs(&regs->user_regs, current);
420 +
421 + if (err == 0) {
422 + struct fpsimd_context *fpsimd_ctx =
423 +@@ -307,7 +307,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
424 + /*
425 + * Check that the resulting registers are actually sane.
426 + */
427 +- ret |= !valid_user_regs(&regs->user_regs);
428 ++ ret |= !valid_user_regs(&regs->user_regs, current);
429 +
430 + /*
431 + * Fast forward the stepping logic so we step into the signal
432 +diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
433 +index 71ef6dc89ae5..107335637390 100644
434 +--- a/arch/arm64/kernel/signal32.c
435 ++++ b/arch/arm64/kernel/signal32.c
436 +@@ -356,7 +356,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
437 + */
438 + regs->syscallno = ~0UL;
439 +
440 +- err |= !valid_user_regs(&regs->user_regs);
441 ++ err |= !valid_user_regs(&regs->user_regs, current);
442 +
443 + aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
444 + if (err == 0)
445 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
446 +index 4e956b3e16f5..dd7cee795709 100644
447 +--- a/arch/mips/include/asm/kvm_host.h
448 ++++ b/arch/mips/include/asm/kvm_host.h
449 +@@ -372,6 +372,7 @@ struct kvm_mips_tlb {
450 + #define KVM_MIPS_GUEST_TLB_SIZE 64
451 + struct kvm_vcpu_arch {
452 + void *host_ebase, *guest_ebase;
453 ++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
454 + unsigned long host_stack;
455 + unsigned long host_gp;
456 +
457 +diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
458 +index 4ab4bdfad703..2143884709e4 100644
459 +--- a/arch/mips/kvm/interrupt.h
460 ++++ b/arch/mips/kvm/interrupt.h
461 +@@ -28,6 +28,7 @@
462 + #define MIPS_EXC_MAX 12
463 + /* XXXSL More to follow */
464 +
465 ++extern char __kvm_mips_vcpu_run_end[];
466 + extern char mips32_exception[], mips32_exceptionEnd[];
467 + extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
468 +
469 +diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
470 +index 7e2210846b8b..77706433651b 100644
471 +--- a/arch/mips/kvm/locore.S
472 ++++ b/arch/mips/kvm/locore.S
473 +@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
474 +
475 + /* Jump to guest */
476 + eret
477 ++EXPORT(__kvm_mips_vcpu_run_end)
478 +
479 + VECTOR(MIPSX(exception), unknown)
480 + /* Find out what mode we came from and jump to the proper handler. */
481 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
482 +index 2683d04fdda5..e86b7499921a 100644
483 +--- a/arch/mips/kvm/mips.c
484 ++++ b/arch/mips/kvm/mips.c
485 +@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
486 + memcpy(gebase + offset, mips32_GuestException,
487 + mips32_GuestExceptionEnd - mips32_GuestException);
488 +
489 ++#ifdef MODULE
490 ++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
491 ++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
492 ++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
493 ++ vcpu->arch.vcpu_run = gebase + offset;
494 ++#else
495 ++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
496 ++#endif
497 ++
498 + /* Invalidate the icache for these ranges */
499 + local_flush_icache_range((unsigned long)gebase,
500 + (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
501 +@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
502 + /* Disable hardware page table walking while in guest */
503 + htw_stop();
504 +
505 +- r = __kvm_mips_vcpu_run(run, vcpu);
506 ++ r = vcpu->arch.vcpu_run(run, vcpu);
507 +
508 + /* Re-enable HTW before enabling interrupts */
509 + htw_start();
510 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
511 +index 646bf4d222c1..cf788d7d7e56 100644
512 +--- a/arch/powerpc/kernel/process.c
513 ++++ b/arch/powerpc/kernel/process.c
514 +@@ -1239,6 +1239,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
515 + current->thread.regs = regs - 1;
516 + }
517 +
518 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
519 ++ /*
520 ++ * Clear any transactional state, we're exec()ing. The cause is
521 ++ * not important as there will never be a recheckpoint so it's not
522 ++ * user visible.
523 ++ */
524 ++ if (MSR_TM_SUSPENDED(mfmsr()))
525 ++ tm_reclaim_current(0);
526 ++#endif
527 ++
528 + memset(regs->gpr, 0, sizeof(regs->gpr));
529 + regs->ctr = 0;
530 + regs->link = 0;
531 +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
532 +index e52b82b71d79..b7e86e00048f 100644
533 +--- a/arch/powerpc/kernel/prom_init.c
534 ++++ b/arch/powerpc/kernel/prom_init.c
535 +@@ -718,7 +718,7 @@ unsigned char ibm_architecture_vec[] = {
536 + * must match by the macro below. Update the definition if
537 + * the structure layout changes.
538 + */
539 +-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
540 ++#define IBM_ARCH_VEC_NRCORES_OFFSET 133
541 + W(NR_CPUS), /* number of cores supported */
542 + 0,
543 + 0,
544 +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
545 +index bd98ce2be17b..3e8865b187de 100644
546 +--- a/arch/powerpc/platforms/pseries/iommu.c
547 ++++ b/arch/powerpc/platforms/pseries/iommu.c
548 +@@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
549 + static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
550 + struct ddw_query_response *query)
551 + {
552 +- struct eeh_dev *edev;
553 ++ struct device_node *dn;
554 ++ struct pci_dn *pdn;
555 + u32 cfg_addr;
556 + u64 buid;
557 + int ret;
558 +@@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
559 + * Retrieve them from the pci device, not the node with the
560 + * dma-window property
561 + */
562 +- edev = pci_dev_to_eeh_dev(dev);
563 +- cfg_addr = edev->config_addr;
564 +- if (edev->pe_config_addr)
565 +- cfg_addr = edev->pe_config_addr;
566 +- buid = edev->phb->buid;
567 ++ dn = pci_device_to_OF_node(dev);
568 ++ pdn = PCI_DN(dn);
569 ++ buid = pdn->phb->buid;
570 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
571 +
572 + ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
573 + cfg_addr, BUID_HI(buid), BUID_LO(buid));
574 +@@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
575 + struct ddw_create_response *create, int page_shift,
576 + int window_shift)
577 + {
578 +- struct eeh_dev *edev;
579 ++ struct device_node *dn;
580 ++ struct pci_dn *pdn;
581 + u32 cfg_addr;
582 + u64 buid;
583 + int ret;
584 +@@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
585 + * Retrieve them from the pci device, not the node with the
586 + * dma-window property
587 + */
588 +- edev = pci_dev_to_eeh_dev(dev);
589 +- cfg_addr = edev->config_addr;
590 +- if (edev->pe_config_addr)
591 +- cfg_addr = edev->pe_config_addr;
592 +- buid = edev->phb->buid;
593 ++ dn = pci_device_to_OF_node(dev);
594 ++ pdn = PCI_DN(dn);
595 ++ buid = pdn->phb->buid;
596 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
597 +
598 + do {
599 + /* extra outputs are LIOBN and dma-addr (hi, lo) */
600 +diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
601 +index 5e04f3cbd320..8ae236b0f80b 100644
602 +--- a/arch/s390/include/asm/fpu/api.h
603 ++++ b/arch/s390/include/asm/fpu/api.h
604 +@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
605 + " la %0,0\n"
606 + "1:\n"
607 + EX_TABLE(0b,1b)
608 +- : "=d" (rc), "=d" (orig_fpc)
609 ++ : "=d" (rc), "=&d" (orig_fpc)
610 + : "d" (fpc), "0" (-EINVAL));
611 + return rc;
612 + }
613 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
614 +index 2ee62dba0373..c0cc2a6be0bf 100644
615 +--- a/arch/x86/boot/Makefile
616 ++++ b/arch/x86/boot/Makefile
617 +@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
618 + for i in lib lib64 share end ; do \
619 + if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
620 + cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
621 ++ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
622 ++ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
623 ++ fi ; \
624 + break ; \
625 + fi ; \
626 + if [ $$i = end ] ; then exit 1 ; fi ; \
627 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
628 +index 29fa475ec518..c986d0b3bc35 100644
629 +--- a/arch/x86/kernel/amd_nb.c
630 ++++ b/arch/x86/kernel/amd_nb.c
631 +@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
632 + while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
633 + i++;
634 +
635 +- if (i == 0)
636 +- return 0;
637 ++ if (!i)
638 ++ return -ENODEV;
639 +
640 + nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
641 + if (!nb)
642 +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
643 +index 078de2e86b7a..5f82cd59f0e5 100644
644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c
645 ++++ b/arch/x86/kernel/cpu/perf_event_intel.c
646 +@@ -3601,7 +3601,7 @@ __init int intel_pmu_init(void)
647 + c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
648 + }
649 + c->idxmsk64 &=
650 +- ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
651 ++ ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
652 + c->weight = hweight64(c->idxmsk64);
653 + }
654 + }
655 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
656 +index 1deffe6cc873..023c442c33bb 100644
657 +--- a/arch/x86/kernel/kprobes/core.c
658 ++++ b/arch/x86/kernel/kprobes/core.c
659 +@@ -959,7 +959,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
660 + * normal page fault.
661 + */
662 + regs->ip = (unsigned long)cur->addr;
663 ++ /*
664 ++ * Trap flag (TF) has been set here because this fault
665 ++ * happened where the single stepping will be done.
666 ++ * So clear it by resetting the current kprobe:
667 ++ */
668 ++ regs->flags &= ~X86_EFLAGS_TF;
669 ++
670 ++ /*
671 ++ * If the TF flag was set before the kprobe hit,
672 ++ * don't touch it:
673 ++ */
674 + regs->flags |= kcb->kprobe_old_flags;
675 ++
676 + if (kcb->kprobe_status == KPROBE_REENTER)
677 + restore_previous_kprobe(kcb);
678 + else
679 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
680 +index f314e9b9660b..41e7943004fe 100644
681 +--- a/arch/x86/kvm/vmx.c
682 ++++ b/arch/x86/kvm/vmx.c
683 +@@ -6579,7 +6579,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
684 +
685 + /* Checks for #GP/#SS exceptions. */
686 + exn = false;
687 +- if (is_protmode(vcpu)) {
688 ++ if (is_long_mode(vcpu)) {
689 ++ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
690 ++ * non-canonical form. This is the only check on the memory
691 ++ * destination for long mode!
692 ++ */
693 ++ exn = is_noncanonical_address(*ret);
694 ++ } else if (is_protmode(vcpu)) {
695 + /* Protected mode: apply checks for segment validity in the
696 + * following order:
697 + * - segment type check (#GP(0) may be thrown)
698 +@@ -6596,17 +6602,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
699 + * execute-only code segment
700 + */
701 + exn = ((s.type & 0xa) == 8);
702 +- }
703 +- if (exn) {
704 +- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
705 +- return 1;
706 +- }
707 +- if (is_long_mode(vcpu)) {
708 +- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
709 +- * non-canonical form. This is an only check for long mode.
710 +- */
711 +- exn = is_noncanonical_address(*ret);
712 +- } else if (is_protmode(vcpu)) {
713 ++ if (exn) {
714 ++ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
715 ++ return 1;
716 ++ }
717 + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
718 + */
719 + exn = (s.unusable != 0);
720 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
721 +index 961acc788f44..91a9e6af2ec4 100644
722 +--- a/drivers/ata/libata-eh.c
723 ++++ b/drivers/ata/libata-eh.c
724 +@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
725 + ata_scsi_port_error_handler(host, ap);
726 +
727 + /* finish or retry handled scmd's and clean up */
728 +- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
729 ++ WARN_ON(!list_empty(&eh_work_q));
730 +
731 + DPRINTK("EXIT\n");
732 + }
733 +diff --git a/drivers/base/module.c b/drivers/base/module.c
734 +index db930d3ee312..2a215780eda2 100644
735 +--- a/drivers/base/module.c
736 ++++ b/drivers/base/module.c
737 +@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
738 +
739 + static void module_create_drivers_dir(struct module_kobject *mk)
740 + {
741 +- if (!mk || mk->drivers_dir)
742 +- return;
743 ++ static DEFINE_MUTEX(drivers_dir_mutex);
744 +
745 +- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
746 ++ mutex_lock(&drivers_dir_mutex);
747 ++ if (mk && !mk->drivers_dir)
748 ++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
749 ++ mutex_unlock(&drivers_dir_mutex);
750 + }
751 +
752 + void module_add_driver(struct module *mod, struct device_driver *drv)
753 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
754 +index e3536da05c88..a084a4751fa9 100644
755 +--- a/drivers/char/ipmi/ipmi_msghandler.c
756 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
757 +@@ -3819,6 +3819,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
758 + while (!list_empty(&intf->waiting_rcv_msgs)) {
759 + smi_msg = list_entry(intf->waiting_rcv_msgs.next,
760 + struct ipmi_smi_msg, link);
761 ++ list_del(&smi_msg->link);
762 + if (!run_to_completion)
763 + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
764 + flags);
765 +@@ -3828,11 +3829,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
766 + if (rv > 0) {
767 + /*
768 + * To preserve message order, quit if we
769 +- * can't handle a message.
770 ++ * can't handle a message. Add the message
771 ++ * back at the head, this is safe because this
772 ++ * tasklet is the only thing that pulls the
773 ++ * messages.
774 + */
775 ++ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
776 + break;
777 + } else {
778 +- list_del(&smi_msg->link);
779 + if (rv == 0)
780 + /* Message handled */
781 + ipmi_free_smi_msg(smi_msg);
782 +diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
783 +index 9e9e196c6d51..45b5adaafa6f 100644
784 +--- a/drivers/crypto/qat/qat_common/Makefile
785 ++++ b/drivers/crypto/qat/qat_common/Makefile
786 +@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
787 + $(obj)/qat_rsapubkey-asn1.h
788 + $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
789 + $(obj)/qat_rsaprivkey-asn1.h
790 ++$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
791 +
792 + clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
793 + clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h
794 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
795 +index 37649221f81c..ca64b174f8a3 100644
796 +--- a/drivers/edac/sb_edac.c
797 ++++ b/drivers/edac/sb_edac.c
798 +@@ -218,8 +218,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
799 + { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
800 + };
801 +
802 +-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
803 +-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
804 ++#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
805 ++ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
806 ++
807 ++#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
808 ++ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
809 +
810 + /* Device 16, functions 2-7 */
811 +
812 +@@ -1175,14 +1178,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
813 + pci_read_config_dword(pvt->pci_tad[i],
814 + rir_offset[j][k],
815 + &reg);
816 +- tmp_mb = RIR_OFFSET(reg) << 6;
817 ++ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
818 +
819 + gb = div_u64_rem(tmp_mb, 1024, &mb);
820 + edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
821 + i, j, k,
822 + gb, (mb*1000)/1024,
823 + ((u64)tmp_mb) << 20L,
824 +- (u32)RIR_RNK_TGT(reg),
825 ++ (u32)RIR_RNK_TGT(pvt->info.type, reg),
826 + reg);
827 + }
828 + }
829 +@@ -1512,7 +1515,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
830 + pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
831 + rir_offset[n_rir][idx],
832 + &reg);
833 +- *rank = RIR_RNK_TGT(reg);
834 ++ *rank = RIR_RNK_TGT(pvt->info.type, reg);
835 +
836 + edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
837 + n_rir,
838 +diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
839 +index 3a5c7011ad3b..8b830996fe02 100644
840 +--- a/drivers/gpio/gpiolib-legacy.c
841 ++++ b/drivers/gpio/gpiolib-legacy.c
842 +@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
843 + if (!desc && gpio_is_valid(gpio))
844 + return -EPROBE_DEFER;
845 +
846 ++ err = gpiod_request(desc, label);
847 ++ if (err)
848 ++ return err;
849 ++
850 + if (flags & GPIOF_OPEN_DRAIN)
851 + set_bit(FLAG_OPEN_DRAIN, &desc->flags);
852 +
853 +@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
854 + if (flags & GPIOF_ACTIVE_LOW)
855 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
856 +
857 +- err = gpiod_request(desc, label);
858 +- if (err)
859 +- return err;
860 +-
861 + if (flags & GPIOF_DIR_IN)
862 + err = gpiod_direction_input(desc);
863 + else
864 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
865 +index 4e4c3083ae56..06d345b087f8 100644
866 +--- a/drivers/gpio/gpiolib.c
867 ++++ b/drivers/gpio/gpiolib.c
868 +@@ -927,14 +927,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
869 + spin_lock_irqsave(&gpio_lock, flags);
870 + }
871 + done:
872 +- if (status < 0) {
873 +- /* Clear flags that might have been set by the caller before
874 +- * requesting the GPIO.
875 +- */
876 +- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
877 +- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
878 +- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
879 +- }
880 + spin_unlock_irqrestore(&gpio_lock, flags);
881 + return status;
882 + }
883 +@@ -2062,28 +2054,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
884 + }
885 + EXPORT_SYMBOL_GPL(gpiod_get_optional);
886 +
887 +-/**
888 +- * gpiod_parse_flags - helper function to parse GPIO lookup flags
889 +- * @desc: gpio to be setup
890 +- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
891 +- * of_get_gpio_hog()
892 +- *
893 +- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
894 +- */
895 +-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
896 +-{
897 +- if (lflags & GPIO_ACTIVE_LOW)
898 +- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
899 +- if (lflags & GPIO_OPEN_DRAIN)
900 +- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
901 +- if (lflags & GPIO_OPEN_SOURCE)
902 +- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
903 +-}
904 +
905 + /**
906 + * gpiod_configure_flags - helper function to configure a given GPIO
907 + * @desc: gpio whose value will be assigned
908 + * @con_id: function within the GPIO consumer
909 ++ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
910 ++ * of_get_gpio_hog()
911 + * @dflags: gpiod_flags - optional GPIO initialization flags
912 + *
913 + * Return 0 on success, -ENOENT if no GPIO has been assigned to the
914 +@@ -2091,10 +2068,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
915 + * occurred while trying to acquire the GPIO.
916 + */
917 + static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
918 +- enum gpiod_flags dflags)
919 ++ unsigned long lflags, enum gpiod_flags dflags)
920 + {
921 + int status;
922 +
923 ++ if (lflags & GPIO_ACTIVE_LOW)
924 ++ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
925 ++ if (lflags & GPIO_OPEN_DRAIN)
926 ++ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
927 ++ if (lflags & GPIO_OPEN_SOURCE)
928 ++ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
929 ++
930 + /* No particular flag request, return here... */
931 + if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
932 + pr_debug("no flags found for %s\n", con_id);
933 +@@ -2161,13 +2145,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
934 + return desc;
935 + }
936 +
937 +- gpiod_parse_flags(desc, lookupflags);
938 +-
939 + status = gpiod_request(desc, con_id);
940 + if (status < 0)
941 + return ERR_PTR(status);
942 +
943 +- status = gpiod_configure_flags(desc, con_id, flags);
944 ++ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
945 + if (status < 0) {
946 + dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
947 + gpiod_put(desc);
948 +@@ -2223,6 +2205,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
949 + if (IS_ERR(desc))
950 + return desc;
951 +
952 ++ ret = gpiod_request(desc, NULL);
953 ++ if (ret)
954 ++ return ERR_PTR(ret);
955 ++
956 + if (active_low)
957 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
958 +
959 +@@ -2233,10 +2219,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
960 + set_bit(FLAG_OPEN_SOURCE, &desc->flags);
961 + }
962 +
963 +- ret = gpiod_request(desc, NULL);
964 +- if (ret)
965 +- return ERR_PTR(ret);
966 +-
967 + return desc;
968 + }
969 + EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
970 +@@ -2289,8 +2271,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
971 + chip = gpiod_to_chip(desc);
972 + hwnum = gpio_chip_hwgpio(desc);
973 +
974 +- gpiod_parse_flags(desc, lflags);
975 +-
976 + local_desc = gpiochip_request_own_desc(chip, hwnum, name);
977 + if (IS_ERR(local_desc)) {
978 + pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
979 +@@ -2298,7 +2278,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
980 + return PTR_ERR(local_desc);
981 + }
982 +
983 +- status = gpiod_configure_flags(desc, name, dflags);
984 ++ status = gpiod_configure_flags(desc, name, lflags, dflags);
985 + if (status < 0) {
986 + pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
987 + name, chip->label, hwnum);
988 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
989 +index 946300764609..b57fffc2d4af 100644
990 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
991 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
992 +@@ -5463,7 +5463,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
993 + case 2:
994 + for (i = 0; i < adev->gfx.num_compute_rings; i++) {
995 + ring = &adev->gfx.compute_ring[i];
996 +- if ((ring->me == me_id) & (ring->pipe == pipe_id))
997 ++ if ((ring->me == me_id) && (ring->pipe == pipe_id))
998 + amdgpu_fence_process(ring);
999 + }
1000 + break;
1001 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1002 +index 9be007081b72..eb1da83c9902 100644
1003 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1004 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1005 +@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
1006 + pqm_uninit(&p->pqm);
1007 +
1008 + /* Iterate over all process device data structure and check
1009 +- * if we should reset all wavefronts */
1010 +- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
1011 ++ * if we should delete debug managers and reset all wavefronts
1012 ++ */
1013 ++ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
1014 ++ if ((pdd->dev->dbgmgr) &&
1015 ++ (pdd->dev->dbgmgr->pasid == p->pasid))
1016 ++ kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
1017 ++
1018 + if (pdd->reset_wavefronts) {
1019 + pr_warn("amdkfd: Resetting all wave fronts\n");
1020 + dbgdev_wave_reset_wavefronts(pdd->dev, p);
1021 + pdd->reset_wavefronts = false;
1022 + }
1023 ++ }
1024 +
1025 + mutex_unlock(&p->mutex);
1026 +
1027 +@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
1028 +
1029 + idx = srcu_read_lock(&kfd_processes_srcu);
1030 +
1031 ++ /*
1032 ++ * Look for the process that matches the pasid. If there is no such
1033 ++ * process, we either released it in amdkfd's own notifier, or there
1034 ++ * is a bug. Unfortunately, there is no way to tell...
1035 ++ */
1036 + hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
1037 +- if (p->pasid == pasid)
1038 +- break;
1039 ++ if (p->pasid == pasid) {
1040 +
1041 +- srcu_read_unlock(&kfd_processes_srcu, idx);
1042 ++ srcu_read_unlock(&kfd_processes_srcu, idx);
1043 +
1044 +- BUG_ON(p->pasid != pasid);
1045 ++ pr_debug("Unbinding process %d from IOMMU\n", pasid);
1046 +
1047 +- mutex_lock(&p->mutex);
1048 ++ mutex_lock(&p->mutex);
1049 +
1050 +- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1051 +- kfd_dbgmgr_destroy(dev->dbgmgr);
1052 ++ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1053 ++ kfd_dbgmgr_destroy(dev->dbgmgr);
1054 +
1055 +- pqm_uninit(&p->pqm);
1056 ++ pqm_uninit(&p->pqm);
1057 +
1058 +- pdd = kfd_get_process_device_data(dev, p);
1059 ++ pdd = kfd_get_process_device_data(dev, p);
1060 +
1061 +- if (!pdd) {
1062 +- mutex_unlock(&p->mutex);
1063 +- return;
1064 +- }
1065 ++ if (!pdd) {
1066 ++ mutex_unlock(&p->mutex);
1067 ++ return;
1068 ++ }
1069 +
1070 +- if (pdd->reset_wavefronts) {
1071 +- dbgdev_wave_reset_wavefronts(pdd->dev, p);
1072 +- pdd->reset_wavefronts = false;
1073 +- }
1074 ++ if (pdd->reset_wavefronts) {
1075 ++ dbgdev_wave_reset_wavefronts(pdd->dev, p);
1076 ++ pdd->reset_wavefronts = false;
1077 ++ }
1078 +
1079 +- /*
1080 +- * Just mark pdd as unbound, because we still need it to call
1081 +- * amd_iommu_unbind_pasid() in when the process exits.
1082 +- * We don't call amd_iommu_unbind_pasid() here
1083 +- * because the IOMMU called us.
1084 +- */
1085 +- pdd->bound = false;
1086 ++ /*
1087 ++ * Just mark pdd as unbound, because we still need it
1088 ++ * to call amd_iommu_unbind_pasid() in when the
1089 ++ * process exits.
1090 ++ * We don't call amd_iommu_unbind_pasid() here
1091 ++ * because the IOMMU called us.
1092 ++ */
1093 ++ pdd->bound = false;
1094 +
1095 +- mutex_unlock(&p->mutex);
1096 ++ mutex_unlock(&p->mutex);
1097 ++
1098 ++ return;
1099 ++ }
1100 ++
1101 ++ srcu_read_unlock(&kfd_processes_srcu, idx);
1102 + }
1103 +
1104 + struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
1105 +diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1106 +index d0299aed517e..59d1269626b1 100644
1107 +--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1108 ++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1109 +@@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
1110 +
1111 + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
1112 + factor_reg);
1113 ++ } else {
1114 ++ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
1115 + }
1116 + }
1117 +
1118 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1119 +index aed2e3f8a1a2..6253775b8d9c 100644
1120 +--- a/drivers/gpu/drm/drm_atomic.c
1121 ++++ b/drivers/gpu/drm/drm_atomic.c
1122 +@@ -367,6 +367,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1123 + drm_property_unreference_blob(state->mode_blob);
1124 + state->mode_blob = NULL;
1125 +
1126 ++ memset(&state->mode, 0, sizeof(state->mode));
1127 ++
1128 + if (blob) {
1129 + if (blob->length != sizeof(struct drm_mode_modeinfo) ||
1130 + drm_mode_convert_umode(&state->mode,
1131 +@@ -379,7 +381,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1132 + DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
1133 + state->mode.name, state);
1134 + } else {
1135 +- memset(&state->mode, 0, sizeof(state->mode));
1136 + state->enable = false;
1137 + DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
1138 + state);
1139 +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1140 +index a02238c85e18..dc84003f694e 100644
1141 +--- a/drivers/gpu/drm/drm_crtc.c
1142 ++++ b/drivers/gpu/drm/drm_crtc.c
1143 +@@ -2682,8 +2682,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1144 + goto out;
1145 + }
1146 +
1147 +- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1148 +-
1149 + /*
1150 + * Check whether the primary plane supports the fb pixel format.
1151 + * Drivers not implementing the universal planes API use a
1152 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1153 +index d268bf18a662..2485fb652716 100644
1154 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1155 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1156 +@@ -2874,11 +2874,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1157 + drm_dp_port_teardown_pdt(port, port->pdt);
1158 +
1159 + if (!port->input && port->vcpi.vcpi > 0) {
1160 +- if (mgr->mst_state) {
1161 +- drm_dp_mst_reset_vcpi_slots(mgr, port);
1162 +- drm_dp_update_payload_part1(mgr);
1163 +- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1164 +- }
1165 ++ drm_dp_mst_reset_vcpi_slots(mgr, port);
1166 ++ drm_dp_update_payload_part1(mgr);
1167 ++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1168 + }
1169 +
1170 + kref_put(&port->kref, drm_dp_free_mst_port);
1171 +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1172 +index cd74a0953f42..39e30abddf08 100644
1173 +--- a/drivers/gpu/drm/drm_modes.c
1174 ++++ b/drivers/gpu/drm/drm_modes.c
1175 +@@ -1487,6 +1487,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1176 + if (out->status != MODE_OK)
1177 + goto out;
1178 +
1179 ++ drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1180 ++
1181 + ret = 0;
1182 +
1183 + out:
1184 +diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1185 +index f7df54a8ee2b..c0a96f1ee18e 100644
1186 +--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
1187 ++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1188 +@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1189 + if (!mutex_is_locked(mutex))
1190 + return false;
1191 +
1192 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1193 ++#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
1194 + return mutex->owner == task;
1195 + #else
1196 + /* Since UP may be pre-empted, we cannot assume that we own the lock */
1197 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1198 +index 7e461dca564c..9ed9f6dde86f 100644
1199 +--- a/drivers/gpu/drm/i915/i915_reg.h
1200 ++++ b/drivers/gpu/drm/i915/i915_reg.h
1201 +@@ -7357,6 +7357,8 @@ enum skl_disp_power_wells {
1202 + #define TRANS_CLK_SEL_DISABLED (0x0<<29)
1203 + #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
1204 +
1205 ++#define CDCLK_FREQ 0x46200
1206 ++
1207 + #define TRANSA_MSA_MISC 0x60410
1208 + #define TRANSB_MSA_MISC 0x61410
1209 + #define TRANSC_MSA_MISC 0x62410
1210 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1211 +index afa81691163d..c41bc42b6fa7 100644
1212 +--- a/drivers/gpu/drm/i915/intel_display.c
1213 ++++ b/drivers/gpu/drm/i915/intel_display.c
1214 +@@ -8228,12 +8228,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1215 + {
1216 + struct drm_i915_private *dev_priv = dev->dev_private;
1217 + struct intel_encoder *encoder;
1218 ++ int i;
1219 + u32 val, final;
1220 + bool has_lvds = false;
1221 + bool has_cpu_edp = false;
1222 + bool has_panel = false;
1223 + bool has_ck505 = false;
1224 + bool can_ssc = false;
1225 ++ bool using_ssc_source = false;
1226 +
1227 + /* We need to take the global config into account */
1228 + for_each_intel_encoder(dev, encoder) {
1229 +@@ -8260,8 +8262,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1230 + can_ssc = true;
1231 + }
1232 +
1233 +- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
1234 +- has_panel, has_lvds, has_ck505);
1235 ++ /* Check if any DPLLs are using the SSC source */
1236 ++ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1237 ++ u32 temp = I915_READ(PCH_DPLL(i));
1238 ++
1239 ++ if (!(temp & DPLL_VCO_ENABLE))
1240 ++ continue;
1241 ++
1242 ++ if ((temp & PLL_REF_INPUT_MASK) ==
1243 ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1244 ++ using_ssc_source = true;
1245 ++ break;
1246 ++ }
1247 ++ }
1248 ++
1249 ++ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
1250 ++ has_panel, has_lvds, has_ck505, using_ssc_source);
1251 +
1252 + /* Ironlake: try to setup display ref clock before DPLL
1253 + * enabling. This is only under driver's control after
1254 +@@ -8298,9 +8314,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1255 + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
1256 + } else
1257 + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1258 +- } else {
1259 +- final |= DREF_SSC_SOURCE_DISABLE;
1260 +- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1261 ++ } else if (using_ssc_source) {
1262 ++ final |= DREF_SSC_SOURCE_ENABLE;
1263 ++ final |= DREF_SSC1_ENABLE;
1264 + }
1265 +
1266 + if (final == val)
1267 +@@ -8346,7 +8362,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1268 + POSTING_READ(PCH_DREF_CONTROL);
1269 + udelay(200);
1270 + } else {
1271 +- DRM_DEBUG_KMS("Disabling SSC entirely\n");
1272 ++ DRM_DEBUG_KMS("Disabling CPU source output\n");
1273 +
1274 + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1275 +
1276 +@@ -8357,16 +8373,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1277 + POSTING_READ(PCH_DREF_CONTROL);
1278 + udelay(200);
1279 +
1280 +- /* Turn off the SSC source */
1281 +- val &= ~DREF_SSC_SOURCE_MASK;
1282 +- val |= DREF_SSC_SOURCE_DISABLE;
1283 ++ if (!using_ssc_source) {
1284 ++ DRM_DEBUG_KMS("Disabling SSC source\n");
1285 +
1286 +- /* Turn off SSC1 */
1287 +- val &= ~DREF_SSC1_ENABLE;
1288 ++ /* Turn off the SSC source */
1289 ++ val &= ~DREF_SSC_SOURCE_MASK;
1290 ++ val |= DREF_SSC_SOURCE_DISABLE;
1291 +
1292 +- I915_WRITE(PCH_DREF_CONTROL, val);
1293 +- POSTING_READ(PCH_DREF_CONTROL);
1294 +- udelay(200);
1295 ++ /* Turn off SSC1 */
1296 ++ val &= ~DREF_SSC1_ENABLE;
1297 ++
1298 ++ I915_WRITE(PCH_DREF_CONTROL, val);
1299 ++ POSTING_READ(PCH_DREF_CONTROL);
1300 ++ udelay(200);
1301 ++ }
1302 + }
1303 +
1304 + BUG_ON(val != final);
1305 +@@ -9669,6 +9689,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
1306 + sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
1307 + mutex_unlock(&dev_priv->rps.hw_lock);
1308 +
1309 ++ I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
1310 ++
1311 + intel_update_cdclk(dev);
1312 +
1313 + WARN(cdclk != dev_priv->cdclk_freq,
1314 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1315 +index e55a82a99e7f..8e1d6d74c203 100644
1316 +--- a/drivers/gpu/drm/i915/intel_dp.c
1317 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1318 +@@ -3628,8 +3628,7 @@ static bool
1319 + intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
1320 + uint8_t dp_train_pat)
1321 + {
1322 +- if (!intel_dp->train_set_valid)
1323 +- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1324 ++ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1325 + intel_dp_set_signal_levels(intel_dp, DP);
1326 + return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
1327 + }
1328 +@@ -3746,22 +3745,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
1329 + break;
1330 + }
1331 +
1332 +- /*
1333 +- * if we used previously trained voltage and pre-emphasis values
1334 +- * and we don't get clock recovery, reset link training values
1335 +- */
1336 +- if (intel_dp->train_set_valid) {
1337 +- DRM_DEBUG_KMS("clock recovery not ok, reset");
1338 +- /* clear the flag as we are not reusing train set */
1339 +- intel_dp->train_set_valid = false;
1340 +- if (!intel_dp_reset_link_train(intel_dp, &DP,
1341 +- DP_TRAINING_PATTERN_1 |
1342 +- DP_LINK_SCRAMBLING_DISABLE)) {
1343 +- DRM_ERROR("failed to enable link training\n");
1344 +- return;
1345 +- }
1346 +- continue;
1347 +- }
1348 +
1349 + /* Check to see if we've tried the max voltage */
1350 + for (i = 0; i < intel_dp->lane_count; i++)
1351 +@@ -3854,7 +3837,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1352 + /* Make sure clock is still ok */
1353 + if (!drm_dp_clock_recovery_ok(link_status,
1354 + intel_dp->lane_count)) {
1355 +- intel_dp->train_set_valid = false;
1356 + intel_dp_link_training_clock_recovery(intel_dp);
1357 + intel_dp_set_link_train(intel_dp, &DP,
1358 + training_pattern |
1359 +@@ -3871,7 +3853,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1360 +
1361 + /* Try 5 times, then try clock recovery if that fails */
1362 + if (tries > 5) {
1363 +- intel_dp->train_set_valid = false;
1364 + intel_dp_link_training_clock_recovery(intel_dp);
1365 + intel_dp_set_link_train(intel_dp, &DP,
1366 + training_pattern |
1367 +@@ -3893,10 +3874,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1368 +
1369 + intel_dp->DP = DP;
1370 +
1371 +- if (channel_eq) {
1372 +- intel_dp->train_set_valid = true;
1373 ++ if (channel_eq)
1374 + DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1375 +- }
1376 + }
1377 +
1378 + void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1379 +@@ -5079,13 +5058,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
1380 +
1381 + void intel_dp_encoder_reset(struct drm_encoder *encoder)
1382 + {
1383 +- struct intel_dp *intel_dp;
1384 ++ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
1385 ++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1386 ++
1387 ++ if (!HAS_DDI(dev_priv))
1388 ++ intel_dp->DP = I915_READ(intel_dp->output_reg);
1389 +
1390 + if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
1391 + return;
1392 +
1393 +- intel_dp = enc_to_intel_dp(encoder);
1394 +-
1395 + pps_lock(intel_dp);
1396 +
1397 + /*
1398 +@@ -5157,9 +5138,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1399 + intel_display_power_get(dev_priv, power_domain);
1400 +
1401 + if (long_hpd) {
1402 +- /* indicate that we need to restart link training */
1403 +- intel_dp->train_set_valid = false;
1404 +-
1405 + if (!intel_digital_port_connected(dev_priv, intel_dig_port))
1406 + goto mst_fail;
1407 +
1408 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1409 +index f34a219ec5c4..c5f11e0c5d5b 100644
1410 +--- a/drivers/gpu/drm/i915/intel_drv.h
1411 ++++ b/drivers/gpu/drm/i915/intel_drv.h
1412 +@@ -783,7 +783,6 @@ struct intel_dp {
1413 + bool has_aux_irq,
1414 + int send_bytes,
1415 + uint32_t aux_clock_divider);
1416 +- bool train_set_valid;
1417 +
1418 + /* Displayport compliance testing */
1419 + unsigned long compliance_test_type;
1420 +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1421 +index c99d3fe12881..e5bb40e58020 100644
1422 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1423 ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1424 +@@ -194,7 +194,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1425 + }
1426 + }
1427 +
1428 +- fvv = pllreffreq * testn / testm;
1429 ++ fvv = pllreffreq * (n + 1) / (m + 1);
1430 + fvv = (fvv - 800000) / 50000;
1431 +
1432 + if (fvv > 15)
1433 +@@ -214,6 +214,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1434 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
1435 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
1436 + WREG_DAC(MGA1064_PIX_PLLC_P, p);
1437 ++
1438 ++ if (mdev->unique_rev_id >= 0x04) {
1439 ++ WREG_DAC(0x1a, 0x09);
1440 ++ msleep(20);
1441 ++ WREG_DAC(0x1a, 0x01);
1442 ++
1443 ++ }
1444 ++
1445 + return 0;
1446 + }
1447 +
1448 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1449 +index 59f27e774acb..e40a1b07a014 100644
1450 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1451 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1452 +@@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
1453 + if (ret)
1454 + goto fini;
1455 +
1456 ++ if (fbcon->helper.fbdev)
1457 ++ fbcon->helper.fbdev->pixmap.buf_align = 4;
1458 + return 0;
1459 +
1460 + fini:
1461 +diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1462 +index 789dc2993b0d..8f715feadf56 100644
1463 +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
1464 ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1465 +@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1466 + uint32_t fg;
1467 + uint32_t bg;
1468 + uint32_t dsize;
1469 +- uint32_t width;
1470 + uint32_t *data = (uint32_t *)image->data;
1471 + int ret;
1472 +
1473 +@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1474 + if (ret)
1475 + return ret;
1476 +
1477 +- width = ALIGN(image->width, 8);
1478 +- dsize = ALIGN(width * image->height, 32) >> 5;
1479 +-
1480 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1481 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1482 + fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
1483 +@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1484 + ((image->dx + image->width) & 0xffff));
1485 + OUT_RING(chan, bg);
1486 + OUT_RING(chan, fg);
1487 +- OUT_RING(chan, (image->height << 16) | width);
1488 ++ OUT_RING(chan, (image->height << 16) | image->width);
1489 + OUT_RING(chan, (image->height << 16) | image->width);
1490 + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
1491 +
1492 ++ dsize = ALIGN(image->width * image->height, 32) >> 5;
1493 + while (dsize) {
1494 + int iter_len = dsize > 128 ? 128 : dsize;
1495 +
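The dword count for the 1bpp glyph bitmap is now derived from the raw image width, matching the dimensions programmed into the blit a few rings earlier, instead of from a width padded to 8 pixels. A standalone sketch of the size arithmetic, with the kernel's ALIGN() reimplemented locally and an arbitrary 12x22 glyph as the example:

/*
 * Sketch of the dword count computed above for a 1bpp glyph bitmap:
 * width * height bits, rounded up to a multiple of 32, then divided
 * by 32 (>> 5). The 12x22 glyph size is an arbitrary example.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 12, height = 22;		/* hypothetical glyph */
	unsigned int bits  = width * height;		/* 264 bits of bitmap */
	unsigned int dsize = ALIGN(bits, 32) >> 5;	/* 288 / 32 = 9 dwords */

	printf("%u bits -> %u dwords\n", bits, dsize);
	return 0;
}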
1496 +diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1497 +index e05499d6ed83..a4e259a00430 100644
1498 +--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
1499 ++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1500 +@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1501 + struct nouveau_fbdev *nfbdev = info->par;
1502 + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
1503 + struct nouveau_channel *chan = drm->channel;
1504 +- uint32_t width, dwords, *data = (uint32_t *)image->data;
1505 ++ uint32_t dwords, *data = (uint32_t *)image->data;
1506 + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
1507 + uint32_t *palette = info->pseudo_palette;
1508 + int ret;
1509 +@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1510 + if (ret)
1511 + return ret;
1512 +
1513 +- width = ALIGN(image->width, 32);
1514 +- dwords = (width * image->height) >> 5;
1515 +-
1516 + BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
1517 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1518 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1519 +@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1520 + OUT_RING(chan, 0);
1521 + OUT_RING(chan, image->dy);
1522 +
1523 ++ dwords = ALIGN(image->width * image->height, 32) >> 5;
1524 + while (dwords) {
1525 + int push = dwords > 2047 ? 2047 : dwords;
1526 +
1527 +diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1528 +index c97395b4a312..f28315e865a5 100644
1529 +--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1530 ++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1531 +@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1532 + struct nouveau_fbdev *nfbdev = info->par;
1533 + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
1534 + struct nouveau_channel *chan = drm->channel;
1535 +- uint32_t width, dwords, *data = (uint32_t *)image->data;
1536 ++ uint32_t dwords, *data = (uint32_t *)image->data;
1537 + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
1538 + uint32_t *palette = info->pseudo_palette;
1539 + int ret;
1540 +@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1541 + if (ret)
1542 + return ret;
1543 +
1544 +- width = ALIGN(image->width, 32);
1545 +- dwords = (width * image->height) >> 5;
1546 +-
1547 + BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
1548 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1549 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1550 +@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1551 + OUT_RING (chan, 0);
1552 + OUT_RING (chan, image->dy);
1553 +
1554 ++ dwords = ALIGN(image->width * image->height, 32) >> 5;
1555 + while (dwords) {
1556 + int push = dwords > 2047 ? 2047 : dwords;
1557 +
1558 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1559 +index b4b41b135643..2aaf0dd19a55 100644
1560 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1561 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1562 +@@ -40,8 +40,8 @@ static int
1563 + gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
1564 + {
1565 + struct nvkm_device *device = outp->base.disp->engine.subdev.device;
1566 +- const u32 loff = gf119_sor_loff(outp);
1567 +- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
1568 ++ const u32 soff = gf119_sor_soff(outp);
1569 ++ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
1570 + return 0;
1571 + }
1572 +
1573 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1574 +index 36655a74c538..eeeea1c2ca23 100644
1575 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1576 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1577 +@@ -874,22 +874,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
1578 + }
1579 +
1580 + static const struct nvkm_enum gf100_mp_warp_error[] = {
1581 +- { 0x00, "NO_ERROR" },
1582 +- { 0x01, "STACK_MISMATCH" },
1583 ++ { 0x01, "STACK_ERROR" },
1584 ++ { 0x02, "API_STACK_ERROR" },
1585 ++ { 0x03, "RET_EMPTY_STACK_ERROR" },
1586 ++ { 0x04, "PC_WRAP" },
1587 + { 0x05, "MISALIGNED_PC" },
1588 +- { 0x08, "MISALIGNED_GPR" },
1589 +- { 0x09, "INVALID_OPCODE" },
1590 +- { 0x0d, "GPR_OUT_OF_BOUNDS" },
1591 +- { 0x0e, "MEM_OUT_OF_BOUNDS" },
1592 +- { 0x0f, "UNALIGNED_MEM_ACCESS" },
1593 ++ { 0x06, "PC_OVERFLOW" },
1594 ++ { 0x07, "MISALIGNED_IMMC_ADDR" },
1595 ++ { 0x08, "MISALIGNED_REG" },
1596 ++ { 0x09, "ILLEGAL_INSTR_ENCODING" },
1597 ++ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
1598 ++ { 0x0b, "ILLEGAL_INSTR_PARAM" },
1599 ++ { 0x0c, "INVALID_CONST_ADDR" },
1600 ++ { 0x0d, "OOR_REG" },
1601 ++ { 0x0e, "OOR_ADDR" },
1602 ++ { 0x0f, "MISALIGNED_ADDR" },
1603 + { 0x10, "INVALID_ADDR_SPACE" },
1604 +- { 0x11, "INVALID_PARAM" },
1605 ++ { 0x11, "ILLEGAL_INSTR_PARAM2" },
1606 ++ { 0x12, "INVALID_CONST_ADDR_LDC" },
1607 ++ { 0x13, "GEOMETRY_SM_ERROR" },
1608 ++ { 0x14, "DIVERGENT" },
1609 ++ { 0x15, "WARP_EXIT" },
1610 + {}
1611 + };
1612 +
1613 + static const struct nvkm_bitfield gf100_mp_global_error[] = {
1614 ++ { 0x00000001, "SM_TO_SM_FAULT" },
1615 ++ { 0x00000002, "L1_ERROR" },
1616 + { 0x00000004, "MULTIPLE_WARP_ERRORS" },
1617 +- { 0x00000008, "OUT_OF_STACK_SPACE" },
1618 ++ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
1619 ++ { 0x00000010, "BPT_INT" },
1620 ++ { 0x00000020, "BPT_PAUSE" },
1621 ++ { 0x00000040, "SINGLE_STEP_COMPLETE" },
1622 ++ { 0x20000000, "ECC_SEC_ERROR" },
1623 ++ { 0x40000000, "ECC_DED_ERROR" },
1624 ++ { 0x80000000, "TIMEOUT" },
1625 + {}
1626 + };
1627 +
1628 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1629 +index c566993a2ec3..e2dd5d19c32c 100644
1630 +--- a/drivers/gpu/drm/radeon/radeon_device.c
1631 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
1632 +@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1633 + /*
1634 + * GPU helpers function.
1635 + */
1636 ++
1637 ++/**
1638 ++ * radeon_device_is_virtual - check if we are running in a virtual environment
1639 ++ *
1640 ++ * Check if the asic has been passed through to a VM (all asics).
1641 ++ * Used at driver startup.
1642 ++ * Returns true if virtual or false if not.
1643 ++ */
1644 ++static bool radeon_device_is_virtual(void)
1645 ++{
1646 ++#ifdef CONFIG_X86
1647 ++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1648 ++#else
1649 ++ return false;
1650 ++#endif
1651 ++}
1652 ++
1653 + /**
1654 + * radeon_card_posted - check if the hw has already been initialized
1655 + *
1656 +@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
1657 + {
1658 + uint32_t reg;
1659 +
1660 ++ /* for pass through, always force asic_init */
1661 ++ if (radeon_device_is_virtual())
1662 ++ return false;
1663 ++
1664 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
1665 + if (efi_enabled(EFI_BOOT) &&
1666 + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
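boot_cpu_has(X86_FEATURE_HYPERVISOR) reflects CPUID leaf 1, ECX bit 31, which hypervisors set for their guests; a GPU passed through to a VM is not POSTed by the host BIOS, hence the forced asic_init above. A userspace sketch of the same CPU check, assuming GCC or Clang on x86 (this is an analogue, not the kernel's code path):

/*
 * Userspace analogue of the hypervisor check used above: CPUID leaf 1
 * sets ECX bit 31 when running as a hypervisor guest.
 */
#include <stdio.h>
#include <stdbool.h>
#include <cpuid.h>

static bool running_in_vm(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	return ecx & (1u << 31);
}

int main(void)
{
	printf("hypervisor: %s\n", running_in_vm() ? "yes" : "no");
	return 0;
}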
1667 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1668 +index 745e996d2dbc..4ae8b56b1847 100644
1669 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
1670 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
1671 +@@ -1004,9 +1004,9 @@ out_unlock:
1672 + return ret;
1673 + }
1674 +
1675 +-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1676 +- struct ttm_mem_reg *mem,
1677 +- uint32_t *new_flags)
1678 ++bool ttm_bo_mem_compat(struct ttm_placement *placement,
1679 ++ struct ttm_mem_reg *mem,
1680 ++ uint32_t *new_flags)
1681 + {
1682 + int i;
1683 +
1684 +@@ -1038,6 +1038,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1685 +
1686 + return false;
1687 + }
1688 ++EXPORT_SYMBOL(ttm_bo_mem_compat);
1689 +
1690 + int ttm_bo_validate(struct ttm_buffer_object *bo,
1691 + struct ttm_placement *placement,
1692 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1693 +index 299925a1f6c6..eadc981ee79a 100644
1694 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1695 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1696 +@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
1697 + {
1698 + struct ttm_buffer_object *bo = &buf->base;
1699 + int ret;
1700 ++ uint32_t new_flags;
1701 +
1702 + ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1703 + if (unlikely(ret != 0))
1704 +@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
1705 + if (unlikely(ret != 0))
1706 + goto err;
1707 +
1708 +- ret = ttm_bo_validate(bo, placement, interruptible, false);
1709 ++ if (buf->pin_count > 0)
1710 ++ ret = ttm_bo_mem_compat(placement, &bo->mem,
1711 ++ &new_flags) == true ? 0 : -EINVAL;
1712 ++ else
1713 ++ ret = ttm_bo_validate(bo, placement, interruptible, false);
1714 ++
1715 + if (!ret)
1716 + vmw_bo_pin_reserved(buf, true);
1717 +
1718 +@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
1719 + {
1720 + struct ttm_buffer_object *bo = &buf->base;
1721 + int ret;
1722 ++ uint32_t new_flags;
1723 +
1724 + ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1725 + if (unlikely(ret != 0))
1726 +@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
1727 + if (unlikely(ret != 0))
1728 + goto err;
1729 +
1730 ++ if (buf->pin_count > 0) {
1731 ++ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
1732 ++ &new_flags) == true ? 0 : -EINVAL;
1733 ++ goto out_unreserve;
1734 ++ }
1735 ++
1736 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
1737 + false);
1738 + if (likely(ret == 0) || ret == -ERESTARTSYS)
1739 +@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
1740 + struct ttm_placement placement;
1741 + struct ttm_place place;
1742 + int ret = 0;
1743 ++ uint32_t new_flags;
1744 +
1745 + place = vmw_vram_placement.placement[0];
1746 + place.lpfn = bo->num_pages;
1747 +@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
1748 + */
1749 + if (bo->mem.mem_type == TTM_PL_VRAM &&
1750 + bo->mem.start < bo->num_pages &&
1751 +- bo->mem.start > 0)
1752 ++ bo->mem.start > 0 &&
1753 ++ buf->pin_count == 0)
1754 + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
1755 +
1756 +- ret = ttm_bo_validate(bo, &placement, interruptible, false);
1757 ++ if (buf->pin_count > 0)
1758 ++ ret = ttm_bo_mem_compat(&placement, &bo->mem,
1759 ++ &new_flags) == true ? 0 : -EINVAL;
1760 ++ else
1761 ++ ret = ttm_bo_validate(bo, &placement, interruptible, false);
1762 +
1763 + /* For some reason we didn't end up at the start of vram */
1764 + WARN_ON(ret == 0 && bo->offset != 0);
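A pinned buffer must not migrate, so instead of re-running ttm_bo_validate(), which could move it, the code now only asks whether the current placement already satisfies the request and fails with -EINVAL otherwise. A toy standalone sketch of that decision, with is_compat() standing in for ttm_bo_mem_compat() and bitmask placements as a simplification:

/*
 * Sketch of the pin-aware decision added above: an already-pinned
 * buffer may only be accepted as-is, never moved.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool is_compat(int cur_placement, int wanted_mask)
{
	return cur_placement & wanted_mask;
}

static int pin_in_placement(int pin_count, int cur, int wanted)
{
	if (pin_count > 0)
		return is_compat(cur, wanted) ? 0 : -EINVAL;
	/* not pinned: a real driver would migrate via ttm_bo_validate() */
	return 0;
}

int main(void)
{
	printf("%d\n", pin_in_placement(1, 0x1, 0x2));	/* -EINVAL */
	printf("%d\n", pin_in_placement(1, 0x2, 0x2));	/* 0 */
	return 0;
}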
1765 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1766 +index 24fb348a44e1..f3f31f995878 100644
1767 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1768 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1769 +@@ -227,6 +227,7 @@ static int vmw_force_iommu;
1770 + static int vmw_restrict_iommu;
1771 + static int vmw_force_coherent;
1772 + static int vmw_restrict_dma_mask;
1773 ++static int vmw_assume_16bpp;
1774 +
1775 + static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
1776 + static void vmw_master_init(struct vmw_master *);
1777 +@@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
1778 + module_param_named(force_coherent, vmw_force_coherent, int, 0600);
1779 + MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
1780 + module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
1781 ++MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
1782 ++module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
1783 +
1784 +
1785 + static void vmw_print_capabilities(uint32_t capabilities)
1786 +@@ -652,6 +655,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
1787 + dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
1788 + dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
1789 +
1790 ++ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
1791 ++
1792 + dev_priv->enable_fb = enable_fbdev;
1793 +
1794 + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1795 +@@ -698,6 +703,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
1796 + vmw_read(dev_priv,
1797 + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
1798 +
1799 ++ /*
1800 ++ * Workaround for low memory 2D VMs to compensate for the
1801 ++ * allocation taken by fbdev
1802 ++ */
1803 ++ if (!(dev_priv->capabilities & SVGA_CAP_3D))
1804 ++ mem_size *= 2;
1805 ++
1806 + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
1807 + dev_priv->prim_bb_mem =
1808 + vmw_read(dev_priv,
1809 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1810 +index 469cdd520615..2e94fe27b3f6 100644
1811 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1812 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1813 +@@ -387,6 +387,7 @@ struct vmw_private {
1814 + spinlock_t hw_lock;
1815 + spinlock_t cap_lock;
1816 + bool has_dx;
1817 ++ bool assume_16bpp;
1818 +
1819 + /*
1820 + * VGA registers.
1821 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1822 +index 679a4cb98ee3..d2d93959b119 100644
1823 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1824 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1825 +@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
1826 +
1827 + par->set_fb = &vfb->base;
1828 +
1829 +- if (!par->bo_ptr) {
1830 +- /*
1831 +- * Pin before mapping. Since we don't know in what placement
1832 +- * to pin, call into KMS to do it for us.
1833 +- */
1834 +- ret = vfb->pin(vfb);
1835 +- if (ret) {
1836 +- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
1837 +- return ret;
1838 +- }
1839 +-
1840 +- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
1841 +- par->vmw_bo->base.num_pages, &par->map);
1842 +- if (ret) {
1843 +- vfb->unpin(vfb);
1844 +- DRM_ERROR("Could not map the fbdev framebuffer.\n");
1845 +- return ret;
1846 +- }
1847 +-
1848 +- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
1849 +- }
1850 +-
1851 + return 0;
1852 + }
1853 +
1854 +@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
1855 + if (ret)
1856 + goto out_unlock;
1857 +
1858 ++ if (!par->bo_ptr) {
1859 ++ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
1860 ++
1861 ++ /*
1862 ++ * Pin before mapping. Since we don't know in what placement
1863 ++ * to pin, call into KMS to do it for us.
1864 ++ */
1865 ++ ret = vfb->pin(vfb);
1866 ++ if (ret) {
1867 ++ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
1868 ++ goto out_unlock;
1869 ++ }
1870 ++
1871 ++ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
1872 ++ par->vmw_bo->base.num_pages, &par->map);
1873 ++ if (ret) {
1874 ++ vfb->unpin(vfb);
1875 ++ DRM_ERROR("Could not map the fbdev framebuffer.\n");
1876 ++ goto out_unlock;
1877 ++ }
1878 ++
1879 ++ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
1880 ++ }
1881 ++
1882 ++
1883 + vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
1884 + par->set_fb->width, par->set_fb->height);
1885 +
1886 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1887 +index 7c2e118a77b0..060e5c6f4446 100644
1888 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1889 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1890 +@@ -1538,14 +1538,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1891 + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1892 + };
1893 + int i;
1894 +- u32 assumed_bpp = 2;
1895 ++ u32 assumed_bpp = 4;
1896 +
1897 +- /*
1898 +- * If using screen objects, then assume 32-bpp because that's what the
1899 +- * SVGA device is assuming
1900 +- */
1901 +- if (dev_priv->active_display_unit == vmw_du_screen_object)
1902 +- assumed_bpp = 4;
1903 ++ if (dev_priv->assume_16bpp)
1904 ++ assumed_bpp = 2;
1905 +
1906 + if (dev_priv->active_display_unit == vmw_du_screen_target) {
1907 + max_width = min(max_width, dev_priv->stdu_max_width);
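assumed_bpp feeds the mode filter later in this function: a candidate resolution survives roughly when width * height * assumed_bpp fits in the device's primary surface memory, so opting in to the 16bpp assumption admits larger modes on low-memory VMs. A standalone sketch of that cutoff, with a made-up 8 MiB limit:

/*
 * Sketch of the mode-filter arithmetic assumed_bpp feeds into. The
 * 8 MiB primary surface memory figure is illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

static bool mode_fits(unsigned w, unsigned h, unsigned assumed_bpp,
		      unsigned long prim_bb_mem)
{
	return (unsigned long)w * h * assumed_bpp <= prim_bb_mem;
}

int main(void)
{
	unsigned long prim_bb_mem = 8UL << 20;	/* hypothetical 8 MiB */

	printf("1920x1200 @4Bpp fits: %d\n",
	       mode_fits(1920, 1200, 4, prim_bb_mem));	/* 0 */
	printf("1920x1200 @2Bpp fits: %d\n",
	       mode_fits(1920, 1200, 2, prim_bb_mem));	/* 1 */
	return 0;
}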
1908 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
1909 +index aad8c162a825..0cd4f7216239 100644
1910 +--- a/drivers/hid/hid-elo.c
1911 ++++ b/drivers/hid/hid-elo.c
1912 +@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
1913 + struct elo_priv *priv = hid_get_drvdata(hdev);
1914 +
1915 + hid_hw_stop(hdev);
1916 +- flush_workqueue(wq);
1917 ++ cancel_delayed_work_sync(&priv->work);
1918 + kfree(priv);
1919 + }
1920 +
1921 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1922 +index c5ec4f915594..f62a9d6601cc 100644
1923 +--- a/drivers/hid/hid-multitouch.c
1924 ++++ b/drivers/hid/hid-multitouch.c
1925 +@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
1926 + #define MT_QUIRK_ALWAYS_VALID (1 << 4)
1927 + #define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
1928 + #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
1929 ++#define MT_QUIRK_CONFIDENCE (1 << 7)
1930 + #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
1931 + #define MT_QUIRK_NO_AREA (1 << 9)
1932 + #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
1933 +@@ -78,6 +79,7 @@ struct mt_slot {
1934 + __s32 contactid; /* the device ContactID assigned to this slot */
1935 + bool touch_state; /* is the touch valid? */
1936 + bool inrange_state; /* is the finger in proximity of the sensor? */
1937 ++ bool confidence_state; /* is the touch made by a finger? */
1938 + };
1939 +
1940 + struct mt_class {
1941 +@@ -502,6 +504,9 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1942 + mt_store_field(usage, td, hi);
1943 + return 1;
1944 + case HID_DG_CONFIDENCE:
1945 ++ if (cls->name == MT_CLS_WIN_8 &&
1946 ++ field->application == HID_DG_TOUCHPAD)
1947 ++ cls->quirks |= MT_QUIRK_CONFIDENCE;
1948 + mt_store_field(usage, td, hi);
1949 + return 1;
1950 + case HID_DG_TIPSWITCH:
1951 +@@ -614,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
1952 + return;
1953 +
1954 + if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
1955 ++ int active;
1956 + int slotnum = mt_compute_slot(td, input);
1957 + struct mt_slot *s = &td->curdata;
1958 + struct input_mt *mt = input->mt;
1959 +@@ -628,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
1960 + return;
1961 + }
1962 +
1963 ++ if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
1964 ++ s->confidence_state = 1;
1965 ++ active = (s->touch_state || s->inrange_state) &&
1966 ++ s->confidence_state;
1967 ++
1968 + input_mt_slot(input, slotnum);
1969 +- input_mt_report_slot_state(input, MT_TOOL_FINGER,
1970 +- s->touch_state || s->inrange_state);
1971 +- if (s->touch_state || s->inrange_state) {
1972 ++ input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
1973 ++ if (active) {
1974 + /* this finger is in proximity of the sensor */
1975 + int wide = (s->w > s->h);
1976 + /* divided by two to match visual scale of touch */
1977 +@@ -696,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
1978 + td->curdata.touch_state = value;
1979 + break;
1980 + case HID_DG_CONFIDENCE:
1981 ++ if (quirks & MT_QUIRK_CONFIDENCE)
1982 ++ td->curdata.confidence_state = value;
1983 + if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
1984 + td->curvalid = value;
1985 + break;
1986 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
1987 +index 2f1ddca6f2e0..700145b15088 100644
1988 +--- a/drivers/hid/usbhid/hiddev.c
1989 ++++ b/drivers/hid/usbhid/hiddev.c
1990 +@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
1991 + goto inval;
1992 + } else if (uref->usage_index >= field->report_count)
1993 + goto inval;
1994 +-
1995 +- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
1996 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
1997 +- uref->usage_index + uref_multi->num_values > field->report_count))
1998 +- goto inval;
1999 + }
2000 +
2001 ++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2002 ++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2003 ++ uref->usage_index + uref_multi->num_values > field->report_count))
2004 ++ goto inval;
2005 ++
2006 + switch (cmd) {
2007 + case HIDIOCGUSAGE:
2008 + uref->value = field->value[uref->usage_index];
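Hoisting the range check out of the else branch means the multi-usage ioctls are bounds-checked on every path, not only the one that was previously reached; the predicate simply keeps [usage_index, usage_index + num_values) inside the report. A standalone sketch of that check, with an illustrative stand-in for HID_MAX_MULTI_USAGES:

/*
 * Sketch of the range check hoisted above: every index touched by a
 * multi-usage ioctl must fall inside the report.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_MULTI_USAGES 1024	/* stand-in for HID_MAX_MULTI_USAGES */

static bool multi_usage_ok(unsigned idx, unsigned num, unsigned report_count)
{
	return num <= MAX_MULTI_USAGES && idx + num <= report_count;
}

int main(void)
{
	printf("%d\n", multi_usage_ok(4, 8, 16));	/* 1: fits */
	printf("%d\n", multi_usage_ok(12, 8, 16));	/* 0: past the end */
	return 0;
}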
2009 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
2010 +index c43318d3416e..a9356a3dea92 100644
2011 +--- a/drivers/hwmon/dell-smm-hwmon.c
2012 ++++ b/drivers/hwmon/dell-smm-hwmon.c
2013 +@@ -66,11 +66,13 @@
2014 +
2015 + static DEFINE_MUTEX(i8k_mutex);
2016 + static char bios_version[4];
2017 ++static char bios_machineid[16];
2018 + static struct device *i8k_hwmon_dev;
2019 + static u32 i8k_hwmon_flags;
2020 + static uint i8k_fan_mult = I8K_FAN_MULT;
2021 + static uint i8k_pwm_mult;
2022 + static uint i8k_fan_max = I8K_FAN_HIGH;
2023 ++static bool disallow_fan_type_call;
2024 +
2025 + #define I8K_HWMON_HAVE_TEMP1 (1 << 0)
2026 + #define I8K_HWMON_HAVE_TEMP2 (1 << 1)
2027 +@@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0);
2028 + MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
2029 +
2030 + #if IS_ENABLED(CONFIG_I8K)
2031 +-static bool restricted;
2032 ++static bool restricted = true;
2033 + module_param(restricted, bool, 0);
2034 +-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
2035 ++MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
2036 +
2037 + static bool power_status;
2038 + module_param(power_status, bool, 0600);
2039 +-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
2040 ++MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
2041 + #endif
2042 +
2043 + static uint fan_mult;
2044 +@@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan)
2045 + /*
2046 + * Read the fan type.
2047 + */
2048 +-static int i8k_get_fan_type(int fan)
2049 ++static int _i8k_get_fan_type(int fan)
2050 + {
2051 + struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
2052 +
2053 ++ if (disallow_fan_type_call)
2054 ++ return -EINVAL;
2055 ++
2056 + regs.ebx = fan & 0xff;
2057 + return i8k_smm(&regs) ? : regs.eax & 0xff;
2058 + }
2059 +
2060 ++static int i8k_get_fan_type(int fan)
2061 ++{
2062 ++ /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
2063 ++ static int types[2] = { INT_MIN, INT_MIN };
2064 ++
2065 ++ if (types[fan] == INT_MIN)
2066 ++ types[fan] = _i8k_get_fan_type(fan);
2067 ++
2068 ++ return types[fan];
2069 ++}
2070 ++
2071 + /*
2072 + * Read the fan nominal rpm for specific fan speed.
2073 + */
2074 +@@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
2075 + break;
2076 +
2077 + case I8K_MACHINE_ID:
2078 +- memset(buff, 0, 16);
2079 +- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2080 +- sizeof(buff));
2081 ++ if (restricted && !capable(CAP_SYS_ADMIN))
2082 ++ return -EPERM;
2083 ++
2084 ++ memset(buff, 0, sizeof(buff));
2085 ++ strlcpy(buff, bios_machineid, sizeof(buff));
2086 + break;
2087 +
2088 + case I8K_FN_STATUS:
2089 +@@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
2090 + seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
2091 + I8K_PROC_FMT,
2092 + bios_version,
2093 +- i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2094 ++ (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
2095 + cpu_temp,
2096 + left_fan, right_fan, left_speed, right_speed,
2097 + ac_power, fn_key);
2098 +@@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = {
2099 + static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
2100 + int index)
2101 + {
2102 ++ if (disallow_fan_type_call &&
2103 ++ (index == 9 || index == 12))
2104 ++ return 0;
2105 + if (index >= 0 && index <= 1 &&
2106 + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
2107 + return 0;
2108 +@@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void)
2109 + if (err >= 0)
2110 + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
2111 +
2112 +- /* First fan attributes, if fan type is OK */
2113 +- err = i8k_get_fan_type(0);
2114 ++ /* First fan attributes, if fan status or type is OK */
2115 ++ err = i8k_get_fan_status(0);
2116 ++ if (err < 0)
2117 ++ err = i8k_get_fan_type(0);
2118 + if (err >= 0)
2119 + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
2120 +
2121 +- /* Second fan attributes, if fan type is OK */
2122 +- err = i8k_get_fan_type(1);
2123 ++ /* Second fan attributes, if fan status or type is OK */
2124 ++ err = i8k_get_fan_status(1);
2125 ++ if (err < 0)
2126 ++ err = i8k_get_fan_type(1);
2127 + if (err >= 0)
2128 + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
2129 +
2130 +@@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
2131 +
2132 + MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
2133 +
2134 +-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2135 ++/*
2136 ++ * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
2137 ++ * starts going up and down randomly due to a bug in the Dell SMM or BIOS.
2138 ++ * Here is a blacklist of affected Dell machines for which we disallow the
2139 ++ * I8K_SMM_GET_FAN_TYPE call. See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
2140 ++ */
2141 ++static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
2142 + {
2143 +- /*
2144 +- * CPU fan speed going up and down on Dell Studio XPS 8000
2145 +- * for unknown reasons.
2146 +- */
2147 + .ident = "Dell Studio XPS 8000",
2148 + .matches = {
2149 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2150 +@@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2151 + },
2152 + },
2153 + {
2154 +- /*
2155 +- * CPU fan speed going up and down on Dell Studio XPS 8100
2156 +- * for unknown reasons.
2157 +- */
2158 + .ident = "Dell Studio XPS 8100",
2159 + .matches = {
2160 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2161 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
2162 + },
2163 + },
2164 ++ {
2165 ++ .ident = "Dell Inspiron 580",
2166 ++ .matches = {
2167 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2168 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
2169 ++ },
2170 ++ },
2171 + { }
2172 + };
2173 +
2174 +@@ -966,8 +996,7 @@ static int __init i8k_probe(void)
2175 + /*
2176 + * Get DMI information
2177 + */
2178 +- if (!dmi_check_system(i8k_dmi_table) ||
2179 +- dmi_check_system(i8k_blacklist_dmi_table)) {
2180 ++ if (!dmi_check_system(i8k_dmi_table)) {
2181 + if (!ignore_dmi && !force)
2182 + return -ENODEV;
2183 +
2184 +@@ -978,8 +1007,13 @@ static int __init i8k_probe(void)
2185 + i8k_get_dmi_data(DMI_BIOS_VERSION));
2186 + }
2187 +
2188 ++ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
2189 ++ disallow_fan_type_call = true;
2190 ++
2191 + strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
2192 + sizeof(bios_version));
2193 ++ strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2194 ++ sizeof(bios_machineid));
2195 +
2196 + /*
2197 + * Get SMM Dell signature
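The INT_MIN sentinel in i8k_get_fan_type() above means the expensive SMM call runs at most once per fan for the life of the module. A standalone sketch of the same memoization idiom, with slow_query() standing in for the real SMM call:

/*
 * Sketch of the caching idiom used above: INT_MIN marks "not asked
 * yet", so the slow call happens at most once per fan.
 */
#include <stdio.h>
#include <limits.h>

static int slow_query(int fan)
{
	printf("expensive call for fan %d\n", fan);
	return fan;	/* pretend fan type */
}

static int get_fan_type_cached(int fan)
{
	static int types[2] = { INT_MIN, INT_MIN };

	if (types[fan] == INT_MIN)
		types[fan] = slow_query(fan);
	return types[fan];
}

int main(void)
{
	get_fan_type_cached(0);
	get_fan_type_cached(0);	/* hits the cache, no second print */
	return 0;
}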
2198 +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
2199 +index 923f56598d4b..3a9f106787d2 100644
2200 +--- a/drivers/iio/accel/kxsd9.c
2201 ++++ b/drivers/iio/accel/kxsd9.c
2202 +@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
2203 +
2204 + mutex_lock(&st->buf_lock);
2205 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
2206 +- if (ret)
2207 ++ if (ret < 0)
2208 + goto error_ret;
2209 + st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
2210 + st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
2211 +@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
2212 + break;
2213 + case IIO_CHAN_INFO_SCALE:
2214 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
2215 +- if (ret)
2216 ++ if (ret < 0)
2217 + goto error_ret;
2218 + *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
2219 + ret = IIO_VAL_INT_PLUS_MICRO;
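spi_w8r8() returns the register byte (0..255) on success and a negative errno on failure, so the old "if (ret)" treated any nonzero register content as an error and bailed out. A standalone sketch of that return convention, with fake_w8r8() standing in for the SPI helper:

/*
 * Sketch of the return convention behind the fix above: a combined
 * write-then-read helper returns the data byte (>= 0) on success or
 * -errno on failure, so only "ret < 0" means error.
 */
#include <stdio.h>
#include <errno.h>

static int fake_w8r8(int fail)
{
	return fail ? -EIO : 0x5a;	/* 0x5a is valid data, not an error */
}

int main(void)
{
	int ret = fake_w8r8(0);

	if (ret < 0)			/* correct: only negatives are errors */
		printf("error %d\n", ret);
	else
		printf("register = 0x%02x\n", ret);
	return 0;
}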
2220 +diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
2221 +index 21e19b60e2b9..2123f0ac2e2a 100644
2222 +--- a/drivers/iio/adc/ad7266.c
2223 ++++ b/drivers/iio/adc/ad7266.c
2224 +@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
2225 +
2226 + st = iio_priv(indio_dev);
2227 +
2228 +- st->reg = devm_regulator_get(&spi->dev, "vref");
2229 +- if (!IS_ERR_OR_NULL(st->reg)) {
2230 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
2231 ++ if (!IS_ERR(st->reg)) {
2232 + ret = regulator_enable(st->reg);
2233 + if (ret)
2234 + return ret;
2235 +@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
2236 +
2237 + st->vref_mv = ret / 1000;
2238 + } else {
2239 ++ /* Any other error indicates that the regulator does exist */
2240 ++ if (PTR_ERR(st->reg) != -ENODEV)
2241 ++ return PTR_ERR(st->reg);
2242 + /* Use internal reference */
2243 + st->vref_mv = 2500;
2244 + }
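devm_regulator_get_optional() encodes errors in the pointer itself: ERR_PTR(-ENODEV) means no supply is wired up and the internal reference should be used, while any other encoded error must be propagated. A userspace sketch of the convention, with the <linux/err.h> macros reimplemented locally:

/*
 * Userspace sketch of the ERR_PTR convention behind the hunk above.
 * The macros are local reimplementations of the kernel's.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((int)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)

static void *get_optional_vref(int present)
{
	static int dummy;
	return present ? (void *)&dummy : ERR_PTR(-ENODEV);
}

int main(void)
{
	void *reg = get_optional_vref(0);

	if (!IS_ERR(reg))
		printf("external reference present\n");
	else if (PTR_ERR(reg) == -ENODEV)
		printf("no regulator wired up, using internal 2500 mV\n");
	else
		printf("real error: %d\n", PTR_ERR(reg));
	return 0;
}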
2245 +diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
2246 +index a7f61e881a49..dc5e7e70f951 100644
2247 +--- a/drivers/iio/humidity/hdc100x.c
2248 ++++ b/drivers/iio/humidity/hdc100x.c
2249 +@@ -55,7 +55,7 @@ static const struct {
2250 + },
2251 + { /* IIO_HUMIDITYRELATIVE channel */
2252 + .shift = 8,
2253 +- .mask = 2,
2254 ++ .mask = 3,
2255 + },
2256 + };
2257 +
2258 +@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
2259 + dev_err(&client->dev, "cannot read high byte measurement");
2260 + return ret;
2261 + }
2262 +- val = ret << 6;
2263 ++ val = ret << 8;
2264 +
2265 + ret = i2c_smbus_read_byte(client);
2266 + if (ret < 0) {
2267 + dev_err(&client->dev, "cannot read low byte measurement");
2268 + return ret;
2269 + }
2270 +- val |= ret >> 2;
2271 ++ val |= ret;
2272 +
2273 + return val;
2274 + }
2275 +@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
2276 + return IIO_VAL_INT_PLUS_MICRO;
2277 + case IIO_CHAN_INFO_SCALE:
2278 + if (chan->type == IIO_TEMP) {
2279 +- *val = 165;
2280 +- *val2 = 65536 >> 2;
2281 ++ *val = 165000;
2282 ++ *val2 = 65536;
2283 + return IIO_VAL_FRACTIONAL;
2284 + } else {
2285 +- *val = 0;
2286 +- *val2 = 10000;
2287 +- return IIO_VAL_INT_PLUS_MICRO;
2288 ++ *val = 100;
2289 ++ *val2 = 65536;
2290 ++ return IIO_VAL_FRACTIONAL;
2291 + }
2292 + break;
2293 + case IIO_CHAN_INFO_OFFSET:
2294 +- *val = -3971;
2295 +- *val2 = 879096;
2296 ++ *val = -15887;
2297 ++ *val2 = 515151;
2298 + return IIO_VAL_INT_PLUS_MICRO;
2299 + default:
2300 + return -EINVAL;
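The corrected constants follow from the HDC100x transfer functions T = raw / 2^16 * 165 - 40 (degrees C) and RH = raw / 2^16 * 100: IIO reports (raw + offset) * scale, which pins the temperature scale at 165000/65536 millidegrees per count and the offset at -40000 divided by that scale, about -15887.52. A standalone check of the arithmetic:

/*
 * Worked check of the HDC100x temperature constants above.
 * offset = -40000 / (165000/65536) is roughly -15887.52 counts.
 */
#include <stdio.h>

int main(void)
{
	double scale  = 165000.0 / 65536.0;	/* millidegC per count */
	double offset = -40000.0 / scale;	/* counts */
	unsigned raw  = 30000;			/* arbitrary sample */

	printf("offset = %.4f counts\n", offset);
	printf("T = %.1f millidegC\n", (raw + offset) * scale);
	return 0;
}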
2301 +diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
2302 +index ae2806aafb72..0c52dfe64977 100644
2303 +--- a/drivers/iio/industrialio-trigger.c
2304 ++++ b/drivers/iio/industrialio-trigger.c
2305 +@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
2306 +
2307 + /* Prevent the module from being removed whilst attached to a trigger */
2308 + __module_get(pf->indio_dev->info->driver_module);
2309 ++
2310 ++ /* Get irq number */
2311 + pf->irq = iio_trigger_get_irq(trig);
2312 ++ if (pf->irq < 0)
2313 ++ goto out_put_module;
2314 ++
2315 ++ /* Request irq */
2316 + ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
2317 + pf->type, pf->name,
2318 + pf);
2319 +- if (ret < 0) {
2320 +- module_put(pf->indio_dev->info->driver_module);
2321 +- return ret;
2322 +- }
2323 ++ if (ret < 0)
2324 ++ goto out_put_irq;
2325 +
2326 ++ /* Enable trigger in driver */
2327 + if (trig->ops && trig->ops->set_trigger_state && notinuse) {
2328 + ret = trig->ops->set_trigger_state(trig, true);
2329 + if (ret < 0)
2330 +- module_put(pf->indio_dev->info->driver_module);
2331 ++ goto out_free_irq;
2332 + }
2333 +
2334 + return ret;
2335 ++
2336 ++out_free_irq:
2337 ++ free_irq(pf->irq, pf);
2338 ++out_put_irq:
2339 ++ iio_trigger_put_irq(trig, pf->irq);
2340 ++out_put_module:
2341 ++ module_put(pf->indio_dev->info->driver_module);
2342 ++ return ret;
2343 + }
2344 +
2345 + static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
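The rewrite above replaces duplicated module_put() calls with the kernel's usual goto ladder: each label releases exactly the resources acquired before the failing step, in reverse order. A toy standalone sketch of the idiom (the steps are stand-ins, not IIO calls):

/*
 * Sketch of the goto error-unwinding ladder adopted above: unwind in
 * reverse acquisition order, one label per resource.
 */
#include <stdio.h>

static int step(const char *name, int ok)
{
	printf("%s %s\n", name, ok ? "ok" : "FAILED");
	return ok ? 0 : -1;
}

static int attach(int irq_ok)
{
	if (step("get module", 1))
		return -1;
	if (step("get irq", irq_ok))
		goto out_put_module;
	if (step("request irq", 1))
		goto out_put_irq;
	return 0;

out_put_irq:
	printf("put irq\n");
out_put_module:
	printf("put module\n");
	return -1;
}

int main(void)
{
	attach(0);	/* exercise the "get irq" failure path */
	return 0;
}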
2346 +diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
2347 +index f6a07dc32ae4..4a6d9670e4cd 100644
2348 +--- a/drivers/iio/light/apds9960.c
2349 ++++ b/drivers/iio/light/apds9960.c
2350 +@@ -1005,6 +1005,7 @@ static int apds9960_probe(struct i2c_client *client,
2351 +
2352 + iio_device_attach_buffer(indio_dev, buffer);
2353 +
2354 ++ indio_dev->dev.parent = &client->dev;
2355 + indio_dev->info = &apds9960_info;
2356 + indio_dev->name = APDS9960_DRV_NAME;
2357 + indio_dev->channels = apds9960_channels;
2358 +diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
2359 +index b39a2fb0671c..5056bd68573f 100644
2360 +--- a/drivers/iio/pressure/st_pressure_core.c
2361 ++++ b/drivers/iio/pressure/st_pressure_core.c
2362 +@@ -28,15 +28,21 @@
2363 + #include <linux/iio/common/st_sensors.h>
2364 + #include "st_pressure.h"
2365 +
2366 ++#define MCELSIUS_PER_CELSIUS 1000
2367 ++
2368 ++/* Default pressure sensitivity */
2369 + #define ST_PRESS_LSB_PER_MBAR 4096UL
2370 + #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
2371 + ST_PRESS_LSB_PER_MBAR)
2372 ++
2373 ++/* Default temperature sensitivity */
2374 + #define ST_PRESS_LSB_PER_CELSIUS 480UL
2375 +-#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
2376 +- ST_PRESS_LSB_PER_CELSIUS)
2377 ++#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
2378 ++
2379 + #define ST_PRESS_NUMBER_DATA_CHANNELS 1
2380 +
2381 + /* FULLSCALE */
2382 ++#define ST_PRESS_FS_AVL_1100MB 1100
2383 + #define ST_PRESS_FS_AVL_1260MB 1260
2384 +
2385 + #define ST_PRESS_1_OUT_XL_ADDR 0x28
2386 +@@ -54,18 +60,20 @@
2387 + #define ST_PRESS_LPS331AP_PW_MASK 0x80
2388 + #define ST_PRESS_LPS331AP_FS_ADDR 0x23
2389 + #define ST_PRESS_LPS331AP_FS_MASK 0x30
2390 +-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
2391 +-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
2392 +-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
2393 + #define ST_PRESS_LPS331AP_BDU_ADDR 0x20
2394 + #define ST_PRESS_LPS331AP_BDU_MASK 0x04
2395 + #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
2396 + #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
2397 + #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
2398 + #define ST_PRESS_LPS331AP_MULTIREAD_BIT true
2399 +-#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
2400 +
2401 + /* CUSTOM VALUES FOR LPS001WP SENSOR */
2402 ++
2403 ++/* LPS001WP pressure resolution */
2404 ++#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
2405 ++/* LPS001WP temperature resolution */
2406 ++#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
2407 ++
2408 + #define ST_PRESS_LPS001WP_WAI_EXP 0xba
2409 + #define ST_PRESS_LPS001WP_ODR_ADDR 0x20
2410 + #define ST_PRESS_LPS001WP_ODR_MASK 0x30
2411 +@@ -74,6 +82,8 @@
2412 + #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
2413 + #define ST_PRESS_LPS001WP_PW_ADDR 0x20
2414 + #define ST_PRESS_LPS001WP_PW_MASK 0x40
2415 ++#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
2416 ++ (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
2417 + #define ST_PRESS_LPS001WP_BDU_ADDR 0x20
2418 + #define ST_PRESS_LPS001WP_BDU_MASK 0x04
2419 + #define ST_PRESS_LPS001WP_MULTIREAD_BIT true
2420 +@@ -90,18 +100,12 @@
2421 + #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
2422 + #define ST_PRESS_LPS25H_PW_ADDR 0x20
2423 + #define ST_PRESS_LPS25H_PW_MASK 0x80
2424 +-#define ST_PRESS_LPS25H_FS_ADDR 0x00
2425 +-#define ST_PRESS_LPS25H_FS_MASK 0x00
2426 +-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
2427 +-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
2428 +-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
2429 + #define ST_PRESS_LPS25H_BDU_ADDR 0x20
2430 + #define ST_PRESS_LPS25H_BDU_MASK 0x04
2431 + #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
2432 + #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
2433 + #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
2434 + #define ST_PRESS_LPS25H_MULTIREAD_BIT true
2435 +-#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
2436 + #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
2437 + #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
2438 +
2439 +@@ -153,7 +157,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
2440 + .storagebits = 16,
2441 + .endianness = IIO_LE,
2442 + },
2443 +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
2444 ++ .info_mask_separate =
2445 ++ BIT(IIO_CHAN_INFO_RAW) |
2446 ++ BIT(IIO_CHAN_INFO_SCALE),
2447 + .modified = 0,
2448 + },
2449 + {
2450 +@@ -169,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
2451 + },
2452 + .info_mask_separate =
2453 + BIT(IIO_CHAN_INFO_RAW) |
2454 +- BIT(IIO_CHAN_INFO_OFFSET),
2455 ++ BIT(IIO_CHAN_INFO_SCALE),
2456 + .modified = 0,
2457 + },
2458 + IIO_CHAN_SOFT_TIMESTAMP(1)
2459 +@@ -204,11 +210,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2460 + .addr = ST_PRESS_LPS331AP_FS_ADDR,
2461 + .mask = ST_PRESS_LPS331AP_FS_MASK,
2462 + .fs_avl = {
2463 ++ /*
2464 ++ * Pressure and temperature sensitivity values
2465 ++ * as defined in table 3 of LPS331AP datasheet.
2466 ++ */
2467 + [0] = {
2468 + .num = ST_PRESS_FS_AVL_1260MB,
2469 +- .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
2470 +- .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
2471 +- .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
2472 ++ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
2473 ++ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
2474 + },
2475 + },
2476 + },
2477 +@@ -248,7 +257,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2478 + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
2479 + },
2480 + .fs = {
2481 +- .addr = 0,
2482 ++ .fs_avl = {
2483 ++ /*
2484 ++ * Pressure and temperature resolution values
2485 ++ * as defined in table 3 of LPS001WP datasheet.
2486 ++ */
2487 ++ [0] = {
2488 ++ .num = ST_PRESS_FS_AVL_1100MB,
2489 ++ .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
2490 ++ .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
2491 ++ },
2492 ++ },
2493 + },
2494 + .bdu = {
2495 + .addr = ST_PRESS_LPS001WP_BDU_ADDR,
2496 +@@ -285,14 +304,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2497 + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
2498 + },
2499 + .fs = {
2500 +- .addr = ST_PRESS_LPS25H_FS_ADDR,
2501 +- .mask = ST_PRESS_LPS25H_FS_MASK,
2502 + .fs_avl = {
2503 ++ /*
2504 ++ * Pressure and temperature sensitivity values
2505 ++ * as defined in table 3 of LPS25H datasheet.
2506 ++ */
2507 + [0] = {
2508 + .num = ST_PRESS_FS_AVL_1260MB,
2509 +- .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
2510 +- .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
2511 +- .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
2512 ++ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
2513 ++ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
2514 + },
2515 + },
2516 + },
2517 +@@ -346,26 +366,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
2518 +
2519 + return IIO_VAL_INT;
2520 + case IIO_CHAN_INFO_SCALE:
2521 +- *val = 0;
2522 +-
2523 + switch (ch->type) {
2524 + case IIO_PRESSURE:
2525 ++ *val = 0;
2526 + *val2 = press_data->current_fullscale->gain;
2527 +- break;
2528 ++ return IIO_VAL_INT_PLUS_NANO;
2529 + case IIO_TEMP:
2530 ++ *val = MCELSIUS_PER_CELSIUS;
2531 + *val2 = press_data->current_fullscale->gain2;
2532 +- break;
2533 ++ return IIO_VAL_FRACTIONAL;
2534 + default:
2535 + err = -EINVAL;
2536 + goto read_error;
2537 + }
2538 +
2539 +- return IIO_VAL_INT_PLUS_NANO;
2540 + case IIO_CHAN_INFO_OFFSET:
2541 + switch (ch->type) {
2542 + case IIO_TEMP:
2543 +- *val = 425;
2544 +- *val2 = 10;
2545 ++ *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
2546 ++ press_data->current_fullscale->gain2;
2547 ++ *val2 = MCELSIUS_PER_CELSIUS;
2548 + break;
2549 + default:
2550 + err = -EINVAL;
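With gain2 expressed in LSB per degree Celsius, the scale becomes 1000/gain2 millidegrees per LSB, so the 42.5 degC datasheet offset converts to 42500 * gain2 / 1000 raw counts, which is what the new OFFSET expression returns as a fraction. A worked standalone check using the LPS331AP's 480 LSB/degC:

/*
 * Worked check of the temperature offset computed above: IIO evaluates
 * (raw + offset) * scale, and 42.5 degC at 480 LSB/degC is 20400 LSB.
 */
#include <stdio.h>

int main(void)
{
	long gain2  = 480;			/* LSB per degC */
	long offset = 42500 * gain2 / 1000;	/* 20400 LSB */
	long raw    = 960;			/* arbitrary 2 degC reading */

	printf("offset = %ld LSB\n", offset);
	printf("T = %ld millidegC\n", (raw + offset) * 1000 / gain2);	/* 44500 */
	return 0;
}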
2551 +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
2552 +index f4d29d5dbd5f..e2f926cdcad2 100644
2553 +--- a/drivers/iio/proximity/as3935.c
2554 ++++ b/drivers/iio/proximity/as3935.c
2555 +@@ -64,6 +64,7 @@ struct as3935_state {
2556 + struct delayed_work work;
2557 +
2558 + u32 tune_cap;
2559 ++ u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
2560 + u8 buf[2] ____cacheline_aligned;
2561 + };
2562 +
2563 +@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
2564 + .type = IIO_PROXIMITY,
2565 + .info_mask_separate =
2566 + BIT(IIO_CHAN_INFO_RAW) |
2567 +- BIT(IIO_CHAN_INFO_PROCESSED),
2568 ++ BIT(IIO_CHAN_INFO_PROCESSED) |
2569 ++ BIT(IIO_CHAN_INFO_SCALE),
2570 + .scan_index = 0,
2571 + .scan_type = {
2572 + .sign = 'u',
2573 +@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
2574 + /* storm out of range */
2575 + if (*val == AS3935_DATA_MASK)
2576 + return -EINVAL;
2577 +- *val *= 1000;
2578 ++
2579 ++ if (m == IIO_CHAN_INFO_PROCESSED)
2580 ++ *val *= 1000;
2581 ++ break;
2582 ++ case IIO_CHAN_INFO_SCALE:
2583 ++ *val = 1000;
2584 + break;
2585 + default:
2586 + return -EINVAL;
2587 +@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
2588 + ret = as3935_read(st, AS3935_DATA, &val);
2589 + if (ret)
2590 + goto err_read;
2591 +- val &= AS3935_DATA_MASK;
2592 +- val *= 1000;
2593 +
2594 +- iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
2595 ++ st->buffer[0] = val & AS3935_DATA_MASK;
2596 ++ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
2597 ++ pf->timestamp);
2598 + err_read:
2599 + iio_trigger_notify_done(indio_dev->trig);
2600 +
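iio_push_to_buffers_with_timestamp() copies the scan data and then writes an s64 timestamp at the next 8-byte-aligned offset, so the source buffer must be sized and aligned for that layout; pushing a lone u32 off the stack risked the helper writing past it. A standalone sketch of the 16-byte layout the new st->buffer provides (assuming a typical ABI with 8-byte int64_t alignment):

/*
 * Layout sketch for the 16-byte scan buffer introduced above: one
 * 8-bit sample, implicit padding, then a 64-bit timestamp at offset 8.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct scan {
	uint8_t data;		/* AS3935 distance estimate */
	/* 7 bytes implicit padding */
	int64_t timestamp;	/* written at offset 8 */
};

int main(void)
{
	printf("sizeof = %zu, ts offset = %zu\n",
	       sizeof(struct scan), offsetof(struct scan, timestamp));
	return 0;	/* prints: sizeof = 16, ts offset = 8 */
}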
2601 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
2602 +index d6d2b3582910..4d8e7f18a9af 100644
2603 +--- a/drivers/infiniband/core/cm.c
2604 ++++ b/drivers/infiniband/core/cm.c
2605 +@@ -3430,14 +3430,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
2606 + work->cm_event.event = IB_CM_USER_ESTABLISHED;
2607 +
2608 + /* Check if the device started its remove_one */
2609 +- spin_lock_irq(&cm.lock);
2610 ++ spin_lock_irqsave(&cm.lock, flags);
2611 + if (!cm_dev->going_down) {
2612 + queue_delayed_work(cm.wq, &work->work, 0);
2613 + } else {
2614 + kfree(work);
2615 + ret = -ENODEV;
2616 + }
2617 +- spin_unlock_irq(&cm.lock);
2618 ++ spin_unlock_irqrestore(&cm.lock, flags);
2619 +
2620 + out:
2621 + return ret;
2622 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
2623 +index 86af71351d9a..06da56bda201 100644
2624 +--- a/drivers/infiniband/hw/mlx4/ah.c
2625 ++++ b/drivers/infiniband/hw/mlx4/ah.c
2626 +@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2627 +
2628 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
2629 + ah->av.ib.g_slid = ah_attr->src_path_bits;
2630 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2631 + if (ah_attr->ah_flags & IB_AH_GRH) {
2632 + ah->av.ib.g_slid |= 0x80;
2633 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
2634 +@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2635 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
2636 + --ah->av.ib.stat_rate;
2637 + }
2638 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2639 +
2640 + return &ah->ibah;
2641 + }
2642 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
2643 +index bf4959f4225b..94f1bf772ec9 100644
2644 +--- a/drivers/iommu/amd_iommu_init.c
2645 ++++ b/drivers/iommu/amd_iommu_init.c
2646 +@@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void)
2647 + break;
2648 + }
2649 +
2650 ++ /*
2651 ++ * Order is important here to make sure any unity map requirements are
2652 ++ * fulfilled. The unity mappings are created and written to the device
2653 ++ * table during the amd_iommu_init_api() call.
2654 ++ *
2655 ++ * After that we call init_device_table_dma() to make sure any
2656 ++ * uninitialized DTE will block DMA, and in the end we flush the caches
2657 ++ * of all IOMMUs to make sure the changes to the device table are
2658 ++ * active.
2659 ++ */
2660 ++ ret = amd_iommu_init_api();
2661 ++
2662 + init_device_table_dma();
2663 +
2664 + for_each_iommu(iommu)
2665 + iommu_flush_all_caches(iommu);
2666 +
2667 +- ret = amd_iommu_init_api();
2668 +-
2669 + if (!ret)
2670 + print_iommu_info();
2671 +
2672 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
2673 +index 4e5118a4cd30..8487987458a1 100644
2674 +--- a/drivers/iommu/arm-smmu-v3.c
2675 ++++ b/drivers/iommu/arm-smmu-v3.c
2676 +@@ -1919,6 +1919,7 @@ static struct iommu_ops arm_smmu_ops = {
2677 + .detach_dev = arm_smmu_detach_dev,
2678 + .map = arm_smmu_map,
2679 + .unmap = arm_smmu_unmap,
2680 ++ .map_sg = default_iommu_map_sg,
2681 + .iova_to_phys = arm_smmu_iova_to_phys,
2682 + .add_device = arm_smmu_add_device,
2683 + .remove_device = arm_smmu_remove_device,
2684 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2685 +index a2e1b7f14df2..6763a4dfed94 100644
2686 +--- a/drivers/iommu/intel-iommu.c
2687 ++++ b/drivers/iommu/intel-iommu.c
2688 +@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
2689 + }
2690 + }
2691 +
2692 +- iommu_flush_write_buffer(iommu);
2693 +- iommu_set_root_entry(iommu);
2694 +- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2695 +- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2696 +-
2697 + if (!ecap_pass_through(iommu->ecap))
2698 + hw_pass_through = 0;
2699 + #ifdef CONFIG_INTEL_IOMMU_SVM
2700 +@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
2701 + #endif
2702 + }
2703 +
2704 ++ /*
2705 ++ * Now that qi is enabled on all iommus, set the root entry and flush
2706 ++ * caches. This is required on some Intel X58 chipsets, otherwise the
2707 ++ * flush_context function will loop forever and the boot hangs.
2708 ++ */
2709 ++ for_each_active_iommu(iommu, drhd) {
2710 ++ iommu_flush_write_buffer(iommu);
2711 ++ iommu_set_root_entry(iommu);
2712 ++ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2713 ++ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2714 ++ }
2715 ++
2716 + if (iommu_pass_through)
2717 + iommu_identity_mapping |= IDENTMAP_ALL;
2718 +
2719 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
2720 +index 2764f43607c1..0e7d16fe84d4 100644
2721 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
2722 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
2723 +@@ -1388,47 +1388,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
2724 + static long uvc_v4l2_compat_ioctl32(struct file *file,
2725 + unsigned int cmd, unsigned long arg)
2726 + {
2727 ++ struct uvc_fh *handle = file->private_data;
2728 + union {
2729 + struct uvc_xu_control_mapping xmap;
2730 + struct uvc_xu_control_query xqry;
2731 + } karg;
2732 + void __user *up = compat_ptr(arg);
2733 +- mm_segment_t old_fs;
2734 + long ret;
2735 +
2736 + switch (cmd) {
2737 + case UVCIOC_CTRL_MAP32:
2738 +- cmd = UVCIOC_CTRL_MAP;
2739 + ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
2740 ++ if (ret)
2741 ++ return ret;
2742 ++ ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
2743 ++ if (ret)
2744 ++ return ret;
2745 ++ ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
2746 ++ if (ret)
2747 ++ return ret;
2748 ++
2749 + break;
2750 +
2751 + case UVCIOC_CTRL_QUERY32:
2752 +- cmd = UVCIOC_CTRL_QUERY;
2753 + ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
2754 ++ if (ret)
2755 ++ return ret;
2756 ++ ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
2757 ++ if (ret)
2758 ++ return ret;
2759 ++ ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
2760 ++ if (ret)
2761 ++ return ret;
2762 + break;
2763 +
2764 + default:
2765 + return -ENOIOCTLCMD;
2766 + }
2767 +
2768 +- old_fs = get_fs();
2769 +- set_fs(KERNEL_DS);
2770 +- ret = video_ioctl2(file, cmd, (unsigned long)&karg);
2771 +- set_fs(old_fs);
2772 +-
2773 +- if (ret < 0)
2774 +- return ret;
2775 +-
2776 +- switch (cmd) {
2777 +- case UVCIOC_CTRL_MAP:
2778 +- ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
2779 +- break;
2780 +-
2781 +- case UVCIOC_CTRL_QUERY:
2782 +- ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
2783 +- break;
2784 +- }
2785 +-
2786 + return ret;
2787 + }
2788 + #endif
2789 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
2790 +index 6515dfc2b805..55cba89dbdb8 100644
2791 +--- a/drivers/memory/omap-gpmc.c
2792 ++++ b/drivers/memory/omap-gpmc.c
2793 +@@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
2794 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
2795 + GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
2796 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
2797 +- GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
2798 ++ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
2799 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
2800 + GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
2801 + p->cycle2cyclesamecsen);
2802 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2803 +index 96fddb016bf1..4dd0391d2942 100644
2804 +--- a/drivers/mtd/ubi/eba.c
2805 ++++ b/drivers/mtd/ubi/eba.c
2806 +@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
2807 + int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
2808 + struct ubi_volume *vol = ubi->volumes[idx];
2809 + struct ubi_vid_hdr *vid_hdr;
2810 ++ uint32_t crc;
2811 +
2812 + vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
2813 + if (!vid_hdr)
2814 +@@ -599,14 +600,8 @@ retry:
2815 + goto out_put;
2816 + }
2817 +
2818 +- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
2819 +- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
2820 +- if (err) {
2821 +- up_read(&ubi->fm_eba_sem);
2822 +- goto write_error;
2823 +- }
2824 ++ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
2825 +
2826 +- data_size = offset + len;
2827 + mutex_lock(&ubi->buf_mutex);
2828 + memset(ubi->peb_buf + offset, 0xFF, len);
2829 +
2830 +@@ -621,6 +616,19 @@ retry:
2831 +
2832 + memcpy(ubi->peb_buf + offset, buf, len);
2833 +
2834 ++ data_size = offset + len;
2835 ++ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
2836 ++ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
2837 ++ vid_hdr->copy_flag = 1;
2838 ++ vid_hdr->data_size = cpu_to_be32(data_size);
2839 ++ vid_hdr->data_crc = cpu_to_be32(crc);
2840 ++ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
2841 ++ if (err) {
2842 ++ mutex_unlock(&ubi->buf_mutex);
2843 ++ up_read(&ubi->fm_eba_sem);
2844 ++ goto write_error;
2845 ++ }
2846 ++
2847 + err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
2848 + if (err) {
2849 + mutex_unlock(&ubi->buf_mutex);
2850 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2851 +index 8c2bb77db049..a790d5f90b83 100644
2852 +--- a/drivers/net/usb/cdc_ncm.c
2853 ++++ b/drivers/net/usb/cdc_ncm.c
2854 +@@ -809,6 +809,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
2855 + if (cdc_ncm_init(dev))
2856 + goto error2;
2857 +
2858 ++ /* Some firmwares need a pause here or they will silently fail
2859 ++ * to set up the interface properly. This value was decided
2860 ++ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
2861 ++ * firmware.
2862 ++ */
2863 ++ usleep_range(10000, 20000);
2864 ++
2865 + /* configure data interface */
2866 + temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
2867 + if (temp) {
2868 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2869 +index c00a7daaa4bc..0cd95120bc78 100644
2870 +--- a/drivers/net/wireless/mac80211_hwsim.c
2871 ++++ b/drivers/net/wireless/mac80211_hwsim.c
2872 +@@ -2723,6 +2723,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2873 + if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
2874 + !info->attrs[HWSIM_ATTR_FLAGS] ||
2875 + !info->attrs[HWSIM_ATTR_COOKIE] ||
2876 ++ !info->attrs[HWSIM_ATTR_SIGNAL] ||
2877 + !info->attrs[HWSIM_ATTR_TX_INFO])
2878 + goto out;
2879 +
2880 +diff --git a/drivers/of/irq.c b/drivers/of/irq.c
2881 +index 72a2c1969646..28da6242eb84 100644
2882 +--- a/drivers/of/irq.c
2883 ++++ b/drivers/of/irq.c
2884 +@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
2885 + EXPORT_SYMBOL_GPL(of_irq_to_resource);
2886 +
2887 + /**
2888 +- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
2889 ++ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
2890 + * @dev: pointer to device tree node
2891 +- * @index: zero-based index of the irq
2892 +- *
2893 +- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
2894 +- * is not yet created.
2895 ++ * @index: zero-based index of the IRQ
2896 + *
2897 ++ * Returns Linux IRQ number on success, or 0 on IRQ mapping failure, or
2898 ++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
2899 ++ * of any other failure.
2900 + */
2901 + int of_irq_get(struct device_node *dev, int index)
2902 + {
2903 +@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
2904 + EXPORT_SYMBOL_GPL(of_irq_get);
2905 +
2906 + /**
2907 +- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
2908 ++ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
2909 + * @dev: pointer to device tree node
2910 +- * @name: irq name
2911 ++ * @name: IRQ name
2912 + *
2913 +- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
2914 +- * is not yet created, or error code in case of any other failure.
2915 ++ * Returns Linux IRQ number on success, or 0 on IRQ mapping failure, or
2916 ++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
2917 ++ * of any other failure.
2918 + */
2919 + int of_irq_get_byname(struct device_node *dev, const char *name)
2920 + {
2921 +diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
2922 +index d4c285688ce9..3ddc85e6efd6 100644
2923 +--- a/drivers/scsi/53c700.c
2924 ++++ b/drivers/scsi/53c700.c
2925 +@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
2926 + } else {
2927 + struct scsi_cmnd *SCp;
2928 +
2929 +- SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
2930 ++ SCp = SDp->current_cmnd;
2931 + if(unlikely(SCp == NULL)) {
2932 + sdev_printk(KERN_ERR, SDp,
2933 + "no saved request for untagged cmd\n");
2934 +@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
2935 + slot->tag, slot);
2936 + } else {
2937 + slot->tag = SCSI_NO_TAG;
2938 +- /* must populate current_cmnd for scsi_host_find_tag to work */
2939 ++ /* save current command for reselection */
2940 + SCp->device->current_cmnd = SCp;
2941 + }
2942 + /* sanity check: some of the commands generated by the mid-layer
2943 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2944 +index 984ddcb4786d..1b9c049bd5c5 100644
2945 +--- a/drivers/scsi/scsi_error.c
2946 ++++ b/drivers/scsi/scsi_error.c
2947 +@@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
2948 + */
2949 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
2950 + {
2951 +- scmd->device->host->host_failed--;
2952 + scmd->eh_eflags = 0;
2953 + list_move_tail(&scmd->eh_entry, done_q);
2954 + }
2955 +@@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data)
2956 + else
2957 + scsi_unjam_host(shost);
2958 +
2959 ++ /* All scmds have been handled */
2960 ++ shost->host_failed = 0;
2961 ++
2962 + /*
2963 + * Note - if the above fails completely, the action is to take
2964 + * individual devices offline and flush the queue of any
2965 +diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
2966 +index 02e930c55570..e4839ee4ca61 100644
2967 +--- a/drivers/staging/iio/accel/sca3000_core.c
2968 ++++ b/drivers/staging/iio/accel/sca3000_core.c
2969 +@@ -595,7 +595,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
2970 + goto error_ret_mut;
2971 + ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
2972 + mutex_unlock(&st->lock);
2973 +- if (ret)
2974 ++ if (ret < 0)
2975 + goto error_ret;
2976 + val = ret;
2977 + if (base_freq > 0)
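
The sca3000 fix above is the usual idiom for helpers that return a non-negative payload on success and a negative errno on failure: sca3000_read_ctrl_reg() returns the register value, so "if (ret)" would reject every non-zero successful read. A minimal sketch of the idiom, with read_reg() as a hypothetical stand-in:

  #include <stdio.h>

  /* Hypothetical stand-in for a helper that, like
   * sca3000_read_ctrl_reg(), returns the value read (>= 0) on
   * success and a negative errno on failure. */
  static int read_reg(int fail)
  {
      return fail ? -5 /* -EIO */ : 0x42;
  }

  int main(void)
  {
      int ret = read_reg(0);

      /* Wrong: a successful non-zero value is treated as an error. */
      if (ret)
          printf("buggy check: would bail out on value 0x%x\n", ret);

      /* Right: only negative returns are errors. */
      if (ret < 0)
          printf("error %d\n", ret);
      else
          printf("read value 0x%x\n", ret);
      return 0;
  }
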
2978 +diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
2979 +index 6ceac4f2d4b2..5b4b47ed948b 100644
2980 +--- a/drivers/thermal/cpu_cooling.c
2981 ++++ b/drivers/thermal/cpu_cooling.c
2982 +@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
2983 + goto free_power_table;
2984 + }
2985 +
2986 +- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
2987 +- cpufreq_dev->id);
2988 +-
2989 +- cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
2990 +- &cpufreq_cooling_ops);
2991 +- if (IS_ERR(cool_dev))
2992 +- goto remove_idr;
2993 +-
2994 + /* Fill freq-table in descending order of frequencies */
2995 + for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
2996 + freq = find_next_max(table, freq);
2997 +@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
2998 + pr_debug("%s: freq:%u KHz\n", __func__, freq);
2999 + }
3000 +
3001 ++ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
3002 ++ cpufreq_dev->id);
3003 ++
3004 ++ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
3005 ++ &cpufreq_cooling_ops);
3006 ++ if (IS_ERR(cool_dev))
3007 ++ goto remove_idr;
3008 ++
3009 + cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
3010 + cpufreq_dev->cool_dev = cool_dev;
3011 +
3012 +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
3013 +index 6f0336fff501..41987a55a538 100644
3014 +--- a/drivers/tty/vt/keyboard.c
3015 ++++ b/drivers/tty/vt/keyboard.c
3016 +@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
3017 +
3018 + static void do_compute_shiftstate(void)
3019 + {
3020 +- unsigned int i, j, k, sym, val;
3021 ++ unsigned int k, sym, val;
3022 +
3023 + shift_state = 0;
3024 + memset(shift_down, 0, sizeof(shift_down));
3025 +
3026 +- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
3027 +-
3028 +- if (!key_down[i])
3029 ++ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
3030 ++ sym = U(key_maps[0][k]);
3031 ++ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
3032 + continue;
3033 +
3034 +- k = i * BITS_PER_LONG;
3035 +-
3036 +- for (j = 0; j < BITS_PER_LONG; j++, k++) {
3037 +-
3038 +- if (!test_bit(k, key_down))
3039 +- continue;
3040 ++ val = KVAL(sym);
3041 ++ if (val == KVAL(K_CAPSSHIFT))
3042 ++ val = KVAL(K_SHIFT);
3043 +
3044 +- sym = U(key_maps[0][k]);
3045 +- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
3046 +- continue;
3047 +-
3048 +- val = KVAL(sym);
3049 +- if (val == KVAL(K_CAPSSHIFT))
3050 +- val = KVAL(K_SHIFT);
3051 +-
3052 +- shift_down[val]++;
3053 +- shift_state |= (1 << val);
3054 +- }
3055 ++ shift_down[val]++;
3056 ++ shift_state |= BIT(val);
3057 + }
3058 + }
3059 +
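
The keyboard.c hunk replaces a hand-rolled word-and-bit double loop with for_each_set_bit(), which visits exactly the set bits of the key_down bitmap. A self-contained userspace approximation of that iterator; next_set_bit() is a naive stand-in for the kernel's optimized find_next_bit():

  #include <stdio.h>

  #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

  /* Linear-scan stand-in for find_next_bit(). */
  static int next_set_bit(const unsigned long *map, int size, int from)
  {
      int k;

      for (k = from; k < size; k++)
          if (map[k / BITS_PER_LONG] & (1UL << (k % BITS_PER_LONG)))
              return k;
      return size;
  }

  #define for_each_set_bit(k, map, size) \
      for ((k) = next_set_bit((map), (size), 0); (k) < (size); \
           (k) = next_set_bit((map), (size), (k) + 1))

  int main(void)
  {
      unsigned long key_down[2] = { 0 };
      int k;

      key_down[0] |= 1UL << 3;   /* pretend key 3 is down */
      key_down[1] |= 1UL << 1;   /* and key BITS_PER_LONG + 1 */

      for_each_set_bit(k, key_down, 2 * BITS_PER_LONG)
          printf("key %d is down\n", k);
      return 0;
  }
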
3060 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3061 +index cf20282f79f0..136ebaaa9cc0 100644
3062 +--- a/drivers/tty/vt/vt.c
3063 ++++ b/drivers/tty/vt/vt.c
3064 +@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
3065 + vc->vc_complement_mask = 0;
3066 + vc->vc_can_do_color = 0;
3067 + vc->vc_panic_force_write = false;
3068 ++ vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
3069 + vc->vc_sw->con_init(vc, init);
3070 + if (!vc->vc_complement_mask)
3071 + vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
3072 +diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
3073 +index 61d538aa2346..4f4f06a5889f 100644
3074 +--- a/drivers/usb/common/usb-otg-fsm.c
3075 ++++ b/drivers/usb/common/usb-otg-fsm.c
3076 +@@ -21,6 +21,7 @@
3077 + * 675 Mass Ave, Cambridge, MA 02139, USA.
3078 + */
3079 +
3080 ++#include <linux/module.h>
3081 + #include <linux/kernel.h>
3082 + #include <linux/types.h>
3083 + #include <linux/mutex.h>
3084 +@@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm)
3085 + return state_changed;
3086 + }
3087 + EXPORT_SYMBOL_GPL(otg_statemachine);
3088 ++MODULE_LICENSE("GPL");
3089 +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
3090 +index a66d3cb62b65..a738a68d2292 100644
3091 +--- a/drivers/usb/dwc2/core.h
3092 ++++ b/drivers/usb/dwc2/core.h
3093 +@@ -44,6 +44,17 @@
3094 + #include <linux/usb/phy.h>
3095 + #include "hw.h"
3096 +
3097 ++#ifdef CONFIG_MIPS
3098 ++/*
3099 ++ * There are some MIPS machines that can run in either big-endian
3100 ++ * or little-endian mode and that access the dwc2 registers without
3101 ++ * a byteswap in either mode.
3102 ++ * Unlike other architectures, MIPS apparently does not require a
3103 ++ * barrier before the __raw_writel() to synchronize with DMA but does
3104 ++ * require the barrier after the __raw_writel() to serialize a set of
3105 ++ * writes. This set of operations was added specifically for MIPS and
3106 ++ * should only be used there.
3107 ++ */
3108 + static inline u32 dwc2_readl(const void __iomem *addr)
3109 + {
3110 + u32 value = __raw_readl(addr);
3111 +@@ -70,6 +81,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
3112 + pr_info("INFO:: wrote %08x to %p\n", value, addr);
3113 + #endif
3114 + }
3115 ++#else
3116 ++/* Normal architectures just use readl/writel */
3117 ++static inline u32 dwc2_readl(const void __iomem *addr)
3118 ++{
3119 ++ return readl(addr);
3120 ++}
3121 ++
3122 ++static inline void dwc2_writel(u32 value, void __iomem *addr)
3123 ++{
3124 ++ writel(value, addr);
3125 ++
3126 ++#ifdef DWC2_LOG_WRITES
3127 ++ pr_info("info:: wrote %08x to %p\n", value, addr);
3128 ++#endif
3129 ++}
3130 ++#endif
3131 +
3132 + /* Maximum number of Endpoints/HostChannels */
3133 + #define MAX_EPS_CHANNELS 16
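
The dwc2 change keeps the __raw_readl()/__raw_writel() pair (no byteswap, MIPS-specific barrier placement) for MIPS only and switches everyone else to readl()/writel(), which always present registers in little-endian order. A userspace sketch of the semantic difference; bswap32() stands in for the architecture byteswap and the CPU endianness is passed explicitly for illustration:

  #include <stdio.h>
  #include <stdint.h>

  static uint32_t bswap32(uint32_t v)
  {
      return (v >> 24) | ((v >> 8) & 0xff00) |
             ((v << 8) & 0xff0000) | (v << 24);
  }

  /* __raw_readl()-like: no swap, no barrier. */
  static uint32_t raw_readl(const volatile uint32_t *addr)
  {
      return *addr;
  }

  /* readl()-like: registers are always seen little-endian. */
  static uint32_t le_readl(const volatile uint32_t *addr, int big_endian_cpu)
  {
      uint32_t v = raw_readl(addr);

      return big_endian_cpu ? bswap32(v) : v;
  }

  int main(void)
  {
      uint32_t reg = 0x12345678;

      printf("raw:    %08x\n", (unsigned)raw_readl(&reg));
      printf("LE cpu: %08x\n", (unsigned)le_readl(&reg, 0));
      printf("BE cpu: %08x\n", (unsigned)le_readl(&reg, 1));
      return 0;
  }
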
3134 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
3135 +index 7d3e5d0e9aa4..8ab6238c9299 100644
3136 +--- a/drivers/virtio/virtio_balloon.c
3137 ++++ b/drivers/virtio/virtio_balloon.c
3138 +@@ -73,7 +73,7 @@ struct virtio_balloon {
3139 +
3140 + /* The array of pfns we tell the Host about. */
3141 + unsigned int num_pfns;
3142 +- u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
3143 ++ __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
3144 +
3145 + /* Memory statistics */
3146 + int need_stats_update;
3147 +@@ -125,14 +125,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
3148 + wait_event(vb->acked, virtqueue_get_buf(vq, &len));
3149 + }
3150 +
3151 +-static void set_page_pfns(u32 pfns[], struct page *page)
3152 ++static void set_page_pfns(struct virtio_balloon *vb,
3153 ++ __virtio32 pfns[], struct page *page)
3154 + {
3155 + unsigned int i;
3156 +
3157 + /* Set balloon pfns pointing at this page.
3158 + * Note that the first pfn points at start of the page. */
3159 + for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
3160 +- pfns[i] = page_to_balloon_pfn(page) + i;
3161 ++ pfns[i] = cpu_to_virtio32(vb->vdev,
3162 ++ page_to_balloon_pfn(page) + i);
3163 + }
3164 +
3165 + static void fill_balloon(struct virtio_balloon *vb, size_t num)
3166 +@@ -155,7 +157,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
3167 + msleep(200);
3168 + break;
3169 + }
3170 +- set_page_pfns(vb->pfns + vb->num_pfns, page);
3171 ++ set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
3172 + vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
3173 + if (!virtio_has_feature(vb->vdev,
3174 + VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
3175 +@@ -171,10 +173,12 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
3176 + static void release_pages_balloon(struct virtio_balloon *vb)
3177 + {
3178 + unsigned int i;
3179 ++ struct page *page;
3180 +
3181 + /* Find pfns pointing at start of each page, get pages and free them. */
3182 + for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
3183 +- struct page *page = balloon_pfn_to_page(vb->pfns[i]);
3184 ++ page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
3185 ++ vb->pfns[i]));
3186 + if (!virtio_has_feature(vb->vdev,
3187 + VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
3188 + adjust_managed_page_count(page, 1);
3189 +@@ -197,7 +201,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
3190 + page = balloon_page_dequeue(vb_dev_info);
3191 + if (!page)
3192 + break;
3193 +- set_page_pfns(vb->pfns + vb->num_pfns, page);
3194 ++ set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
3195 + vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
3196 + }
3197 +
3198 +@@ -465,13 +469,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
3199 + __count_vm_event(BALLOON_MIGRATE);
3200 + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
3201 + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
3202 +- set_page_pfns(vb->pfns, newpage);
3203 ++ set_page_pfns(vb, vb->pfns, newpage);
3204 + tell_host(vb, vb->inflate_vq);
3205 +
3206 + /* balloon's page migration 2nd step -- deflate "page" */
3207 + balloon_page_delete(page);
3208 + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
3209 +- set_page_pfns(vb->pfns, page);
3210 ++ set_page_pfns(vb, vb->pfns, page);
3211 + tell_host(vb, vb->deflate_vq);
3212 +
3213 + mutex_unlock(&vb->balloon_lock);
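
The virtio_balloon hunks change the pfns array from raw u32 to __virtio32 so each PFN is converted with cpu_to_virtio32() before the host sees it. A sketch of the resulting set_page_pfns() shape; PAGES_PER_PAGE and the identity conversion are illustrative assumptions for a little-endian host:

  #include <stdio.h>
  #include <stdint.h>

  #define PAGES_PER_PAGE 4   /* e.g. 16K kernel page, 4K balloon page */

  /* Assumed little-endian host, so device byte order needs no swap. */
  static uint32_t cpu_to_virtio32_sketch(uint32_t v)
  {
      return v;
  }

  /* Each kernel page is reported as PAGES_PER_PAGE consecutive
   * balloon PFNs, each in device byte order. */
  static void set_page_pfns(uint32_t *pfns, uint32_t first_pfn)
  {
      unsigned int i;

      for (i = 0; i < PAGES_PER_PAGE; i++)
          pfns[i] = cpu_to_virtio32_sketch(first_pfn + i);
  }

  int main(void)
  {
      uint32_t pfns[PAGES_PER_PAGE];
      unsigned int i;

      set_page_pfns(pfns, 0x1000);
      for (i = 0; i < PAGES_PER_PAGE; i++)
          printf("pfn[%u] = 0x%x\n", i, (unsigned)pfns[i]);
      return 0;
  }
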
3214 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
3215 +index 364bc44610c1..cfab1d24e4bc 100644
3216 +--- a/drivers/xen/balloon.c
3217 ++++ b/drivers/xen/balloon.c
3218 +@@ -152,8 +152,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
3219 + static void balloon_process(struct work_struct *work);
3220 + static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
3221 +
3222 +-static void release_memory_resource(struct resource *resource);
3223 +-
3224 + /* When ballooning out (allocating memory to return to Xen) we don't really
3225 + want the kernel to try too hard since that can trigger the oom killer. */
3226 + #define GFP_BALLOON \
3227 +@@ -249,6 +247,19 @@ static enum bp_state update_schedule(enum bp_state state)
3228 + }
3229 +
3230 + #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
3231 ++static void release_memory_resource(struct resource *resource)
3232 ++{
3233 ++ if (!resource)
3234 ++ return;
3235 ++
3236 ++ /*
3237 ++ * No need to reset region to identity mapped since we now
3238 ++ * know that no I/O can be in this region
3239 ++ */
3240 ++ release_resource(resource);
3241 ++ kfree(resource);
3242 ++}
3243 ++
3244 + static struct resource *additional_memory_resource(phys_addr_t size)
3245 + {
3246 + struct resource *res;
3247 +@@ -287,19 +298,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
3248 + return res;
3249 + }
3250 +
3251 +-static void release_memory_resource(struct resource *resource)
3252 +-{
3253 +- if (!resource)
3254 +- return;
3255 +-
3256 +- /*
3257 +- * No need to reset region to identity mapped since we now
3258 +- * know that no I/O can be in this region
3259 +- */
3260 +- release_resource(resource);
3261 +- kfree(resource);
3262 +-}
3263 +-
3264 + static enum bp_state reserve_additional_memory(void)
3265 + {
3266 + long credit;
3267 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
3268 +index 70fa438000af..611f9c11da85 100644
3269 +--- a/drivers/xen/xen-acpi-processor.c
3270 ++++ b/drivers/xen/xen-acpi-processor.c
3271 +@@ -423,36 +423,7 @@ upload:
3272 +
3273 + return 0;
3274 + }
3275 +-static int __init check_prereq(void)
3276 +-{
3277 +- struct cpuinfo_x86 *c = &cpu_data(0);
3278 +-
3279 +- if (!xen_initial_domain())
3280 +- return -ENODEV;
3281 +-
3282 +- if (!acpi_gbl_FADT.smi_command)
3283 +- return -ENODEV;
3284 +-
3285 +- if (c->x86_vendor == X86_VENDOR_INTEL) {
3286 +- if (!cpu_has(c, X86_FEATURE_EST))
3287 +- return -ENODEV;
3288 +
3289 +- return 0;
3290 +- }
3291 +- if (c->x86_vendor == X86_VENDOR_AMD) {
3292 +- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
3293 +- * as we get compile warnings for the static functions.
3294 +- */
3295 +-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
3296 +-#define USE_HW_PSTATE 0x00000080
3297 +- u32 eax, ebx, ecx, edx;
3298 +- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
3299 +- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
3300 +- return -ENODEV;
3301 +- return 0;
3302 +- }
3303 +- return -ENODEV;
3304 +-}
3305 + /* acpi_perf_data is a pointer to percpu data. */
3306 + static struct acpi_processor_performance __percpu *acpi_perf_data;
3307 +
3308 +@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
3309 + static int __init xen_acpi_processor_init(void)
3310 + {
3311 + unsigned int i;
3312 +- int rc = check_prereq();
3313 ++ int rc;
3314 +
3315 +- if (rc)
3316 +- return rc;
3317 ++ if (!xen_initial_domain())
3318 ++ return -ENODEV;
3319 +
3320 + nr_acpi_bits = get_max_acpi_id() + 1;
3321 + acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
3322 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3323 +index 5b8e235c4b6d..0f2b7c622ce3 100644
3324 +--- a/fs/btrfs/ctree.c
3325 ++++ b/fs/btrfs/ctree.c
3326 +@@ -1551,6 +1551,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
3327 + trans->transid, root->fs_info->generation);
3328 +
3329 + if (!should_cow_block(trans, root, buf)) {
3330 ++ trans->dirty = true;
3331 + *cow_ret = buf;
3332 + return 0;
3333 + }
3334 +@@ -2773,8 +2774,10 @@ again:
3335 + * then we don't want to set the path blocking,
3336 + * so we test it here
3337 + */
3338 +- if (!should_cow_block(trans, root, b))
3339 ++ if (!should_cow_block(trans, root, b)) {
3340 ++ trans->dirty = true;
3341 + goto cow_done;
3342 ++ }
3343 +
3344 + /*
3345 + * must have write locks on this node and the
3346 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3347 +index 2368cac1115a..47cdc6f3390b 100644
3348 +--- a/fs/btrfs/extent-tree.c
3349 ++++ b/fs/btrfs/extent-tree.c
3350 +@@ -7856,7 +7856,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3351 + set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3352 + buf->start + buf->len - 1, GFP_NOFS);
3353 + }
3354 +- trans->blocks_used++;
3355 ++ trans->dirty = true;
3356 + /* this returns a buffer locked for blocking */
3357 + return buf;
3358 + }
3359 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3360 +index fe609b81dd1b..5d34a062ca4f 100644
3361 +--- a/fs/btrfs/super.c
3362 ++++ b/fs/btrfs/super.c
3363 +@@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
3364 + trans->aborted = errno;
3365 + /* Nothing used. The other threads that have joined this
3366 + * transaction may be able to continue. */
3367 +- if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
3368 ++ if (!trans->dirty && list_empty(&trans->new_bgs)) {
3369 + const char *errstr;
3370 +
3371 + errstr = btrfs_decode_error(errno);
3372 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
3373 +index 64c8221b6165..1e872923ec2c 100644
3374 +--- a/fs/btrfs/transaction.h
3375 ++++ b/fs/btrfs/transaction.h
3376 +@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
3377 + u64 chunk_bytes_reserved;
3378 + unsigned long use_count;
3379 + unsigned long blocks_reserved;
3380 +- unsigned long blocks_used;
3381 + unsigned long delayed_ref_updates;
3382 + struct btrfs_transaction *transaction;
3383 + struct btrfs_block_rsv *block_rsv;
3384 +@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
3385 + bool can_flush_pending_bgs;
3386 + bool reloc_reserved;
3387 + bool sync;
3388 ++ bool dirty;
3389 + unsigned int type;
3390 + /*
3391 + * this root is only needed to validate that the root passed to
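
Across the four btrfs hunks, the blocks_used counter becomes a bool dirty flag: the abort path only asks whether the transaction touched anything, and the flag is now also set on the no-COW paths that reuse an already-written buffer. A toy model of that decision; the struct and helpers are illustrative:

  #include <stdbool.h>
  #include <stdio.h>

  struct trans_handle {
      bool dirty;
  };

  static void touch_block(struct trans_handle *t, bool needs_cow)
  {
      /* both the COW path and the "already ours" path dirty it */
      t->dirty = true;
      (void)needs_cow;
  }

  static void abort_transaction(struct trans_handle *t)
  {
      if (!t->dirty)
          printf("nothing used: other joiners may continue\n");
      else
          printf("transaction dirty: force the fs read-only\n");
  }

  int main(void)
  {
      struct trans_handle t = { .dirty = false };

      abort_transaction(&t);
      touch_block(&t, false);
      abort_transaction(&t);
      return 0;
  }
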
3392 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
3393 +index 5a53ac6b1e02..02b071bf3732 100644
3394 +--- a/fs/cifs/cifs_unicode.c
3395 ++++ b/fs/cifs/cifs_unicode.c
3396 +@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
3397 + case SFM_SLASH:
3398 + *target = '\\';
3399 + break;
3400 ++ case SFM_SPACE:
3401 ++ *target = ' ';
3402 ++ break;
3403 ++ case SFM_PERIOD:
3404 ++ *target = '.';
3405 ++ break;
3406 + default:
3407 + return false;
3408 + }
3409 +@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
3410 + return dest_char;
3411 + }
3412 +
3413 +-static __le16 convert_to_sfm_char(char src_char)
3414 ++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
3415 + {
3416 + __le16 dest_char;
3417 +
3418 +@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
3419 + case '|':
3420 + dest_char = cpu_to_le16(SFM_PIPE);
3421 + break;
3422 ++ case '.':
3423 ++ if (end_of_string)
3424 ++ dest_char = cpu_to_le16(SFM_PERIOD);
3425 ++ else
3426 ++ dest_char = 0;
3427 ++ break;
3428 ++ case ' ':
3429 ++ if (end_of_string)
3430 ++ dest_char = cpu_to_le16(SFM_SPACE);
3431 ++ else
3432 ++ dest_char = 0;
3433 ++ break;
3434 + default:
3435 + dest_char = 0;
3436 + }
3437 +@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
3438 + /* see if we must remap this char */
3439 + if (map_chars == SFU_MAP_UNI_RSVD)
3440 + dst_char = convert_to_sfu_char(src_char);
3441 +- else if (map_chars == SFM_MAP_UNI_RSVD)
3442 +- dst_char = convert_to_sfm_char(src_char);
3443 +- else
3444 ++ else if (map_chars == SFM_MAP_UNI_RSVD) {
3445 ++ bool end_of_string;
3446 ++
3447 ++ if (i == srclen - 1)
3448 ++ end_of_string = true;
3449 ++ else
3450 ++ end_of_string = false;
3451 ++
3452 ++ dst_char = convert_to_sfm_char(src_char, end_of_string);
3453 ++ } else
3454 + dst_char = 0;
3455 + /*
3456 + * FIXME: We can not handle remapping backslash (UNI_SLASH)
3457 +diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
3458 +index bdc52cb9a676..479bc0a941f3 100644
3459 +--- a/fs/cifs/cifs_unicode.h
3460 ++++ b/fs/cifs/cifs_unicode.h
3461 +@@ -64,6 +64,8 @@
3462 + #define SFM_LESSTHAN ((__u16) 0xF023)
3463 + #define SFM_PIPE ((__u16) 0xF027)
3464 + #define SFM_SLASH ((__u16) 0xF026)
3465 ++#define SFM_PERIOD ((__u16) 0xF028)
3466 ++#define SFM_SPACE ((__u16) 0xF029)
3467 +
3468 + /*
3469 + * Mapping mechanism to use when one of the seven reserved characters is
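
The cifs_unicode changes remap '.' and ' ' into the SFM private-use area only when they are the final character of the name, since only trailing periods and spaces are reserved on the server; the convert_sfm_char() hunk restores them on the way back. A small sketch of the end_of_string test, using the constants from the header hunk above:

  #include <stdio.h>

  #define SFM_PERIOD 0xF028
  #define SFM_SPACE  0xF029

  /* Returns the SFM remap code, or 0 for "leave as-is". */
  static unsigned int map_sfm(char c, int end_of_string)
  {
      switch (c) {
      case '.':
          return end_of_string ? SFM_PERIOD : 0;
      case ' ':
          return end_of_string ? SFM_SPACE : 0;
      default:
          return 0;
      }
  }

  int main(void)
  {
      const char *name = "a.b .";
      int i, len = 5;

      for (i = 0; i < len; i++) {
          unsigned int m = map_sfm(name[i], i == len - 1);

          printf("'%c' -> %s (0x%04x)\n", name[i],
                 m ? "remapped" : "as-is", m);
      }
      return 0;
  }
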
3470 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3471 +index 3c194ff0d2f0..5481a6eb9a95 100644
3472 +--- a/fs/cifs/connect.c
3473 ++++ b/fs/cifs/connect.c
3474 +@@ -425,7 +425,9 @@ cifs_echo_request(struct work_struct *work)
3475 + * server->ops->need_neg() == true. Also, no need to ping if
3476 + * we got a response recently.
3477 + */
3478 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
3479 ++
3480 ++ if (server->tcpStatus == CifsNeedReconnect ||
3481 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
3482 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
3483 + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
3484 + goto requeue_echo;
3485 +diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
3486 +index 848249fa120f..3079b38f0afb 100644
3487 +--- a/fs/cifs/ntlmssp.h
3488 ++++ b/fs/cifs/ntlmssp.h
3489 +@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
3490 +
3491 + int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
3492 + void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
3493 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
3494 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
3495 + struct cifs_ses *ses,
3496 + const struct nls_table *nls_cp);
3497 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3498 +index af0ec2d5ad0e..e88ffe1da045 100644
3499 +--- a/fs/cifs/sess.c
3500 ++++ b/fs/cifs/sess.c
3501 +@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
3502 + sec_blob->DomainName.MaximumLength = 0;
3503 + }
3504 +
3505 +-/* We do not malloc the blob, it is passed in pbuffer, because its
3506 +- maximum possible size is fixed and small, making this approach cleaner.
3507 +- This function returns the length of the data in the blob */
3508 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3509 ++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
3510 ++{
3511 ++ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
3512 ++ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
3513 ++
3514 ++ if (ses->domainName)
3515 ++ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
3516 ++ else
3517 ++ sz += 2;
3518 ++
3519 ++ if (ses->user_name)
3520 ++ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
3521 ++ else
3522 ++ sz += 2;
3523 ++
3524 ++ return sz;
3525 ++}
3526 ++
3527 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
3528 + u16 *buflen,
3529 + struct cifs_ses *ses,
3530 + const struct nls_table *nls_cp)
3531 + {
3532 + int rc;
3533 +- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
3534 ++ AUTHENTICATE_MESSAGE *sec_blob;
3535 + __u32 flags;
3536 + unsigned char *tmp;
3537 +
3538 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
3539 ++ if (rc) {
3540 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3541 ++ *buflen = 0;
3542 ++ goto setup_ntlmv2_ret;
3543 ++ }
3544 ++ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
3545 ++ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
3546 ++
3547 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
3548 + sec_blob->MessageType = NtLmAuthenticate;
3549 +
3550 +@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3551 + flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
3552 + }
3553 +
3554 +- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
3555 ++ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
3556 + sec_blob->NegotiateFlags = cpu_to_le32(flags);
3557 +
3558 + sec_blob->LmChallengeResponse.BufferOffset =
3559 +@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3560 + sec_blob->LmChallengeResponse.Length = 0;
3561 + sec_blob->LmChallengeResponse.MaximumLength = 0;
3562 +
3563 +- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
3564 ++ sec_blob->NtChallengeResponse.BufferOffset =
3565 ++ cpu_to_le32(tmp - *pbuffer);
3566 + if (ses->user_name != NULL) {
3567 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
3568 +- if (rc) {
3569 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3570 +- goto setup_ntlmv2_ret;
3571 +- }
3572 + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3573 + ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3574 + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3575 +@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3576 + }
3577 +
3578 + if (ses->domainName == NULL) {
3579 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3580 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3581 + sec_blob->DomainName.Length = 0;
3582 + sec_blob->DomainName.MaximumLength = 0;
3583 + tmp += 2;
3584 +@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3585 + len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
3586 + CIFS_MAX_USERNAME_LEN, nls_cp);
3587 + len *= 2; /* unicode is 2 bytes each */
3588 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3589 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3590 + sec_blob->DomainName.Length = cpu_to_le16(len);
3591 + sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
3592 + tmp += len;
3593 + }
3594 +
3595 + if (ses->user_name == NULL) {
3596 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3597 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3598 + sec_blob->UserName.Length = 0;
3599 + sec_blob->UserName.MaximumLength = 0;
3600 + tmp += 2;
3601 +@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3602 + len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
3603 + CIFS_MAX_USERNAME_LEN, nls_cp);
3604 + len *= 2; /* unicode is 2 bytes each */
3605 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3606 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3607 + sec_blob->UserName.Length = cpu_to_le16(len);
3608 + sec_blob->UserName.MaximumLength = cpu_to_le16(len);
3609 + tmp += len;
3610 + }
3611 +
3612 +- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3613 ++ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3614 + sec_blob->WorkstationName.Length = 0;
3615 + sec_blob->WorkstationName.MaximumLength = 0;
3616 + tmp += 2;
3617 +@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3618 + (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
3619 + && !calc_seckey(ses)) {
3620 + memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
3621 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
3622 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3623 + sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
3624 + sec_blob->SessionKey.MaximumLength =
3625 + cpu_to_le16(CIFS_CPHTXT_SIZE);
3626 + tmp += CIFS_CPHTXT_SIZE;
3627 + } else {
3628 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
3629 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3630 + sec_blob->SessionKey.Length = 0;
3631 + sec_blob->SessionKey.MaximumLength = 0;
3632 + }
3633 +
3634 ++ *buflen = tmp - *pbuffer;
3635 + setup_ntlmv2_ret:
3636 +- *buflen = tmp - pbuffer;
3637 + return rc;
3638 + }
3639 +
3640 +@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
3641 + struct cifs_ses *ses = sess_data->ses;
3642 + __u16 bytes_remaining;
3643 + char *bcc_ptr;
3644 +- char *ntlmsspblob = NULL;
3645 ++ unsigned char *ntlmsspblob = NULL;
3646 + u16 blob_len;
3647 +
3648 + cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
3649 +@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
3650 + /* Build security blob before we assemble the request */
3651 + pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
3652 + smb_buf = (struct smb_hdr *)pSMB;
3653 +- /*
3654 +- * 5 is an empirical value, large enough to hold
3655 +- * authenticate message plus max 10 of av paris,
3656 +- * domain, user, workstation names, flags, etc.
3657 +- */
3658 +- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
3659 +- GFP_KERNEL);
3660 +- if (!ntlmsspblob) {
3661 +- rc = -ENOMEM;
3662 +- goto out;
3663 +- }
3664 +-
3665 +- rc = build_ntlmssp_auth_blob(ntlmsspblob,
3666 ++ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
3667 + &blob_len, ses, sess_data->nls_cp);
3668 + if (rc)
3669 + goto out_free_ntlmsspblob;
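
The sess.c rework sizes the NTLMSSP authenticate blob exactly from its variable-length fields instead of the old "5 *" guess, then allocates it inside build_ntlmssp_auth_blob() itself. A sketch of that sizing arithmetic; HDR_SIZE and SESS_KEY_EXTRA are illustrative stand-ins for the real CIFS constants:

  #include <stdio.h>
  #include <string.h>

  #define HDR_SIZE        64  /* sizeof(AUTHENTICATE_MESSAGE) stand-in */
  #define SESS_KEY_EXTRA  16  /* CIFS_CPHTXT_SIZE stand-in */

  /* Worst-case blob length: fixed header plus the UTF-16 encodings
   * of the variable fields (2 bytes per character, 2 bytes when the
   * field is absent). */
  static int blob_size(const char *domain, const char *user, int key_len)
  {
      int sz = HDR_SIZE + key_len + SESS_KEY_EXTRA + 2;

      sz += domain ? 2 * (int)strlen(domain) : 2;
      sz += user ? 2 * (int)strlen(user) : 2;
      return sz;
  }

  int main(void)
  {
      printf("blob for WORKGROUP/guest: %d bytes\n",
             blob_size("WORKGROUP", "guest", 40));
      printf("anonymous blob: %d bytes\n", blob_size(NULL, NULL, 40));
      return 0;
  }
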
3670 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3671 +index 82c5f57382b2..0b6dc1942bdc 100644
3672 +--- a/fs/cifs/smb2pdu.c
3673 ++++ b/fs/cifs/smb2pdu.c
3674 +@@ -591,7 +591,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
3675 + u16 blob_length = 0;
3676 + struct key *spnego_key = NULL;
3677 + char *security_blob = NULL;
3678 +- char *ntlmssp_blob = NULL;
3679 ++ unsigned char *ntlmssp_blob = NULL;
3680 + bool use_spnego = false; /* else use raw ntlmssp */
3681 +
3682 + cifs_dbg(FYI, "Session Setup\n");
3683 +@@ -716,13 +716,7 @@ ssetup_ntlmssp_authenticate:
3684 + iov[1].iov_len = blob_length;
3685 + } else if (phase == NtLmAuthenticate) {
3686 + req->hdr.SessionId = ses->Suid;
3687 +- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
3688 +- GFP_KERNEL);
3689 +- if (ntlmssp_blob == NULL) {
3690 +- rc = -ENOMEM;
3691 +- goto ssetup_exit;
3692 +- }
3693 +- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
3694 ++ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
3695 + nls_cp);
3696 + if (rc) {
3697 + cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
3698 +@@ -1820,6 +1814,33 @@ SMB2_echo(struct TCP_Server_Info *server)
3699 +
3700 + cifs_dbg(FYI, "In echo request\n");
3701 +
3702 ++ if (server->tcpStatus == CifsNeedNegotiate) {
3703 ++ struct list_head *tmp, *tmp2;
3704 ++ struct cifs_ses *ses;
3705 ++ struct cifs_tcon *tcon;
3706 ++
3707 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3708 ++ spin_lock(&cifs_tcp_ses_lock);
3709 ++ list_for_each(tmp, &server->smb_ses_list) {
3710 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
3711 ++ list_for_each(tmp2, &ses->tcon_list) {
3712 ++ tcon = list_entry(tmp2, struct cifs_tcon,
3713 ++ tcon_list);
3714 ++ /* add check for persistent handle reconnect */
3715 ++ if (tcon && tcon->need_reconnect) {
3716 ++ spin_unlock(&cifs_tcp_ses_lock);
3717 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
3718 ++ spin_lock(&cifs_tcp_ses_lock);
3719 ++ }
3720 ++ }
3721 ++ }
3722 ++ spin_unlock(&cifs_tcp_ses_lock);
3723 ++ }
3724 ++
3725 ++ /* if no session, renegotiate failed above */
3726 ++ if (server->tcpStatus == CifsNeedNegotiate)
3727 ++ return -EIO;
3728 ++
3729 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
3730 + if (rc)
3731 + return rc;
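
The SMB2_echo() hunk walks the session and tcon lists under cifs_tcp_ses_lock but must drop the lock around smb2_reconnect(), which can sleep. A toy sketch of that drop-and-retake shape; the real code additionally depends on the list staying usable while the lock is released, which this sketch does not model:

  #include <stdio.h>

  static int locked;

  static void lock(void)   { locked = 1; }
  static void unlock(void) { locked = 0; }

  /* Stand-in for smb2_reconnect(): may sleep, so the spinlock
   * cannot be held across it. */
  static void reconnect(int tcon)
  {
      printf("reconnecting tcon %d (lock held: %d)\n", tcon, locked);
  }

  int main(void)
  {
      int tcons[] = { 1, 2, 3 };
      unsigned int i;

      lock();
      for (i = 0; i < 3; i++) {
          unlock();           /* drop around the blocking call */
          reconnect(tcons[i]);
          lock();             /* retake to continue the walk */
      }
      unlock();
      return 0;
  }
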
3732 +diff --git a/fs/namespace.c b/fs/namespace.c
3733 +index 0570729c87fd..33064fcbfff9 100644
3734 +--- a/fs/namespace.c
3735 ++++ b/fs/namespace.c
3736 +@@ -2401,8 +2401,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
3737 + mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
3738 + }
3739 + if (type->fs_flags & FS_USERNS_VISIBLE) {
3740 +- if (!fs_fully_visible(type, &mnt_flags))
3741 ++ if (!fs_fully_visible(type, &mnt_flags)) {
3742 ++ put_filesystem(type);
3743 + return -EPERM;
3744 ++ }
3745 + }
3746 + }
3747 +
3748 +@@ -3236,6 +3238,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3749 + if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
3750 + mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
3751 +
3752 ++ /* Don't miss readonly hidden in the superblock flags */
3753 ++ if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
3754 ++ mnt_flags |= MNT_LOCK_READONLY;
3755 ++
3756 + /* Verify the mount flags are equal to or more permissive
3757 + * than the proposed new mount.
3758 + */
3759 +@@ -3262,7 +3268,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3760 + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3761 + struct inode *inode = child->mnt_mountpoint->d_inode;
3762 + /* Only worry about locked mounts */
3763 +- if (!(mnt_flags & MNT_LOCKED))
3764 ++ if (!(child->mnt.mnt_flags & MNT_LOCKED))
3765 + continue;
3766 + /* Is the directory permanently empty? */
3767 + if (!is_empty_dir_inode(inode))
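
The first namespace.c hunk plugs a filesystem-type reference leak: get_fs_type() took a reference that the new -EPERM return path never dropped. A minimal refcount sketch of the get/put pairing; the names are illustrative:

  #include <stdio.h>

  struct fstype {
      int refcount;
  };

  static void get_fs(struct fstype *t) { t->refcount++; }
  static void put_fs(struct fstype *t) { t->refcount--; }

  static int do_new_mount_sketch(struct fstype *t, int fully_visible)
  {
      get_fs(t);
      if (!fully_visible) {
          put_fs(t);          /* the put_filesystem() this hunk adds */
          return -1;          /* -EPERM */
      }
      /* ... mount proceeds and later releases its reference ... */
      put_fs(t);
      return 0;
  }

  int main(void)
  {
      struct fstype t = { 0 };

      do_new_mount_sketch(&t, 0);
      printf("refcount after denied mount: %d (expect 0)\n", t.refcount);
      return 0;
  }
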
3768 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3769 +index 5fc2162afb67..46cfed63d229 100644
3770 +--- a/fs/nfs/dir.c
3771 ++++ b/fs/nfs/dir.c
3772 +@@ -1531,9 +1531,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
3773 + err = PTR_ERR(inode);
3774 + trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
3775 + put_nfs_open_context(ctx);
3776 ++ d_drop(dentry);
3777 + switch (err) {
3778 + case -ENOENT:
3779 +- d_drop(dentry);
3780 + d_add(dentry, NULL);
3781 + nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
3782 + break;
3783 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3784 +index 98a44157353a..fc215ab4dcd5 100644
3785 +--- a/fs/nfs/nfs4proc.c
3786 ++++ b/fs/nfs/nfs4proc.c
3787 +@@ -2854,12 +2854,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
3788 + call_close |= is_wronly;
3789 + else if (is_wronly)
3790 + calldata->arg.fmode |= FMODE_WRITE;
3791 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3792 ++ call_close |= is_rdwr;
3793 + } else if (is_rdwr)
3794 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3795 +
3796 +- if (calldata->arg.fmode == 0)
3797 +- call_close |= is_rdwr;
3798 +-
3799 + if (!nfs4_valid_open_stateid(state))
3800 + call_close = 0;
3801 + spin_unlock(&state->owner->so_lock);
3802 +diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
3803 +index 1580ea6fd64d..d08cd88155c7 100644
3804 +--- a/fs/nfsd/nfs2acl.c
3805 ++++ b/fs/nfsd/nfs2acl.c
3806 +@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
3807 + goto out;
3808 +
3809 + inode = d_inode(fh->fh_dentry);
3810 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
3811 +- error = -EOPNOTSUPP;
3812 +- goto out_errno;
3813 +- }
3814 +
3815 + error = fh_want_write(fh);
3816 + if (error)
3817 + goto out_errno;
3818 +
3819 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
3820 ++ fh_lock(fh);
3821 ++
3822 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
3823 + if (error)
3824 +- goto out_drop_write;
3825 +- error = inode->i_op->set_acl(inode, argp->acl_default,
3826 +- ACL_TYPE_DEFAULT);
3827 ++ goto out_drop_lock;
3828 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
3829 + if (error)
3830 +- goto out_drop_write;
3831 ++ goto out_drop_lock;
3832 ++
3833 ++ fh_unlock(fh);
3834 +
3835 + fh_drop_write(fh);
3836 +
3837 +@@ -131,7 +130,8 @@ out:
3838 + posix_acl_release(argp->acl_access);
3839 + posix_acl_release(argp->acl_default);
3840 + return nfserr;
3841 +-out_drop_write:
3842 ++out_drop_lock:
3843 ++ fh_unlock(fh);
3844 + fh_drop_write(fh);
3845 + out_errno:
3846 + nfserr = nfserrno(error);
3847 +diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
3848 +index 01df4cd7c753..0c890347cde3 100644
3849 +--- a/fs/nfsd/nfs3acl.c
3850 ++++ b/fs/nfsd/nfs3acl.c
3851 +@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
3852 + goto out;
3853 +
3854 + inode = d_inode(fh->fh_dentry);
3855 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
3856 +- error = -EOPNOTSUPP;
3857 +- goto out_errno;
3858 +- }
3859 +
3860 + error = fh_want_write(fh);
3861 + if (error)
3862 + goto out_errno;
3863 +
3864 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
3865 ++ fh_lock(fh);
3866 ++
3867 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
3868 + if (error)
3869 +- goto out_drop_write;
3870 +- error = inode->i_op->set_acl(inode, argp->acl_default,
3871 +- ACL_TYPE_DEFAULT);
3872 ++ goto out_drop_lock;
3873 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
3874 +
3875 +-out_drop_write:
3876 ++out_drop_lock:
3877 ++ fh_unlock(fh);
3878 + fh_drop_write(fh);
3879 + out_errno:
3880 + nfserr = nfserrno(error);
3881 +diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
3882 +index 6adabd6049b7..71292a0d6f09 100644
3883 +--- a/fs/nfsd/nfs4acl.c
3884 ++++ b/fs/nfsd/nfs4acl.c
3885 +@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
3886 + dentry = fhp->fh_dentry;
3887 + inode = d_inode(dentry);
3888 +
3889 +- if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
3890 +- return nfserr_attrnotsupp;
3891 +-
3892 + if (S_ISDIR(inode->i_mode))
3893 + flags = NFS4_ACL_DIR;
3894 +
3895 +@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
3896 + if (host_error < 0)
3897 + goto out_nfserr;
3898 +
3899 +- host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
3900 ++ fh_lock(fhp);
3901 ++
3902 ++ host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
3903 + if (host_error < 0)
3904 +- goto out_release;
3905 ++ goto out_drop_lock;
3906 +
3907 + if (S_ISDIR(inode->i_mode)) {
3908 +- host_error = inode->i_op->set_acl(inode, dpacl,
3909 +- ACL_TYPE_DEFAULT);
3910 ++ host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
3911 + }
3912 +
3913 +-out_release:
3914 ++out_drop_lock:
3915 ++ fh_unlock(fhp);
3916 ++
3917 + posix_acl_release(pacl);
3918 + posix_acl_release(dpacl);
3919 + out_nfserr:
3920 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
3921 +index e7f50c4081d6..15bdc2d48cfe 100644
3922 +--- a/fs/nfsd/nfs4callback.c
3923 ++++ b/fs/nfsd/nfs4callback.c
3924 +@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
3925 + }
3926 + }
3927 +
3928 +-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
3929 +-{
3930 +- struct rpc_xprt *xprt;
3931 +-
3932 +- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
3933 +- return rpc_create(args);
3934 +-
3935 +- xprt = args->bc_xprt->xpt_bc_xprt;
3936 +- if (xprt) {
3937 +- xprt_get(xprt);
3938 +- return rpc_create_xprt(args, xprt);
3939 +- }
3940 +-
3941 +- return rpc_create(args);
3942 +-}
3943 +-
3944 + static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
3945 + {
3946 + int maxtime = max_cb_time(clp->net);
3947 +@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
3948 + args.authflavor = ses->se_cb_sec.flavor;
3949 + }
3950 + /* Create RPC client */
3951 +- client = create_backchannel_client(&args);
3952 ++ client = rpc_create(&args);
3953 + if (IS_ERR(client)) {
3954 + dprintk("NFSD: couldn't create callback client: %ld\n",
3955 + PTR_ERR(client));
3956 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3957 +index 6b800b5b8fed..ed2f64ca49de 100644
3958 +--- a/fs/nfsd/nfs4state.c
3959 ++++ b/fs/nfsd/nfs4state.c
3960 +@@ -3452,6 +3452,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3961 + struct nfs4_openowner *oo = open->op_openowner;
3962 + struct nfs4_ol_stateid *retstp = NULL;
3963 +
3964 ++ /* We are moving these outside of the spinlocks to avoid the warnings */
3965 ++ mutex_init(&stp->st_mutex);
3966 ++ mutex_lock(&stp->st_mutex);
3967 ++
3968 + spin_lock(&oo->oo_owner.so_client->cl_lock);
3969 + spin_lock(&fp->fi_lock);
3970 +
3971 +@@ -3467,13 +3471,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3972 + stp->st_access_bmap = 0;
3973 + stp->st_deny_bmap = 0;
3974 + stp->st_openstp = NULL;
3975 +- init_rwsem(&stp->st_rwsem);
3976 + list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3977 + list_add(&stp->st_perfile, &fp->fi_stateids);
3978 +
3979 + out_unlock:
3980 + spin_unlock(&fp->fi_lock);
3981 + spin_unlock(&oo->oo_owner.so_client->cl_lock);
3982 ++ if (retstp) {
3983 ++ mutex_lock(&retstp->st_mutex);
3984 ++ /* Not that we need to, just for neatness */
3985 ++ mutex_unlock(&stp->st_mutex);
3986 ++ }
3987 + return retstp;
3988 + }
3989 +
3990 +@@ -4300,32 +4308,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3991 + */
3992 + if (stp) {
3993 + /* Stateid was found, this is an OPEN upgrade */
3994 +- down_read(&stp->st_rwsem);
3995 ++ mutex_lock(&stp->st_mutex);
3996 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3997 + if (status) {
3998 +- up_read(&stp->st_rwsem);
3999 ++ mutex_unlock(&stp->st_mutex);
4000 + goto out;
4001 + }
4002 + } else {
4003 + stp = open->op_stp;
4004 + open->op_stp = NULL;
4005 ++ /*
4006 ++ * init_open_stateid() either returns a locked stateid
4007 ++ * it found, or initializes and locks the new one we passed in
4008 ++ */
4009 + swapstp = init_open_stateid(stp, fp, open);
4010 + if (swapstp) {
4011 + nfs4_put_stid(&stp->st_stid);
4012 + stp = swapstp;
4013 +- down_read(&stp->st_rwsem);
4014 + status = nfs4_upgrade_open(rqstp, fp, current_fh,
4015 + stp, open);
4016 + if (status) {
4017 +- up_read(&stp->st_rwsem);
4018 ++ mutex_unlock(&stp->st_mutex);
4019 + goto out;
4020 + }
4021 + goto upgrade_out;
4022 + }
4023 +- down_read(&stp->st_rwsem);
4024 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4025 + if (status) {
4026 +- up_read(&stp->st_rwsem);
4027 ++ mutex_unlock(&stp->st_mutex);
4028 + release_open_stateid(stp);
4029 + goto out;
4030 + }
4031 +@@ -4337,7 +4347,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
4032 + }
4033 + upgrade_out:
4034 + nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4035 +- up_read(&stp->st_rwsem);
4036 ++ mutex_unlock(&stp->st_mutex);
4037 +
4038 + if (nfsd4_has_session(&resp->cstate)) {
4039 + if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4040 +@@ -4950,12 +4960,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4041 + * revoked delegations are kept only for free_stateid.
4042 + */
4043 + return nfserr_bad_stateid;
4044 +- down_write(&stp->st_rwsem);
4045 ++ mutex_lock(&stp->st_mutex);
4046 + status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4047 + if (status == nfs_ok)
4048 + status = nfs4_check_fh(current_fh, &stp->st_stid);
4049 + if (status != nfs_ok)
4050 +- up_write(&stp->st_rwsem);
4051 ++ mutex_unlock(&stp->st_mutex);
4052 + return status;
4053 + }
4054 +
4055 +@@ -5003,7 +5013,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
4056 + return status;
4057 + oo = openowner(stp->st_stateowner);
4058 + if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4059 +- up_write(&stp->st_rwsem);
4060 ++ mutex_unlock(&stp->st_mutex);
4061 + nfs4_put_stid(&stp->st_stid);
4062 + return nfserr_bad_stateid;
4063 + }
4064 +@@ -5035,12 +5045,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4065 + oo = openowner(stp->st_stateowner);
4066 + status = nfserr_bad_stateid;
4067 + if (oo->oo_flags & NFS4_OO_CONFIRMED) {
4068 +- up_write(&stp->st_rwsem);
4069 ++ mutex_unlock(&stp->st_mutex);
4070 + goto put_stateid;
4071 + }
4072 + oo->oo_flags |= NFS4_OO_CONFIRMED;
4073 + nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
4074 +- up_write(&stp->st_rwsem);
4075 ++ mutex_unlock(&stp->st_mutex);
4076 + dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4077 + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4078 +
4079 +@@ -5116,7 +5126,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
4080 + nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
4081 + status = nfs_ok;
4082 + put_stateid:
4083 +- up_write(&stp->st_rwsem);
4084 ++ mutex_unlock(&stp->st_mutex);
4085 + nfs4_put_stid(&stp->st_stid);
4086 + out:
4087 + nfsd4_bump_seqid(cstate, status);
4088 +@@ -5169,7 +5179,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4089 + if (status)
4090 + goto out;
4091 + nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
4092 +- up_write(&stp->st_rwsem);
4093 ++ mutex_unlock(&stp->st_mutex);
4094 +
4095 + nfsd4_close_open_stateid(stp);
4096 +
4097 +@@ -5395,7 +5405,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
4098 + stp->st_access_bmap = 0;
4099 + stp->st_deny_bmap = open_stp->st_deny_bmap;
4100 + stp->st_openstp = open_stp;
4101 +- init_rwsem(&stp->st_rwsem);
4102 ++ mutex_init(&stp->st_mutex);
4103 + list_add(&stp->st_locks, &open_stp->st_locks);
4104 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4105 + spin_lock(&fp->fi_lock);
4106 +@@ -5564,7 +5574,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4107 + &open_stp, nn);
4108 + if (status)
4109 + goto out;
4110 +- up_write(&open_stp->st_rwsem);
4111 ++ mutex_unlock(&open_stp->st_mutex);
4112 + open_sop = openowner(open_stp->st_stateowner);
4113 + status = nfserr_bad_stateid;
4114 + if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4115 +@@ -5573,7 +5583,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4116 + status = lookup_or_create_lock_state(cstate, open_stp, lock,
4117 + &lock_stp, &new);
4118 + if (status == nfs_ok)
4119 +- down_write(&lock_stp->st_rwsem);
4120 ++ mutex_lock(&lock_stp->st_mutex);
4121 + } else {
4122 + status = nfs4_preprocess_seqid_op(cstate,
4123 + lock->lk_old_lock_seqid,
4124 +@@ -5677,7 +5687,7 @@ out:
4125 + seqid_mutating_err(ntohl(status)))
4126 + lock_sop->lo_owner.so_seqid++;
4127 +
4128 +- up_write(&lock_stp->st_rwsem);
4129 ++ mutex_unlock(&lock_stp->st_mutex);
4130 +
4131 + /*
4132 + * If this is a new, never-before-used stateid, and we are
4133 +@@ -5847,7 +5857,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4134 + fput:
4135 + fput(filp);
4136 + put_stateid:
4137 +- up_write(&stp->st_rwsem);
4138 ++ mutex_unlock(&stp->st_mutex);
4139 + nfs4_put_stid(&stp->st_stid);
4140 + out:
4141 + nfsd4_bump_seqid(cstate, status);
4142 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
4143 +index 77fdf4de91ba..77860b75da9d 100644
4144 +--- a/fs/nfsd/state.h
4145 ++++ b/fs/nfsd/state.h
4146 +@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
4147 + unsigned char st_access_bmap;
4148 + unsigned char st_deny_bmap;
4149 + struct nfs4_ol_stateid *st_openstp;
4150 +- struct rw_semaphore st_rwsem;
4151 ++ struct mutex st_mutex;
4152 + };
4153 +
4154 + static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
4155 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
4156 +index a2b1d7ce3e1a..ba5ef733951f 100644
4157 +--- a/fs/overlayfs/dir.c
4158 ++++ b/fs/overlayfs/dir.c
4159 +@@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
4160 + struct dentry *upper;
4161 + struct dentry *opaquedir = NULL;
4162 + int err;
4163 ++ int flags = 0;
4164 +
4165 + if (WARN_ON(!workdir))
4166 + return -EROFS;
4167 +@@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
4168 + if (err)
4169 + goto out_dput;
4170 +
4171 +- whiteout = ovl_whiteout(workdir, dentry);
4172 +- err = PTR_ERR(whiteout);
4173 +- if (IS_ERR(whiteout))
4174 ++ upper = lookup_one_len(dentry->d_name.name, upperdir,
4175 ++ dentry->d_name.len);
4176 ++ err = PTR_ERR(upper);
4177 ++ if (IS_ERR(upper))
4178 + goto out_unlock;
4179 +
4180 +- upper = ovl_dentry_upper(dentry);
4181 +- if (!upper) {
4182 +- upper = lookup_one_len(dentry->d_name.name, upperdir,
4183 +- dentry->d_name.len);
4184 +- err = PTR_ERR(upper);
4185 +- if (IS_ERR(upper))
4186 +- goto kill_whiteout;
4187 +-
4188 +- err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
4189 +- dput(upper);
4190 +- if (err)
4191 +- goto kill_whiteout;
4192 +- } else {
4193 +- int flags = 0;
4194 ++ err = -ESTALE;
4195 ++ if ((opaquedir && upper != opaquedir) ||
4196 ++ (!opaquedir && ovl_dentry_upper(dentry) &&
4197 ++ upper != ovl_dentry_upper(dentry))) {
4198 ++ goto out_dput_upper;
4199 ++ }
4200 +
4201 +- if (opaquedir)
4202 +- upper = opaquedir;
4203 +- err = -ESTALE;
4204 +- if (upper->d_parent != upperdir)
4205 +- goto kill_whiteout;
4206 ++ whiteout = ovl_whiteout(workdir, dentry);
4207 ++ err = PTR_ERR(whiteout);
4208 ++ if (IS_ERR(whiteout))
4209 ++ goto out_dput_upper;
4210 +
4211 +- if (is_dir)
4212 +- flags |= RENAME_EXCHANGE;
4213 ++ if (d_is_dir(upper))
4214 ++ flags = RENAME_EXCHANGE;
4215 +
4216 +- err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
4217 +- if (err)
4218 +- goto kill_whiteout;
4219 ++ err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
4220 ++ if (err)
4221 ++ goto kill_whiteout;
4222 ++ if (flags)
4223 ++ ovl_cleanup(wdir, upper);
4224 +
4225 +- if (is_dir)
4226 +- ovl_cleanup(wdir, upper);
4227 +- }
4228 + ovl_dentry_version_inc(dentry->d_parent);
4229 + out_d_drop:
4230 + d_drop(dentry);
4231 + dput(whiteout);
4232 ++out_dput_upper:
4233 ++ dput(upper);
4234 + out_unlock:
4235 + unlock_rename(workdir, upperdir);
4236 + out_dput:
4237 +@@ -596,21 +590,25 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
4238 + {
4239 + struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
4240 + struct inode *dir = upperdir->d_inode;
4241 +- struct dentry *upper = ovl_dentry_upper(dentry);
4242 ++ struct dentry *upper;
4243 + int err;
4244 +
4245 + mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
4246 ++ upper = lookup_one_len(dentry->d_name.name, upperdir,
4247 ++ dentry->d_name.len);
4248 ++ err = PTR_ERR(upper);
4249 ++ if (IS_ERR(upper))
4250 ++ goto out_unlock;
4251 ++
4252 + err = -ESTALE;
4253 +- if (upper->d_parent == upperdir) {
4254 +- /* Don't let d_delete() think it can reset d_inode */
4255 +- dget(upper);
4256 ++ if (upper == ovl_dentry_upper(dentry)) {
4257 + if (is_dir)
4258 + err = vfs_rmdir(dir, upper);
4259 + else
4260 + err = vfs_unlink(dir, upper, NULL);
4261 +- dput(upper);
4262 + ovl_dentry_version_inc(dentry->d_parent);
4263 + }
4264 ++ dput(upper);
4265 +
4266 + /*
4267 + * Keeping this dentry hashed would mean having to release
4268 +@@ -620,6 +618,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
4269 + */
4270 + if (!err)
4271 + d_drop(dentry);
4272 ++out_unlock:
4273 + mutex_unlock(&dir->i_mutex);
4274 +
4275 + return err;
4276 +@@ -840,29 +839,39 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
4277 +
4278 + trap = lock_rename(new_upperdir, old_upperdir);
4279 +
4280 +- olddentry = ovl_dentry_upper(old);
4281 +- newdentry = ovl_dentry_upper(new);
4282 +- if (newdentry) {
4283 ++
4284 ++ olddentry = lookup_one_len(old->d_name.name, old_upperdir,
4285 ++ old->d_name.len);
4286 ++ err = PTR_ERR(olddentry);
4287 ++ if (IS_ERR(olddentry))
4288 ++ goto out_unlock;
4289 ++
4290 ++ err = -ESTALE;
4291 ++ if (olddentry != ovl_dentry_upper(old))
4292 ++ goto out_dput_old;
4293 ++
4294 ++ newdentry = lookup_one_len(new->d_name.name, new_upperdir,
4295 ++ new->d_name.len);
4296 ++ err = PTR_ERR(newdentry);
4297 ++ if (IS_ERR(newdentry))
4298 ++ goto out_dput_old;
4299 ++
4300 ++ err = -ESTALE;
4301 ++ if (ovl_dentry_upper(new)) {
4302 + if (opaquedir) {
4303 +- newdentry = opaquedir;
4304 +- opaquedir = NULL;
4305 ++ if (newdentry != opaquedir)
4306 ++ goto out_dput;
4307 + } else {
4308 +- dget(newdentry);
4309 ++ if (newdentry != ovl_dentry_upper(new))
4310 ++ goto out_dput;
4311 + }
4312 + } else {
4313 + new_create = true;
4314 +- newdentry = lookup_one_len(new->d_name.name, new_upperdir,
4315 +- new->d_name.len);
4316 +- err = PTR_ERR(newdentry);
4317 +- if (IS_ERR(newdentry))
4318 +- goto out_unlock;
4319 ++ if (!d_is_negative(newdentry) &&
4320 ++ (!new_opaque || !ovl_is_whiteout(newdentry)))
4321 ++ goto out_dput;
4322 + }
4323 +
4324 +- err = -ESTALE;
4325 +- if (olddentry->d_parent != old_upperdir)
4326 +- goto out_dput;
4327 +- if (newdentry->d_parent != new_upperdir)
4328 +- goto out_dput;
4329 + if (olddentry == trap)
4330 + goto out_dput;
4331 + if (newdentry == trap)
4332 +@@ -925,6 +934,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
4333 +
4334 + out_dput:
4335 + dput(newdentry);
4336 ++out_dput_old:
4337 ++ dput(olddentry);
4338 + out_unlock:
4339 + unlock_rename(new_upperdir, old_upperdir);
4340 + out_revert_creds:
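
All three overlayfs/dir.c rewrites follow one pattern: re-look the name up under the directory lock and compare the result with the cached upper dentry by identity, returning -ESTALE on mismatch instead of trusting possibly stale state. A pointer-identity sketch of that check; lookup_one_len_stub() is a hypothetical stand-in:

  #include <stdio.h>

  #define ESTALE 116

  struct dentry {
      const char *name;
  };

  static struct dentry the_upper = { "victim" };

  /* What the directory currently holds under this name. */
  static struct dentry *lookup_one_len_stub(const char *name)
  {
      (void)name;
      return &the_upper;
  }

  static int remove_upper_sketch(struct dentry *cached)
  {
      struct dentry *upper = lookup_one_len_stub("victim");

      if (upper != cached)
          return -ESTALE;     /* raced with copy-up or rename */
      printf("unlinking %s\n", upper->name);
      return 0;
  }

  int main(void)
  {
      struct dentry stale = { "victim(old)" };

      if (remove_upper_sketch(&the_upper) == 0)
          printf("fresh dentry: ok\n");
      if (remove_upper_sketch(&stale) == -ESTALE)
          printf("stale dentry: -ESTALE as expected\n");
      return 0;
  }
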
4341 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
4342 +index 05ac9a95e881..0597820f5d9d 100644
4343 +--- a/fs/overlayfs/inode.c
4344 ++++ b/fs/overlayfs/inode.c
4345 +@@ -412,12 +412,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
4346 + if (!inode)
4347 + return NULL;
4348 +
4349 +- mode &= S_IFMT;
4350 +-
4351 + inode->i_ino = get_next_ino();
4352 + inode->i_mode = mode;
4353 + inode->i_flags |= S_NOATIME | S_NOCMTIME;
4354 +
4355 ++ mode &= S_IFMT;
4356 + switch (mode) {
4357 + case S_IFDIR:
4358 + inode->i_private = oe;
4359 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
4360 +index e17154aeaae4..735e1d49b301 100644
4361 +--- a/fs/overlayfs/overlayfs.h
4362 ++++ b/fs/overlayfs/overlayfs.h
4363 +@@ -181,6 +181,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
4364 + {
4365 + to->i_uid = from->i_uid;
4366 + to->i_gid = from->i_gid;
4367 ++ to->i_mode = from->i_mode;
4368 + }
4369 +
4370 + /* dir.c */
4371 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
4372 +index 4adde1e2cbec..34bd1bd354e6 100644
4373 +--- a/fs/posix_acl.c
4374 ++++ b/fs/posix_acl.c
4375 +@@ -788,6 +788,28 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
4376 + return error;
4377 + }
4378 +
4379 ++int
4380 ++set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
4381 ++{
4382 ++ if (!IS_POSIXACL(inode))
4383 ++ return -EOPNOTSUPP;
4384 ++ if (!inode->i_op->set_acl)
4385 ++ return -EOPNOTSUPP;
4386 ++
4387 ++ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
4388 ++ return acl ? -EACCES : 0;
4389 ++ if (!inode_owner_or_capable(inode))
4390 ++ return -EPERM;
4391 ++
4392 ++ if (acl) {
4393 ++ int ret = posix_acl_valid(acl);
4394 ++ if (ret)
4395 ++ return ret;
4396 ++ }
4397 ++ return inode->i_op->set_acl(inode, acl, type);
4398 ++}
4399 ++EXPORT_SYMBOL(set_posix_acl);
4400 ++
4401 + static int
4402 + posix_acl_xattr_set(const struct xattr_handler *handler,
4403 + struct dentry *dentry, const char *name,
4404 +@@ -799,30 +821,13 @@ posix_acl_xattr_set(const struct xattr_handler *handler,
4405 +
4406 + if (strcmp(name, "") != 0)
4407 + return -EINVAL;
4408 +- if (!IS_POSIXACL(inode))
4409 +- return -EOPNOTSUPP;
4410 +- if (!inode->i_op->set_acl)
4411 +- return -EOPNOTSUPP;
4412 +-
4413 +- if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
4414 +- return value ? -EACCES : 0;
4415 +- if (!inode_owner_or_capable(inode))
4416 +- return -EPERM;
4417 +
4418 + if (value) {
4419 + acl = posix_acl_from_xattr(&init_user_ns, value, size);
4420 + if (IS_ERR(acl))
4421 + return PTR_ERR(acl);
4422 +-
4423 +- if (acl) {
4424 +- ret = posix_acl_valid(acl);
4425 +- if (ret)
4426 +- goto out;
4427 +- }
4428 + }
4429 +-
4430 +- ret = inode->i_op->set_acl(inode, acl, handler->flags);
4431 +-out:
4432 ++ ret = set_posix_acl(inode, handler->flags, acl);
4433 + posix_acl_release(acl);
4434 + return ret;
4435 + }
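
The posix_acl.c hunk factors the permission and validity checks out of the xattr handler into set_posix_acl(), which the nfsd hunks earlier in this patch now call so they can no longer reach ->set_acl() unchecked. A condensed model of the check order; the struct fields stand in for the real inode predicates:

  #include <stdio.h>

  #define EOPNOTSUPP 95
  #define EACCES     13
  #define EPERM       1

  struct inode_sketch {
      int posixacl;   /* IS_POSIXACL() */
      int has_op;     /* inode->i_op->set_acl != NULL */
      int is_dir;
      int owner;      /* inode_owner_or_capable() */
  };

  static int set_posix_acl_sketch(struct inode_sketch *inode,
                                  int type_default, int acl_present)
  {
      if (!inode->posixacl || !inode->has_op)
          return -EOPNOTSUPP;
      if (type_default && !inode->is_dir)
          return acl_present ? -EACCES : 0;
      if (!inode->owner)
          return -EPERM;
      return 0;   /* would call inode->i_op->set_acl() here */
  }

  int main(void)
  {
      struct inode_sketch file = { 1, 1, 0, 1 };

      printf("default ACL on a file: %d (expect -%d)\n",
             set_posix_acl_sketch(&file, 1, 1), EACCES);
      return 0;
  }
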
4436 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
4437 +index 0edc12856147..b895af7d8d80 100644
4438 +--- a/fs/ubifs/file.c
4439 ++++ b/fs/ubifs/file.c
4440 +@@ -52,6 +52,7 @@
4441 + #include "ubifs.h"
4442 + #include <linux/mount.h>
4443 + #include <linux/slab.h>
4444 ++#include <linux/migrate.h>
4445 +
4446 + static int read_block(struct inode *inode, void *addr, unsigned int block,
4447 + struct ubifs_data_node *dn)
4448 +@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
4449 + return ret;
4450 + }
4451 +
4452 ++#ifdef CONFIG_MIGRATION
4453 ++static int ubifs_migrate_page(struct address_space *mapping,
4454 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
4455 ++{
4456 ++ int rc;
4457 ++
4458 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
4459 ++ if (rc != MIGRATEPAGE_SUCCESS)
4460 ++ return rc;
4461 ++
4462 ++ if (PagePrivate(page)) {
4463 ++ ClearPagePrivate(page);
4464 ++ SetPagePrivate(newpage);
4465 ++ }
4466 ++
4467 ++ migrate_page_copy(newpage, page);
4468 ++ return MIGRATEPAGE_SUCCESS;
4469 ++}
4470 ++#endif
4471 ++
4472 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
4473 + {
4474 + /*
4475 +@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
4476 + .write_end = ubifs_write_end,
4477 + .invalidatepage = ubifs_invalidatepage,
4478 + .set_page_dirty = ubifs_set_page_dirty,
4479 ++#ifdef CONFIG_MIGRATION
4480 ++ .migratepage = ubifs_migrate_page,
4481 ++#endif
4482 + .releasepage = ubifs_releasepage,
4483 + };
4484 +
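
ubifs_migrate_page() above follows the standard migratepage recipe: move the mapping, transfer the PagePrivate flag to the new page, then copy the contents. A toy sketch of the private-flag handoff; struct page_sketch is obviously not struct page:

  #include <stdio.h>

  struct page_sketch {
      int private_flag;
      char data[8];
  };

  static void migrate(struct page_sketch *newp, struct page_sketch *oldp)
  {
      int i;

      if (oldp->private_flag) {       /* PagePrivate(page) */
          oldp->private_flag = 0;     /* ClearPagePrivate(page) */
          newp->private_flag = 1;     /* SetPagePrivate(newpage) */
      }
      for (i = 0; i < 8; i++)         /* migrate_page_copy() */
          newp->data[i] = oldp->data[i];
  }

  int main(void)
  {
      struct page_sketch oldp = { 1, "ubifs!!" }, newp = { 0, "" };

      migrate(&newp, &oldp);
      printf("new page: private=%d data=%s\n",
             newp.private_flag, newp.data);
      return 0;
  }
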
4485 +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
4486 +index 7d633f19e38a..1885fc44b1bc 100644
4487 +--- a/include/asm-generic/qspinlock.h
4488 ++++ b/include/asm-generic/qspinlock.h
4489 +@@ -21,37 +21,33 @@
4490 + #include <asm-generic/qspinlock_types.h>
4491 +
4492 + /**
4493 ++ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
4494 ++ * @lock : Pointer to queued spinlock structure
4495 ++ *
4496 ++ * There is a very slight possibility of live-lock if the lockers keep coming
4497 ++ * and the waiter is just unfortunate enough to not see any unlock state.
4498 ++ */
4499 ++#ifndef queued_spin_unlock_wait
4500 ++extern void queued_spin_unlock_wait(struct qspinlock *lock);
4501 ++#endif
4502 ++
4503 ++/**
4504 + * queued_spin_is_locked - is the spinlock locked?
4505 + * @lock: Pointer to queued spinlock structure
4506 + * Return: 1 if it is locked, 0 otherwise
4507 + */
4508 ++#ifndef queued_spin_is_locked
4509 + static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
4510 + {
4511 + /*
4512 +- * queued_spin_lock_slowpath() can ACQUIRE the lock before
4513 +- * issuing the unordered store that sets _Q_LOCKED_VAL.
4514 +- *
4515 +- * See both smp_cond_acquire() sites for more detail.
4516 +- *
4517 +- * This however means that in code like:
4518 +- *
4519 +- * spin_lock(A) spin_lock(B)
4520 +- * spin_unlock_wait(B) spin_is_locked(A)
4521 +- * do_something() do_something()
4522 +- *
4523 +- * Both CPUs can end up running do_something() because the store
4524 +- * setting _Q_LOCKED_VAL will pass through the loads in
4525 +- * spin_unlock_wait() and/or spin_is_locked().
4526 ++ * See queued_spin_unlock_wait().
4527 + *
4528 +- * Avoid this by issuing a full memory barrier between the spin_lock()
4529 +- * and the loads in spin_unlock_wait() and spin_is_locked().
4530 +- *
4531 +- * Note that regular mutual exclusion doesn't care about this
4532 +- * delayed store.
4533 ++ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
4534 ++ * isn't immediately observable.
4535 + */
4536 +- smp_mb();
4537 +- return atomic_read(&lock->val) & _Q_LOCKED_MASK;
4538 ++ return atomic_read(&lock->val);
4539 + }
4540 ++#endif
4541 +
4542 + /**
4543 + * queued_spin_value_unlocked - is the spinlock structure unlocked?
4544 +@@ -121,21 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
4545 + }
4546 + #endif
4547 +
4548 +-/**
4549 +- * queued_spin_unlock_wait - wait until current lock holder releases the lock
4550 +- * @lock : Pointer to queued spinlock structure
4551 +- *
4552 +- * There is a very slight possibility of live-lock if the lockers keep coming
4553 +- * and the waiter is just unfortunate enough to not see any unlock state.
4554 +- */
4555 +-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
4556 +-{
4557 +- /* See queued_spin_is_locked() */
4558 +- smp_mb();
4559 +- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
4560 +- cpu_relax();
4561 +-}
4562 +-
4563 + #ifndef virt_spin_lock
4564 + static __always_inline bool virt_spin_lock(struct qspinlock *lock)
4565 + {
4566 +diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
4567 +index c768ddfbe53c..b7bfa513e6ed 100644
4568 +--- a/include/drm/ttm/ttm_bo_api.h
4569 ++++ b/include/drm/ttm/ttm_bo_api.h
4570 +@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
4571 + */
4572 + extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
4573 + bool interruptible, bool no_wait);
4574 ++
4575 ++/**
4576 ++ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
4577 ++ *
4578 ++ * @placement: Proposed placement for the buffer object
4579 ++ * @mem: The struct ttm_mem_reg indicating the region where the bo resides
4580 ++ * @new_flags: Describes compatible placement found
4581 ++ *
4582 ++ * Returns true if the placement is compatible
4583 ++ */
4584 ++extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
4585 ++ struct ttm_mem_reg *mem,
4586 ++ uint32_t *new_flags);
4587 ++
4588 + /**
4589 + * ttm_bo_validate
4590 + *
4591 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4592 +index d81746d3b2da..8d7151eb6ceb 100644
4593 +--- a/include/linux/dcache.h
4594 ++++ b/include/linux/dcache.h
4595 +@@ -603,5 +603,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
4596 + return inode;
4597 + }
4598 +
4599 ++/**
4600 ++ * d_real_inode - Return the real inode
4601 ++ * @dentry: The dentry to query
4602 ++ *
4603 ++ * If dentry is on a union/overlay, then return the underlying, real inode.
4604 ++ * Otherwise return d_inode().
4605 ++ */
4606 ++static inline struct inode *d_real_inode(struct dentry *dentry)
4607 ++{
4608 ++ return d_backing_inode(d_real(dentry));
4609 ++}
4610 ++
4611 +
4612 + #endif /* __LINUX_DCACHE_H */
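The new helper names a pattern that callers otherwise open-code; the af_unix
hunk later in this patch switches to it. On overlayfs, d_backing_inode()
alone returns the overlay inode rather than the one of the underlying
filesystem. A minimal before/after sketch:

	/* before: on overlayfs this yields the overlay inode */
	inode = d_backing_inode(dentry);

	/* after: resolve through the overlay to the real inode */
	inode = d_real_inode(dentry);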
4613 +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
4614 +index 0536524bb9eb..68904469fba1 100644
4615 +--- a/include/linux/jump_label.h
4616 ++++ b/include/linux/jump_label.h
4617 +@@ -117,13 +117,18 @@ struct module;
4618 +
4619 + #include <linux/atomic.h>
4620 +
4621 ++#ifdef HAVE_JUMP_LABEL
4622 ++
4623 + static inline int static_key_count(struct static_key *key)
4624 + {
4625 +- return atomic_read(&key->enabled);
4626 ++ /*
4627 ++ * -1 means the first static_key_slow_inc() is in progress.
4628 ++ * static_key_enabled() must return true, so return 1 here.
4629 ++ */
4630 ++ int n = atomic_read(&key->enabled);
4631 ++ return n >= 0 ? n : 1;
4632 + }
4633 +
4634 +-#ifdef HAVE_JUMP_LABEL
4635 +-
4636 + #define JUMP_TYPE_FALSE 0UL
4637 + #define JUMP_TYPE_TRUE 1UL
4638 + #define JUMP_TYPE_MASK 1UL
4639 +@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
4640 +
4641 + #else /* !HAVE_JUMP_LABEL */
4642 +
4643 ++static inline int static_key_count(struct static_key *key)
4644 ++{
4645 ++ return atomic_read(&key->enabled);
4646 ++}
4647 ++
4648 + static __always_inline void jump_label_init(void)
4649 + {
4650 + static_key_initialized = true;
4651 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4652 +index 1716f9395010..d443d9ab0236 100644
4653 +--- a/include/linux/skbuff.h
4654 ++++ b/include/linux/skbuff.h
4655 +@@ -982,6 +982,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
4656 + }
4657 +
4658 + void __skb_get_hash(struct sk_buff *skb);
4659 ++u32 __skb_get_hash_symmetric(struct sk_buff *skb);
4660 + u32 skb_get_poff(const struct sk_buff *skb);
4661 + u32 __skb_get_poff(const struct sk_buff *skb, void *data,
4662 + const struct flow_keys *keys, int hlen);
4663 +@@ -2773,6 +2774,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
4664 + }
4665 +
4666 + /**
4667 ++ * skb_push_rcsum - push skb and update receive checksum
4668 ++ * @skb: buffer to update
4669 ++ * @len: length of data pushed
4670 ++ *
4671 ++ * This function performs an skb_push on the packet and updates
4672 ++ * the CHECKSUM_COMPLETE checksum. It should be used on
4673 ++ * receive path processing instead of skb_push unless you know
4674 ++ * that the checksum difference is zero (e.g., a valid IP header)
4675 ++ * or you are setting ip_summed to CHECKSUM_NONE.
4676 ++ */
4677 ++static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
4678 ++ unsigned int len)
4679 ++{
4680 ++ skb_push(skb, len);
4681 ++ skb_postpush_rcsum(skb, skb->data, len);
4682 ++ return skb->data;
4683 ++}
4684 ++
4685 ++/**
4686 + * pskb_trim_rcsum - trim received skb and update checksum
4687 + * @skb: buffer to trim
4688 + * @len: new length
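Promoting skb_push_rcsum() to the header makes it usable outside
net/core/skbuff.c; the act_mirred hunk below is the first external user. A
short usage sketch for the receive path, where skb->csum may carry a
CHECKSUM_COMPLETE value that a bare skb_push() would silently invalidate:

	/* re-expose the MAC header before handing the clone onwards */
	skb_push_rcsum(skb, skb->mac_len);	/* push, then fold the header
						 * bytes back into skb->csum */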
4689 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
4690 +index 131032f15cc1..9b6027c51736 100644
4691 +--- a/include/linux/sunrpc/clnt.h
4692 ++++ b/include/linux/sunrpc/clnt.h
4693 +@@ -135,8 +135,6 @@ struct rpc_create_args {
4694 + #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
4695 +
4696 + struct rpc_clnt *rpc_create(struct rpc_create_args *args);
4697 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
4698 +- struct rpc_xprt *xprt);
4699 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
4700 + const struct rpc_program *, u32);
4701 + void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
4702 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
4703 +index 966889a20ea3..e479033bd782 100644
4704 +--- a/include/linux/usb/ehci_def.h
4705 ++++ b/include/linux/usb/ehci_def.h
4706 +@@ -180,11 +180,11 @@ struct ehci_regs {
4707 + * PORTSCx
4708 + */
4709 + /* HOSTPC: offset 0x84 */
4710 +- u32 hostpc[1]; /* HOSTPC extension */
4711 ++ u32 hostpc[0]; /* HOSTPC extension */
4712 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
4713 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
4714 +
4715 +- u32 reserved5[16];
4716 ++ u32 reserved5[17];
4717 +
4718 + /* USBMODE_EX: offset 0xc8 */
4719 + u32 usbmode_ex; /* USB Device mode extension */
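The two size changes cancel out, so the register layout is unchanged; the
point is only that a zero-length hostpc[] makes per-port accesses such as
hostpc[port] well-formed. A quick offset check:

	old: 0x84 + 1 * 4 (hostpc[1]) + 16 * 4 (reserved5[16]) = 0xc8
	new: 0x84 + 0 * 4 (hostpc[0]) + 17 * 4 (reserved5[17]) = 0xc8

Either way usbmode_ex stays at its documented offset 0xc8.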
4720 +diff --git a/kernel/jump_label.c b/kernel/jump_label.c
4721 +index 05254eeb4b4e..4b353e0be121 100644
4722 +--- a/kernel/jump_label.c
4723 ++++ b/kernel/jump_label.c
4724 +@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
4725 +
4726 + void static_key_slow_inc(struct static_key *key)
4727 + {
4728 ++ int v, v1;
4729 ++
4730 + STATIC_KEY_CHECK_USE();
4731 +- if (atomic_inc_not_zero(&key->enabled))
4732 +- return;
4733 ++
4734 ++ /*
4735 ++ * Careful if we get concurrent static_key_slow_inc() calls;
4736 ++ * later calls must wait for the first one to _finish_ the
4737 ++ * jump_label_update() process. At the same time, however,
4738 ++ * the jump_label_update() call below wants to see
4739 ++ * static_key_enabled(&key) for jumps to be updated properly.
4740 ++ *
4741 ++ * So give a special meaning to negative key->enabled: it sends
4742 ++ * static_key_slow_inc() down the slow path, and it is non-zero
4743 ++ * so it counts as "enabled" in jump_label_update(). Note that
4744 ++ * atomic_inc_unless_negative() checks >= 0, so roll our own.
4745 ++ */
4746 ++ for (v = atomic_read(&key->enabled); v > 0; v = v1) {
4747 ++ v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
4748 ++ if (likely(v1 == v))
4749 ++ return;
4750 ++ }
4751 +
4752 + jump_label_lock();
4753 +- if (atomic_inc_return(&key->enabled) == 1)
4754 ++ if (atomic_read(&key->enabled) == 0) {
4755 ++ atomic_set(&key->enabled, -1);
4756 + jump_label_update(key);
4757 ++ atomic_set(&key->enabled, 1);
4758 ++ } else {
4759 ++ atomic_inc(&key->enabled);
4760 ++ }
4761 + jump_label_unlock();
4762 + }
4763 + EXPORT_SYMBOL_GPL(static_key_slow_inc);
4764 +@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
4765 + static void __static_key_slow_dec(struct static_key *key,
4766 + unsigned long rate_limit, struct delayed_work *work)
4767 + {
4768 ++ /*
4769 ++ * The negative count check is valid even when a negative
4770 ++ * key->enabled is in use by static_key_slow_inc(); a
4771 ++ * __static_key_slow_dec() before the first static_key_slow_inc()
4772 ++ * returns is unbalanced, because all other static_key_slow_inc()
4773 ++ * instances block while the update is in progress.
4774 ++ */
4775 + if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
4776 + WARN(atomic_read(&key->enabled) < 0,
4777 + "jump label: negative count!\n");
4778 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
4779 +index 0551c219c40e..89350f924c85 100644
4780 +--- a/kernel/locking/mutex.c
4781 ++++ b/kernel/locking/mutex.c
4782 +@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
4783 + if (!hold_ctx)
4784 + return 0;
4785 +
4786 +- if (unlikely(ctx == hold_ctx))
4787 +- return -EALREADY;
4788 +-
4789 + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
4790 + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
4791 + #ifdef CONFIG_DEBUG_MUTEXES
4792 +@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4793 + unsigned long flags;
4794 + int ret;
4795 +
4796 ++ if (use_ww_ctx) {
4797 ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
4798 ++ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
4799 ++ return -EALREADY;
4800 ++ }
4801 ++
4802 + preempt_disable();
4803 + mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
4804 +
4805 +diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
4806 +index 87e9ce6a63c5..8173bc7fec92 100644
4807 +--- a/kernel/locking/qspinlock.c
4808 ++++ b/kernel/locking/qspinlock.c
4809 +@@ -255,6 +255,66 @@ static __always_inline void __pv_wait_head(struct qspinlock *lock,
4810 + #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
4811 + #endif
4812 +
4813 ++/*
4814 ++ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
4815 ++ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
4816 ++ *
4817 ++ * This means that the store can be delayed, but no later than the
4818 ++ * store-release from the unlock. Therefore, simply observing
4819 ++ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
4820 ++ *
4821 ++ * There are two paths that can issue the unordered store:
4822 ++ *
4823 ++ * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
4824 ++ *
4825 ++ * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
4826 ++ * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
4827 ++ *
4828 ++ * However, in both cases we have other !0 state we've set before to queue
4829 ++ * ourselves:
4830 ++ *
4831 ++ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
4832 ++ * load is constrained by that ACQUIRE to not pass before that, and thus must
4833 ++ * observe the store.
4834 ++ *
4835 ++ * For (2) we have a more interesting scenario. We enqueue ourselves using
4836 ++ * xchg_tail(), which ends up being a RELEASE. This in itself is not
4837 ++ * sufficient; however, it is followed by an smp_cond_acquire() on the same
4838 ++ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
4839 ++ * guarantees we must observe that store.
4840 ++ *
4841 ++ * Therefore both cases have other !0 state that is observable before the
4842 ++ * unordered locked byte store comes through. This means we can use that to
4843 ++ * wait for the lock store, and then wait for an unlock.
4844 ++ */
4845 ++#ifndef queued_spin_unlock_wait
4846 ++void queued_spin_unlock_wait(struct qspinlock *lock)
4847 ++{
4848 ++ u32 val;
4849 ++
4850 ++ for (;;) {
4851 ++ val = atomic_read(&lock->val);
4852 ++
4853 ++ if (!val) /* not locked, we're done */
4854 ++ goto done;
4855 ++
4856 ++ if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
4857 ++ break;
4858 ++
4859 ++ /* not locked, but pending, wait until we observe the lock */
4860 ++ cpu_relax();
4861 ++ }
4862 ++
4863 ++ /* any unlock is good */
4864 ++ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
4865 ++ cpu_relax();
4866 ++
4867 ++done:
4868 ++ smp_rmb(); /* CTRL + RMB -> ACQUIRE */
4869 ++}
4870 ++EXPORT_SYMBOL(queued_spin_unlock_wait);
4871 ++#endif
4872 ++
4873 + #endif /* _GEN_PV_LOCK_SLOWPATH */
4874 +
4875 + /**
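Concretely, the new wait loop distinguishes three observed states of
lock->val (a sketch, not an exhaustive enumeration):

	val == 0                 unlocked: smp_rmb(), done
	val != 0, locked clear   an acquisition is in flight (pending bit or
	                         queue tail set): spin until the locked byte
	                         becomes visible
	locked set               spin until the current holder's unlock
	                         clears it

The middle state is exactly the delayed unordered store described above:
the other !0 state is guaranteed observable before _Q_LOCKED_VAL is.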
4876 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4877 +index cfdc0e61066c..51c615279b23 100644
4878 +--- a/kernel/sched/fair.c
4879 ++++ b/kernel/sched/fair.c
4880 +@@ -2682,6 +2682,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
4881 +
4882 + static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
4883 +
4884 ++/*
4885 ++ * Unsigned subtract and clamp on underflow.
4886 ++ *
4887 ++ * Explicitly do a load-store to ensure the intermediate value never hits
4888 ++ * memory. This allows lockless observations without ever seeing the negative
4889 ++ * values.
4890 ++ */
4891 ++#define sub_positive(_ptr, _val) do { \
4892 ++ typeof(_ptr) ptr = (_ptr); \
4893 ++ typeof(*ptr) val = (_val); \
4894 ++ typeof(*ptr) res, var = READ_ONCE(*ptr); \
4895 ++ res = var - val; \
4896 ++ if (res > var) \
4897 ++ res = 0; \
4898 ++ WRITE_ONCE(*ptr, res); \
4899 ++} while (0)
4900 ++
4901 + /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
4902 + static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4903 + {
4904 +@@ -2690,15 +2707,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4905 +
4906 + if (atomic_long_read(&cfs_rq->removed_load_avg)) {
4907 + s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
4908 +- sa->load_avg = max_t(long, sa->load_avg - r, 0);
4909 +- sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
4910 ++ sub_positive(&sa->load_avg, r);
4911 ++ sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
4912 + removed = 1;
4913 + }
4914 +
4915 + if (atomic_long_read(&cfs_rq->removed_util_avg)) {
4916 + long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
4917 +- sa->util_avg = max_t(long, sa->util_avg - r, 0);
4918 +- sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
4919 ++ sub_positive(&sa->util_avg, r);
4920 ++ sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
4921 + }
4922 +
4923 + decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
4924 +@@ -2764,10 +2781,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
4925 + &se->avg, se->on_rq * scale_load_down(se->load.weight),
4926 + cfs_rq->curr == se, NULL);
4927 +
4928 +- cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
4929 +- cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
4930 +- cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
4931 +- cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
4932 ++ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
4933 ++ sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
4934 ++ sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4935 ++ sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4936 + }
4937 +
4938 + /* Add the load generated by se into cfs_rq's load average */
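A worked instance of the clamp, showing how the unsigned comparison detects
underflow:

	unsigned long var = 5, val = 7, res;

	res = var - val;	/* wraps: res == ULONG_MAX - 1 */
	if (res > var)		/* true, so the underflow is caught */
		res = 0;	/* clamp, exactly as sub_positive() does */

The READ_ONCE()/WRITE_ONCE() pair in the macro additionally ensures the
wrapped intermediate value never hits memory, so lockless readers of
load_avg/util_avg can never observe it.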
4939 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
4940 +index f96f0383f6c6..ad1d6164e946 100644
4941 +--- a/kernel/trace/trace_printk.c
4942 ++++ b/kernel/trace/trace_printk.c
4943 +@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
4944 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
4945 + {
4946 + struct trace_bprintk_fmt *pos;
4947 ++
4948 ++ if (!fmt)
4949 ++ return ERR_PTR(-EINVAL);
4950 ++
4951 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
4952 + if (!strcmp(pos->fmt, fmt))
4953 + return pos;
4954 +@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
4955 + for (iter = start; iter < end; iter++) {
4956 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
4957 + if (tb_fmt) {
4958 +- *iter = tb_fmt->fmt;
4959 ++ if (!IS_ERR(tb_fmt))
4960 ++ *iter = tb_fmt->fmt;
4961 + continue;
4962 + }
4963 +
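lookup_format() now has a tri-state return that callers must keep apart; a
minimal restatement of the pattern:

	struct trace_bprintk_fmt *tb_fmt = lookup_format(fmt);

	if (IS_ERR(tb_fmt)) {
		/* fmt was NULL: record nothing, allocate nothing */
	} else if (tb_fmt) {
		/* existing entry: reuse tb_fmt->fmt */
	} else {
		/* not found: fall through to allocation */
	}

ERR_PTR(-EINVAL) is non-NULL, which is what lets the existing
"if (tb_fmt) ... continue;" test in hold_module_trace_bprintk_format()
skip the allocation path for NULL format pointers.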
4964 +diff --git a/mm/migrate.c b/mm/migrate.c
4965 +index bbeb0b71fcf4..72c09dea6526 100644
4966 +--- a/mm/migrate.c
4967 ++++ b/mm/migrate.c
4968 +@@ -429,6 +429,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
4969 +
4970 + return MIGRATEPAGE_SUCCESS;
4971 + }
4972 ++EXPORT_SYMBOL(migrate_page_move_mapping);
4973 +
4974 + /*
4975 + * The expected number of remaining references is the same as that
4976 +@@ -579,6 +580,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
4977 + if (PageWriteback(newpage))
4978 + end_page_writeback(newpage);
4979 + }
4980 ++EXPORT_SYMBOL(migrate_page_copy);
4981 +
4982 + /************************************************************
4983 + * Migration functions
4984 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4985 +index e40c9364582d..fd51ebfc423f 100644
4986 +--- a/mm/page-writeback.c
4987 ++++ b/mm/page-writeback.c
4988 +@@ -359,8 +359,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
4989 + struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
4990 + unsigned long bytes = vm_dirty_bytes;
4991 + unsigned long bg_bytes = dirty_background_bytes;
4992 +- unsigned long ratio = vm_dirty_ratio;
4993 +- unsigned long bg_ratio = dirty_background_ratio;
4994 ++ /* convert ratios to per-PAGE_SIZE for higher precision */
4995 ++ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
4996 ++ unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
4997 + unsigned long thresh;
4998 + unsigned long bg_thresh;
4999 + struct task_struct *tsk;
5000 +@@ -372,26 +373,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
5001 + /*
5002 + * The byte settings can't be applied directly to memcg
5003 + * domains. Convert them to ratios by scaling against
5004 +- * globally available memory.
5005 ++ * globally available memory. As the ratios are expressed in
5006 ++ * units of 1/PAGE_SIZE, they can be obtained by dividing bytes
5007 ++ * by the number of pages.
5008 + */
5009 + if (bytes)
5010 +- ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
5011 +- global_avail, 100UL);
5012 ++ ratio = min(DIV_ROUND_UP(bytes, global_avail),
5013 ++ PAGE_SIZE);
5014 + if (bg_bytes)
5015 +- bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
5016 +- global_avail, 100UL);
5017 ++ bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
5018 ++ PAGE_SIZE);
5019 + bytes = bg_bytes = 0;
5020 + }
5021 +
5022 + if (bytes)
5023 + thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
5024 + else
5025 +- thresh = (ratio * available_memory) / 100;
5026 ++ thresh = (ratio * available_memory) / PAGE_SIZE;
5027 +
5028 + if (bg_bytes)
5029 + bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
5030 + else
5031 +- bg_thresh = (bg_ratio * available_memory) / 100;
5032 ++ bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
5033 +
5034 + if (bg_thresh >= thresh)
5035 + bg_thresh = thresh / 2;
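A worked example of the precision gain, assuming PAGE_SIZE = 4096 and
vm_dirty_ratio = 20:

	old: thresh = 20 * available_memory / 100          (steps of 1%)
	new: ratio  = 20 * 4096 / 100 = 819                (steps of 1/4096)
	     thresh = 819 * available_memory / 4096
	            ~= 19.995% of available_memory

The payoff is for memcg domains: a byte limit now converts as
DIV_ROUND_UP(bytes, global_avail), capped at PAGE_SIZE (i.e. 100%), so it
resolves to 1/4096 of available memory instead of a whole percent.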
5036 +diff --git a/mm/percpu.c b/mm/percpu.c
5037 +index 8a943b97a053..1f376bce413c 100644
5038 +--- a/mm/percpu.c
5039 ++++ b/mm/percpu.c
5040 +@@ -110,7 +110,7 @@ struct pcpu_chunk {
5041 + int map_used; /* # of map entries used before the sentry */
5042 + int map_alloc; /* # of map entries allocated */
5043 + int *map; /* allocation map */
5044 +- struct work_struct map_extend_work;/* async ->map[] extension */
5045 ++ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
5046 +
5047 + void *data; /* chunk data */
5048 + int first_free; /* no free below this */
5049 +@@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
5050 + static int pcpu_reserved_chunk_limit;
5051 +
5052 + static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
5053 +-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
5054 ++static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
5055 +
5056 + static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
5057 +
5058 ++/* chunks which need their map areas extended, protected by pcpu_lock */
5059 ++static LIST_HEAD(pcpu_map_extend_chunks);
5060 ++
5061 + /*
5062 + * The number of empty populated pages, protected by pcpu_lock. The
5063 + * reserved chunk doesn't contribute to the count.
5064 +@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
5065 + {
5066 + int margin, new_alloc;
5067 +
5068 ++ lockdep_assert_held(&pcpu_lock);
5069 ++
5070 + if (is_atomic) {
5071 + margin = 3;
5072 +
5073 + if (chunk->map_alloc <
5074 +- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
5075 +- pcpu_async_enabled)
5076 +- schedule_work(&chunk->map_extend_work);
5077 ++ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
5078 ++ if (list_empty(&chunk->map_extend_list)) {
5079 ++ list_add_tail(&chunk->map_extend_list,
5080 ++ &pcpu_map_extend_chunks);
5081 ++ pcpu_schedule_balance_work();
5082 ++ }
5083 ++ }
5084 + } else {
5085 + margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
5086 + }
5087 +@@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
5088 + size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
5089 + unsigned long flags;
5090 +
5091 ++ lockdep_assert_held(&pcpu_alloc_mutex);
5092 ++
5093 + new = pcpu_mem_zalloc(new_size);
5094 + if (!new)
5095 + return -ENOMEM;
5096 +@@ -469,20 +480,6 @@ out_unlock:
5097 + return 0;
5098 + }
5099 +
5100 +-static void pcpu_map_extend_workfn(struct work_struct *work)
5101 +-{
5102 +- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
5103 +- map_extend_work);
5104 +- int new_alloc;
5105 +-
5106 +- spin_lock_irq(&pcpu_lock);
5107 +- new_alloc = pcpu_need_to_extend(chunk, false);
5108 +- spin_unlock_irq(&pcpu_lock);
5109 +-
5110 +- if (new_alloc)
5111 +- pcpu_extend_area_map(chunk, new_alloc);
5112 +-}
5113 +-
5114 + /**
5115 + * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
5116 + * @chunk: chunk the candidate area belongs to
5117 +@@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
5118 + chunk->map_used = 1;
5119 +
5120 + INIT_LIST_HEAD(&chunk->list);
5121 +- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
5122 ++ INIT_LIST_HEAD(&chunk->map_extend_list);
5123 + chunk->free_size = pcpu_unit_size;
5124 + chunk->contig_hint = pcpu_unit_size;
5125 +
5126 +@@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
5127 + return NULL;
5128 + }
5129 +
5130 ++ if (!is_atomic)
5131 ++ mutex_lock(&pcpu_alloc_mutex);
5132 ++
5133 + spin_lock_irqsave(&pcpu_lock, flags);
5134 +
5135 + /* serve reserved allocations from the reserved chunk if available */
5136 +@@ -969,12 +969,9 @@ restart:
5137 + if (is_atomic)
5138 + goto fail;
5139 +
5140 +- mutex_lock(&pcpu_alloc_mutex);
5141 +-
5142 + if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
5143 + chunk = pcpu_create_chunk();
5144 + if (!chunk) {
5145 +- mutex_unlock(&pcpu_alloc_mutex);
5146 + err = "failed to allocate new chunk";
5147 + goto fail;
5148 + }
5149 +@@ -985,7 +982,6 @@ restart:
5150 + spin_lock_irqsave(&pcpu_lock, flags);
5151 + }
5152 +
5153 +- mutex_unlock(&pcpu_alloc_mutex);
5154 + goto restart;
5155 +
5156 + area_found:
5157 +@@ -995,8 +991,6 @@ area_found:
5158 + if (!is_atomic) {
5159 + int page_start, page_end, rs, re;
5160 +
5161 +- mutex_lock(&pcpu_alloc_mutex);
5162 +-
5163 + page_start = PFN_DOWN(off);
5164 + page_end = PFN_UP(off + size);
5165 +
5166 +@@ -1007,7 +1001,6 @@ area_found:
5167 +
5168 + spin_lock_irqsave(&pcpu_lock, flags);
5169 + if (ret) {
5170 +- mutex_unlock(&pcpu_alloc_mutex);
5171 + pcpu_free_area(chunk, off, &occ_pages);
5172 + err = "failed to populate";
5173 + goto fail_unlock;
5174 +@@ -1047,6 +1040,8 @@ fail:
5175 + /* see the flag handling in pcpu_balance_workfn() */
5176 + pcpu_atomic_alloc_failed = true;
5177 + pcpu_schedule_balance_work();
5178 ++ } else {
5179 ++ mutex_unlock(&pcpu_alloc_mutex);
5180 + }
5181 + return NULL;
5182 + }
5183 +@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
5184 + if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
5185 + continue;
5186 +
5187 ++ list_del_init(&chunk->map_extend_list);
5188 + list_move(&chunk->list, &to_free);
5189 + }
5190 +
5191 +@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
5192 + pcpu_destroy_chunk(chunk);
5193 + }
5194 +
5195 ++ /* service chunks which requested async area map extension */
5196 ++ do {
5197 ++ int new_alloc = 0;
5198 ++
5199 ++ spin_lock_irq(&pcpu_lock);
5200 ++
5201 ++ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
5202 ++ struct pcpu_chunk, map_extend_list);
5203 ++ if (chunk) {
5204 ++ list_del_init(&chunk->map_extend_list);
5205 ++ new_alloc = pcpu_need_to_extend(chunk, false);
5206 ++ }
5207 ++
5208 ++ spin_unlock_irq(&pcpu_lock);
5209 ++
5210 ++ if (new_alloc)
5211 ++ pcpu_extend_area_map(chunk, new_alloc);
5212 ++ } while (chunk);
5213 ++
5214 + /*
5215 + * Ensure there are certain number of free populated pages for
5216 + * atomic allocs. Fill up from the most packed so that atomic
5217 +@@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
5218 + */
5219 + schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
5220 + INIT_LIST_HEAD(&schunk->list);
5221 +- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
5222 ++ INIT_LIST_HEAD(&schunk->map_extend_list);
5223 + schunk->base_addr = base_addr;
5224 + schunk->map = smap;
5225 + schunk->map_alloc = ARRAY_SIZE(smap);
5226 +@@ -1675,7 +1690,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
5227 + if (dyn_size) {
5228 + dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
5229 + INIT_LIST_HEAD(&dchunk->list);
5230 +- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
5231 ++ INIT_LIST_HEAD(&dchunk->map_extend_list);
5232 + dchunk->base_addr = base_addr;
5233 + dchunk->map = dmap;
5234 + dchunk->map_alloc = ARRAY_SIZE(dmap);
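The overall shape of the percpu change: per-chunk work items are replaced
by one list protected by pcpu_lock and drained from the existing balance
worker, so map extension happens under pcpu_alloc_mutex like every other
chunk mutation. The queue/drain pattern in isolation (names taken from the
hunks above):

	/* producer, called under pcpu_lock: queue each chunk at most once */
	if (list_empty(&chunk->map_extend_list)) {
		list_add_tail(&chunk->map_extend_list,
			      &pcpu_map_extend_chunks);
		pcpu_schedule_balance_work();
	}

	/* consumer, in pcpu_balance_workfn(): pop under the lock, then do
	 * the (possibly sleeping) extension outside it */
	chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
					 struct pcpu_chunk, map_extend_list);
	if (chunk)
		list_del_init(&chunk->map_extend_list);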
5235 +diff --git a/mm/shmem.c b/mm/shmem.c
5236 +index ea5a70cfc1d8..1b11ccc0a3b7 100644
5237 +--- a/mm/shmem.c
5238 ++++ b/mm/shmem.c
5239 +@@ -2153,9 +2153,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
5240 + NULL);
5241 + if (error) {
5242 + /* Remove the !PageUptodate pages we added */
5243 +- shmem_undo_range(inode,
5244 +- (loff_t)start << PAGE_CACHE_SHIFT,
5245 +- (loff_t)index << PAGE_CACHE_SHIFT, true);
5246 ++ if (index > start) {
5247 ++ shmem_undo_range(inode,
5248 ++ (loff_t)start << PAGE_CACHE_SHIFT,
5249 ++ ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
5250 ++ }
5251 + goto undone;
5252 + }
5253 +
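A worked instance of the corrected undo range (the third argument of
shmem_undo_range() is an inclusive byte offset):

	start = 5, failure after adding pages 5..7   =>  index == 8
	    undo [5 << PAGE_CACHE_SHIFT, (8 << PAGE_CACHE_SHIFT) - 1],
	    i.e. exactly pages 5, 6 and 7

	start = 5, failure on the very first page    =>  index == 5
	    index > start is false: nothing was added, nothing is undone
	    (previously shmem_undo_range() was called even in this case)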
5254 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
5255 +index 12e700332010..4ab6ead3d8ee 100644
5256 +--- a/net/core/flow_dissector.c
5257 ++++ b/net/core/flow_dissector.c
5258 +@@ -662,6 +662,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
5259 + }
5260 + EXPORT_SYMBOL(make_flow_keys_digest);
5261 +
5262 ++static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
5263 ++
5264 ++u32 __skb_get_hash_symmetric(struct sk_buff *skb)
5265 ++{
5266 ++ struct flow_keys keys;
5267 ++
5268 ++ __flow_hash_secret_init();
5269 ++
5270 ++ memset(&keys, 0, sizeof(keys));
5271 ++ __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
5272 ++ NULL, 0, 0, 0,
5273 ++ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
5274 ++
5275 ++ return __flow_hash_from_keys(&keys, hashrnd);
5276 ++}
5277 ++EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
5278 ++
5279 + /**
5280 + * __skb_get_hash: calculate a flow hash
5281 + * @skb: sk_buff to calculate flow hash from
5282 +@@ -874,6 +891,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
5283 + },
5284 + };
5285 +
5286 ++static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
5287 ++ {
5288 ++ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
5289 ++ .offset = offsetof(struct flow_keys, control),
5290 ++ },
5291 ++ {
5292 ++ .key_id = FLOW_DISSECTOR_KEY_BASIC,
5293 ++ .offset = offsetof(struct flow_keys, basic),
5294 ++ },
5295 ++ {
5296 ++ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
5297 ++ .offset = offsetof(struct flow_keys, addrs.v4addrs),
5298 ++ },
5299 ++ {
5300 ++ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
5301 ++ .offset = offsetof(struct flow_keys, addrs.v6addrs),
5302 ++ },
5303 ++ {
5304 ++ .key_id = FLOW_DISSECTOR_KEY_PORTS,
5305 ++ .offset = offsetof(struct flow_keys, ports),
5306 ++ },
5307 ++};
5308 ++
5309 + static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
5310 + {
5311 + .key_id = FLOW_DISSECTOR_KEY_CONTROL,
5312 +@@ -895,6 +935,9 @@ static int __init init_default_flow_dissectors(void)
5313 + skb_flow_dissector_init(&flow_keys_dissector,
5314 + flow_keys_dissector_keys,
5315 + ARRAY_SIZE(flow_keys_dissector_keys));
5316 ++ skb_flow_dissector_init(&flow_keys_dissector_symmetric,
5317 ++ flow_keys_dissector_symmetric_keys,
5318 ++ ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
5319 + skb_flow_dissector_init(&flow_keys_buf_dissector,
5320 + flow_keys_buf_dissector_keys,
5321 + ARRAY_SIZE(flow_keys_buf_dissector_keys));
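The new helper exists for the af_packet hunk below: PACKET_FANOUT_HASH
wants a hash with the symmetry property

	hash(saddr, daddr, sport, dport) == hash(daddr, saddr, dport, sport)

so that both directions of a connection land on the same fanout socket.
skb_get_hash() cannot guarantee that, since it may return a precomputed
(and possibly asymmetric) skb->hash; __skb_get_hash_symmetric() always
dissects the packet itself and hashes the addresses and ports in a
canonical order.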
5322 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5323 +index 9835d9a8a7a4..4968b5ddea69 100644
5324 +--- a/net/core/skbuff.c
5325 ++++ b/net/core/skbuff.c
5326 +@@ -2948,24 +2948,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
5327 + EXPORT_SYMBOL_GPL(skb_append_pagefrags);
5328 +
5329 + /**
5330 +- * skb_push_rcsum - push skb and update receive checksum
5331 +- * @skb: buffer to update
5332 +- * @len: length of data pulled
5333 +- *
5334 +- * This function performs an skb_push on the packet and updates
5335 +- * the CHECKSUM_COMPLETE checksum. It should be used on
5336 +- * receive path processing instead of skb_push unless you know
5337 +- * that the checksum difference is zero (e.g., a valid IP header)
5338 +- * or you are setting ip_summed to CHECKSUM_NONE.
5339 +- */
5340 +-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
5341 +-{
5342 +- skb_push(skb, len);
5343 +- skb_postpush_rcsum(skb, skb->data, len);
5344 +- return skb->data;
5345 +-}
5346 +-
5347 +-/**
5348 + * skb_pull_rcsum - pull skb and update receive checksum
5349 + * @skb: buffer to update
5350 + * @len: length of data pulled
5351 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
5352 +index 0c7e276c230e..34cf46d74554 100644
5353 +--- a/net/ipv6/ip6_fib.c
5354 ++++ b/net/ipv6/ip6_fib.c
5355 +@@ -179,6 +179,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
5356 + }
5357 + }
5358 +
5359 ++ free_percpu(non_pcpu_rt->rt6i_pcpu);
5360 + non_pcpu_rt->rt6i_pcpu = NULL;
5361 + }
5362 +
5363 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
5364 +index 6f85b6ab8e51..f7bb6829b415 100644
5365 +--- a/net/mac80211/mesh.c
5366 ++++ b/net/mac80211/mesh.c
5367 +@@ -151,19 +151,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
5368 + void mesh_sta_cleanup(struct sta_info *sta)
5369 + {
5370 + struct ieee80211_sub_if_data *sdata = sta->sdata;
5371 +- u32 changed;
5372 ++ u32 changed = 0;
5373 +
5374 + /*
5375 + * maybe userspace handles peer allocation and peering, but in either
5376 + * case the beacon is still generated by the kernel and we might need
5377 + * an update.
5378 + */
5379 +- changed = mesh_accept_plinks_update(sdata);
5380 ++ if (sdata->u.mesh.user_mpm &&
5381 ++ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
5382 ++ changed |= mesh_plink_dec_estab_count(sdata);
5383 ++ changed |= mesh_accept_plinks_update(sdata);
5384 + if (!sdata->u.mesh.user_mpm) {
5385 + changed |= mesh_plink_deactivate(sta);
5386 + del_timer_sync(&sta->mesh->plink_timer);
5387 + }
5388 +
5389 ++ /* make sure no readers can access nexthop sta from here on */
5390 ++ mesh_path_flush_by_nexthop(sta);
5391 ++ synchronize_net();
5392 ++
5393 + if (changed)
5394 + ieee80211_mbss_info_change_notify(sdata, changed);
5395 + }
5396 +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
5397 +index 2cafb21b422f..15b0150283b6 100644
5398 +--- a/net/mac80211/sta_info.h
5399 ++++ b/net/mac80211/sta_info.h
5400 +@@ -269,7 +269,7 @@ struct ieee80211_fast_tx {
5401 + u8 sa_offs, da_offs, pn_offs;
5402 + u8 band;
5403 + u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
5404 +- sizeof(rfc1042_header)];
5405 ++ sizeof(rfc1042_header)] __aligned(2);
5406 +
5407 + struct rcu_head rcu_head;
5408 + };
5409 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5410 +index 9cc7b512b472..a86f26d05bc2 100644
5411 +--- a/net/packet/af_packet.c
5412 ++++ b/net/packet/af_packet.c
5413 +@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
5414 + struct sk_buff *skb,
5415 + unsigned int num)
5416 + {
5417 +- return reciprocal_scale(skb_get_hash(skb), num);
5418 ++ return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
5419 + }
5420 +
5421 + static unsigned int fanout_demux_lb(struct packet_fanout *f,
5422 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
5423 +index 32fcdecdb9e2..e384d6aefa3a 100644
5424 +--- a/net/sched/act_mirred.c
5425 ++++ b/net/sched/act_mirred.c
5426 +@@ -170,7 +170,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
5427 +
5428 + if (!(at & AT_EGRESS)) {
5429 + if (m->tcfm_ok_push)
5430 +- skb_push(skb2, skb->mac_len);
5431 ++ skb_push_rcsum(skb2, skb->mac_len);
5432 + }
5433 +
5434 + /* mirror is always swallowed */
5435 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
5436 +index 23608eb0ded2..7a93922457ff 100644
5437 +--- a/net/sunrpc/clnt.c
5438 ++++ b/net/sunrpc/clnt.c
5439 +@@ -442,7 +442,7 @@ out_no_rpciod:
5440 + return ERR_PTR(err);
5441 + }
5442 +
5443 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5444 ++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5445 + struct rpc_xprt *xprt)
5446 + {
5447 + struct rpc_clnt *clnt = NULL;
5448 +@@ -474,7 +474,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5449 +
5450 + return clnt;
5451 + }
5452 +-EXPORT_SYMBOL_GPL(rpc_create_xprt);
5453 +
5454 + /**
5455 + * rpc_create - create an RPC client and transport with one call
5456 +@@ -500,6 +499,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
5457 + };
5458 + char servername[48];
5459 +
5460 ++ if (args->bc_xprt) {
5461 ++ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
5462 ++ xprt = args->bc_xprt->xpt_bc_xprt;
5463 ++ if (xprt) {
5464 ++ xprt_get(xprt);
5465 ++ return rpc_create_xprt(args, xprt);
5466 ++ }
5467 ++ }
5468 ++
5469 + if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
5470 + xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
5471 + if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
5472 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5473 +index 898a53a562b8..6579fd6e7459 100644
5474 +--- a/net/unix/af_unix.c
5475 ++++ b/net/unix/af_unix.c
5476 +@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
5477 + &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
5478 + struct dentry *dentry = unix_sk(s)->path.dentry;
5479 +
5480 +- if (dentry && d_backing_inode(dentry) == i) {
5481 ++ if (dentry && d_real_inode(dentry) == i) {
5482 + sock_hold(s);
5483 + goto found;
5484 + }
5485 +@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
5486 + err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
5487 + if (err)
5488 + goto fail;
5489 +- inode = d_backing_inode(path.dentry);
5490 ++ inode = d_real_inode(path.dentry);
5491 + err = inode_permission(inode, MAY_WRITE);
5492 + if (err)
5493 + goto put_fail;
5494 +@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
5495 + goto out_up;
5496 + }
5497 + addr->hash = UNIX_HASH_SIZE;
5498 +- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
5499 ++ hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
5500 + spin_lock(&unix_table_lock);
5501 + u->path = u_path;
5502 + list = &unix_socket_table[hash];
5503 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
5504 +index 5b96206e9aab..9f5cdd49ff0b 100644
5505 +--- a/scripts/mod/file2alias.c
5506 ++++ b/scripts/mod/file2alias.c
5507 +@@ -695,7 +695,7 @@ static int do_of_entry (const char *filename, void *symval, char *alias)
5508 + len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
5509 + (*type)[0] ? *type : "*");
5510 +
5511 +- if (compatible[0])
5512 ++ if ((*compatible)[0])
5513 + sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
5514 + *compatible);
5515 +
5516 +diff --git a/security/keys/key.c b/security/keys/key.c
5517 +index ab7997ded725..534808915371 100644
5518 +--- a/security/keys/key.c
5519 ++++ b/security/keys/key.c
5520 +@@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
5521 +
5522 + mutex_unlock(&key_construction_mutex);
5523 +
5524 +- if (keyring)
5525 ++ if (keyring && link_ret == 0)
5526 + __key_link_end(keyring, &key->index_key, edit);
5527 +
5528 + /* wake up anyone waiting for a key to be constructed */
5529 +diff --git a/sound/core/control.c b/sound/core/control.c
5530 +index a85d45595d02..b4fe9b002512 100644
5531 +--- a/sound/core/control.c
5532 ++++ b/sound/core/control.c
5533 +@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
5534 +
5535 + if (snd_BUG_ON(!card || !id))
5536 + return;
5537 ++ if (card->shutdown)
5538 ++ return;
5539 + read_lock(&card->ctl_files_rwlock);
5540 + #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
5541 + card->mixer_oss_change_count++;
5542 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
5543 +index 308c9ecf73db..8e980aa678d0 100644
5544 +--- a/sound/core/pcm.c
5545 ++++ b/sound/core/pcm.c
5546 +@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
5547 + }
5548 + EXPORT_SYMBOL(snd_pcm_new_internal);
5549 +
5550 ++static void free_chmap(struct snd_pcm_str *pstr)
5551 ++{
5552 ++ if (pstr->chmap_kctl) {
5553 ++ snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
5554 ++ pstr->chmap_kctl = NULL;
5555 ++ }
5556 ++}
5557 ++
5558 + static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
5559 + {
5560 + struct snd_pcm_substream *substream, *substream_next;
5561 +@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
5562 + kfree(setup);
5563 + }
5564 + #endif
5565 ++ free_chmap(pstr);
5566 + if (pstr->substream_count)
5567 + put_device(&pstr->dev);
5568 + }
5569 +@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
5570 + for (cidx = 0; cidx < 2; cidx++) {
5571 + if (!pcm->internal)
5572 + snd_unregister_device(&pcm->streams[cidx].dev);
5573 +- if (pcm->streams[cidx].chmap_kctl) {
5574 +- snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
5575 +- pcm->streams[cidx].chmap_kctl = NULL;
5576 +- }
5577 ++ free_chmap(&pcm->streams[cidx]);
5578 + }
5579 + mutex_unlock(&pcm->open_mutex);
5580 + mutex_unlock(&register_mutex);
5581 +diff --git a/sound/core/timer.c b/sound/core/timer.c
5582 +index b982d1b089bd..7c6155f5865b 100644
5583 +--- a/sound/core/timer.c
5584 ++++ b/sound/core/timer.c
5585 +@@ -1961,6 +1961,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
5586 +
5587 + qhead = tu->qhead++;
5588 + tu->qhead %= tu->queue_size;
5589 ++ tu->qused--;
5590 + spin_unlock_irq(&tu->qlock);
5591 +
5592 + if (tu->tread) {
5593 +@@ -1974,7 +1975,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
5594 + }
5595 +
5596 + spin_lock_irq(&tu->qlock);
5597 +- tu->qused--;
5598 + if (err < 0)
5599 + goto _error;
5600 + result += unit;
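Moving the qused decrement under the same qlock hold as the qhead advance
keeps the two queue fields consistent across the window where the lock is
dropped for the copy to user space; previously a concurrent producer could
see an advanced head paired with a stale element count. The resulting
critical section, in isolation:

	spin_lock_irq(&tu->qlock);
	qhead = tu->qhead++;
	tu->qhead %= tu->queue_size;
	tu->qused--;			/* head and count move together */
	spin_unlock_irq(&tu->qlock);	/* only now drop the lock */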
5601 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
5602 +index a9f7a75702d2..67628616506e 100644
5603 +--- a/sound/drivers/dummy.c
5604 ++++ b/sound/drivers/dummy.c
5605 +@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
5606 +
5607 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
5608 + {
5609 ++ hrtimer_cancel(&dpcm->timer);
5610 + tasklet_kill(&dpcm->tasklet);
5611 + }
5612 +
5613 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
5614 +index 4667c3232b7f..74177189063c 100644
5615 +--- a/sound/pci/au88x0/au88x0_core.c
5616 ++++ b/sound/pci/au88x0/au88x0_core.c
5617 +@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
5618 + int page, p, pp, delta, i;
5619 +
5620 + page =
5621 +- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
5622 +- WT_SUBBUF_MASK)
5623 +- >> WT_SUBBUF_SHIFT;
5624 ++ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
5625 ++ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
5626 + if (dma->nr_periods >= 4)
5627 + delta = (page - dma->period_real) & 3;
5628 + else {
5629 +diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
5630 +index 1cb85aeb0cea..286f5e3686a3 100644
5631 +--- a/sound/pci/echoaudio/echoaudio.c
5632 ++++ b/sound/pci/echoaudio/echoaudio.c
5633 +@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
5634 + u32 pipe_alloc_mask;
5635 + int err;
5636 +
5637 +- commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
5638 ++ commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
5639 + if (commpage_bak == NULL)
5640 + return -ENOMEM;
5641 + commpage = chip->comm_page;
5642 +- memcpy(commpage_bak, commpage, sizeof(struct comm_page));
5643 ++ memcpy(commpage_bak, commpage, sizeof(*commpage));
5644 +
5645 + err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
5646 + if (err < 0) {
5647 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5648 +index 367dbf0d285e..dc2fa576d60d 100644
5649 +--- a/sound/pci/hda/hda_generic.c
5650 ++++ b/sound/pci/hda/hda_generic.c
5651 +@@ -3994,6 +3994,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
5652 +
5653 + for (n = 0; n < spec->paths.used; n++) {
5654 + path = snd_array_elem(&spec->paths, n);
5655 ++ if (!path->depth)
5656 ++ continue;
5657 + if (path->path[0] == nid ||
5658 + path->path[path->depth - 1] == nid) {
5659 + bool pin_old = path->pin_enabled;
5660 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5661 +index 1475440b70aa..8218cace8fea 100644
5662 +--- a/sound/pci/hda/hda_intel.c
5663 ++++ b/sound/pci/hda/hda_intel.c
5664 +@@ -361,9 +361,10 @@ enum {
5665 + #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
5666 + #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
5667 + #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
5668 ++#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
5669 + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
5670 + #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
5671 +- IS_KBL(pci) || IS_KBL_LP(pci)
5672 ++ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
5673 +
5674 + static char *driver_short_names[] = {
5675 + [AZX_DRIVER_ICH] = "HDA Intel",
5676 +@@ -1255,8 +1256,10 @@ static int azx_free(struct azx *chip)
5677 + if (use_vga_switcheroo(hda)) {
5678 + if (chip->disabled && hda->probe_continued)
5679 + snd_hda_unlock_devices(&chip->bus);
5680 +- if (hda->vga_switcheroo_registered)
5681 ++ if (hda->vga_switcheroo_registered) {
5682 + vga_switcheroo_unregister_client(chip->pci);
5683 ++ vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
5684 ++ }
5685 + }
5686 +
5687 + if (bus->chip_init) {
5688 +@@ -2213,6 +2216,9 @@ static const struct pci_device_id azx_ids[] = {
5689 + /* Kabylake-LP */
5690 + { PCI_DEVICE(0x8086, 0x9d71),
5691 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5692 ++ /* Kabylake-H */
5693 ++ { PCI_DEVICE(0x8086, 0xa2f0),
5694 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5695 + /* Broxton-P(Apollolake) */
5696 + { PCI_DEVICE(0x8086, 0x5a98),
5697 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5698 +@@ -2286,6 +2292,8 @@ static const struct pci_device_id azx_ids[] = {
5699 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5700 + { PCI_DEVICE(0x1002, 0x157a),
5701 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5702 ++ { PCI_DEVICE(0x1002, 0x15b3),
5703 ++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5704 + { PCI_DEVICE(0x1002, 0x793b),
5705 + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
5706 + { PCI_DEVICE(0x1002, 0x7919),
5707 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5708 +index 0fe18ede3e85..abcb5a6a1cd9 100644
5709 +--- a/sound/pci/hda/patch_realtek.c
5710 ++++ b/sound/pci/hda/patch_realtek.c
5711 +@@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5712 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5713 + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5714 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5715 ++ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
5716 ++ SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
5717 ++ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
5718 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5719 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5720 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
5721 +@@ -5735,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5722 + {}
5723 + };
5724 + #define ALC225_STANDARD_PINS \
5725 +- {0x12, 0xb7a60130}, \
5726 + {0x21, 0x04211020}
5727 +
5728 + #define ALC256_STANDARD_PINS \
5729 +@@ -5760,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5730 + static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5731 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5732 + ALC225_STANDARD_PINS,
5733 ++ {0x12, 0xb7a60130},
5734 + {0x14, 0x901701a0}),
5735 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5736 + ALC225_STANDARD_PINS,
5737 ++ {0x12, 0xb7a60130},
5738 + {0x14, 0x901701b0}),
5739 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5740 ++ ALC225_STANDARD_PINS,
5741 ++ {0x12, 0xb7a60150},
5742 ++ {0x14, 0x901701a0}),
5743 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5744 ++ ALC225_STANDARD_PINS,
5745 ++ {0x12, 0xb7a60150},
5746 ++ {0x14, 0x901701b0}),
5747 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5748 ++ ALC225_STANDARD_PINS,
5749 ++ {0x12, 0xb7a60130},
5750 ++ {0x1b, 0x90170110}),
5751 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
5752 + {0x14, 0x90170110},
5753 + {0x21, 0x02211020}),
5754 +@@ -5832,6 +5848,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5755 + {0x14, 0x90170120},
5756 + {0x21, 0x02211030}),
5757 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5758 ++ {0x12, 0x90a60170},
5759 ++ {0x14, 0x90170120},
5760 ++ {0x21, 0x02211030}),
5761 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5762 + ALC256_STANDARD_PINS),
5763 + SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5764 + {0x12, 0x90a60130},
5765 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5766 +index fefbf2d148ef..510df220d1b5 100644
5767 +--- a/virt/kvm/kvm_main.c
5768 ++++ b/virt/kvm/kvm_main.c
5769 +@@ -2861,7 +2861,7 @@ static long kvm_vm_ioctl(struct file *filp,
5770 + if (copy_from_user(&routing, argp, sizeof(routing)))
5771 + goto out;
5772 + r = -EINVAL;
5773 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
5774 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
5775 + goto out;
5776 + if (routing.flags)
5777 + goto out;