Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.6 commit in: /
Date: Wed, 27 Jul 2016 19:23:42
Message-Id: 1469647406.221739056950edff86a332751f03ac75fb232f2e.mpagano@gentoo
1 commit: 221739056950edff86a332751f03ac75fb232f2e
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Wed Jul 27 19:23:26 2016 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed Jul 27 19:23:26 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=221739056950edff86a332751f03ac75fb232f2e
7
8 Linux patch 4.6.5
9
10 0000_README | 4 +
11 1004_linux-4.6.5.patch | 7262 ++++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 7266 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 5c841a9..67da565 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -59,6 +59,10 @@ Patch: 1003_linux-4.6.4.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.6.4
21
22 +Patch: 1004_linux-4.6.5.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.6.5
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1004_linux-4.6.5.patch b/1004_linux-4.6.5.patch
31 new file mode 100644
32 index 0000000..21cc942
33 --- /dev/null
34 +++ b/1004_linux-4.6.5.patch
35 @@ -0,0 +1,7262 @@
36 +diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
37 +index 6708c5e264aa..33e96f740639 100644
38 +--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
39 ++++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
40 +@@ -1,4 +1,4 @@
41 +-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
42 ++What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
43 + Date: March 2014
44 + KernelVersion: 3.15
45 + Contact: Matt Ranostay <mranostay@×××××.com>
46 +diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
47 +index 8638f61c8c9d..37eca00796ee 100644
48 +--- a/Documentation/scsi/scsi_eh.txt
49 ++++ b/Documentation/scsi/scsi_eh.txt
50 +@@ -263,19 +263,23 @@ scmd->allowed.
51 +
52 + 3. scmd recovered
53 + ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
54 +- - shost->host_failed--
55 + - clear scmd->eh_eflags
56 + - scsi_setup_cmd_retry()
57 + - move from local eh_work_q to local eh_done_q
58 + LOCKING: none
59 ++ CONCURRENCY: at most one thread per separate eh_work_q to
60 ++ keep queue manipulation lockless
61 +
62 + 4. EH completes
63 + ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
64 +- layer of failure.
65 ++ layer of failure. May be called concurrently but must have
66 ++ a no more than one thread per separate eh_work_q to
67 ++ manipulate the queue locklessly
68 + - scmd is removed from eh_done_q and scmd->eh_entry is cleared
69 + - if retry is necessary, scmd is requeued using
70 + scsi_queue_insert()
71 + - otherwise, scsi_finish_command() is invoked for scmd
72 ++ - zero shost->host_failed
73 + LOCKING: queue or finish function performs appropriate locking
74 +
75 +
76 +diff --git a/Makefile b/Makefile
77 +index cd374426114a..7d693a825fc7 100644
78 +--- a/Makefile
79 ++++ b/Makefile
80 +@@ -1,6 +1,6 @@
81 + VERSION = 4
82 + PATCHLEVEL = 6
83 +-SUBLEVEL = 4
84 ++SUBLEVEL = 5
85 + EXTRAVERSION =
86 + NAME = Charred Weasel
87 +
88 +diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
89 +index 8450944b28e6..22f7a13e20b4 100644
90 +--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
91 ++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
92 +@@ -58,8 +58,8 @@
93 + soc {
94 + ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
95 + MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
96 +- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
97 +- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
98 ++ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
99 ++ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
100 +
101 + internal-regs {
102 +
103 +diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
104 +index f6898c6b84d4..c937c85ffb45 100644
105 +--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
106 ++++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
107 +@@ -52,7 +52,7 @@
108 +
109 + / {
110 + model = "NextThing C.H.I.P.";
111 +- compatible = "nextthing,chip", "allwinner,sun5i-r8";
112 ++ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
113 +
114 + aliases {
115 + i2c0 = &i2c0;
116 +diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
117 +index 68b479b8772c..73c133f5e79c 100644
118 +--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
119 ++++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
120 +@@ -176,8 +176,6 @@
121 + };
122 +
123 + &reg_dc1sw {
124 +- regulator-min-microvolt = <3000000>;
125 +- regulator-max-microvolt = <3000000>;
126 + regulator-name = "vcc-lcd";
127 + };
128 +
129 +diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
130 +index 360adfb1e9ca..d6ad6196a768 100644
131 +--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
132 ++++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
133 +@@ -135,8 +135,6 @@
134 +
135 + &reg_dc1sw {
136 + regulator-name = "vcc-lcd-usb2";
137 +- regulator-min-microvolt = <3000000>;
138 +- regulator-max-microvolt = <3000000>;
139 + };
140 +
141 + &reg_dc5ldo {
142 +diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
143 +index aeddd28b3595..92fd2c8a9af0 100644
144 +--- a/arch/arm/include/asm/pgtable-2level.h
145 ++++ b/arch/arm/include/asm/pgtable-2level.h
146 +@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
147 +
148 + #define pmd_large(pmd) (pmd_val(pmd) & 2)
149 + #define pmd_bad(pmd) (pmd_val(pmd) & 2)
150 ++#define pmd_present(pmd) (pmd_val(pmd))
151 +
152 + #define copy_pmd(pmdpd,pmdps) \
153 + do { \
154 +diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
155 +index dc46398bc3a5..74114667d116 100644
156 +--- a/arch/arm/include/asm/pgtable-3level.h
157 ++++ b/arch/arm/include/asm/pgtable-3level.h
158 +@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
159 + : !!(pmd_val(pmd) & (val)))
160 + #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
161 +
162 ++#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
163 + #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
164 + #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
165 + static inline pte_t pte_mkspecial(pte_t pte)
166 +@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
167 + #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
168 + #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
169 +
170 +-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
171 ++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
172 + static inline pmd_t pmd_mknotpresent(pmd_t pmd)
173 + {
174 +- return __pmd(0);
175 ++ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
176 + }
177 +
178 + static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
179 +diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
180 +index 348caabb7625..d62204060cbe 100644
181 +--- a/arch/arm/include/asm/pgtable.h
182 ++++ b/arch/arm/include/asm/pgtable.h
183 +@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
184 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
185 +
186 + #define pmd_none(pmd) (!pmd_val(pmd))
187 +-#define pmd_present(pmd) (pmd_val(pmd))
188 +
189 + static inline pte_t *pmd_page_vaddr(pmd_t pmd)
190 + {
191 +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
192 +index dded1b763c16..72b11d91ede2 100644
193 +--- a/arch/arm/kvm/arm.c
194 ++++ b/arch/arm/kvm/arm.c
195 +@@ -267,6 +267,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
196 + kvm_timer_vcpu_terminate(vcpu);
197 + kvm_vgic_vcpu_destroy(vcpu);
198 + kvm_pmu_vcpu_destroy(vcpu);
199 ++ kvm_vcpu_uninit(vcpu);
200 + kmem_cache_free(kvm_vcpu_cache, vcpu);
201 + }
202 +
203 +diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
204 +index a38b16b69923..b56de4b8cdf2 100644
205 +--- a/arch/arm/mach-imx/mach-imx6ul.c
206 ++++ b/arch/arm/mach-imx/mach-imx6ul.c
207 +@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
208 + static void __init imx6ul_enet_phy_init(void)
209 + {
210 + if (IS_BUILTIN(CONFIG_PHYLIB))
211 +- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
212 ++ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
213 + ksz8081_phy_fixup);
214 + }
215 +
216 +diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
217 +index 7e989d61159c..474abff7e855 100644
218 +--- a/arch/arm/mach-mvebu/coherency.c
219 ++++ b/arch/arm/mach-mvebu/coherency.c
220 +@@ -162,22 +162,16 @@ exit:
221 + }
222 +
223 + /*
224 +- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
225 +- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
226 +- * is needed as a workaround for a deadlock issue between the PCIe
227 +- * interface and the cache controller.
228 ++ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
229 ++ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
230 ++ * needed for the HW I/O coherency mechanism to work properly without
231 ++ * deadlock.
232 + */
233 + static void __iomem *
234 +-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
235 +- unsigned int mtype, void *caller)
236 ++armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
237 ++ unsigned int mtype, void *caller)
238 + {
239 +- struct resource pcie_mem;
240 +-
241 +- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
242 +-
243 +- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
244 +- mtype = MT_UNCACHED;
245 +-
246 ++ mtype = MT_UNCACHED;
247 + return __arm_ioremap_caller(phys_addr, size, mtype, caller);
248 + }
249 +
250 +@@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
251 + struct device_node *cache_dn;
252 +
253 + coherency_cpu_base = of_iomap(np, 0);
254 +- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
255 ++ arch_ioremap_caller = armada_wa_ioremap_caller;
256 +
257 + /*
258 + * We should switch the PL310 to I/O coherency mode only if
259 +diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
260 +index a307eb6e7fa8..7f94755089e2 100644
261 +--- a/arch/arm64/include/asm/ptrace.h
262 ++++ b/arch/arm64/include/asm/ptrace.h
263 +@@ -117,6 +117,8 @@ struct pt_regs {
264 + };
265 + u64 orig_x0;
266 + u64 syscallno;
267 ++ u64 orig_addr_limit;
268 ++ u64 unused; // maintain 16 byte alignment
269 + };
270 +
271 + #define arch_has_single_step() (1)
272 +diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
273 +index 3ae6b310ac9b..1abcd8829f3b 100644
274 +--- a/arch/arm64/kernel/asm-offsets.c
275 ++++ b/arch/arm64/kernel/asm-offsets.c
276 +@@ -59,6 +59,7 @@ int main(void)
277 + DEFINE(S_PC, offsetof(struct pt_regs, pc));
278 + DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
279 + DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
280 ++ DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
281 + DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
282 + BLANK();
283 + DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
284 +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
285 +index 12e8d2bcb3f9..6c3b7345a6c4 100644
286 +--- a/arch/arm64/kernel/entry.S
287 ++++ b/arch/arm64/kernel/entry.S
288 +@@ -28,6 +28,7 @@
289 + #include <asm/errno.h>
290 + #include <asm/esr.h>
291 + #include <asm/irq.h>
292 ++#include <asm/memory.h>
293 + #include <asm/thread_info.h>
294 + #include <asm/unistd.h>
295 +
296 +@@ -97,7 +98,14 @@
297 + mov x29, xzr // fp pointed to user-space
298 + .else
299 + add x21, sp, #S_FRAME_SIZE
300 +- .endif
301 ++ get_thread_info tsk
302 ++ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
303 ++ ldr x20, [tsk, #TI_ADDR_LIMIT]
304 ++ str x20, [sp, #S_ORIG_ADDR_LIMIT]
305 ++ mov x20, #TASK_SIZE_64
306 ++ str x20, [tsk, #TI_ADDR_LIMIT]
307 ++ ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
308 ++ .endif /* \el == 0 */
309 + mrs x22, elr_el1
310 + mrs x23, spsr_el1
311 + stp lr, x21, [sp, #S_LR]
312 +@@ -128,6 +136,14 @@
313 + .endm
314 +
315 + .macro kernel_exit, el
316 ++ .if \el != 0
317 ++ /* Restore the task's original addr_limit. */
318 ++ ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
319 ++ str x20, [tsk, #TI_ADDR_LIMIT]
320 ++
321 ++ /* No need to restore UAO, it will be restored from SPSR_EL1 */
322 ++ .endif
323 ++
324 + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
325 + .if \el == 0
326 + ct_user_enter
327 +@@ -406,7 +422,6 @@ el1_irq:
328 + bl trace_hardirqs_off
329 + #endif
330 +
331 +- get_thread_info tsk
332 + irq_handler
333 +
334 + #ifdef CONFIG_PREEMPT
335 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
336 +index c5392081b49b..58651a9dfcf8 100644
337 +--- a/arch/arm64/kernel/traps.c
338 ++++ b/arch/arm64/kernel/traps.c
339 +@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
340 +
341 + /*
342 + * We need to switch to kernel mode so that we can use __get_user
343 +- * to safely read from kernel space. Note that we now dump the
344 +- * code first, just in case the backtrace kills us.
345 ++ * to safely read from kernel space.
346 + */
347 + fs = get_fs();
348 + set_fs(KERNEL_DS);
349 +@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
350 + print_ip_sym(where);
351 + }
352 +
353 +-static void dump_instr(const char *lvl, struct pt_regs *regs)
354 ++static void __dump_instr(const char *lvl, struct pt_regs *regs)
355 + {
356 + unsigned long addr = instruction_pointer(regs);
357 +- mm_segment_t fs;
358 + char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
359 + int i;
360 +
361 +- /*
362 +- * We need to switch to kernel mode so that we can use __get_user
363 +- * to safely read from kernel space. Note that we now dump the
364 +- * code first, just in case the backtrace kills us.
365 +- */
366 +- fs = get_fs();
367 +- set_fs(KERNEL_DS);
368 +-
369 + for (i = -4; i < 1; i++) {
370 + unsigned int val, bad;
371 +
372 +@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
373 + }
374 + }
375 + printk("%sCode: %s\n", lvl, str);
376 ++}
377 +
378 +- set_fs(fs);
379 ++static void dump_instr(const char *lvl, struct pt_regs *regs)
380 ++{
381 ++ if (!user_mode(regs)) {
382 ++ mm_segment_t fs = get_fs();
383 ++ set_fs(KERNEL_DS);
384 ++ __dump_instr(lvl, regs);
385 ++ set_fs(fs);
386 ++ } else {
387 ++ __dump_instr(lvl, regs);
388 ++ }
389 + }
390 +
391 + static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
392 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
393 +index 10b79e9e87d1..e22849a90557 100644
394 +--- a/arch/arm64/mm/fault.c
395 ++++ b/arch/arm64/mm/fault.c
396 +@@ -284,7 +284,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
397 + }
398 +
399 + if (permission_fault(esr) && (addr < USER_DS)) {
400 +- if (get_fs() == KERNEL_DS)
401 ++ /* regs->orig_addr_limit may be 0 if we entered from EL0 */
402 ++ if (regs->orig_addr_limit == KERNEL_DS)
403 + die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
404 +
405 + if (!search_exception_tables(regs->pc))
406 +diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
407 +index dbd12ea8ce68..43a76b07eb32 100644
408 +--- a/arch/arm64/mm/flush.c
409 ++++ b/arch/arm64/mm/flush.c
410 +@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
411 + {
412 + struct page *page = pte_page(pte);
413 +
414 +- /* no flushing needed for anonymous pages */
415 +- if (!page_mapping(page))
416 +- return;
417 +-
418 + if (!test_and_set_bit(PG_dcache_clean, &page->flags))
419 + sync_icache_aliases(page_address(page),
420 + PAGE_SIZE << compound_order(page));
421 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
422 +index 942b8f6bf35b..1907ab379fad 100644
423 +--- a/arch/mips/include/asm/kvm_host.h
424 ++++ b/arch/mips/include/asm/kvm_host.h
425 +@@ -336,6 +336,7 @@ struct kvm_mips_tlb {
426 + #define KVM_MIPS_GUEST_TLB_SIZE 64
427 + struct kvm_vcpu_arch {
428 + void *host_ebase, *guest_ebase;
429 ++ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
430 + unsigned long host_stack;
431 + unsigned long host_gp;
432 +
433 +diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
434 +index 4ab4bdfad703..2143884709e4 100644
435 +--- a/arch/mips/kvm/interrupt.h
436 ++++ b/arch/mips/kvm/interrupt.h
437 +@@ -28,6 +28,7 @@
438 + #define MIPS_EXC_MAX 12
439 + /* XXXSL More to follow */
440 +
441 ++extern char __kvm_mips_vcpu_run_end[];
442 + extern char mips32_exception[], mips32_exceptionEnd[];
443 + extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
444 +
445 +diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
446 +index 81687ab1b523..fc93a08b6954 100644
447 +--- a/arch/mips/kvm/locore.S
448 ++++ b/arch/mips/kvm/locore.S
449 +@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
450 +
451 + /* Jump to guest */
452 + eret
453 ++EXPORT(__kvm_mips_vcpu_run_end)
454 +
455 + VECTOR(MIPSX(exception), unknown)
456 + /* Find out what mode we came from and jump to the proper handler. */
457 +diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
458 +index 70ef1a43c114..e223cb3d9e81 100644
459 +--- a/arch/mips/kvm/mips.c
460 ++++ b/arch/mips/kvm/mips.c
461 +@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
462 + memcpy(gebase + offset, mips32_GuestException,
463 + mips32_GuestExceptionEnd - mips32_GuestException);
464 +
465 ++#ifdef MODULE
466 ++ offset += mips32_GuestExceptionEnd - mips32_GuestException;
467 ++ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
468 ++ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
469 ++ vcpu->arch.vcpu_run = gebase + offset;
470 ++#else
471 ++ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
472 ++#endif
473 ++
474 + /* Invalidate the icache for these ranges */
475 + local_flush_icache_range((unsigned long)gebase,
476 + (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
477 +@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
478 + /* Disable hardware page table walking while in guest */
479 + htw_stop();
480 +
481 +- r = __kvm_mips_vcpu_run(run, vcpu);
482 ++ r = vcpu->arch.vcpu_run(run, vcpu);
483 +
484 + /* Re-enable HTW before enabling interrupts */
485 + htw_start();
486 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
487 +index b8500b4ac7fe..bec85055fc42 100644
488 +--- a/arch/powerpc/kernel/process.c
489 ++++ b/arch/powerpc/kernel/process.c
490 +@@ -1501,6 +1501,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
491 + current->thread.regs = regs - 1;
492 + }
493 +
494 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
495 ++ /*
496 ++ * Clear any transactional state, we're exec()ing. The cause is
497 ++ * not important as there will never be a recheckpoint so it's not
498 ++ * user visible.
499 ++ */
500 ++ if (MSR_TM_SUSPENDED(mfmsr()))
501 ++ tm_reclaim_current(0);
502 ++#endif
503 ++
504 + memset(regs->gpr, 0, sizeof(regs->gpr));
505 + regs->ctr = 0;
506 + regs->link = 0;
507 +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
508 +index ccd2037c797f..6ee4b72cda42 100644
509 +--- a/arch/powerpc/kernel/prom_init.c
510 ++++ b/arch/powerpc/kernel/prom_init.c
511 +@@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
512 + * must match by the macro below. Update the definition if
513 + * the structure layout changes.
514 + */
515 +-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
516 ++#define IBM_ARCH_VEC_NRCORES_OFFSET 133
517 + W(NR_CPUS), /* number of cores supported */
518 + 0,
519 + 0,
520 +diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
521 +index bd98ce2be17b..3e8865b187de 100644
522 +--- a/arch/powerpc/platforms/pseries/iommu.c
523 ++++ b/arch/powerpc/platforms/pseries/iommu.c
524 +@@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
525 + static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
526 + struct ddw_query_response *query)
527 + {
528 +- struct eeh_dev *edev;
529 ++ struct device_node *dn;
530 ++ struct pci_dn *pdn;
531 + u32 cfg_addr;
532 + u64 buid;
533 + int ret;
534 +@@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
535 + * Retrieve them from the pci device, not the node with the
536 + * dma-window property
537 + */
538 +- edev = pci_dev_to_eeh_dev(dev);
539 +- cfg_addr = edev->config_addr;
540 +- if (edev->pe_config_addr)
541 +- cfg_addr = edev->pe_config_addr;
542 +- buid = edev->phb->buid;
543 ++ dn = pci_device_to_OF_node(dev);
544 ++ pdn = PCI_DN(dn);
545 ++ buid = pdn->phb->buid;
546 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
547 +
548 + ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
549 + cfg_addr, BUID_HI(buid), BUID_LO(buid));
550 +@@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
551 + struct ddw_create_response *create, int page_shift,
552 + int window_shift)
553 + {
554 +- struct eeh_dev *edev;
555 ++ struct device_node *dn;
556 ++ struct pci_dn *pdn;
557 + u32 cfg_addr;
558 + u64 buid;
559 + int ret;
560 +@@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
561 + * Retrieve them from the pci device, not the node with the
562 + * dma-window property
563 + */
564 +- edev = pci_dev_to_eeh_dev(dev);
565 +- cfg_addr = edev->config_addr;
566 +- if (edev->pe_config_addr)
567 +- cfg_addr = edev->pe_config_addr;
568 +- buid = edev->phb->buid;
569 ++ dn = pci_device_to_OF_node(dev);
570 ++ pdn = PCI_DN(dn);
571 ++ buid = pdn->phb->buid;
572 ++ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
573 +
574 + do {
575 + /* extra outputs are LIOBN and dma-addr (hi, lo) */
576 +diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
577 +index 5e04f3cbd320..8ae236b0f80b 100644
578 +--- a/arch/s390/include/asm/fpu/api.h
579 ++++ b/arch/s390/include/asm/fpu/api.h
580 +@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
581 + " la %0,0\n"
582 + "1:\n"
583 + EX_TABLE(0b,1b)
584 +- : "=d" (rc), "=d" (orig_fpc)
585 ++ : "=d" (rc), "=&d" (orig_fpc)
586 + : "d" (fpc), "0" (-EINVAL));
587 + return rc;
588 + }
589 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
590 +index f20abdb5630a..d14069d4b88d 100644
591 +--- a/arch/s390/kernel/ipl.c
592 ++++ b/arch/s390/kernel/ipl.c
593 +@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
594 + S390_lowcore.program_new_psw.addr =
595 + (unsigned long) s390_base_pgm_handler;
596 +
597 +- /*
598 +- * Clear subchannel ID and number to signal new kernel that no CCW or
599 +- * SCSI IPL has been done (for kexec and kdump)
600 +- */
601 +- S390_lowcore.subchannel_id = 0;
602 +- S390_lowcore.subchannel_nr = 0;
603 +-
604 + do_reset_calls();
605 + }
606 +diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
607 +index 4324b87f9398..9f0ce0e6eeb4 100644
608 +--- a/arch/s390/mm/pgtable.c
609 ++++ b/arch/s390/mm/pgtable.c
610 +@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
611 + pgste = pgste_get_lock(ptep);
612 + pgstev = pgste_val(pgste);
613 + pte = *ptep;
614 +- if (pte_swap(pte) &&
615 ++ if (!reset && pte_swap(pte) &&
616 + ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
617 + (pgstev & _PGSTE_GPS_ZERO))) {
618 + ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
619 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
620 +index b1ef9e489084..b67f9e8b93a6 100644
621 +--- a/arch/x86/boot/Makefile
622 ++++ b/arch/x86/boot/Makefile
623 +@@ -171,6 +171,9 @@ isoimage: $(obj)/bzImage
624 + for i in lib lib64 share end ; do \
625 + if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
626 + cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
627 ++ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
628 ++ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
629 ++ fi ; \
630 + break ; \
631 + fi ; \
632 + if [ $$i = end ] ; then exit 1 ; fi ; \
633 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
634 +index 041e442a3e28..7eb806ca6b03 100644
635 +--- a/arch/x86/events/core.c
636 ++++ b/arch/x86/events/core.c
637 +@@ -2313,7 +2313,7 @@ void
638 + perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
639 + {
640 + struct stack_frame frame;
641 +- const void __user *fp;
642 ++ const unsigned long __user *fp;
643 +
644 + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
645 + /* TODO: We don't support guest os callchain now */
646 +@@ -2326,7 +2326,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
647 + if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
648 + return;
649 +
650 +- fp = (void __user *)regs->bp;
651 ++ fp = (unsigned long __user *)regs->bp;
652 +
653 + perf_callchain_store(entry, regs->ip);
654 +
655 +@@ -2339,16 +2339,17 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
656 + pagefault_disable();
657 + while (entry->nr < PERF_MAX_STACK_DEPTH) {
658 + unsigned long bytes;
659 ++
660 + frame.next_frame = NULL;
661 + frame.return_address = 0;
662 +
663 +- if (!access_ok(VERIFY_READ, fp, 16))
664 ++ if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
665 + break;
666 +
667 +- bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
668 ++ bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
669 + if (bytes != 0)
670 + break;
671 +- bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
672 ++ bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
673 + if (bytes != 0)
674 + break;
675 +
676 +diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
677 +index 1705c9d75e44..78ee9ebe38dd 100644
678 +--- a/arch/x86/events/intel/rapl.c
679 ++++ b/arch/x86/events/intel/rapl.c
680 +@@ -665,7 +665,7 @@ static void __init cleanup_rapl_pmus(void)
681 + int i;
682 +
683 + for (i = 0; i < rapl_pmus->maxpkg; i++)
684 +- kfree(rapl_pmus->pmus + i);
685 ++ kfree(rapl_pmus->pmus[i]);
686 + kfree(rapl_pmus);
687 + }
688 +
689 +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
690 +index 7a79ee2778b3..33c709ca2666 100644
691 +--- a/arch/x86/include/asm/msr.h
692 ++++ b/arch/x86/include/asm/msr.h
693 +@@ -112,7 +112,7 @@ static inline void native_write_msr(unsigned int msr,
694 + unsigned low, unsigned high)
695 + {
696 + asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
697 +- if (msr_tracepoint_active(__tracepoint_read_msr))
698 ++ if (msr_tracepoint_active(__tracepoint_write_msr))
699 + do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
700 + }
701 +
702 +@@ -131,7 +131,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
703 + : "c" (msr), "0" (low), "d" (high),
704 + [fault] "i" (-EIO)
705 + : "memory");
706 +- if (msr_tracepoint_active(__tracepoint_read_msr))
707 ++ if (msr_tracepoint_active(__tracepoint_write_msr))
708 + do_trace_write_msr(msr, ((u64)high << 32 | low), err);
709 + return err;
710 + }
711 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
712 +index a147e676fc7b..e991d5c8bb3a 100644
713 +--- a/arch/x86/kernel/amd_nb.c
714 ++++ b/arch/x86/kernel/amd_nb.c
715 +@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
716 + while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
717 + i++;
718 +
719 +- if (i == 0)
720 +- return 0;
721 ++ if (!i)
722 ++ return -ENODEV;
723 +
724 + nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
725 + if (!nb)
726 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
727 +index ae703acb85c1..44bcd5779ec1 100644
728 +--- a/arch/x86/kernel/kprobes/core.c
729 ++++ b/arch/x86/kernel/kprobes/core.c
730 +@@ -960,7 +960,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
731 + * normal page fault.
732 + */
733 + regs->ip = (unsigned long)cur->addr;
734 ++ /*
735 ++ * Trap flag (TF) has been set here because this fault
736 ++ * happened where the single stepping will be done.
737 ++ * So clear it by resetting the current kprobe:
738 ++ */
739 ++ regs->flags &= ~X86_EFLAGS_TF;
740 ++
741 ++ /*
742 ++ * If the TF flag was set before the kprobe hit,
743 ++ * don't touch it:
744 ++ */
745 + regs->flags |= kcb->kprobe_old_flags;
746 ++
747 + if (kcb->kprobe_status == KPROBE_REENTER)
748 + restore_previous_kprobe(kcb);
749 + else
750 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
751 +index faf52bac1416..c4217a23a98d 100644
752 +--- a/arch/x86/kvm/vmx.c
753 ++++ b/arch/x86/kvm/vmx.c
754 +@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
755 + unsigned int dest;
756 +
757 + if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
758 +- !irq_remapping_cap(IRQ_POSTING_CAP))
759 ++ !irq_remapping_cap(IRQ_POSTING_CAP) ||
760 ++ !kvm_vcpu_apicv_active(vcpu))
761 + return;
762 +
763 + do {
764 +@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
765 + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
766 +
767 + if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
768 +- !irq_remapping_cap(IRQ_POSTING_CAP))
769 ++ !irq_remapping_cap(IRQ_POSTING_CAP) ||
770 ++ !kvm_vcpu_apicv_active(vcpu))
771 + return;
772 +
773 + /* Set SN when the vCPU is preempted */
774 +@@ -6657,7 +6659,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
775 +
776 + /* Checks for #GP/#SS exceptions. */
777 + exn = false;
778 +- if (is_protmode(vcpu)) {
779 ++ if (is_long_mode(vcpu)) {
780 ++ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
781 ++ * non-canonical form. This is the only check on the memory
782 ++ * destination for long mode!
783 ++ */
784 ++ exn = is_noncanonical_address(*ret);
785 ++ } else if (is_protmode(vcpu)) {
786 + /* Protected mode: apply checks for segment validity in the
787 + * following order:
788 + * - segment type check (#GP(0) may be thrown)
789 +@@ -6674,17 +6682,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
790 + * execute-only code segment
791 + */
792 + exn = ((s.type & 0xa) == 8);
793 +- }
794 +- if (exn) {
795 +- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
796 +- return 1;
797 +- }
798 +- if (is_long_mode(vcpu)) {
799 +- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
800 +- * non-canonical form. This is an only check for long mode.
801 +- */
802 +- exn = is_noncanonical_address(*ret);
803 +- } else if (is_protmode(vcpu)) {
804 ++ if (exn) {
805 ++ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
806 ++ return 1;
807 ++ }
808 + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
809 + */
810 + exn = (s.unusable != 0);
811 +@@ -10702,7 +10703,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
812 + struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
813 +
814 + if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
815 +- !irq_remapping_cap(IRQ_POSTING_CAP))
816 ++ !irq_remapping_cap(IRQ_POSTING_CAP) ||
817 ++ !kvm_vcpu_apicv_active(vcpu))
818 + return 0;
819 +
820 + vcpu->pre_pcpu = vcpu->cpu;
821 +@@ -10768,7 +10770,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
822 + unsigned long flags;
823 +
824 + if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
825 +- !irq_remapping_cap(IRQ_POSTING_CAP))
826 ++ !irq_remapping_cap(IRQ_POSTING_CAP) ||
827 ++ !kvm_vcpu_apicv_active(vcpu))
828 + return;
829 +
830 + do {
831 +@@ -10821,7 +10824,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
832 + int idx, ret = -EINVAL;
833 +
834 + if (!kvm_arch_has_assigned_device(kvm) ||
835 +- !irq_remapping_cap(IRQ_POSTING_CAP))
836 ++ !irq_remapping_cap(IRQ_POSTING_CAP) ||
837 ++ !kvm_vcpu_apicv_active(kvm->vcpus[0]))
838 + return 0;
839 +
840 + idx = srcu_read_lock(&kvm->irq_srcu);
841 +diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
842 +index ead8dc0d084e..8ba426635b1b 100644
843 +--- a/crypto/rsa-pkcs1pad.c
844 ++++ b/crypto/rsa-pkcs1pad.c
845 +@@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
846 + };
847 +
848 + struct pkcs1pad_request {
849 +- struct akcipher_request child_req;
850 +-
851 + struct scatterlist in_sg[3], out_sg[2];
852 + uint8_t *in_buf, *out_buf;
853 ++
854 ++ struct akcipher_request child_req;
855 + };
856 +
857 + static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
858 +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
859 +index 961acc788f44..91a9e6af2ec4 100644
860 +--- a/drivers/ata/libata-eh.c
861 ++++ b/drivers/ata/libata-eh.c
862 +@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
863 + ata_scsi_port_error_handler(host, ap);
864 +
865 + /* finish or retry handled scmd's and clean up */
866 +- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
867 ++ WARN_ON(!list_empty(&eh_work_q));
868 +
869 + DPRINTK("EXIT\n");
870 + }
871 +diff --git a/drivers/base/module.c b/drivers/base/module.c
872 +index db930d3ee312..2a215780eda2 100644
873 +--- a/drivers/base/module.c
874 ++++ b/drivers/base/module.c
875 +@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
876 +
877 + static void module_create_drivers_dir(struct module_kobject *mk)
878 + {
879 +- if (!mk || mk->drivers_dir)
880 +- return;
881 ++ static DEFINE_MUTEX(drivers_dir_mutex);
882 +
883 +- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
884 ++ mutex_lock(&drivers_dir_mutex);
885 ++ if (mk && !mk->drivers_dir)
886 ++ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
887 ++ mutex_unlock(&drivers_dir_mutex);
888 + }
889 +
890 + void module_add_driver(struct module *mod, struct device_driver *drv)
891 +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
892 +index 94fb407d8561..44b1bd6baa38 100644
893 +--- a/drivers/char/ipmi/ipmi_msghandler.c
894 ++++ b/drivers/char/ipmi/ipmi_msghandler.c
895 +@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
896 + while (!list_empty(&intf->waiting_rcv_msgs)) {
897 + smi_msg = list_entry(intf->waiting_rcv_msgs.next,
898 + struct ipmi_smi_msg, link);
899 ++ list_del(&smi_msg->link);
900 + if (!run_to_completion)
901 + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
902 + flags);
903 +@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
904 + if (rv > 0) {
905 + /*
906 + * To preserve message order, quit if we
907 +- * can't handle a message.
908 ++ * can't handle a message. Add the message
909 ++ * back at the head, this is safe because this
910 ++ * tasklet is the only thing that pulls the
911 ++ * messages.
912 + */
913 ++ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
914 + break;
915 + } else {
916 +- list_del(&smi_msg->link);
917 + if (rv == 0)
918 + /* Message handled */
919 + ipmi_free_smi_msg(smi_msg);
920 +diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
921 +index 29c7c53d2845..92561c87f349 100644
922 +--- a/drivers/crypto/qat/qat_common/Makefile
923 ++++ b/drivers/crypto/qat/qat_common/Makefile
924 +@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
925 + $(obj)/qat_rsapubkey-asn1.h
926 + $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
927 + $(obj)/qat_rsaprivkey-asn1.h
928 ++$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
929 +
930 + clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
931 + clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
932 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
933 +index 1472f48c8ac6..ff51b51d2fd4 100644
934 +--- a/drivers/edac/edac_mc.c
935 ++++ b/drivers/edac/edac_mc.c
936 +@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
937 + list_for_each(item, &mc_devices) {
938 + mci = list_entry(item, struct mem_ctl_info, link);
939 +
940 +- edac_mod_work(&mci->work, value);
941 ++ if (mci->op_state == OP_RUNNING_POLL)
942 ++ edac_mod_work(&mci->work, value);
943 + }
944 + mutex_unlock(&mem_ctls_mutex);
945 + }
946 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
947 +index 8bf745d2da7e..b274fa2ffdec 100644
948 +--- a/drivers/edac/sb_edac.c
949 ++++ b/drivers/edac/sb_edac.c
950 +@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
951 + { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
952 + };
953 +
954 +-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
955 +-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
956 ++#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
957 ++ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
958 ++
959 ++#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
960 ++ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
961 +
962 + /* Device 16, functions 2-7 */
963 +
964 +@@ -1916,14 +1919,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
965 + pci_read_config_dword(pvt->pci_tad[i],
966 + rir_offset[j][k],
967 + &reg);
968 +- tmp_mb = RIR_OFFSET(reg) << 6;
969 ++ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
970 +
971 + gb = div_u64_rem(tmp_mb, 1024, &mb);
972 + edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
973 + i, j, k,
974 + gb, (mb*1000)/1024,
975 + ((u64)tmp_mb) << 20L,
976 +- (u32)RIR_RNK_TGT(reg),
977 ++ (u32)RIR_RNK_TGT(pvt->info.type, reg),
978 + reg);
979 + }
980 + }
981 +@@ -2256,7 +2259,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
982 + pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
983 + rir_offset[n_rir][idx],
984 + &reg);
985 +- *rank = RIR_RNK_TGT(reg);
986 ++ *rank = RIR_RNK_TGT(pvt->info.type, reg);
987 +
988 + edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
989 + n_rir,
990 +diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
991 +index 8b3226dca1d9..caff46c0e214 100644
992 +--- a/drivers/extcon/extcon-palmas.c
993 ++++ b/drivers/extcon/extcon-palmas.c
994 +@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
995 +
996 + palmas_enable_irq(palmas_usb);
997 + /* perform initial detection */
998 ++ if (palmas_usb->enable_gpio_vbus_detection)
999 ++ palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
1000 + palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
1001 + device_set_wakeup_capable(&pdev->dev, true);
1002 + return 0;
1003 +diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
1004 +index e85e7539cf5d..eb43ae4835c1 100644
1005 +--- a/drivers/gpio/gpio-sch.c
1006 ++++ b/drivers/gpio/gpio-sch.c
1007 +@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
1008 + return gpio % 8;
1009 + }
1010 +
1011 +-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
1012 ++static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
1013 + {
1014 +- struct sch_gpio *sch = gpiochip_get_data(gc);
1015 + unsigned short offset, bit;
1016 + u8 reg_val;
1017 +
1018 +@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
1019 + return reg_val;
1020 + }
1021 +
1022 +-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
1023 ++static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
1024 + int val)
1025 + {
1026 +- struct sch_gpio *sch = gpiochip_get_data(gc);
1027 + unsigned short offset, bit;
1028 + u8 reg_val;
1029 +
1030 +@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
1031 + struct sch_gpio *sch = gpiochip_get_data(gc);
1032 +
1033 + spin_lock(&sch->lock);
1034 +- sch_gpio_reg_set(gc, gpio_num, GIO, 1);
1035 ++ sch_gpio_reg_set(sch, gpio_num, GIO, 1);
1036 + spin_unlock(&sch->lock);
1037 + return 0;
1038 + }
1039 +
1040 + static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
1041 + {
1042 +- return sch_gpio_reg_get(gc, gpio_num, GLV);
1043 ++ struct sch_gpio *sch = gpiochip_get_data(gc);
1044 ++ return sch_gpio_reg_get(sch, gpio_num, GLV);
1045 + }
1046 +
1047 + static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
1048 +@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
1049 + struct sch_gpio *sch = gpiochip_get_data(gc);
1050 +
1051 + spin_lock(&sch->lock);
1052 +- sch_gpio_reg_set(gc, gpio_num, GLV, val);
1053 ++ sch_gpio_reg_set(sch, gpio_num, GLV, val);
1054 + spin_unlock(&sch->lock);
1055 + }
1056 +
1057 +@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
1058 + struct sch_gpio *sch = gpiochip_get_data(gc);
1059 +
1060 + spin_lock(&sch->lock);
1061 +- sch_gpio_reg_set(gc, gpio_num, GIO, 0);
1062 ++ sch_gpio_reg_set(sch, gpio_num, GIO, 0);
1063 + spin_unlock(&sch->lock);
1064 +
1065 + /*
1066 +@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
1067 + * GPIO7 is configured by the CMC as SLPIOVR
1068 + * Enable GPIO[9:8] core powered gpios explicitly
1069 + */
1070 +- sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
1071 +- sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
1072 ++ sch_gpio_reg_set(sch, 8, GEN, 1);
1073 ++ sch_gpio_reg_set(sch, 9, GEN, 1);
1074 + /*
1075 + * SUS_GPIO[2:0] enabled by default
1076 + * Enable SUS_GPIO3 resume powered gpio explicitly
1077 + */
1078 +- sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
1079 ++ sch_gpio_reg_set(sch, 13, GEN, 1);
1080 + break;
1081 +
1082 + case PCI_DEVICE_ID_INTEL_ITC_LPC:
1083 +diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
1084 +index 3a5c7011ad3b..8b830996fe02 100644
1085 +--- a/drivers/gpio/gpiolib-legacy.c
1086 ++++ b/drivers/gpio/gpiolib-legacy.c
1087 +@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
1088 + if (!desc && gpio_is_valid(gpio))
1089 + return -EPROBE_DEFER;
1090 +
1091 ++ err = gpiod_request(desc, label);
1092 ++ if (err)
1093 ++ return err;
1094 ++
1095 + if (flags & GPIOF_OPEN_DRAIN)
1096 + set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1097 +
1098 +@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
1099 + if (flags & GPIOF_ACTIVE_LOW)
1100 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1101 +
1102 +- err = gpiod_request(desc, label);
1103 +- if (err)
1104 +- return err;
1105 +-
1106 + if (flags & GPIOF_DIR_IN)
1107 + err = gpiod_direction_input(desc);
1108 + else
1109 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1110 +index cf3e71243d6d..996a73390bba 100644
1111 +--- a/drivers/gpio/gpiolib.c
1112 ++++ b/drivers/gpio/gpiolib.c
1113 +@@ -1324,14 +1324,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
1114 + spin_lock_irqsave(&gpio_lock, flags);
1115 + }
1116 + done:
1117 +- if (status < 0) {
1118 +- /* Clear flags that might have been set by the caller before
1119 +- * requesting the GPIO.
1120 +- */
1121 +- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
1122 +- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
1123 +- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
1124 +- }
1125 + spin_unlock_irqrestore(&gpio_lock, flags);
1126 + return status;
1127 + }
1128 +@@ -1345,8 +1337,12 @@ done:
1129 + #define VALIDATE_DESC(desc) do { \
1130 + if (!desc) \
1131 + return 0; \
1132 ++ if (IS_ERR(desc)) { \
1133 ++ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1134 ++ return PTR_ERR(desc); \
1135 ++ } \
1136 + if (!desc->gdev) { \
1137 +- pr_warn("%s: invalid GPIO\n", __func__); \
1138 ++ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1139 + return -EINVAL; \
1140 + } \
1141 + if ( !desc->gdev->chip ) { \
1142 +@@ -1358,8 +1354,12 @@ done:
1143 + #define VALIDATE_DESC_VOID(desc) do { \
1144 + if (!desc) \
1145 + return; \
1146 ++ if (IS_ERR(desc)) { \
1147 ++ pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1148 ++ return; \
1149 ++ } \
1150 + if (!desc->gdev) { \
1151 +- pr_warn("%s: invalid GPIO\n", __func__); \
1152 ++ pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1153 + return; \
1154 + } \
1155 + if (!desc->gdev->chip) { \
1156 +@@ -2011,7 +2011,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
1157 + * requires this function to not return zero on an invalid descriptor
1158 + * but rather a negative error number.
1159 + */
1160 +- if (!desc || !desc->gdev || !desc->gdev->chip)
1161 ++ if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
1162 + return -EINVAL;
1163 +
1164 + chip = desc->gdev->chip;
1165 +@@ -2507,28 +2507,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
1166 + }
1167 + EXPORT_SYMBOL_GPL(gpiod_get_optional);
1168 +
1169 +-/**
1170 +- * gpiod_parse_flags - helper function to parse GPIO lookup flags
1171 +- * @desc: gpio to be setup
1172 +- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
1173 +- * of_get_gpio_hog()
1174 +- *
1175 +- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
1176 +- */
1177 +-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
1178 +-{
1179 +- if (lflags & GPIO_ACTIVE_LOW)
1180 +- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1181 +- if (lflags & GPIO_OPEN_DRAIN)
1182 +- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1183 +- if (lflags & GPIO_OPEN_SOURCE)
1184 +- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1185 +-}
1186 +
1187 + /**
1188 + * gpiod_configure_flags - helper function to configure a given GPIO
1189 + * @desc: gpio whose value will be assigned
1190 + * @con_id: function within the GPIO consumer
1191 ++ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
1192 ++ * of_get_gpio_hog()
1193 + * @dflags: gpiod_flags - optional GPIO initialization flags
1194 + *
1195 + * Return 0 on success, -ENOENT if no GPIO has been assigned to the
1196 +@@ -2536,10 +2521,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
1197 + * occurred while trying to acquire the GPIO.
1198 + */
1199 + static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
1200 +- enum gpiod_flags dflags)
1201 ++ unsigned long lflags, enum gpiod_flags dflags)
1202 + {
1203 + int status;
1204 +
1205 ++ if (lflags & GPIO_ACTIVE_LOW)
1206 ++ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1207 ++ if (lflags & GPIO_OPEN_DRAIN)
1208 ++ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1209 ++ if (lflags & GPIO_OPEN_SOURCE)
1210 ++ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1211 ++
1212 + /* No particular flag request, return here... */
1213 + if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
1214 + pr_debug("no flags found for %s\n", con_id);
1215 +@@ -2606,13 +2598,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
1216 + return desc;
1217 + }
1218 +
1219 +- gpiod_parse_flags(desc, lookupflags);
1220 +-
1221 + status = gpiod_request(desc, con_id);
1222 + if (status < 0)
1223 + return ERR_PTR(status);
1224 +
1225 +- status = gpiod_configure_flags(desc, con_id, flags);
1226 ++ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
1227 + if (status < 0) {
1228 + dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
1229 + gpiod_put(desc);
1230 +@@ -2668,6 +2658,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
1231 + if (IS_ERR(desc))
1232 + return desc;
1233 +
1234 ++ ret = gpiod_request(desc, NULL);
1235 ++ if (ret)
1236 ++ return ERR_PTR(ret);
1237 ++
1238 + if (active_low)
1239 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1240 +
1241 +@@ -2678,10 +2672,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
1242 + set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1243 + }
1244 +
1245 +- ret = gpiod_request(desc, NULL);
1246 +- if (ret)
1247 +- return ERR_PTR(ret);
1248 +-
1249 + return desc;
1250 + }
1251 + EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
1252 +@@ -2734,8 +2724,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
1253 + chip = gpiod_to_chip(desc);
1254 + hwnum = gpio_chip_hwgpio(desc);
1255 +
1256 +- gpiod_parse_flags(desc, lflags);
1257 +-
1258 + local_desc = gpiochip_request_own_desc(chip, hwnum, name);
1259 + if (IS_ERR(local_desc)) {
1260 + pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
1261 +@@ -2743,7 +2731,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
1262 + return PTR_ERR(local_desc);
1263 + }
1264 +
1265 +- status = gpiod_configure_flags(desc, name, dflags);
1266 ++ status = gpiod_configure_flags(desc, name, lflags, dflags);
1267 + if (status < 0) {
1268 + pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
1269 + name, chip->label, hwnum);
1270 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1271 +index 6043dc7c3a94..3e21732f22e3 100644
1272 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1273 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1274 +@@ -880,7 +880,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
1275 + struct cgs_acpi_method_argument *argument = NULL;
1276 + uint32_t i, count;
1277 + acpi_status status;
1278 +- int result;
1279 ++ int result = 0;
1280 + uint32_t func_no = 0xFFFFFFFF;
1281 +
1282 + handle = ACPI_HANDLE(&adev->pdev->dev);
1283 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1284 +index b04337de65d1..d78739d2952d 100644
1285 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1286 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1287 +@@ -448,7 +448,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1288 + dev_info.max_memory_clock = adev->pm.default_mclk * 10;
1289 + }
1290 + dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
1291 +- dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
1292 ++ dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
1293 ++ adev->gfx.config.max_shader_engines;
1294 + dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
1295 + dev_info._pad = 0;
1296 + dev_info.ids_flags = 0;
1297 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1298 +index bb8709066fd8..d2216f83bd7a 100644
1299 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1300 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1301 +@@ -5074,7 +5074,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
1302 + case 2:
1303 + for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1304 + ring = &adev->gfx.compute_ring[i];
1305 +- if ((ring->me == me_id) & (ring->pipe == pipe_id))
1306 ++ if ((ring->me == me_id) && (ring->pipe == pipe_id))
1307 + amdgpu_fence_process(ring);
1308 + }
1309 + break;
1310 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1311 +index ac005796b71c..7708d90b9da9 100644
1312 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1313 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1314 +@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
1315 + pqm_uninit(&p->pqm);
1316 +
1317 + /* Iterate over all process device data structure and check
1318 +- * if we should reset all wavefronts */
1319 +- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
1320 ++ * if we should delete debug managers and reset all wavefronts
1321 ++ */
1322 ++ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
1323 ++ if ((pdd->dev->dbgmgr) &&
1324 ++ (pdd->dev->dbgmgr->pasid == p->pasid))
1325 ++ kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
1326 ++
1327 + if (pdd->reset_wavefronts) {
1328 + pr_warn("amdkfd: Resetting all wave fronts\n");
1329 + dbgdev_wave_reset_wavefronts(pdd->dev, p);
1330 + pdd->reset_wavefronts = false;
1331 + }
1332 ++ }
1333 +
1334 + mutex_unlock(&p->mutex);
1335 +
1336 +@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
1337 +
1338 + idx = srcu_read_lock(&kfd_processes_srcu);
1339 +
1340 ++ /*
1341 ++ * Look for the process that matches the pasid. If there is no such
1342 ++ * process, we either released it in amdkfd's own notifier, or there
1343 ++ * is a bug. Unfortunately, there is no way to tell...
1344 ++ */
1345 + hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
1346 +- if (p->pasid == pasid)
1347 +- break;
1348 ++ if (p->pasid == pasid) {
1349 +
1350 +- srcu_read_unlock(&kfd_processes_srcu, idx);
1351 ++ srcu_read_unlock(&kfd_processes_srcu, idx);
1352 +
1353 +- BUG_ON(p->pasid != pasid);
1354 ++ pr_debug("Unbinding process %d from IOMMU\n", pasid);
1355 +
1356 +- mutex_lock(&p->mutex);
1357 ++ mutex_lock(&p->mutex);
1358 +
1359 +- if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1360 +- kfd_dbgmgr_destroy(dev->dbgmgr);
1361 ++ if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1362 ++ kfd_dbgmgr_destroy(dev->dbgmgr);
1363 +
1364 +- pqm_uninit(&p->pqm);
1365 ++ pqm_uninit(&p->pqm);
1366 +
1367 +- pdd = kfd_get_process_device_data(dev, p);
1368 ++ pdd = kfd_get_process_device_data(dev, p);
1369 +
1370 +- if (!pdd) {
1371 +- mutex_unlock(&p->mutex);
1372 +- return;
1373 +- }
1374 ++ if (!pdd) {
1375 ++ mutex_unlock(&p->mutex);
1376 ++ return;
1377 ++ }
1378 +
1379 +- if (pdd->reset_wavefronts) {
1380 +- dbgdev_wave_reset_wavefronts(pdd->dev, p);
1381 +- pdd->reset_wavefronts = false;
1382 +- }
1383 ++ if (pdd->reset_wavefronts) {
1384 ++ dbgdev_wave_reset_wavefronts(pdd->dev, p);
1385 ++ pdd->reset_wavefronts = false;
1386 ++ }
1387 +
1388 +- /*
1389 +- * Just mark pdd as unbound, because we still need it to call
1390 +- * amd_iommu_unbind_pasid() in when the process exits.
1391 +- * We don't call amd_iommu_unbind_pasid() here
1392 +- * because the IOMMU called us.
1393 +- */
1394 +- pdd->bound = false;
1395 ++ /*
1396 ++ * Just mark pdd as unbound, because we still need it
1397 ++ * to call amd_iommu_unbind_pasid() in when the
1398 ++ * process exits.
1399 ++ * We don't call amd_iommu_unbind_pasid() here
1400 ++ * because the IOMMU called us.
1401 ++ */
1402 ++ pdd->bound = false;
1403 +
1404 +- mutex_unlock(&p->mutex);
1405 ++ mutex_unlock(&p->mutex);
1406 ++
1407 ++ return;
1408 ++ }
1409 ++
1410 ++ srcu_read_unlock(&kfd_processes_srcu, idx);
1411 + }
1412 +
1413 + struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
1414 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1415 +index fa208ada6892..efb77eda7508 100644
1416 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1417 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1418 +@@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
1419 + {
1420 + PHM_FUNC_CHECK(hwmgr);
1421 +
1422 +- if (hwmgr->hwmgr_func->store_cc6_data == NULL)
1423 ++ if (display_config == NULL)
1424 + return -EINVAL;
1425 +
1426 + hwmgr->display_config = *display_config;
1427 ++
1428 ++ if (hwmgr->hwmgr_func->store_cc6_data == NULL)
1429 ++ return -EINVAL;
1430 ++
1431 + /* to do pass other display configuration in furture */
1432 +
1433 + if (hwmgr->hwmgr_func->store_cc6_data)
1434 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1435 +index 7b2d5000292d..7cce483b0859 100644
1436 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1437 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1438 +@@ -21,6 +21,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
1439 + return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
1440 + }
1441 +
1442 ++bool acpi_atcs_notify_pcie_device_ready(void *device)
1443 ++{
1444 ++ int32_t temp_buffer = 1;
1445 ++
1446 ++ return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
1447 ++ ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
1448 ++ &temp_buffer,
1449 ++ NULL,
1450 ++ 0,
1451 ++ sizeof(temp_buffer),
1452 ++ 0);
1453 ++}
1454 ++
1455 ++
1456 + int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1457 + {
1458 + struct atcs_pref_req_input atcs_input;
1459 +@@ -29,7 +43,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1460 + int result;
1461 + struct cgs_system_info info = {0};
1462 +
1463 +- if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
1464 ++ if( 0 != acpi_atcs_notify_pcie_device_ready(device))
1465 + return -EINVAL;
1466 +
1467 + info.size = sizeof(struct cgs_system_info);
1468 +@@ -54,7 +68,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1469 + ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
1470 + &atcs_input,
1471 + &atcs_output,
1472 +- 0,
1473 ++ 1,
1474 + sizeof(atcs_input),
1475 + sizeof(atcs_output));
1476 + if (result != 0)
1477 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1478 +index 0d5d8372953e..aae2e8ec0542 100644
1479 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1480 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1481 +@@ -1298,7 +1298,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1482 + table->Smio[count] |=
1483 + data->mvdd_voltage_table.entries[count].smio_low;
1484 + }
1485 +- table->SmioMask2 = data->vddci_voltage_table.mask_low;
1486 ++ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
1487 +
1488 + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1489 + }
1490 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1491 +index b156481b50e8..17766e8da0ca 100644
1492 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1493 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1494 +@@ -299,7 +299,7 @@ static int init_dpm_2_parameters(
1495 + (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
1496 +
1497 + if (0 != powerplay_table->usPPMTableOffset) {
1498 +- if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
1499 ++ if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
1500 + phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1501 + PHM_PlatformCaps_EnablePlatformPowerManagement);
1502 + }
1503 +diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1504 +index 3bd5e69b9045..3df5de2cdab0 100644
1505 +--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1506 ++++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1507 +@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
1508 + extern int acpi_pcie_perf_request(void *device,
1509 + uint8_t perf_req,
1510 + bool advertise);
1511 ++extern bool acpi_atcs_notify_pcie_device_ready(void *device);
1512 +diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1513 +index d65dcaee3832..6d9c0f5bcba6 100644
1514 +--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1515 ++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1516 +@@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
1517 +
1518 + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
1519 + factor_reg);
1520 ++ } else {
1521 ++ atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
1522 + }
1523 + }
1524 +
1525 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1526 +index d307d9627887..080a09014580 100644
1527 +--- a/drivers/gpu/drm/drm_atomic.c
1528 ++++ b/drivers/gpu/drm/drm_atomic.c
1529 +@@ -354,6 +354,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1530 + drm_property_unreference_blob(state->mode_blob);
1531 + state->mode_blob = NULL;
1532 +
1533 ++ memset(&state->mode, 0, sizeof(state->mode));
1534 ++
1535 + if (blob) {
1536 + if (blob->length != sizeof(struct drm_mode_modeinfo) ||
1537 + drm_mode_convert_umode(&state->mode,
1538 +@@ -366,7 +368,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1539 + DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
1540 + state->mode.name, state);
1541 + } else {
1542 +- memset(&state->mode, 0, sizeof(state->mode));
1543 + state->enable = false;
1544 + DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
1545 + state);
1546 +@@ -1287,14 +1288,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1547 + */
1548 + void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1549 + {
1550 ++ struct drm_device *dev = state->dev;
1551 ++ unsigned crtc_mask = 0;
1552 ++ struct drm_crtc *crtc;
1553 + int ret;
1554 ++ bool global = false;
1555 ++
1556 ++ drm_for_each_crtc(crtc, dev) {
1557 ++ if (crtc->acquire_ctx != state->acquire_ctx)
1558 ++ continue;
1559 ++
1560 ++ crtc_mask |= drm_crtc_mask(crtc);
1561 ++ crtc->acquire_ctx = NULL;
1562 ++ }
1563 ++
1564 ++ if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1565 ++ global = true;
1566 ++
1567 ++ dev->mode_config.acquire_ctx = NULL;
1568 ++ }
1569 +
1570 + retry:
1571 + drm_modeset_backoff(state->acquire_ctx);
1572 +
1573 +- ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
1574 ++ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
1575 + if (ret)
1576 + goto retry;
1577 ++
1578 ++ drm_for_each_crtc(crtc, dev)
1579 ++ if (drm_crtc_mask(crtc) & crtc_mask)
1580 ++ crtc->acquire_ctx = state->acquire_ctx;
1581 ++
1582 ++ if (global)
1583 ++ dev->mode_config.acquire_ctx = state->acquire_ctx;
1584 + }
1585 + EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1586 +
1587 +diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1588 +index f30de8053545..691a1b939c1c 100644
1589 +--- a/drivers/gpu/drm/drm_crtc.c
1590 ++++ b/drivers/gpu/drm/drm_crtc.c
1591 +@@ -2800,8 +2800,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1592 + goto out;
1593 + }
1594 +
1595 +- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1596 +-
1597 + /*
1598 + * Check whether the primary plane supports the fb pixel format.
1599 + * Drivers not implementing the universal planes API use a
1600 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1601 +index 71ea0521ea96..ccfe7e72d8fc 100644
1602 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1603 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1604 +@@ -2908,11 +2908,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1605 + drm_dp_port_teardown_pdt(port, port->pdt);
1606 +
1607 + if (!port->input && port->vcpi.vcpi > 0) {
1608 +- if (mgr->mst_state) {
1609 +- drm_dp_mst_reset_vcpi_slots(mgr, port);
1610 +- drm_dp_update_payload_part1(mgr);
1611 +- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1612 +- }
1613 ++ drm_dp_mst_reset_vcpi_slots(mgr, port);
1614 ++ drm_dp_update_payload_part1(mgr);
1615 ++ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1616 + }
1617 +
1618 + kref_put(&port->kref, drm_dp_free_mst_port);
1619 +diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
1620 +index bb88e3df9257..e619b00c7343 100644
1621 +--- a/drivers/gpu/drm/drm_fb_cma_helper.c
1622 ++++ b/drivers/gpu/drm/drm_fb_cma_helper.c
1623 +@@ -301,7 +301,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
1624 + err_fb_info_destroy:
1625 + drm_fb_helper_release_fbi(helper);
1626 + err_gem_free_object:
1627 +- dev->driver->gem_free_object(&obj->base);
1628 ++ drm_gem_object_unreference_unlocked(&obj->base);
1629 + return ret;
1630 + }
1631 +
1632 +diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
1633 +index 1f500a1b9969..d988ca0b597a 100644
1634 +--- a/drivers/gpu/drm/drm_gem_cma_helper.c
1635 ++++ b/drivers/gpu/drm/drm_gem_cma_helper.c
1636 +@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
1637 + return cma_obj;
1638 +
1639 + error:
1640 +- drm->driver->gem_free_object(&cma_obj->base);
1641 ++ drm_gem_object_unreference_unlocked(&cma_obj->base);
1642 + return ERR_PTR(ret);
1643 + }
1644 + EXPORT_SYMBOL_GPL(drm_gem_cma_create);
1645 +@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
1646 + * and handle has the id what user can see.
1647 + */
1648 + ret = drm_gem_handle_create(file_priv, gem_obj, handle);
1649 +- if (ret)
1650 +- goto err_handle_create;
1651 +-
1652 + /* drop reference from allocate - handle holds it now. */
1653 + drm_gem_object_unreference_unlocked(gem_obj);
1654 ++ if (ret)
1655 ++ return ERR_PTR(ret);
1656 +
1657 + return cma_obj;
1658 +-
1659 +-err_handle_create:
1660 +- drm->driver->gem_free_object(gem_obj);
1661 +-
1662 +- return ERR_PTR(ret);
1663 + }
1664 +
1665 + /**
1666 +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1667 +index f7448a5e95a9..5d0fc2644352 100644
1668 +--- a/drivers/gpu/drm/drm_modes.c
1669 ++++ b/drivers/gpu/drm/drm_modes.c
1670 +@@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1671 + if (out->status != MODE_OK)
1672 + goto out;
1673 +
1674 ++ drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1675 ++
1676 + ret = 0;
1677 +
1678 + out:
1679 +diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1680 +index e8d9337a66d8..77886f1182f1 100644
1681 +--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1682 ++++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1683 +@@ -40,9 +40,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
1684 + .reg_bits = 32,
1685 + .reg_stride = 4,
1686 + .val_bits = 32,
1687 +- .cache_type = REGCACHE_RBTREE,
1688 ++ .cache_type = REGCACHE_FLAT,
1689 +
1690 + .volatile_reg = fsl_dcu_drm_is_volatile_reg,
1691 ++ .max_register = 0x11fc,
1692 + };
1693 +
1694 + static int fsl_dcu_drm_irq_init(struct drm_device *dev)
1695 +diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1696 +index d3c473ffb90a..3af40616bf8b 100644
1697 +--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
1698 ++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1699 +@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1700 + if (!mutex_is_locked(mutex))
1701 + return false;
1702 +
1703 +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1704 ++#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
1705 + return mutex->owner == task;
1706 + #else
1707 + /* Since UP may be pre-empted, we cannot assume that we own the lock */
1708 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1709 +index 7741efbd5e57..e5db9e1f623f 100644
1710 +--- a/drivers/gpu/drm/i915/intel_display.c
1711 ++++ b/drivers/gpu/drm/i915/intel_display.c
1712 +@@ -8229,12 +8229,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1713 + {
1714 + struct drm_i915_private *dev_priv = dev->dev_private;
1715 + struct intel_encoder *encoder;
1716 ++ int i;
1717 + u32 val, final;
1718 + bool has_lvds = false;
1719 + bool has_cpu_edp = false;
1720 + bool has_panel = false;
1721 + bool has_ck505 = false;
1722 + bool can_ssc = false;
1723 ++ bool using_ssc_source = false;
1724 +
1725 + /* We need to take the global config into account */
1726 + for_each_intel_encoder(dev, encoder) {
1727 +@@ -8261,8 +8263,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1728 + can_ssc = true;
1729 + }
1730 +
1731 +- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
1732 +- has_panel, has_lvds, has_ck505);
1733 ++ /* Check if any DPLLs are using the SSC source */
1734 ++ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1735 ++ u32 temp = I915_READ(PCH_DPLL(i));
1736 ++
1737 ++ if (!(temp & DPLL_VCO_ENABLE))
1738 ++ continue;
1739 ++
1740 ++ if ((temp & PLL_REF_INPUT_MASK) ==
1741 ++ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1742 ++ using_ssc_source = true;
1743 ++ break;
1744 ++ }
1745 ++ }
1746 ++
1747 ++ DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
1748 ++ has_panel, has_lvds, has_ck505, using_ssc_source);
1749 +
1750 + /* Ironlake: try to setup display ref clock before DPLL
1751 + * enabling. This is only under driver's control after
1752 +@@ -8299,9 +8315,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1753 + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
1754 + } else
1755 + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1756 +- } else {
1757 +- final |= DREF_SSC_SOURCE_DISABLE;
1758 +- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1759 ++ } else if (using_ssc_source) {
1760 ++ final |= DREF_SSC_SOURCE_ENABLE;
1761 ++ final |= DREF_SSC1_ENABLE;
1762 + }
1763 +
1764 + if (final == val)
1765 +@@ -8347,7 +8363,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1766 + POSTING_READ(PCH_DREF_CONTROL);
1767 + udelay(200);
1768 + } else {
1769 +- DRM_DEBUG_KMS("Disabling SSC entirely\n");
1770 ++ DRM_DEBUG_KMS("Disabling CPU source output\n");
1771 +
1772 + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1773 +
1774 +@@ -8358,16 +8374,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1775 + POSTING_READ(PCH_DREF_CONTROL);
1776 + udelay(200);
1777 +
1778 +- /* Turn off the SSC source */
1779 +- val &= ~DREF_SSC_SOURCE_MASK;
1780 +- val |= DREF_SSC_SOURCE_DISABLE;
1781 ++ if (!using_ssc_source) {
1782 ++ DRM_DEBUG_KMS("Disabling SSC source\n");
1783 +
1784 +- /* Turn off SSC1 */
1785 +- val &= ~DREF_SSC1_ENABLE;
1786 ++ /* Turn off the SSC source */
1787 ++ val &= ~DREF_SSC_SOURCE_MASK;
1788 ++ val |= DREF_SSC_SOURCE_DISABLE;
1789 +
1790 +- I915_WRITE(PCH_DREF_CONTROL, val);
1791 +- POSTING_READ(PCH_DREF_CONTROL);
1792 +- udelay(200);
1793 ++ /* Turn off SSC1 */
1794 ++ val &= ~DREF_SSC1_ENABLE;
1795 ++
1796 ++ I915_WRITE(PCH_DREF_CONTROL, val);
1797 ++ POSTING_READ(PCH_DREF_CONTROL);
1798 ++ udelay(200);
1799 ++ }
1800 + }
1801 +
1802 + BUG_ON(val != final);
1803 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1804 +index 412a34c39522..69054ef978fa 100644
1805 +--- a/drivers/gpu/drm/i915/intel_dp.c
1806 ++++ b/drivers/gpu/drm/i915/intel_dp.c
1807 +@@ -4942,13 +4942,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
1808 +
1809 + void intel_dp_encoder_reset(struct drm_encoder *encoder)
1810 + {
1811 +- struct intel_dp *intel_dp;
1812 ++ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
1813 ++ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1814 ++
1815 ++ if (!HAS_DDI(dev_priv))
1816 ++ intel_dp->DP = I915_READ(intel_dp->output_reg);
1817 +
1818 + if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
1819 + return;
1820 +
1821 +- intel_dp = enc_to_intel_dp(encoder);
1822 +-
1823 + pps_lock(intel_dp);
1824 +
1825 + /*
1826 +@@ -5020,9 +5022,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1827 + intel_display_power_get(dev_priv, power_domain);
1828 +
1829 + if (long_hpd) {
1830 +- /* indicate that we need to restart link training */
1831 +- intel_dp->train_set_valid = false;
1832 +-
1833 + if (!intel_digital_port_connected(dev_priv, intel_dig_port))
1834 + goto mst_fail;
1835 +
1836 +diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
1837 +index 0b8eefc2acc5..926a1e6ea2f6 100644
1838 +--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
1839 ++++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
1840 +@@ -85,8 +85,7 @@ static bool
1841 + intel_dp_reset_link_train(struct intel_dp *intel_dp,
1842 + uint8_t dp_train_pat)
1843 + {
1844 +- if (!intel_dp->train_set_valid)
1845 +- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1846 ++ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1847 + intel_dp_set_signal_levels(intel_dp);
1848 + return intel_dp_set_link_train(intel_dp, dp_train_pat);
1849 + }
1850 +@@ -161,22 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
1851 + break;
1852 + }
1853 +
1854 +- /*
1855 +- * if we used previously trained voltage and pre-emphasis values
1856 +- * and we don't get clock recovery, reset link training values
1857 +- */
1858 +- if (intel_dp->train_set_valid) {
1859 +- DRM_DEBUG_KMS("clock recovery not ok, reset");
1860 +- /* clear the flag as we are not reusing train set */
1861 +- intel_dp->train_set_valid = false;
1862 +- if (!intel_dp_reset_link_train(intel_dp,
1863 +- DP_TRAINING_PATTERN_1 |
1864 +- DP_LINK_SCRAMBLING_DISABLE)) {
1865 +- DRM_ERROR("failed to enable link training\n");
1866 +- return;
1867 +- }
1868 +- continue;
1869 +- }
1870 +
1871 + /* Check to see if we've tried the max voltage */
1872 + for (i = 0; i < intel_dp->lane_count; i++)
1873 +@@ -284,7 +267,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1874 + /* Make sure clock is still ok */
1875 + if (!drm_dp_clock_recovery_ok(link_status,
1876 + intel_dp->lane_count)) {
1877 +- intel_dp->train_set_valid = false;
1878 + intel_dp_link_training_clock_recovery(intel_dp);
1879 + intel_dp_set_link_train(intel_dp,
1880 + training_pattern |
1881 +@@ -301,7 +283,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1882 +
1883 + /* Try 5 times, then try clock recovery if that fails */
1884 + if (tries > 5) {
1885 +- intel_dp->train_set_valid = false;
1886 + intel_dp_link_training_clock_recovery(intel_dp);
1887 + intel_dp_set_link_train(intel_dp,
1888 + training_pattern |
1889 +@@ -322,10 +303,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1890 +
1891 + intel_dp_set_idle_link_train(intel_dp);
1892 +
1893 +- if (channel_eq) {
1894 +- intel_dp->train_set_valid = true;
1895 ++ if (channel_eq)
1896 + DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1897 +- }
1898 + }
1899 +
1900 + void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1901 +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1902 +index 3a30b37d6885..8dd2cc56451f 100644
1903 +--- a/drivers/gpu/drm/i915/intel_drv.h
1904 ++++ b/drivers/gpu/drm/i915/intel_drv.h
1905 +@@ -811,8 +811,6 @@ struct intel_dp {
1906 + /* This is called before a link training is starterd */
1907 + void (*prepare_link_retrain)(struct intel_dp *intel_dp);
1908 +
1909 +- bool train_set_valid;
1910 +-
1911 + /* Displayport compliance testing */
1912 + unsigned long compliance_test_type;
1913 + unsigned long compliance_test_data;
1914 +diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
1915 +index 0f0492f4a357..28f4407722a1 100644
1916 +--- a/drivers/gpu/drm/i915/intel_fbc.c
1917 ++++ b/drivers/gpu/drm/i915/intel_fbc.c
1918 +@@ -823,8 +823,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
1919 + {
1920 + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1921 + struct intel_fbc *fbc = &dev_priv->fbc;
1922 +- bool enable_by_default = IS_HASWELL(dev_priv) ||
1923 +- IS_BROADWELL(dev_priv);
1924 ++ bool enable_by_default = IS_BROADWELL(dev_priv);
1925 +
1926 + if (intel_vgpu_active(dev_priv->dev)) {
1927 + fbc->no_fbc_reason = "VGPU is active";
1928 +diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1929 +index 14e64e08909e..d347dca17267 100644
1930 +--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1931 ++++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1932 +@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1933 + }
1934 + }
1935 +
1936 +- fvv = pllreffreq * testn / testm;
1937 ++ fvv = pllreffreq * (n + 1) / (m + 1);
1938 + fvv = (fvv - 800000) / 50000;
1939 +
1940 + if (fvv > 15)
1941 +@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1942 + WREG_DAC(MGA1064_PIX_PLLC_M, m);
1943 + WREG_DAC(MGA1064_PIX_PLLC_N, n);
1944 + WREG_DAC(MGA1064_PIX_PLLC_P, p);
1945 ++
1946 ++ if (mdev->unique_rev_id >= 0x04) {
1947 ++ WREG_DAC(0x1a, 0x09);
1948 ++ msleep(20);
1949 ++ WREG_DAC(0x1a, 0x01);
1950 ++
1951 ++ }
1952 ++
1953 + return 0;
1954 + }
1955 +
1956 +diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1957 +index db10c11f0595..c5a6ebd5a478 100644
1958 +--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1959 ++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1960 +@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
1961 + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
1962 +
1963 + struct nvbios_ocfg {
1964 +- u16 match;
1965 ++ u8 proto;
1966 ++ u8 flags;
1967 + u16 clkcmp[2];
1968 + };
1969 +
1970 +@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
1971 + u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
1972 + u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
1973 + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
1974 +-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
1975 ++u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
1976 + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
1977 + u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
1978 + #endif
1979 +diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1980 +index 59f27e774acb..e40a1b07a014 100644
1981 +--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1982 ++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1983 +@@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
1984 + if (ret)
1985 + goto fini;
1986 +
1987 ++ if (fbcon->helper.fbdev)
1988 ++ fbcon->helper.fbdev->pixmap.buf_align = 4;
1989 + return 0;
1990 +
1991 + fini:
1992 +diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1993 +index 789dc2993b0d..8f715feadf56 100644
1994 +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
1995 ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1996 +@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1997 + uint32_t fg;
1998 + uint32_t bg;
1999 + uint32_t dsize;
2000 +- uint32_t width;
2001 + uint32_t *data = (uint32_t *)image->data;
2002 + int ret;
2003 +
2004 +@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2005 + if (ret)
2006 + return ret;
2007 +
2008 +- width = ALIGN(image->width, 8);
2009 +- dsize = ALIGN(width * image->height, 32) >> 5;
2010 +-
2011 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
2012 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
2013 + fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
2014 +@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2015 + ((image->dx + image->width) & 0xffff));
2016 + OUT_RING(chan, bg);
2017 + OUT_RING(chan, fg);
2018 +- OUT_RING(chan, (image->height << 16) | width);
2019 ++ OUT_RING(chan, (image->height << 16) | image->width);
2020 + OUT_RING(chan, (image->height << 16) | image->width);
2021 + OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
2022 +
2023 ++ dsize = ALIGN(image->width * image->height, 32) >> 5;
2024 + while (dsize) {
2025 + int iter_len = dsize > 128 ? 128 : dsize;
2026 +
2027 +diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
2028 +index e05499d6ed83..a4e259a00430 100644
2029 +--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
2030 ++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
2031 +@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2032 + struct nouveau_fbdev *nfbdev = info->par;
2033 + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
2034 + struct nouveau_channel *chan = drm->channel;
2035 +- uint32_t width, dwords, *data = (uint32_t *)image->data;
2036 ++ uint32_t dwords, *data = (uint32_t *)image->data;
2037 + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
2038 + uint32_t *palette = info->pseudo_palette;
2039 + int ret;
2040 +@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2041 + if (ret)
2042 + return ret;
2043 +
2044 +- width = ALIGN(image->width, 32);
2045 +- dwords = (width * image->height) >> 5;
2046 +-
2047 + BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
2048 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
2049 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
2050 +@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2051 + OUT_RING(chan, 0);
2052 + OUT_RING(chan, image->dy);
2053 +
2054 ++ dwords = ALIGN(image->width * image->height, 32) >> 5;
2055 + while (dwords) {
2056 + int push = dwords > 2047 ? 2047 : dwords;
2057 +
2058 +diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2059 +index c97395b4a312..f28315e865a5 100644
2060 +--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2061 ++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2062 +@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2063 + struct nouveau_fbdev *nfbdev = info->par;
2064 + struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
2065 + struct nouveau_channel *chan = drm->channel;
2066 +- uint32_t width, dwords, *data = (uint32_t *)image->data;
2067 ++ uint32_t dwords, *data = (uint32_t *)image->data;
2068 + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
2069 + uint32_t *palette = info->pseudo_palette;
2070 + int ret;
2071 +@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2072 + if (ret)
2073 + return ret;
2074 +
2075 +- width = ALIGN(image->width, 32);
2076 +- dwords = (width * image->height) >> 5;
2077 +-
2078 + BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
2079 + if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
2080 + info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
2081 +@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2082 + OUT_RING (chan, 0);
2083 + OUT_RING (chan, image->dy);
2084 +
2085 ++ dwords = ALIGN(image->width * image->height, 32) >> 5;
2086 + while (dwords) {
2087 + int push = dwords > 2047 ? 2047 : dwords;
2088 +
2089 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2090 +index 18fab3973ce5..62ad0300cfa5 100644
2091 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2092 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2093 +@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
2094 + .fini = nvkm_device_pci_fini,
2095 + .resource_addr = nvkm_device_pci_resource_addr,
2096 + .resource_size = nvkm_device_pci_resource_size,
2097 +- .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
2098 ++ .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
2099 + };
2100 +
2101 + int
2102 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2103 +index a74c5dd27dc0..e2a64ed14b22 100644
2104 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2105 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2106 +@@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
2107 + nvkm-y += nvkm/engine/disp/sornv50.o
2108 + nvkm-y += nvkm/engine/disp/sorg94.o
2109 + nvkm-y += nvkm/engine/disp/sorgf119.o
2110 ++nvkm-y += nvkm/engine/disp/sorgm107.o
2111 + nvkm-y += nvkm/engine/disp/sorgm200.o
2112 + nvkm-y += nvkm/engine/disp/dport.o
2113 +
2114 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2115 +index f0314664349c..5dd34382f55a 100644
2116 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2117 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2118 +@@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
2119 + mask |= 0x0001 << or;
2120 + mask |= 0x0100 << head;
2121 +
2122 ++
2123 + list_for_each_entry(outp, &disp->base.outp, head) {
2124 + if ((outp->info.hasht & 0xff) == type &&
2125 + (outp->info.hashm & mask) == mask) {
2126 +@@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2127 + if (!outp)
2128 + return NULL;
2129 +
2130 ++ *conf = (ctrl & 0x00000f00) >> 8;
2131 + switch (outp->info.type) {
2132 + case DCB_OUTPUT_TMDS:
2133 +- *conf = (ctrl & 0x00000f00) >> 8;
2134 + if (*conf == 5)
2135 + *conf |= 0x0100;
2136 + break;
2137 + case DCB_OUTPUT_LVDS:
2138 +- *conf = disp->sor.lvdsconf;
2139 +- break;
2140 +- case DCB_OUTPUT_DP:
2141 +- *conf = (ctrl & 0x00000f00) >> 8;
2142 ++ *conf |= disp->sor.lvdsconf;
2143 + break;
2144 +- case DCB_OUTPUT_ANALOG:
2145 + default:
2146 +- *conf = 0x00ff;
2147 + break;
2148 + }
2149 +
2150 +- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
2151 ++ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
2152 ++ &ver, &hdr, &cnt, &len, &info2);
2153 + if (data && id < 0xff) {
2154 + data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
2155 + if (data) {
2156 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2157 +index b6944142d616..f4b9cf8574be 100644
2158 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2159 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2160 +@@ -36,7 +36,7 @@ gm107_disp = {
2161 + .outp.internal.crt = nv50_dac_output_new,
2162 + .outp.internal.tmds = nv50_sor_output_new,
2163 + .outp.internal.lvds = nv50_sor_output_new,
2164 +- .outp.internal.dp = gf119_sor_dp_new,
2165 ++ .outp.internal.dp = gm107_sor_dp_new,
2166 + .dac.nr = 3,
2167 + .dac.power = nv50_dac_power,
2168 + .dac.sense = nv50_dac_sense,
2169 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2170 +index 4226d2153b9c..fcb1b0c46d64 100644
2171 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2172 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2173 +@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2174 + if (!outp)
2175 + return NULL;
2176 +
2177 ++ *conf = (ctrl & 0x00000f00) >> 8;
2178 + if (outp->info.location == 0) {
2179 + switch (outp->info.type) {
2180 + case DCB_OUTPUT_TMDS:
2181 +- *conf = (ctrl & 0x00000f00) >> 8;
2182 + if (*conf == 5)
2183 + *conf |= 0x0100;
2184 + break;
2185 + case DCB_OUTPUT_LVDS:
2186 +- *conf = disp->sor.lvdsconf;
2187 ++ *conf |= disp->sor.lvdsconf;
2188 + break;
2189 +- case DCB_OUTPUT_DP:
2190 +- *conf = (ctrl & 0x00000f00) >> 8;
2191 +- break;
2192 +- case DCB_OUTPUT_ANALOG:
2193 + default:
2194 +- *conf = 0x00ff;
2195 + break;
2196 + }
2197 + } else {
2198 +@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2199 + pclk = pclk / 2;
2200 + }
2201 +
2202 +- data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
2203 ++ data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
2204 ++ &ver, &hdr, &cnt, &len, &info2);
2205 + if (data && id < 0xff) {
2206 + data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
2207 + if (data) {
2208 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2209 +index e9067ba4e179..4e983f6d7032 100644
2210 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2211 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2212 +@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
2213 + int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2214 + struct nvkm_output **);
2215 + int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
2216 ++int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
2217 +
2218 +-int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2219 +- struct nvkm_output **);
2220 ++int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2221 ++ struct nvkm_output **);
2222 ++int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
2223 ++
2224 ++int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2225 ++ struct nvkm_output **);
2226 + #endif
2227 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2228 +index b4b41b135643..49bd5da194e1 100644
2229 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2230 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2231 +@@ -40,8 +40,8 @@ static int
2232 + gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2233 + {
2234 + struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2235 +- const u32 loff = gf119_sor_loff(outp);
2236 +- nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
2237 ++ const u32 soff = gf119_sor_soff(outp);
2238 ++ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
2239 + return 0;
2240 + }
2241 +
2242 +@@ -64,7 +64,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
2243 + return 0;
2244 + }
2245 +
2246 +-static int
2247 ++int
2248 + gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
2249 + int ln, int vs, int pe, int pc)
2250 + {
2251 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
2252 +new file mode 100644
2253 +index 000000000000..37790b2617c5
2254 +--- /dev/null
2255 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
2256 +@@ -0,0 +1,53 @@
2257 ++/*
2258 ++ * Copyright 2016 Red Hat Inc.
2259 ++ *
2260 ++ * Permission is hereby granted, free of charge, to any person obtaining a
2261 ++ * copy of this software and associated documentation files (the "Software"),
2262 ++ * to deal in the Software without restriction, including without limitation
2263 ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2264 ++ * and/or sell copies of the Software, and to permit persons to whom the
2265 ++ * Software is furnished to do so, subject to the following conditions:
2266 ++ *
2267 ++ * The above copyright notice and this permission notice shall be included in
2268 ++ * all copies or substantial portions of the Software.
2269 ++ *
2270 ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2271 ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2272 ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2273 ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
2274 ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2275 ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2276 ++ * OTHER DEALINGS IN THE SOFTWARE.
2277 ++ *
2278 ++ * Authors: Ben Skeggs <bskeggs@××××××.com>
2279 ++ */
2280 ++#include "nv50.h"
2281 ++#include "outpdp.h"
2282 ++
2283 ++int
2284 ++gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2285 ++{
2286 ++ struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2287 ++ const u32 soff = outp->base.or * 0x800;
2288 ++ const u32 data = 0x01010101 * pattern;
2289 ++ if (outp->base.info.sorconf.link & 1)
2290 ++ nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
2291 ++ else
2292 ++ nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
2293 ++ return 0;
2294 ++}
2295 ++
2296 ++static const struct nvkm_output_dp_func
2297 ++gm107_sor_dp_func = {
2298 ++ .pattern = gm107_sor_dp_pattern,
2299 ++ .lnk_pwr = g94_sor_dp_lnk_pwr,
2300 ++ .lnk_ctl = gf119_sor_dp_lnk_ctl,
2301 ++ .drv_ctl = gf119_sor_dp_drv_ctl,
2302 ++};
2303 ++
2304 ++int
2305 ++gm107_sor_dp_new(struct nvkm_disp *disp, int index,
2306 ++ struct dcb_output *dcbE, struct nvkm_output **poutp)
2307 ++{
2308 ++ return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
2309 ++}
2310 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2311 +index 2cfbef9c344f..c44fa7ea672a 100644
2312 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2313 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2314 +@@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
2315 + }
2316 +
2317 + static int
2318 +-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2319 +-{
2320 +- struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2321 +- const u32 soff = gm200_sor_soff(outp);
2322 +- const u32 data = 0x01010101 * pattern;
2323 +- if (outp->base.info.sorconf.link & 1)
2324 +- nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
2325 +- else
2326 +- nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
2327 +- return 0;
2328 +-}
2329 +-
2330 +-static int
2331 + gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
2332 + {
2333 + struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2334 +@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
2335 +
2336 + static const struct nvkm_output_dp_func
2337 + gm200_sor_dp_func = {
2338 +- .pattern = gm200_sor_dp_pattern,
2339 ++ .pattern = gm107_sor_dp_pattern,
2340 + .lnk_pwr = gm200_sor_dp_lnk_pwr,
2341 + .lnk_ctl = gf119_sor_dp_lnk_ctl,
2342 + .drv_ctl = gm200_sor_dp_drv_ctl,
2343 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2344 +index b2de290da16f..b0c721616c4e 100644
2345 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2346 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2347 +@@ -942,22 +942,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
2348 + }
2349 +
2350 + static const struct nvkm_enum gf100_mp_warp_error[] = {
2351 +- { 0x00, "NO_ERROR" },
2352 +- { 0x01, "STACK_MISMATCH" },
2353 ++ { 0x01, "STACK_ERROR" },
2354 ++ { 0x02, "API_STACK_ERROR" },
2355 ++ { 0x03, "RET_EMPTY_STACK_ERROR" },
2356 ++ { 0x04, "PC_WRAP" },
2357 + { 0x05, "MISALIGNED_PC" },
2358 +- { 0x08, "MISALIGNED_GPR" },
2359 +- { 0x09, "INVALID_OPCODE" },
2360 +- { 0x0d, "GPR_OUT_OF_BOUNDS" },
2361 +- { 0x0e, "MEM_OUT_OF_BOUNDS" },
2362 +- { 0x0f, "UNALIGNED_MEM_ACCESS" },
2363 ++ { 0x06, "PC_OVERFLOW" },
2364 ++ { 0x07, "MISALIGNED_IMMC_ADDR" },
2365 ++ { 0x08, "MISALIGNED_REG" },
2366 ++ { 0x09, "ILLEGAL_INSTR_ENCODING" },
2367 ++ { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
2368 ++ { 0x0b, "ILLEGAL_INSTR_PARAM" },
2369 ++ { 0x0c, "INVALID_CONST_ADDR" },
2370 ++ { 0x0d, "OOR_REG" },
2371 ++ { 0x0e, "OOR_ADDR" },
2372 ++ { 0x0f, "MISALIGNED_ADDR" },
2373 + { 0x10, "INVALID_ADDR_SPACE" },
2374 +- { 0x11, "INVALID_PARAM" },
2375 ++ { 0x11, "ILLEGAL_INSTR_PARAM2" },
2376 ++ { 0x12, "INVALID_CONST_ADDR_LDC" },
2377 ++ { 0x13, "GEOMETRY_SM_ERROR" },
2378 ++ { 0x14, "DIVERGENT" },
2379 ++ { 0x15, "WARP_EXIT" },
2380 + {}
2381 + };
2382 +
2383 + static const struct nvkm_bitfield gf100_mp_global_error[] = {
2384 ++ { 0x00000001, "SM_TO_SM_FAULT" },
2385 ++ { 0x00000002, "L1_ERROR" },
2386 + { 0x00000004, "MULTIPLE_WARP_ERRORS" },
2387 +- { 0x00000008, "OUT_OF_STACK_SPACE" },
2388 ++ { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
2389 ++ { 0x00000010, "BPT_INT" },
2390 ++ { 0x00000020, "BPT_PAUSE" },
2391 ++ { 0x00000040, "SINGLE_STEP_COMPLETE" },
2392 ++ { 0x20000000, "ECC_SEC_ERROR" },
2393 ++ { 0x40000000, "ECC_DED_ERROR" },
2394 ++ { 0x80000000, "TIMEOUT" },
2395 + {}
2396 + };
2397 +
2398 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2399 +index a5e92135cd77..9efb1b48cd54 100644
2400 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2401 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2402 +@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
2403 + {
2404 + u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
2405 + if (data) {
2406 +- info->match = nvbios_rd16(bios, data + 0x00);
2407 ++ info->proto = nvbios_rd08(bios, data + 0x00);
2408 ++ info->flags = nvbios_rd16(bios, data + 0x01);
2409 + info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
2410 + info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
2411 + }
2412 +@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
2413 + }
2414 +
2415 + u16
2416 +-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
2417 ++nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
2418 + u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
2419 + {
2420 + u16 data, idx = 0;
2421 + while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
2422 +- if (info->match == type)
2423 ++ if ((info->proto == proto || info->proto == 0xff) &&
2424 ++ (info->flags == flags))
2425 + break;
2426 + }
2427 + return data;
2428 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2429 +index e292f5679418..389fb13a1998 100644
2430 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2431 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2432 +@@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
2433 + }
2434 +
2435 + static void
2436 +-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
2437 ++gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
2438 + {
2439 + struct nvkm_subdev *subdev = &ltc->subdev;
2440 + struct nvkm_device *device = subdev->device;
2441 +- u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
2442 ++ u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
2443 + u32 stat = nvkm_rd32(device, base + 0x00c);
2444 +
2445 + if (stat) {
2446 +@@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
2447 + while (mask) {
2448 + u32 s, c = __ffs(mask);
2449 + for (s = 0; s < ltc->lts_nr; s++)
2450 +- gm107_ltc_lts_isr(ltc, c, s);
2451 ++ gm107_ltc_intr_lts(ltc, c, s);
2452 + mask &= ~(1 << c);
2453 + }
2454 + }
2455 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2456 +index 2a29bfd5125a..e18e0dc19ec8 100644
2457 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2458 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2459 +@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
2460 + gm200_ltc = {
2461 + .oneinit = gm200_ltc_oneinit,
2462 + .init = gm200_ltc_init,
2463 +- .intr = gm107_ltc_intr, /*XXX: not validated */
2464 ++ .intr = gm107_ltc_intr,
2465 + .cbc_clear = gm107_ltc_cbc_clear,
2466 + .cbc_wait = gm107_ltc_cbc_wait,
2467 + .zbc = 16,
2468 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2469 +index d0826fb0434c..cb2986876738 100644
2470 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2471 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2472 +@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2473 + /*
2474 + * GPU helpers function.
2475 + */
2476 ++
2477 ++/**
2478 ++ * radeon_device_is_virtual - check if we are running is a virtual environment
2479 ++ *
2480 ++ * Check if the asic has been passed through to a VM (all asics).
2481 ++ * Used at driver startup.
2482 ++ * Returns true if virtual or false if not.
2483 ++ */
2484 ++static bool radeon_device_is_virtual(void)
2485 ++{
2486 ++#ifdef CONFIG_X86
2487 ++ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
2488 ++#else
2489 ++ return false;
2490 ++#endif
2491 ++}
2492 ++
2493 + /**
2494 + * radeon_card_posted - check if the hw has already been initialized
2495 + *
2496 +@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
2497 + {
2498 + uint32_t reg;
2499 +
2500 ++ /* for pass through, always force asic_init */
2501 ++ if (radeon_device_is_virtual())
2502 ++ return false;
2503 ++
2504 + /* required for EFI mode on macbook2,1 which uses an r5xx asic */
2505 + if (efi_enabled(EFI_BOOT) &&
2506 + (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
2507 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2508 +index e3daafa1be13..3e7c9ac50ccd 100644
2509 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
2510 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
2511 +@@ -1016,9 +1016,9 @@ out_unlock:
2512 + return ret;
2513 + }
2514 +
2515 +-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
2516 +- struct ttm_mem_reg *mem,
2517 +- uint32_t *new_flags)
2518 ++bool ttm_bo_mem_compat(struct ttm_placement *placement,
2519 ++ struct ttm_mem_reg *mem,
2520 ++ uint32_t *new_flags)
2521 + {
2522 + int i;
2523 +
2524 +@@ -1050,6 +1050,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
2525 +
2526 + return false;
2527 + }
2528 ++EXPORT_SYMBOL(ttm_bo_mem_compat);
2529 +
2530 + int ttm_bo_validate(struct ttm_buffer_object *bo,
2531 + struct ttm_placement *placement,
2532 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2533 +index 299925a1f6c6..eadc981ee79a 100644
2534 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2535 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2536 +@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
2537 + {
2538 + struct ttm_buffer_object *bo = &buf->base;
2539 + int ret;
2540 ++ uint32_t new_flags;
2541 +
2542 + ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
2543 + if (unlikely(ret != 0))
2544 +@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
2545 + if (unlikely(ret != 0))
2546 + goto err;
2547 +
2548 +- ret = ttm_bo_validate(bo, placement, interruptible, false);
2549 ++ if (buf->pin_count > 0)
2550 ++ ret = ttm_bo_mem_compat(placement, &bo->mem,
2551 ++ &new_flags) == true ? 0 : -EINVAL;
2552 ++ else
2553 ++ ret = ttm_bo_validate(bo, placement, interruptible, false);
2554 ++
2555 + if (!ret)
2556 + vmw_bo_pin_reserved(buf, true);
2557 +
2558 +@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
2559 + {
2560 + struct ttm_buffer_object *bo = &buf->base;
2561 + int ret;
2562 ++ uint32_t new_flags;
2563 +
2564 + ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
2565 + if (unlikely(ret != 0))
2566 +@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
2567 + if (unlikely(ret != 0))
2568 + goto err;
2569 +
2570 ++ if (buf->pin_count > 0) {
2571 ++ ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
2572 ++ &new_flags) == true ? 0 : -EINVAL;
2573 ++ goto out_unreserve;
2574 ++ }
2575 ++
2576 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
2577 + false);
2578 + if (likely(ret == 0) || ret == -ERESTARTSYS)
2579 +@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
2580 + struct ttm_placement placement;
2581 + struct ttm_place place;
2582 + int ret = 0;
2583 ++ uint32_t new_flags;
2584 +
2585 + place = vmw_vram_placement.placement[0];
2586 + place.lpfn = bo->num_pages;
2587 +@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
2588 + */
2589 + if (bo->mem.mem_type == TTM_PL_VRAM &&
2590 + bo->mem.start < bo->num_pages &&
2591 +- bo->mem.start > 0)
2592 ++ bo->mem.start > 0 &&
2593 ++ buf->pin_count == 0)
2594 + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
2595 +
2596 +- ret = ttm_bo_validate(bo, &placement, interruptible, false);
2597 ++ if (buf->pin_count > 0)
2598 ++ ret = ttm_bo_mem_compat(&placement, &bo->mem,
2599 ++ &new_flags) == true ? 0 : -EINVAL;
2600 ++ else
2601 ++ ret = ttm_bo_validate(bo, &placement, interruptible, false);
2602 +
2603 + /* For some reason we didn't end up at the start of vram */
2604 + WARN_ON(ret == 0 && bo->offset != 0);
2605 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2606 +index f2cf9231872a..2a505464c50f 100644
2607 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2608 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2609 +@@ -227,6 +227,7 @@ static int vmw_force_iommu;
2610 + static int vmw_restrict_iommu;
2611 + static int vmw_force_coherent;
2612 + static int vmw_restrict_dma_mask;
2613 ++static int vmw_assume_16bpp;
2614 +
2615 + static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
2616 + static void vmw_master_init(struct vmw_master *);
2617 +@@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
2618 + module_param_named(force_coherent, vmw_force_coherent, int, 0600);
2619 + MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
2620 + module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
2621 ++MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
2622 ++module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
2623 +
2624 +
2625 + static void vmw_print_capabilities(uint32_t capabilities)
2626 +@@ -653,6 +656,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
2627 + dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
2628 + dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
2629 +
2630 ++ dev_priv->assume_16bpp = !!vmw_assume_16bpp;
2631 ++
2632 + dev_priv->enable_fb = enable_fbdev;
2633 +
2634 + vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
2635 +@@ -699,6 +704,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
2636 + vmw_read(dev_priv,
2637 + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
2638 +
2639 ++ /*
2640 ++ * Workaround for low memory 2D VMs to compensate for the
2641 ++ * allocation taken by fbdev
2642 ++ */
2643 ++ if (!(dev_priv->capabilities & SVGA_CAP_3D))
2644 ++ mem_size *= 2;
2645 ++
2646 + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
2647 + dev_priv->prim_bb_mem =
2648 + vmw_read(dev_priv,
2649 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2650 +index 6db358a85b46..cab0c54b46ae 100644
2651 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2652 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2653 +@@ -386,6 +386,7 @@ struct vmw_private {
2654 + spinlock_t hw_lock;
2655 + spinlock_t cap_lock;
2656 + bool has_dx;
2657 ++ bool assume_16bpp;
2658 +
2659 + /*
2660 + * VGA registers.
2661 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2662 +index 679a4cb98ee3..d2d93959b119 100644
2663 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2664 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2665 +@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
2666 +
2667 + par->set_fb = &vfb->base;
2668 +
2669 +- if (!par->bo_ptr) {
2670 +- /*
2671 +- * Pin before mapping. Since we don't know in what placement
2672 +- * to pin, call into KMS to do it for us.
2673 +- */
2674 +- ret = vfb->pin(vfb);
2675 +- if (ret) {
2676 +- DRM_ERROR("Could not pin the fbdev framebuffer.\n");
2677 +- return ret;
2678 +- }
2679 +-
2680 +- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
2681 +- par->vmw_bo->base.num_pages, &par->map);
2682 +- if (ret) {
2683 +- vfb->unpin(vfb);
2684 +- DRM_ERROR("Could not map the fbdev framebuffer.\n");
2685 +- return ret;
2686 +- }
2687 +-
2688 +- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
2689 +- }
2690 +-
2691 + return 0;
2692 + }
2693 +
2694 +@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
2695 + if (ret)
2696 + goto out_unlock;
2697 +
2698 ++ if (!par->bo_ptr) {
2699 ++ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
2700 ++
2701 ++ /*
2702 ++ * Pin before mapping. Since we don't know in what placement
2703 ++ * to pin, call into KMS to do it for us.
2704 ++ */
2705 ++ ret = vfb->pin(vfb);
2706 ++ if (ret) {
2707 ++ DRM_ERROR("Could not pin the fbdev framebuffer.\n");
2708 ++ goto out_unlock;
2709 ++ }
2710 ++
2711 ++ ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
2712 ++ par->vmw_bo->base.num_pages, &par->map);
2713 ++ if (ret) {
2714 ++ vfb->unpin(vfb);
2715 ++ DRM_ERROR("Could not map the fbdev framebuffer.\n");
2716 ++ goto out_unlock;
2717 ++ }
2718 ++
2719 ++ par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
2720 ++ }
2721 ++
2722 ++
2723 + vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
2724 + par->set_fb->width, par->set_fb->height);
2725 +
2726 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2727 +index b07543b5cea4..6ccd61d37b78 100644
2728 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2729 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2730 +@@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2731 + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2732 + };
2733 + int i;
2734 +- u32 assumed_bpp = 2;
2735 ++ u32 assumed_bpp = 4;
2736 +
2737 +- /*
2738 +- * If using screen objects, then assume 32-bpp because that's what the
2739 +- * SVGA device is assuming
2740 +- */
2741 +- if (dev_priv->active_display_unit == vmw_du_screen_object)
2742 +- assumed_bpp = 4;
2743 ++ if (dev_priv->assume_16bpp)
2744 ++ assumed_bpp = 2;
2745 +
2746 + if (dev_priv->active_display_unit == vmw_du_screen_target) {
2747 + max_width = min(max_width, dev_priv->stdu_max_width);
2748 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2749 +index 9ca818fb034c..41932a7c4f79 100644
2750 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2751 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2752 +@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2753 +
2754 + WARN_ON_ONCE(!stdu->defined);
2755 +
2756 +- if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
2757 +- new_fb->height == mode->vdisplay)
2758 ++ new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
2759 ++
2760 ++ if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
2761 ++ new_vfbs->surface->base_size.height == mode->vdisplay)
2762 + new_content_type = SAME_AS_DISPLAY;
2763 + else if (vfb->dmabuf)
2764 + new_content_type = SEPARATE_DMA;
2765 +@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2766 + content_srf.mip_levels[0] = 1;
2767 + content_srf.multisample_count = 0;
2768 + } else {
2769 +- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
2770 + content_srf = *new_vfbs->surface;
2771 + }
2772 +
2773 +@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2774 + return ret;
2775 + }
2776 + } else if (new_content_type == SAME_AS_DISPLAY) {
2777 +- new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
2778 + new_display_srf = vmw_surface_reference(new_vfbs->surface);
2779 + }
2780 +
2781 +diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
2782 +index aad8c162a825..0cd4f7216239 100644
2783 +--- a/drivers/hid/hid-elo.c
2784 ++++ b/drivers/hid/hid-elo.c
2785 +@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
2786 + struct elo_priv *priv = hid_get_drvdata(hdev);
2787 +
2788 + hid_hw_stop(hdev);
2789 +- flush_workqueue(wq);
2790 ++ cancel_delayed_work_sync(&priv->work);
2791 + kfree(priv);
2792 + }
2793 +
2794 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
2795 +index c741f5e50a66..0088979f7281 100644
2796 +--- a/drivers/hid/hid-multitouch.c
2797 ++++ b/drivers/hid/hid-multitouch.c
2798 +@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
2799 + #define MT_QUIRK_ALWAYS_VALID (1 << 4)
2800 + #define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
2801 + #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
2802 ++#define MT_QUIRK_CONFIDENCE (1 << 7)
2803 + #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
2804 + #define MT_QUIRK_NO_AREA (1 << 9)
2805 + #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
2806 +@@ -78,6 +79,7 @@ struct mt_slot {
2807 + __s32 contactid; /* the device ContactID assigned to this slot */
2808 + bool touch_state; /* is the touch valid? */
2809 + bool inrange_state; /* is the finger in proximity of the sensor? */
2810 ++ bool confidence_state; /* is the touch made by a finger? */
2811 + };
2812 +
2813 + struct mt_class {
2814 +@@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2815 + return 1;
2816 + case HID_DG_CONFIDENCE:
2817 + if (cls->name == MT_CLS_WIN_8 &&
2818 +- field->application == HID_DG_TOUCHPAD) {
2819 +- cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
2820 +- cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
2821 +- }
2822 ++ field->application == HID_DG_TOUCHPAD)
2823 ++ cls->quirks |= MT_QUIRK_CONFIDENCE;
2824 + mt_store_field(usage, td, hi);
2825 + return 1;
2826 + case HID_DG_TIPSWITCH:
2827 +@@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
2828 + return;
2829 +
2830 + if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
2831 ++ int active;
2832 + int slotnum = mt_compute_slot(td, input);
2833 + struct mt_slot *s = &td->curdata;
2834 + struct input_mt *mt = input->mt;
2835 +@@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
2836 + return;
2837 + }
2838 +
2839 ++ if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
2840 ++ s->confidence_state = 1;
2841 ++ active = (s->touch_state || s->inrange_state) &&
2842 ++ s->confidence_state;
2843 ++
2844 + input_mt_slot(input, slotnum);
2845 +- input_mt_report_slot_state(input, MT_TOOL_FINGER,
2846 +- s->touch_state || s->inrange_state);
2847 +- if (s->touch_state || s->inrange_state) {
2848 ++ input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
2849 ++ if (active) {
2850 + /* this finger is in proximity of the sensor */
2851 + int wide = (s->w > s->h);
2852 + /* divided by two to match visual scale of touch */
2853 +@@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
2854 + td->curdata.touch_state = value;
2855 + break;
2856 + case HID_DG_CONFIDENCE:
2857 ++ if (quirks & MT_QUIRK_CONFIDENCE)
2858 ++ td->curdata.confidence_state = value;
2859 + if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
2860 + td->curvalid = value;
2861 + break;
2862 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
2863 +index 2f1ddca6f2e0..700145b15088 100644
2864 +--- a/drivers/hid/usbhid/hiddev.c
2865 ++++ b/drivers/hid/usbhid/hiddev.c
2866 +@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
2867 + goto inval;
2868 + } else if (uref->usage_index >= field->report_count)
2869 + goto inval;
2870 +-
2871 +- else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2872 +- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2873 +- uref->usage_index + uref_multi->num_values > field->report_count))
2874 +- goto inval;
2875 + }
2876 +
2877 ++ if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2878 ++ (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2879 ++ uref->usage_index + uref_multi->num_values > field->report_count))
2880 ++ goto inval;
2881 ++
2882 + switch (cmd) {
2883 + case HIDIOCGUSAGE:
2884 + uref->value = field->value[uref->usage_index];
2885 +diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
2886 +index c43318d3416e..a9356a3dea92 100644
2887 +--- a/drivers/hwmon/dell-smm-hwmon.c
2888 ++++ b/drivers/hwmon/dell-smm-hwmon.c
2889 +@@ -66,11 +66,13 @@
2890 +
2891 + static DEFINE_MUTEX(i8k_mutex);
2892 + static char bios_version[4];
2893 ++static char bios_machineid[16];
2894 + static struct device *i8k_hwmon_dev;
2895 + static u32 i8k_hwmon_flags;
2896 + static uint i8k_fan_mult = I8K_FAN_MULT;
2897 + static uint i8k_pwm_mult;
2898 + static uint i8k_fan_max = I8K_FAN_HIGH;
2899 ++static bool disallow_fan_type_call;
2900 +
2901 + #define I8K_HWMON_HAVE_TEMP1 (1 << 0)
2902 + #define I8K_HWMON_HAVE_TEMP2 (1 << 1)
2903 +@@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0);
2904 + MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
2905 +
2906 + #if IS_ENABLED(CONFIG_I8K)
2907 +-static bool restricted;
2908 ++static bool restricted = true;
2909 + module_param(restricted, bool, 0);
2910 +-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
2911 ++MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
2912 +
2913 + static bool power_status;
2914 + module_param(power_status, bool, 0600);
2915 +-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
2916 ++MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
2917 + #endif
2918 +
2919 + static uint fan_mult;
2920 +@@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan)
2921 + /*
2922 + * Read the fan type.
2923 + */
2924 +-static int i8k_get_fan_type(int fan)
2925 ++static int _i8k_get_fan_type(int fan)
2926 + {
2927 + struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
2928 +
2929 ++ if (disallow_fan_type_call)
2930 ++ return -EINVAL;
2931 ++
2932 + regs.ebx = fan & 0xff;
2933 + return i8k_smm(&regs) ? : regs.eax & 0xff;
2934 + }
2935 +
2936 ++static int i8k_get_fan_type(int fan)
2937 ++{
2938 ++ /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
2939 ++ static int types[2] = { INT_MIN, INT_MIN };
2940 ++
2941 ++ if (types[fan] == INT_MIN)
2942 ++ types[fan] = _i8k_get_fan_type(fan);
2943 ++
2944 ++ return types[fan];
2945 ++}
2946 ++
2947 + /*
2948 + * Read the fan nominal rpm for specific fan speed.
2949 + */
2950 +@@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
2951 + break;
2952 +
2953 + case I8K_MACHINE_ID:
2954 +- memset(buff, 0, 16);
2955 +- strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2956 +- sizeof(buff));
2957 ++ if (restricted && !capable(CAP_SYS_ADMIN))
2958 ++ return -EPERM;
2959 ++
2960 ++ memset(buff, 0, sizeof(buff));
2961 ++ strlcpy(buff, bios_machineid, sizeof(buff));
2962 + break;
2963 +
2964 + case I8K_FN_STATUS:
2965 +@@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
2966 + seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
2967 + I8K_PROC_FMT,
2968 + bios_version,
2969 +- i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2970 ++ (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
2971 + cpu_temp,
2972 + left_fan, right_fan, left_speed, right_speed,
2973 + ac_power, fn_key);
2974 +@@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = {
2975 + static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
2976 + int index)
2977 + {
2978 ++ if (disallow_fan_type_call &&
2979 ++ (index == 9 || index == 12))
2980 ++ return 0;
2981 + if (index >= 0 && index <= 1 &&
2982 + !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
2983 + return 0;
2984 +@@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void)
2985 + if (err >= 0)
2986 + i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
2987 +
2988 +- /* First fan attributes, if fan type is OK */
2989 +- err = i8k_get_fan_type(0);
2990 ++ /* First fan attributes, if fan status or type is OK */
2991 ++ err = i8k_get_fan_status(0);
2992 ++ if (err < 0)
2993 ++ err = i8k_get_fan_type(0);
2994 + if (err >= 0)
2995 + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
2996 +
2997 +- /* Second fan attributes, if fan type is OK */
2998 +- err = i8k_get_fan_type(1);
2999 ++ /* Second fan attributes, if fan status or type is OK */
3000 ++ err = i8k_get_fan_status(1);
3001 ++ if (err < 0)
3002 ++ err = i8k_get_fan_type(1);
3003 + if (err >= 0)
3004 + i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
3005 +
3006 +@@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
3007 +
3008 + MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
3009 +
3010 +-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
3011 ++/*
3012 ++ * On some machines once I8K_SMM_GET_FAN_TYPE is issued then CPU fan speed
3013 ++ * randomly going up and down due to bug in Dell SMM or BIOS. Here is blacklist
3014 ++ * of affected Dell machines for which we disallow I8K_SMM_GET_FAN_TYPE call.
3015 ++ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
3016 ++ */
3017 ++static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
3018 + {
3019 +- /*
3020 +- * CPU fan speed going up and down on Dell Studio XPS 8000
3021 +- * for unknown reasons.
3022 +- */
3023 + .ident = "Dell Studio XPS 8000",
3024 + .matches = {
3025 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3026 +@@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
3027 + },
3028 + },
3029 + {
3030 +- /*
3031 +- * CPU fan speed going up and down on Dell Studio XPS 8100
3032 +- * for unknown reasons.
3033 +- */
3034 + .ident = "Dell Studio XPS 8100",
3035 + .matches = {
3036 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3037 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
3038 + },
3039 + },
3040 ++ {
3041 ++ .ident = "Dell Inspiron 580",
3042 ++ .matches = {
3043 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3044 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
3045 ++ },
3046 ++ },
3047 + { }
3048 + };
3049 +
3050 +@@ -966,8 +996,7 @@ static int __init i8k_probe(void)
3051 + /*
3052 + * Get DMI information
3053 + */
3054 +- if (!dmi_check_system(i8k_dmi_table) ||
3055 +- dmi_check_system(i8k_blacklist_dmi_table)) {
3056 ++ if (!dmi_check_system(i8k_dmi_table)) {
3057 + if (!ignore_dmi && !force)
3058 + return -ENODEV;
3059 +
3060 +@@ -978,8 +1007,13 @@ static int __init i8k_probe(void)
3061 + i8k_get_dmi_data(DMI_BIOS_VERSION));
3062 + }
3063 +
3064 ++ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
3065 ++ disallow_fan_type_call = true;
3066 ++
3067 + strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
3068 + sizeof(bios_version));
3069 ++ strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
3070 ++ sizeof(bios_machineid));
3071 +
3072 + /*
3073 + * Get SMM Dell signature
3074 +diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
3075 +index 923f56598d4b..3a9f106787d2 100644
3076 +--- a/drivers/iio/accel/kxsd9.c
3077 ++++ b/drivers/iio/accel/kxsd9.c
3078 +@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
3079 +
3080 + mutex_lock(&st->buf_lock);
3081 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
3082 +- if (ret)
3083 ++ if (ret < 0)
3084 + goto error_ret;
3085 + st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
3086 + st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
3087 +@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
3088 + break;
3089 + case IIO_CHAN_INFO_SCALE:
3090 + ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
3091 +- if (ret)
3092 ++ if (ret < 0)
3093 + goto error_ret;
3094 + *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
3095 + ret = IIO_VAL_INT_PLUS_MICRO;
3096 +diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
3097 +index 21e19b60e2b9..2123f0ac2e2a 100644
3098 +--- a/drivers/iio/adc/ad7266.c
3099 ++++ b/drivers/iio/adc/ad7266.c
3100 +@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
3101 +
3102 + st = iio_priv(indio_dev);
3103 +
3104 +- st->reg = devm_regulator_get(&spi->dev, "vref");
3105 +- if (!IS_ERR_OR_NULL(st->reg)) {
3106 ++ st->reg = devm_regulator_get_optional(&spi->dev, "vref");
3107 ++ if (!IS_ERR(st->reg)) {
3108 + ret = regulator_enable(st->reg);
3109 + if (ret)
3110 + return ret;
3111 +@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
3112 +
3113 + st->vref_mv = ret / 1000;
3114 + } else {
3115 ++ /* Any other error indicates that the regulator does exist */
3116 ++ if (PTR_ERR(st->reg) != -ENODEV)
3117 ++ return PTR_ERR(st->reg);
3118 + /* Use internal reference */
3119 + st->vref_mv = 2500;
3120 + }
3121 +diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
3122 +index fa4767613173..a03832a5fc95 100644
3123 +--- a/drivers/iio/humidity/hdc100x.c
3124 ++++ b/drivers/iio/humidity/hdc100x.c
3125 +@@ -55,7 +55,7 @@ static const struct {
3126 + },
3127 + { /* IIO_HUMIDITYRELATIVE channel */
3128 + .shift = 8,
3129 +- .mask = 2,
3130 ++ .mask = 3,
3131 + },
3132 + };
3133 +
3134 +@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
3135 + dev_err(&client->dev, "cannot read high byte measurement");
3136 + return ret;
3137 + }
3138 +- val = ret << 6;
3139 ++ val = ret << 8;
3140 +
3141 + ret = i2c_smbus_read_byte(client);
3142 + if (ret < 0) {
3143 + dev_err(&client->dev, "cannot read low byte measurement");
3144 + return ret;
3145 + }
3146 +- val |= ret >> 2;
3147 ++ val |= ret;
3148 +
3149 + return val;
3150 + }
3151 +@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
3152 + return IIO_VAL_INT_PLUS_MICRO;
3153 + case IIO_CHAN_INFO_SCALE:
3154 + if (chan->type == IIO_TEMP) {
3155 +- *val = 165;
3156 +- *val2 = 65536 >> 2;
3157 ++ *val = 165000;
3158 ++ *val2 = 65536;
3159 + return IIO_VAL_FRACTIONAL;
3160 + } else {
3161 +- *val = 0;
3162 +- *val2 = 10000;
3163 +- return IIO_VAL_INT_PLUS_MICRO;
3164 ++ *val = 100;
3165 ++ *val2 = 65536;
3166 ++ return IIO_VAL_FRACTIONAL;
3167 + }
3168 + break;
3169 + case IIO_CHAN_INFO_OFFSET:
3170 +- *val = -3971;
3171 +- *val2 = 879096;
3172 ++ *val = -15887;
3173 ++ *val2 = 515151;
3174 + return IIO_VAL_INT_PLUS_MICRO;
3175 + default:
3176 + return -EINVAL;
3177 +diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
3178 +index ae2806aafb72..0c52dfe64977 100644
3179 +--- a/drivers/iio/industrialio-trigger.c
3180 ++++ b/drivers/iio/industrialio-trigger.c
3181 +@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
3182 +
3183 + /* Prevent the module from being removed whilst attached to a trigger */
3184 + __module_get(pf->indio_dev->info->driver_module);
3185 ++
3186 ++ /* Get irq number */
3187 + pf->irq = iio_trigger_get_irq(trig);
3188 ++ if (pf->irq < 0)
3189 ++ goto out_put_module;
3190 ++
3191 ++ /* Request irq */
3192 + ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
3193 + pf->type, pf->name,
3194 + pf);
3195 +- if (ret < 0) {
3196 +- module_put(pf->indio_dev->info->driver_module);
3197 +- return ret;
3198 +- }
3199 ++ if (ret < 0)
3200 ++ goto out_put_irq;
3201 +
3202 ++ /* Enable trigger in driver */
3203 + if (trig->ops && trig->ops->set_trigger_state && notinuse) {
3204 + ret = trig->ops->set_trigger_state(trig, true);
3205 + if (ret < 0)
3206 +- module_put(pf->indio_dev->info->driver_module);
3207 ++ goto out_free_irq;
3208 + }
3209 +
3210 + return ret;
3211 ++
3212 ++out_free_irq:
3213 ++ free_irq(pf->irq, pf);
3214 ++out_put_irq:
3215 ++ iio_trigger_put_irq(trig, pf->irq);
3216 ++out_put_module:
3217 ++ module_put(pf->indio_dev->info->driver_module);
3218 ++ return ret;
3219 + }
3220 +
3221 + static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
3222 +diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
3223 +index a6af56ad10e1..6443aad809b2 100644
3224 +--- a/drivers/iio/light/apds9960.c
3225 ++++ b/drivers/iio/light/apds9960.c
3226 +@@ -1006,6 +1006,7 @@ static int apds9960_probe(struct i2c_client *client,
3227 +
3228 + iio_device_attach_buffer(indio_dev, buffer);
3229 +
3230 ++ indio_dev->dev.parent = &client->dev;
3231 + indio_dev->info = &apds9960_info;
3232 + indio_dev->name = APDS9960_DRV_NAME;
3233 + indio_dev->channels = apds9960_channels;
3234 +diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
3235 +index 172393ad34af..d3ca3207935d 100644
3236 +--- a/drivers/iio/pressure/st_pressure_core.c
3237 ++++ b/drivers/iio/pressure/st_pressure_core.c
3238 +@@ -28,15 +28,21 @@
3239 + #include <linux/iio/common/st_sensors.h>
3240 + #include "st_pressure.h"
3241 +
3242 ++#define MCELSIUS_PER_CELSIUS 1000
3243 ++
3244 ++/* Default pressure sensitivity */
3245 + #define ST_PRESS_LSB_PER_MBAR 4096UL
3246 + #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
3247 + ST_PRESS_LSB_PER_MBAR)
3248 ++
3249 ++/* Default temperature sensitivity */
3250 + #define ST_PRESS_LSB_PER_CELSIUS 480UL
3251 +-#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
3252 +- ST_PRESS_LSB_PER_CELSIUS)
3253 ++#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
3254 ++
3255 + #define ST_PRESS_NUMBER_DATA_CHANNELS 1
3256 +
3257 + /* FULLSCALE */
3258 ++#define ST_PRESS_FS_AVL_1100MB 1100
3259 + #define ST_PRESS_FS_AVL_1260MB 1260
3260 +
3261 + #define ST_PRESS_1_OUT_XL_ADDR 0x28
3262 +@@ -54,9 +60,6 @@
3263 + #define ST_PRESS_LPS331AP_PW_MASK 0x80
3264 + #define ST_PRESS_LPS331AP_FS_ADDR 0x23
3265 + #define ST_PRESS_LPS331AP_FS_MASK 0x30
3266 +-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
3267 +-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
3268 +-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
3269 + #define ST_PRESS_LPS331AP_BDU_ADDR 0x20
3270 + #define ST_PRESS_LPS331AP_BDU_MASK 0x04
3271 + #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
3272 +@@ -65,9 +68,14 @@
3273 + #define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
3274 + #define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
3275 + #define ST_PRESS_LPS331AP_MULTIREAD_BIT true
3276 +-#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
3277 +
3278 + /* CUSTOM VALUES FOR LPS001WP SENSOR */
3279 ++
3280 ++/* LPS001WP pressure resolution */
3281 ++#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
3282 ++/* LPS001WP temperature resolution */
3283 ++#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
3284 ++
3285 + #define ST_PRESS_LPS001WP_WAI_EXP 0xba
3286 + #define ST_PRESS_LPS001WP_ODR_ADDR 0x20
3287 + #define ST_PRESS_LPS001WP_ODR_MASK 0x30
3288 +@@ -76,6 +84,8 @@
3289 + #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
3290 + #define ST_PRESS_LPS001WP_PW_ADDR 0x20
3291 + #define ST_PRESS_LPS001WP_PW_MASK 0x40
3292 ++#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
3293 ++ (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
3294 + #define ST_PRESS_LPS001WP_BDU_ADDR 0x20
3295 + #define ST_PRESS_LPS001WP_BDU_MASK 0x04
3296 + #define ST_PRESS_LPS001WP_MULTIREAD_BIT true
3297 +@@ -92,11 +102,6 @@
3298 + #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
3299 + #define ST_PRESS_LPS25H_PW_ADDR 0x20
3300 + #define ST_PRESS_LPS25H_PW_MASK 0x80
3301 +-#define ST_PRESS_LPS25H_FS_ADDR 0x00
3302 +-#define ST_PRESS_LPS25H_FS_MASK 0x00
3303 +-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
3304 +-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
3305 +-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
3306 + #define ST_PRESS_LPS25H_BDU_ADDR 0x20
3307 + #define ST_PRESS_LPS25H_BDU_MASK 0x04
3308 + #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
3309 +@@ -105,7 +110,6 @@
3310 + #define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
3311 + #define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
3312 + #define ST_PRESS_LPS25H_MULTIREAD_BIT true
3313 +-#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
3314 + #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
3315 + #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
3316 +
3317 +@@ -157,7 +161,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
3318 + .storagebits = 16,
3319 + .endianness = IIO_LE,
3320 + },
3321 +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
3322 ++ .info_mask_separate =
3323 ++ BIT(IIO_CHAN_INFO_RAW) |
3324 ++ BIT(IIO_CHAN_INFO_SCALE),
3325 + .modified = 0,
3326 + },
3327 + {
3328 +@@ -173,7 +179,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
3329 + },
3330 + .info_mask_separate =
3331 + BIT(IIO_CHAN_INFO_RAW) |
3332 +- BIT(IIO_CHAN_INFO_OFFSET),
3333 ++ BIT(IIO_CHAN_INFO_SCALE),
3334 + .modified = 0,
3335 + },
3336 + IIO_CHAN_SOFT_TIMESTAMP(1)
3337 +@@ -208,11 +214,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3338 + .addr = ST_PRESS_LPS331AP_FS_ADDR,
3339 + .mask = ST_PRESS_LPS331AP_FS_MASK,
3340 + .fs_avl = {
3341 ++ /*
3342 ++ * Pressure and temperature sensitivity values
3343 ++ * as defined in table 3 of LPS331AP datasheet.
3344 ++ */
3345 + [0] = {
3346 + .num = ST_PRESS_FS_AVL_1260MB,
3347 +- .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
3348 +- .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
3349 +- .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
3350 ++ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
3351 ++ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
3352 + },
3353 + },
3354 + },
3355 +@@ -254,7 +263,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3356 + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
3357 + },
3358 + .fs = {
3359 +- .addr = 0,
3360 ++ .fs_avl = {
3361 ++ /*
3362 ++ * Pressure and temperature resolution values
3363 ++ * as defined in table 3 of LPS001WP datasheet.
3364 ++ */
3365 ++ [0] = {
3366 ++ .num = ST_PRESS_FS_AVL_1100MB,
3367 ++ .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
3368 ++ .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
3369 ++ },
3370 ++ },
3371 + },
3372 + .bdu = {
3373 + .addr = ST_PRESS_LPS001WP_BDU_ADDR,
3374 +@@ -291,14 +310,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3375 + .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
3376 + },
3377 + .fs = {
3378 +- .addr = ST_PRESS_LPS25H_FS_ADDR,
3379 +- .mask = ST_PRESS_LPS25H_FS_MASK,
3380 + .fs_avl = {
3381 ++ /*
3382 ++ * Pressure and temperature sensitivity values
3383 ++ * as defined in table 3 of LPS25H datasheet.
3384 ++ */
3385 + [0] = {
3386 + .num = ST_PRESS_FS_AVL_1260MB,
3387 +- .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
3388 +- .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
3389 +- .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
3390 ++ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
3391 ++ .gain2 = ST_PRESS_LSB_PER_CELSIUS,
3392 + },
3393 + },
3394 + },
3395 +@@ -354,26 +374,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
3396 +
3397 + return IIO_VAL_INT;
3398 + case IIO_CHAN_INFO_SCALE:
3399 +- *val = 0;
3400 +-
3401 + switch (ch->type) {
3402 + case IIO_PRESSURE:
3403 ++ *val = 0;
3404 + *val2 = press_data->current_fullscale->gain;
3405 +- break;
3406 ++ return IIO_VAL_INT_PLUS_NANO;
3407 + case IIO_TEMP:
3408 ++ *val = MCELSIUS_PER_CELSIUS;
3409 + *val2 = press_data->current_fullscale->gain2;
3410 +- break;
3411 ++ return IIO_VAL_FRACTIONAL;
3412 + default:
3413 + err = -EINVAL;
3414 + goto read_error;
3415 + }
3416 +
3417 +- return IIO_VAL_INT_PLUS_NANO;
3418 + case IIO_CHAN_INFO_OFFSET:
3419 + switch (ch->type) {
3420 + case IIO_TEMP:
3421 +- *val = 425;
3422 +- *val2 = 10;
3423 ++ *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
3424 ++ press_data->current_fullscale->gain2;
3425 ++ *val2 = MCELSIUS_PER_CELSIUS;
3426 + break;
3427 + default:
3428 + err = -EINVAL;
3429 +diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
3430 +index f4d29d5dbd5f..e2f926cdcad2 100644
3431 +--- a/drivers/iio/proximity/as3935.c
3432 ++++ b/drivers/iio/proximity/as3935.c
3433 +@@ -64,6 +64,7 @@ struct as3935_state {
3434 + struct delayed_work work;
3435 +
3436 + u32 tune_cap;
3437 ++ u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
3438 + u8 buf[2] ____cacheline_aligned;
3439 + };
3440 +
3441 +@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
3442 + .type = IIO_PROXIMITY,
3443 + .info_mask_separate =
3444 + BIT(IIO_CHAN_INFO_RAW) |
3445 +- BIT(IIO_CHAN_INFO_PROCESSED),
3446 ++ BIT(IIO_CHAN_INFO_PROCESSED) |
3447 ++ BIT(IIO_CHAN_INFO_SCALE),
3448 + .scan_index = 0,
3449 + .scan_type = {
3450 + .sign = 'u',
3451 +@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
3452 + /* storm out of range */
3453 + if (*val == AS3935_DATA_MASK)
3454 + return -EINVAL;
3455 +- *val *= 1000;
3456 ++
3457 ++ if (m == IIO_CHAN_INFO_PROCESSED)
3458 ++ *val *= 1000;
3459 ++ break;
3460 ++ case IIO_CHAN_INFO_SCALE:
3461 ++ *val = 1000;
3462 + break;
3463 + default:
3464 + return -EINVAL;
3465 +@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
3466 + ret = as3935_read(st, AS3935_DATA, &val);
3467 + if (ret)
3468 + goto err_read;
3469 +- val &= AS3935_DATA_MASK;
3470 +- val *= 1000;
3471 +
3472 +- iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
3473 ++ st->buffer[0] = val & AS3935_DATA_MASK;
3474 ++ iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
3475 ++ pf->timestamp);
3476 + err_read:
3477 + iio_trigger_notify_done(indio_dev->trig);
3478 +
3479 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
3480 +index 1d92e091e22e..c99525512b34 100644
3481 +--- a/drivers/infiniband/core/cm.c
3482 ++++ b/drivers/infiniband/core/cm.c
3483 +@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
3484 + work->cm_event.event = IB_CM_USER_ESTABLISHED;
3485 +
3486 + /* Check if the device started its remove_one */
3487 +- spin_lock_irq(&cm.lock);
3488 ++ spin_lock_irqsave(&cm.lock, flags);
3489 + if (!cm_dev->going_down) {
3490 + queue_delayed_work(cm.wq, &work->work, 0);
3491 + } else {
3492 + kfree(work);
3493 + ret = -ENODEV;
3494 + }
3495 +- spin_unlock_irq(&cm.lock);
3496 ++ spin_unlock_irqrestore(&cm.lock, flags);
3497 +
3498 + out:
3499 + return ret;
3500 +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
3501 +index 105246fba2e7..5fc623362731 100644
3502 +--- a/drivers/infiniband/hw/mlx4/ah.c
3503 ++++ b/drivers/infiniband/hw/mlx4/ah.c
3504 +@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3505 +
3506 + ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
3507 + ah->av.ib.g_slid = ah_attr->src_path_bits;
3508 ++ ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3509 + if (ah_attr->ah_flags & IB_AH_GRH) {
3510 + ah->av.ib.g_slid |= 0x80;
3511 + ah->av.ib.gid_index = ah_attr->grh.sgid_index;
3512 +@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3513 + !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
3514 + --ah->av.ib.stat_rate;
3515 + }
3516 +- ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3517 +
3518 + return &ah->ibah;
3519 + }
3520 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
3521 +index a9e3bcc522c4..a0ecf08b2b86 100644
3522 +--- a/drivers/infiniband/sw/rdmavt/qp.c
3523 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
3524 +@@ -683,8 +683,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
3525 + * initialization that is needed.
3526 + */
3527 + priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
3528 +- if (!priv)
3529 ++ if (IS_ERR(priv)) {
3530 ++ ret = priv;
3531 + goto bail_qp;
3532 ++ }
3533 + qp->priv = priv;
3534 + qp->timeout_jiffies =
3535 + usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
3536 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
3537 +index bf4959f4225b..94f1bf772ec9 100644
3538 +--- a/drivers/iommu/amd_iommu_init.c
3539 ++++ b/drivers/iommu/amd_iommu_init.c
3540 +@@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void)
3541 + break;
3542 + }
3543 +
3544 ++ /*
3545 ++ * Order is important here to make sure any unity map requirements are
3546 ++ * fulfilled. The unity mappings are created and written to the device
3547 ++ * table during the amd_iommu_init_api() call.
3548 ++ *
3549 ++ * After that we call init_device_table_dma() to make sure any
3550 ++ * uninitialized DTE will block DMA, and in the end we flush the caches
3551 ++ * of all IOMMUs to make sure the changes to the device table are
3552 ++ * active.
3553 ++ */
3554 ++ ret = amd_iommu_init_api();
3555 ++
3556 + init_device_table_dma();
3557 +
3558 + for_each_iommu(iommu)
3559 + iommu_flush_all_caches(iommu);
3560 +
3561 +- ret = amd_iommu_init_api();
3562 +-
3563 + if (!ret)
3564 + print_iommu_info();
3565 +
3566 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
3567 +index 4ff73ff64e49..3e20208d6fdb 100644
3568 +--- a/drivers/iommu/arm-smmu-v3.c
3569 ++++ b/drivers/iommu/arm-smmu-v3.c
3570 +@@ -1942,6 +1942,7 @@ static struct iommu_ops arm_smmu_ops = {
3571 + .attach_dev = arm_smmu_attach_dev,
3572 + .map = arm_smmu_map,
3573 + .unmap = arm_smmu_unmap,
3574 ++ .map_sg = default_iommu_map_sg,
3575 + .iova_to_phys = arm_smmu_iova_to_phys,
3576 + .add_device = arm_smmu_add_device,
3577 + .remove_device = arm_smmu_remove_device,
3578 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
3579 +index e1852e845d21..ae364e07840c 100644
3580 +--- a/drivers/iommu/intel-iommu.c
3581 ++++ b/drivers/iommu/intel-iommu.c
3582 +@@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
3583 + }
3584 + }
3585 +
3586 +- iommu_flush_write_buffer(iommu);
3587 +- iommu_set_root_entry(iommu);
3588 +- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3589 +- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3590 +-
3591 + if (!ecap_pass_through(iommu->ecap))
3592 + hw_pass_through = 0;
3593 + #ifdef CONFIG_INTEL_IOMMU_SVM
3594 +@@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
3595 + #endif
3596 + }
3597 +
3598 ++ /*
3599 ++ * Now that qi is enabled on all iommus, set the root entry and flush
3600 ++ * caches. This is required on some Intel X58 chipsets, otherwise the
3601 ++ * flush_context function will loop forever and the boot hangs.
3602 ++ */
3603 ++ for_each_active_iommu(iommu, drhd) {
3604 ++ iommu_flush_write_buffer(iommu);
3605 ++ iommu_set_root_entry(iommu);
3606 ++ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3607 ++ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3608 ++ }
3609 ++
3610 + if (iommu_pass_through)
3611 + iommu_identity_mapping |= IDENTMAP_ALL;
3612 +
3613 +diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
3614 +index 5710a06c3049..0ea8d9a24de0 100644
3615 +--- a/drivers/iommu/rockchip-iommu.c
3616 ++++ b/drivers/iommu/rockchip-iommu.c
3617 +@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
3618 + dte_addr = virt_to_phys(rk_domain->dt);
3619 + for (i = 0; i < iommu->num_mmu; i++) {
3620 + rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
3621 +- rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
3622 ++ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
3623 + rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
3624 + }
3625 +
3626 +diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
3627 +index 4dffccf532a2..40fb1209d512 100644
3628 +--- a/drivers/irqchip/irq-mips-gic.c
3629 ++++ b/drivers/irqchip/irq-mips-gic.c
3630 +@@ -734,6 +734,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
3631 + /* verify that it doesn't conflict with an IPI irq */
3632 + if (test_bit(spec->hwirq, ipi_resrv))
3633 + return -EBUSY;
3634 ++
3635 ++ hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
3636 ++
3637 ++ return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
3638 ++ &gic_level_irq_controller,
3639 ++ NULL);
3640 + } else {
3641 + base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
3642 + if (base_hwirq == gic_shared_intrs) {
3643 +@@ -855,10 +861,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
3644 + &gic_level_irq_controller,
3645 + NULL);
3646 + if (ret)
3647 +- return ret;
3648 ++ goto error;
3649 + }
3650 +
3651 + return 0;
3652 ++
3653 ++error:
3654 ++ irq_domain_free_irqs_parent(d, virq, nr_irqs);
3655 ++ return ret;
3656 + }
3657 +
3658 + void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
3659 +diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
3660 +index d7723ce772b3..12690c1ea8f8 100644
3661 +--- a/drivers/media/usb/uvc/uvc_v4l2.c
3662 ++++ b/drivers/media/usb/uvc/uvc_v4l2.c
3663 +@@ -1408,47 +1408,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
3664 + static long uvc_v4l2_compat_ioctl32(struct file *file,
3665 + unsigned int cmd, unsigned long arg)
3666 + {
3667 ++ struct uvc_fh *handle = file->private_data;
3668 + union {
3669 + struct uvc_xu_control_mapping xmap;
3670 + struct uvc_xu_control_query xqry;
3671 + } karg;
3672 + void __user *up = compat_ptr(arg);
3673 +- mm_segment_t old_fs;
3674 + long ret;
3675 +
3676 + switch (cmd) {
3677 + case UVCIOC_CTRL_MAP32:
3678 +- cmd = UVCIOC_CTRL_MAP;
3679 + ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
3680 ++ if (ret)
3681 ++ return ret;
3682 ++ ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
3683 ++ if (ret)
3684 ++ return ret;
3685 ++ ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
3686 ++ if (ret)
3687 ++ return ret;
3688 ++
3689 + break;
3690 +
3691 + case UVCIOC_CTRL_QUERY32:
3692 +- cmd = UVCIOC_CTRL_QUERY;
3693 + ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
3694 ++ if (ret)
3695 ++ return ret;
3696 ++ ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
3697 ++ if (ret)
3698 ++ return ret;
3699 ++ ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
3700 ++ if (ret)
3701 ++ return ret;
3702 + break;
3703 +
3704 + default:
3705 + return -ENOIOCTLCMD;
3706 + }
3707 +
3708 +- old_fs = get_fs();
3709 +- set_fs(KERNEL_DS);
3710 +- ret = video_ioctl2(file, cmd, (unsigned long)&karg);
3711 +- set_fs(old_fs);
3712 +-
3713 +- if (ret < 0)
3714 +- return ret;
3715 +-
3716 +- switch (cmd) {
3717 +- case UVCIOC_CTRL_MAP:
3718 +- ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
3719 +- break;
3720 +-
3721 +- case UVCIOC_CTRL_QUERY:
3722 +- ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
3723 +- break;
3724 +- }
3725 +-
3726 + return ret;
3727 + }
3728 + #endif
3729 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
3730 +index 21825ddce4a3..859b4a1d11e4 100644
3731 +--- a/drivers/memory/omap-gpmc.c
3732 ++++ b/drivers/memory/omap-gpmc.c
3733 +@@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
3734 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3735 + GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
3736 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3737 +- GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
3738 ++ GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
3739 + gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
3740 + GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
3741 + p->cycle2cyclesamecsen);
3742 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
3743 +index 96fddb016bf1..4dd0391d2942 100644
3744 +--- a/drivers/mtd/ubi/eba.c
3745 ++++ b/drivers/mtd/ubi/eba.c
3746 +@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
3747 + int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
3748 + struct ubi_volume *vol = ubi->volumes[idx];
3749 + struct ubi_vid_hdr *vid_hdr;
3750 ++ uint32_t crc;
3751 +
3752 + vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
3753 + if (!vid_hdr)
3754 +@@ -599,14 +600,8 @@ retry:
3755 + goto out_put;
3756 + }
3757 +
3758 +- vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3759 +- err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3760 +- if (err) {
3761 +- up_read(&ubi->fm_eba_sem);
3762 +- goto write_error;
3763 +- }
3764 ++ ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
3765 +
3766 +- data_size = offset + len;
3767 + mutex_lock(&ubi->buf_mutex);
3768 + memset(ubi->peb_buf + offset, 0xFF, len);
3769 +
3770 +@@ -621,6 +616,19 @@ retry:
3771 +
3772 + memcpy(ubi->peb_buf + offset, buf, len);
3773 +
3774 ++ data_size = offset + len;
3775 ++ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
3776 ++ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3777 ++ vid_hdr->copy_flag = 1;
3778 ++ vid_hdr->data_size = cpu_to_be32(data_size);
3779 ++ vid_hdr->data_crc = cpu_to_be32(crc);
3780 ++ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3781 ++ if (err) {
3782 ++ mutex_unlock(&ubi->buf_mutex);
3783 ++ up_read(&ubi->fm_eba_sem);
3784 ++ goto write_error;
3785 ++ }
3786 ++
3787 + err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
3788 + if (err) {
3789 + mutex_unlock(&ubi->buf_mutex);
3790 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
3791 +index 9fcb4898fb68..c70e51567eed 100644
3792 +--- a/drivers/net/geneve.c
3793 ++++ b/drivers/net/geneve.c
3794 +@@ -1092,12 +1092,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
3795 +
3796 + static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
3797 + {
3798 ++ struct geneve_dev *geneve = netdev_priv(dev);
3799 + /* The max_mtu calculation does not take account of GENEVE
3800 + * options, to avoid excluding potentially valid
3801 + * configurations.
3802 + */
3803 +- int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
3804 +- - dev->hard_header_len;
3805 ++ int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
3806 ++
3807 ++ if (geneve->remote.sa.sa_family == AF_INET6)
3808 ++ max_mtu -= sizeof(struct ipv6hdr);
3809 ++ else
3810 ++ max_mtu -= sizeof(struct iphdr);
3811 +
3812 + if (new_mtu < 68)
3813 + return -EINVAL;
3814 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
3815 +index 9e803bbcc0b6..8f3c55d03d5d 100644
3816 +--- a/drivers/net/macsec.c
3817 ++++ b/drivers/net/macsec.c
3818 +@@ -2564,6 +2564,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3819 + u64_stats_update_begin(&secy_stats->syncp);
3820 + secy_stats->stats.OutPktsUntagged++;
3821 + u64_stats_update_end(&secy_stats->syncp);
3822 ++ skb->dev = macsec->real_dev;
3823 + len = skb->len;
3824 + ret = dev_queue_xmit(skb);
3825 + count_tx(dev, ret, len);
3826 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
3827 +index 2fb31edab125..d4425c565839 100644
3828 +--- a/drivers/net/usb/cdc_ncm.c
3829 ++++ b/drivers/net/usb/cdc_ncm.c
3830 +@@ -852,6 +852,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
3831 + if (cdc_ncm_init(dev))
3832 + goto error2;
3833 +
3834 ++ /* Some firmwares need a pause here or they will silently fail
3835 ++ * to set up the interface properly. This value was decided
3836 ++ * empirically on a Sierra Wireless MC7455 running 02.08.02.00
3837 ++ * firmware.
3838 ++ */
3839 ++ usleep_range(10000, 20000);
3840 ++
3841 + /* configure data interface */
3842 + temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
3843 + if (temp) {
3844 +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
3845 +index e85e0737771c..06664baa43d9 100644
3846 +--- a/drivers/net/wireless/mac80211_hwsim.c
3847 ++++ b/drivers/net/wireless/mac80211_hwsim.c
3848 +@@ -2771,6 +2771,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
3849 + if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
3850 + !info->attrs[HWSIM_ATTR_FLAGS] ||
3851 + !info->attrs[HWSIM_ATTR_COOKIE] ||
3852 ++ !info->attrs[HWSIM_ATTR_SIGNAL] ||
3853 + !info->attrs[HWSIM_ATTR_TX_INFO])
3854 + goto out;
3855 +
3856 +diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
3857 +index 0f48048b8654..3a0faa8fe9d4 100644
3858 +--- a/drivers/net/wireless/realtek/rtlwifi/core.c
3859 ++++ b/drivers/net/wireless/realtek/rtlwifi/core.c
3860 +@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
3861 + void rtl_addr_delay(u32 addr)
3862 + {
3863 + if (addr == 0xfe)
3864 +- msleep(50);
3865 ++ mdelay(50);
3866 + else if (addr == 0xfd)
3867 + msleep(5);
3868 + else if (addr == 0xfc)
3869 +@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
3870 + rtl_addr_delay(addr);
3871 + } else {
3872 + rtl_set_rfreg(hw, rfpath, addr, mask, data);
3873 +- usleep_range(1, 2);
3874 ++ udelay(1);
3875 + }
3876 + }
3877 + EXPORT_SYMBOL(rtl_rfreg_delay);
3878 +@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
3879 + rtl_addr_delay(addr);
3880 + } else {
3881 + rtl_set_bbreg(hw, addr, MASKDWORD, data);
3882 +- usleep_range(1, 2);
3883 ++ udelay(1);
3884 + }
3885 + }
3886 + EXPORT_SYMBOL(rtl_bb_delay);
3887 +diff --git a/drivers/of/irq.c b/drivers/of/irq.c
3888 +index e7bfc175b8e1..6ec743faabe8 100644
3889 +--- a/drivers/of/irq.c
3890 ++++ b/drivers/of/irq.c
3891 +@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
3892 + EXPORT_SYMBOL_GPL(of_irq_to_resource);
3893 +
3894 + /**
3895 +- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
3896 ++ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
3897 + * @dev: pointer to device tree node
3898 +- * @index: zero-based index of the irq
3899 +- *
3900 +- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
3901 +- * is not yet created.
3902 ++ * @index: zero-based index of the IRQ
3903 + *
3904 ++ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
3905 ++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
3906 ++ * of any other failure.
3907 + */
3908 + int of_irq_get(struct device_node *dev, int index)
3909 + {
3910 +@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
3911 + EXPORT_SYMBOL_GPL(of_irq_get);
3912 +
3913 + /**
3914 +- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
3915 ++ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
3916 + * @dev: pointer to device tree node
3917 +- * @name: irq name
3918 ++ * @name: IRQ name
3919 + *
3920 +- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
3921 +- * is not yet created, or error code in case of any other failure.
3922 ++ * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or
3923 ++ * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case
3924 ++ * of any other failure.
3925 + */
3926 + int of_irq_get_byname(struct device_node *dev, const char *name)
3927 + {
3928 +diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
3929 +index dfbab61a1b47..1fa3a3219c45 100644
3930 +--- a/drivers/pci/vc.c
3931 ++++ b/drivers/pci/vc.c
3932 +@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
3933 + else
3934 + pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
3935 + *(u16 *)buf);
3936 +- buf += 2;
3937 ++ buf += 4;
3938 + }
3939 +- len += 2;
3940 ++ len += 4;
3941 +
3942 + /*
3943 + * If we have any Low Priority VCs and a VC Arbitration Table Offset
3944 +diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
3945 +index 56a17ec5b5ef..6c7fe4778793 100644
3946 +--- a/drivers/regulator/qcom_smd-regulator.c
3947 ++++ b/drivers/regulator/qcom_smd-regulator.c
3948 +@@ -140,6 +140,18 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
3949 + .enable = rpm_reg_enable,
3950 + .disable = rpm_reg_disable,
3951 + .is_enabled = rpm_reg_is_enabled,
3952 ++ .list_voltage = regulator_list_voltage_linear_range,
3953 ++
3954 ++ .get_voltage = rpm_reg_get_voltage,
3955 ++ .set_voltage = rpm_reg_set_voltage,
3956 ++
3957 ++ .set_load = rpm_reg_set_load,
3958 ++};
3959 ++
3960 ++static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
3961 ++ .enable = rpm_reg_enable,
3962 ++ .disable = rpm_reg_disable,
3963 ++ .is_enabled = rpm_reg_is_enabled,
3964 +
3965 + .get_voltage = rpm_reg_get_voltage,
3966 + .set_voltage = rpm_reg_set_voltage,
3967 +@@ -247,7 +259,7 @@ static const struct regulator_desc pm8941_nldo = {
3968 + static const struct regulator_desc pm8941_lnldo = {
3969 + .fixed_uV = 1740000,
3970 + .n_voltages = 1,
3971 +- .ops = &rpm_smps_ldo_ops,
3972 ++ .ops = &rpm_smps_ldo_ops_fixed,
3973 + };
3974 +
3975 + static const struct regulator_desc pm8941_switch = {
3976 +diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
3977 +index d4c285688ce9..3ddc85e6efd6 100644
3978 +--- a/drivers/scsi/53c700.c
3979 ++++ b/drivers/scsi/53c700.c
3980 +@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
3981 + } else {
3982 + struct scsi_cmnd *SCp;
3983 +
3984 +- SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
3985 ++ SCp = SDp->current_cmnd;
3986 + if(unlikely(SCp == NULL)) {
3987 + sdev_printk(KERN_ERR, SDp,
3988 + "no saved request for untagged cmd\n");
3989 +@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
3990 + slot->tag, slot);
3991 + } else {
3992 + slot->tag = SCSI_NO_TAG;
3993 +- /* must populate current_cmnd for scsi_host_find_tag to work */
3994 ++ /* save current command for reselection */
3995 + SCp->device->current_cmnd = SCp;
3996 + }
3997 + /* sanity check: some of the commands generated by the mid-layer
3998 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
3999 +index 984ddcb4786d..1b9c049bd5c5 100644
4000 +--- a/drivers/scsi/scsi_error.c
4001 ++++ b/drivers/scsi/scsi_error.c
4002 +@@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
4003 + */
4004 + void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
4005 + {
4006 +- scmd->device->host->host_failed--;
4007 + scmd->eh_eflags = 0;
4008 + list_move_tail(&scmd->eh_entry, done_q);
4009 + }
4010 +@@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data)
4011 + else
4012 + scsi_unjam_host(shost);
4013 +
4014 ++ /* All scmds have been handled */
4015 ++ shost->host_failed = 0;
4016 ++
4017 + /*
4018 + * Note - if the above fails completely, the action is to take
4019 + * individual devices offline and flush the queue of any
4020 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
4021 +index f52b74cf8d1e..41c3a2c4f112 100644
4022 +--- a/drivers/scsi/sd.c
4023 ++++ b/drivers/scsi/sd.c
4024 +@@ -2862,10 +2862,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
4025 + if (sdkp->opt_xfer_blocks &&
4026 + sdkp->opt_xfer_blocks <= dev_max &&
4027 + sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
4028 +- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
4029 +- rw_max = q->limits.io_opt =
4030 +- sdkp->opt_xfer_blocks * sdp->sector_size;
4031 +- else
4032 ++ logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
4033 ++ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
4034 ++ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
4035 ++ } else
4036 + rw_max = BLK_DEF_MAX_SECTORS;
4037 +
4038 + /* Combine with controller limits */
4039 +diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
4040 +index 654630bb7d0e..765a6f1ac1b7 100644
4041 +--- a/drivers/scsi/sd.h
4042 ++++ b/drivers/scsi/sd.h
4043 +@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
4044 + return blocks << (ilog2(sdev->sector_size) - 9);
4045 + }
4046 +
4047 ++static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
4048 ++{
4049 ++ return blocks * sdev->sector_size;
4050 ++}
4051 ++
4052 + /*
4053 + * A DIF-capable target device can be formatted with different
4054 + * protection schemes. Currently 0 through 3 are defined:
4055 +diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
4056 +index a8f533af9eca..ec12181822e6 100644
4057 +--- a/drivers/staging/iio/accel/sca3000_core.c
4058 ++++ b/drivers/staging/iio/accel/sca3000_core.c
4059 +@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
4060 + goto error_ret_mut;
4061 + ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
4062 + mutex_unlock(&st->lock);
4063 +- if (ret)
4064 ++ if (ret < 0)
4065 + goto error_ret;
4066 + val = ret;
4067 + if (base_freq > 0)
4068 +diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
4069 +index 6ceac4f2d4b2..5b4b47ed948b 100644
4070 +--- a/drivers/thermal/cpu_cooling.c
4071 ++++ b/drivers/thermal/cpu_cooling.c
4072 +@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
4073 + goto free_power_table;
4074 + }
4075 +
4076 +- snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
4077 +- cpufreq_dev->id);
4078 +-
4079 +- cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
4080 +- &cpufreq_cooling_ops);
4081 +- if (IS_ERR(cool_dev))
4082 +- goto remove_idr;
4083 +-
4084 + /* Fill freq-table in descending order of frequencies */
4085 + for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
4086 + freq = find_next_max(table, freq);
4087 +@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
4088 + pr_debug("%s: freq:%u KHz\n", __func__, freq);
4089 + }
4090 +
4091 ++ snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
4092 ++ cpufreq_dev->id);
4093 ++
4094 ++ cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
4095 ++ &cpufreq_cooling_ops);
4096 ++ if (IS_ERR(cool_dev))
4097 ++ goto remove_idr;
4098 ++
4099 + cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
4100 + cpufreq_dev->cool_dev = cool_dev;
4101 +
4102 +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
4103 +index f973bfce5d08..1e93a37e27f0 100644
4104 +--- a/drivers/tty/vt/keyboard.c
4105 ++++ b/drivers/tty/vt/keyboard.c
4106 +@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
4107 +
4108 + static void do_compute_shiftstate(void)
4109 + {
4110 +- unsigned int i, j, k, sym, val;
4111 ++ unsigned int k, sym, val;
4112 +
4113 + shift_state = 0;
4114 + memset(shift_down, 0, sizeof(shift_down));
4115 +
4116 +- for (i = 0; i < ARRAY_SIZE(key_down); i++) {
4117 +-
4118 +- if (!key_down[i])
4119 ++ for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
4120 ++ sym = U(key_maps[0][k]);
4121 ++ if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
4122 + continue;
4123 +
4124 +- k = i * BITS_PER_LONG;
4125 +-
4126 +- for (j = 0; j < BITS_PER_LONG; j++, k++) {
4127 +-
4128 +- if (!test_bit(k, key_down))
4129 +- continue;
4130 ++ val = KVAL(sym);
4131 ++ if (val == KVAL(K_CAPSSHIFT))
4132 ++ val = KVAL(K_SHIFT);
4133 +
4134 +- sym = U(key_maps[0][k]);
4135 +- if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
4136 +- continue;
4137 +-
4138 +- val = KVAL(sym);
4139 +- if (val == KVAL(K_CAPSSHIFT))
4140 +- val = KVAL(K_SHIFT);
4141 +-
4142 +- shift_down[val]++;
4143 +- shift_state |= (1 << val);
4144 +- }
4145 ++ shift_down[val]++;
4146 ++ shift_state |= BIT(val);
4147 + }
4148 + }
4149 +
4150 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4151 +index bd523adb9643..e9e29ded3f30 100644
4152 +--- a/drivers/tty/vt/vt.c
4153 ++++ b/drivers/tty/vt/vt.c
4154 +@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
4155 + vc->vc_complement_mask = 0;
4156 + vc->vc_can_do_color = 0;
4157 + vc->vc_panic_force_write = false;
4158 ++ vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
4159 + vc->vc_sw->con_init(vc, init);
4160 + if (!vc->vc_complement_mask)
4161 + vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
4162 +diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
4163 +index 504708f59b93..6c6040c22c7a 100644
4164 +--- a/drivers/usb/common/usb-otg-fsm.c
4165 ++++ b/drivers/usb/common/usb-otg-fsm.c
4166 +@@ -21,6 +21,7 @@
4167 + * 675 Mass Ave, Cambridge, MA 02139, USA.
4168 + */
4169 +
4170 ++#include <linux/module.h>
4171 + #include <linux/kernel.h>
4172 + #include <linux/types.h>
4173 + #include <linux/mutex.h>
4174 +@@ -452,3 +453,4 @@ int otg_statemachine(struct otg_fsm *fsm)
4175 + return state_changed;
4176 + }
4177 + EXPORT_SYMBOL_GPL(otg_statemachine);
4178 ++MODULE_LICENSE("GPL");
4179 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4180 +index 980fc5774151..2d107d0f61b0 100644
4181 +--- a/drivers/usb/core/hcd.c
4182 ++++ b/drivers/usb/core/hcd.c
4183 +@@ -2597,26 +2597,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
4184 + * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
4185 + * deallocated.
4186 + *
4187 +- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
4188 +- * freed. When hcd_release() is called for either hcd in a peer set
4189 +- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
4190 +- * block new peering attempts
4191 ++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
4192 ++ * freed. When hcd_release() is called for either hcd in a peer set,
4193 ++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
4194 + */
4195 + static void hcd_release(struct kref *kref)
4196 + {
4197 + struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
4198 +
4199 + mutex_lock(&usb_port_peer_mutex);
4200 +- if (usb_hcd_is_primary_hcd(hcd)) {
4201 +- kfree(hcd->address0_mutex);
4202 +- kfree(hcd->bandwidth_mutex);
4203 +- }
4204 + if (hcd->shared_hcd) {
4205 + struct usb_hcd *peer = hcd->shared_hcd;
4206 +
4207 + peer->shared_hcd = NULL;
4208 +- if (peer->primary_hcd == hcd)
4209 +- peer->primary_hcd = NULL;
4210 ++ peer->primary_hcd = NULL;
4211 ++ } else {
4212 ++ kfree(hcd->address0_mutex);
4213 ++ kfree(hcd->bandwidth_mutex);
4214 + }
4215 + mutex_unlock(&usb_port_peer_mutex);
4216 + kfree(hcd);
4217 +diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
4218 +index 3c58d633ce80..dec0b21fc626 100644
4219 +--- a/drivers/usb/dwc2/core.h
4220 ++++ b/drivers/usb/dwc2/core.h
4221 +@@ -64,6 +64,17 @@
4222 + DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
4223 + dev_name(hsotg->dev), ##__VA_ARGS__)
4224 +
4225 ++#ifdef CONFIG_MIPS
4226 ++/*
4227 ++ * There are some MIPS machines that can run in either big-endian
4228 ++ * or little-endian mode and that use the dwc2 register without
4229 ++ * a byteswap in both ways.
4230 ++ * Unlike other architectures, MIPS apparently does not require a
4231 ++ * barrier before the __raw_writel() to synchronize with DMA but does
4232 ++ * require the barrier after the __raw_writel() to serialize a set of
4233 ++ * writes. This set of operations was added specifically for MIPS and
4234 ++ * should only be used there.
4235 ++ */
4236 + static inline u32 dwc2_readl(const void __iomem *addr)
4237 + {
4238 + u32 value = __raw_readl(addr);
4239 +@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
4240 + pr_info("INFO:: wrote %08x to %p\n", value, addr);
4241 + #endif
4242 + }
4243 ++#else
4244 ++/* Normal architectures just use readl/write */
4245 ++static inline u32 dwc2_readl(const void __iomem *addr)
4246 ++{
4247 ++ return readl(addr);
4248 ++}
4249 ++
4250 ++static inline void dwc2_writel(u32 value, void __iomem *addr)
4251 ++{
4252 ++ writel(value, addr);
4253 ++
4254 ++#ifdef DWC2_LOG_WRITES
4255 ++ pr_info("info:: wrote %08x to %p\n", value, addr);
4256 ++#endif
4257 ++}
4258 ++#endif
4259 +
4260 + /* Maximum number of Endpoints/HostChannels */
4261 + #define MAX_EPS_CHANNELS 16
4262 +diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
4263 +index 7b6d74f0c72f..476c0e3a7150 100644
4264 +--- a/drivers/virtio/virtio_balloon.c
4265 ++++ b/drivers/virtio/virtio_balloon.c
4266 +@@ -75,7 +75,7 @@ struct virtio_balloon {
4267 +
4268 + /* The array of pfns we tell the Host about. */
4269 + unsigned int num_pfns;
4270 +- u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
4271 ++ __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
4272 +
4273 + /* Memory statistics */
4274 + struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
4275 +@@ -127,14 +127,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
4276 +
4277 + }
4278 +
4279 +-static void set_page_pfns(u32 pfns[], struct page *page)
4280 ++static void set_page_pfns(struct virtio_balloon *vb,
4281 ++ __virtio32 pfns[], struct page *page)
4282 + {
4283 + unsigned int i;
4284 +
4285 + /* Set balloon pfns pointing at this page.
4286 + * Note that the first pfn points at start of the page. */
4287 + for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
4288 +- pfns[i] = page_to_balloon_pfn(page) + i;
4289 ++ pfns[i] = cpu_to_virtio32(vb->vdev,
4290 ++ page_to_balloon_pfn(page) + i);
4291 + }
4292 +
4293 + static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4294 +@@ -158,7 +160,7 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4295 + msleep(200);
4296 + break;
4297 + }
4298 +- set_page_pfns(vb->pfns + vb->num_pfns, page);
4299 ++ set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
4300 + vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
4301 + if (!virtio_has_feature(vb->vdev,
4302 + VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
4303 +@@ -177,10 +179,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4304 + static void release_pages_balloon(struct virtio_balloon *vb)
4305 + {
4306 + unsigned int i;
4307 ++ struct page *page;
4308 +
4309 + /* Find pfns pointing at start of each page, get pages and free them. */
4310 + for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
4311 +- struct page *page = balloon_pfn_to_page(vb->pfns[i]);
4312 ++ page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
4313 ++ vb->pfns[i]));
4314 + if (!virtio_has_feature(vb->vdev,
4315 + VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
4316 + adjust_managed_page_count(page, 1);
4317 +@@ -203,7 +207,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
4318 + page = balloon_page_dequeue(vb_dev_info);
4319 + if (!page)
4320 + break;
4321 +- set_page_pfns(vb->pfns + vb->num_pfns, page);
4322 ++ set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
4323 + vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
4324 + }
4325 +
4326 +@@ -471,13 +475,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
4327 + __count_vm_event(BALLOON_MIGRATE);
4328 + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
4329 + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
4330 +- set_page_pfns(vb->pfns, newpage);
4331 ++ set_page_pfns(vb, vb->pfns, newpage);
4332 + tell_host(vb, vb->inflate_vq);
4333 +
4334 + /* balloon's page migration 2nd step -- deflate "page" */
4335 + balloon_page_delete(page);
4336 + vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
4337 +- set_page_pfns(vb->pfns, page);
4338 ++ set_page_pfns(vb, vb->pfns, page);
4339 + tell_host(vb, vb->deflate_vq);
4340 +
4341 + mutex_unlock(&vb->balloon_lock);
4342 +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
4343 +index d46839f51e73..e4db19e88ab1 100644
4344 +--- a/drivers/xen/balloon.c
4345 ++++ b/drivers/xen/balloon.c
4346 +@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
4347 + static void balloon_process(struct work_struct *work);
4348 + static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
4349 +
4350 +-static void release_memory_resource(struct resource *resource);
4351 +-
4352 + /* When ballooning out (allocating memory to return to Xen) we don't really
4353 + want the kernel to try too hard since that can trigger the oom killer. */
4354 + #define GFP_BALLOON \
4355 +@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
4356 + }
4357 +
4358 + #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
4359 ++static void release_memory_resource(struct resource *resource)
4360 ++{
4361 ++ if (!resource)
4362 ++ return;
4363 ++
4364 ++ /*
4365 ++ * No need to reset region to identity mapped since we now
4366 ++ * know that no I/O can be in this region
4367 ++ */
4368 ++ release_resource(resource);
4369 ++ kfree(resource);
4370 ++}
4371 ++
4372 + static struct resource *additional_memory_resource(phys_addr_t size)
4373 + {
4374 + struct resource *res;
4375 +@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
4376 + return res;
4377 + }
4378 +
4379 +-static void release_memory_resource(struct resource *resource)
4380 +-{
4381 +- if (!resource)
4382 +- return;
4383 +-
4384 +- /*
4385 +- * No need to reset region to identity mapped since we now
4386 +- * know that no I/O can be in this region
4387 +- */
4388 +- release_resource(resource);
4389 +- kfree(resource);
4390 +-}
4391 +-
4392 + static enum bp_state reserve_additional_memory(void)
4393 + {
4394 + long credit;
4395 +diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
4396 +index 076970a54f89..4ce10bcca18b 100644
4397 +--- a/drivers/xen/xen-acpi-processor.c
4398 ++++ b/drivers/xen/xen-acpi-processor.c
4399 +@@ -423,36 +423,7 @@ upload:
4400 +
4401 + return 0;
4402 + }
4403 +-static int __init check_prereq(void)
4404 +-{
4405 +- struct cpuinfo_x86 *c = &cpu_data(0);
4406 +-
4407 +- if (!xen_initial_domain())
4408 +- return -ENODEV;
4409 +-
4410 +- if (!acpi_gbl_FADT.smi_command)
4411 +- return -ENODEV;
4412 +-
4413 +- if (c->x86_vendor == X86_VENDOR_INTEL) {
4414 +- if (!cpu_has(c, X86_FEATURE_EST))
4415 +- return -ENODEV;
4416 +
4417 +- return 0;
4418 +- }
4419 +- if (c->x86_vendor == X86_VENDOR_AMD) {
4420 +- /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
4421 +- * as we get compile warnings for the static functions.
4422 +- */
4423 +-#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
4424 +-#define USE_HW_PSTATE 0x00000080
4425 +- u32 eax, ebx, ecx, edx;
4426 +- cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
4427 +- if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
4428 +- return -ENODEV;
4429 +- return 0;
4430 +- }
4431 +- return -ENODEV;
4432 +-}
4433 + /* acpi_perf_data is a pointer to percpu data. */
4434 + static struct acpi_processor_performance __percpu *acpi_perf_data;
4435 +
4436 +@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
4437 + static int __init xen_acpi_processor_init(void)
4438 + {
4439 + unsigned int i;
4440 +- int rc = check_prereq();
4441 ++ int rc;
4442 +
4443 +- if (rc)
4444 +- return rc;
4445 ++ if (!xen_initial_domain())
4446 ++ return -ENODEV;
4447 +
4448 + nr_acpi_bits = get_max_acpi_id() + 1;
4449 + acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
4450 +diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4451 +index ec7928a27aaa..234707cc419c 100644
4452 +--- a/fs/btrfs/ctree.c
4453 ++++ b/fs/btrfs/ctree.c
4454 +@@ -1552,6 +1552,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
4455 + trans->transid, root->fs_info->generation);
4456 +
4457 + if (!should_cow_block(trans, root, buf)) {
4458 ++ trans->dirty = true;
4459 + *cow_ret = buf;
4460 + return 0;
4461 + }
4462 +@@ -2773,8 +2774,10 @@ again:
4463 + * then we don't want to set the path blocking,
4464 + * so we test it here
4465 + */
4466 +- if (!should_cow_block(trans, root, b))
4467 ++ if (!should_cow_block(trans, root, b)) {
4468 ++ trans->dirty = true;
4469 + goto cow_done;
4470 ++ }
4471 +
4472 + /*
4473 + * must have write locks on this node and the
4474 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4475 +index 84e060eb0de8..78f1b57d0b46 100644
4476 +--- a/fs/btrfs/extent-tree.c
4477 ++++ b/fs/btrfs/extent-tree.c
4478 +@@ -7929,7 +7929,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4479 + set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4480 + buf->start + buf->len - 1, GFP_NOFS);
4481 + }
4482 +- trans->blocks_used++;
4483 ++ trans->dirty = true;
4484 + /* this returns a buffer locked for blocking */
4485 + return buf;
4486 + }
4487 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
4488 +index 00b8f37cc306..d7c138f42bdf 100644
4489 +--- a/fs/btrfs/super.c
4490 ++++ b/fs/btrfs/super.c
4491 +@@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
4492 + trans->aborted = errno;
4493 + /* Nothing used. The other threads that have joined this
4494 + * transaction may be able to continue. */
4495 +- if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
4496 ++ if (!trans->dirty && list_empty(&trans->new_bgs)) {
4497 + const char *errstr;
4498 +
4499 + errstr = btrfs_decode_error(errno);
4500 +diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
4501 +index 72be51f7ca2f..c0b501a5a353 100644
4502 +--- a/fs/btrfs/transaction.h
4503 ++++ b/fs/btrfs/transaction.h
4504 +@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
4505 + u64 chunk_bytes_reserved;
4506 + unsigned long use_count;
4507 + unsigned long blocks_reserved;
4508 +- unsigned long blocks_used;
4509 + unsigned long delayed_ref_updates;
4510 + struct btrfs_transaction *transaction;
4511 + struct btrfs_block_rsv *block_rsv;
4512 +@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
4513 + bool can_flush_pending_bgs;
4514 + bool reloc_reserved;
4515 + bool sync;
4516 ++ bool dirty;
4517 + unsigned int type;
4518 + /*
4519 + * this root is only needed to validate that the root passed to
4520 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
4521 +index 5a53ac6b1e02..02b071bf3732 100644
4522 +--- a/fs/cifs/cifs_unicode.c
4523 ++++ b/fs/cifs/cifs_unicode.c
4524 +@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
4525 + case SFM_SLASH:
4526 + *target = '\\';
4527 + break;
4528 ++ case SFM_SPACE:
4529 ++ *target = ' ';
4530 ++ break;
4531 ++ case SFM_PERIOD:
4532 ++ *target = '.';
4533 ++ break;
4534 + default:
4535 + return false;
4536 + }
4537 +@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
4538 + return dest_char;
4539 + }
4540 +
4541 +-static __le16 convert_to_sfm_char(char src_char)
4542 ++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
4543 + {
4544 + __le16 dest_char;
4545 +
4546 +@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
4547 + case '|':
4548 + dest_char = cpu_to_le16(SFM_PIPE);
4549 + break;
4550 ++ case '.':
4551 ++ if (end_of_string)
4552 ++ dest_char = cpu_to_le16(SFM_PERIOD);
4553 ++ else
4554 ++ dest_char = 0;
4555 ++ break;
4556 ++ case ' ':
4557 ++ if (end_of_string)
4558 ++ dest_char = cpu_to_le16(SFM_SPACE);
4559 ++ else
4560 ++ dest_char = 0;
4561 ++ break;
4562 + default:
4563 + dest_char = 0;
4564 + }
4565 +@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
4566 + /* see if we must remap this char */
4567 + if (map_chars == SFU_MAP_UNI_RSVD)
4568 + dst_char = convert_to_sfu_char(src_char);
4569 +- else if (map_chars == SFM_MAP_UNI_RSVD)
4570 +- dst_char = convert_to_sfm_char(src_char);
4571 +- else
4572 ++ else if (map_chars == SFM_MAP_UNI_RSVD) {
4573 ++ bool end_of_string;
4574 ++
4575 ++ if (i == srclen - 1)
4576 ++ end_of_string = true;
4577 ++ else
4578 ++ end_of_string = false;
4579 ++
4580 ++ dst_char = convert_to_sfm_char(src_char, end_of_string);
4581 ++ } else
4582 + dst_char = 0;
4583 + /*
4584 + * FIXME: We can not handle remapping backslash (UNI_SLASH)
4585 +diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
4586 +index bdc52cb9a676..479bc0a941f3 100644
4587 +--- a/fs/cifs/cifs_unicode.h
4588 ++++ b/fs/cifs/cifs_unicode.h
4589 +@@ -64,6 +64,8 @@
4590 + #define SFM_LESSTHAN ((__u16) 0xF023)
4591 + #define SFM_PIPE ((__u16) 0xF027)
4592 + #define SFM_SLASH ((__u16) 0xF026)
4593 ++#define SFM_PERIOD ((__u16) 0xF028)
4594 ++#define SFM_SPACE ((__u16) 0xF029)
4595 +
4596 + /*
4597 + * Mapping mechanism to use when one of the seven reserved characters is
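
[Illustrative sketch, not from the patch: what the SFM_PERIOD/SFM_SPACE additions above are for. A trailing '.' or ' ' is remapped into the private-use area so the server round-trips it, while the same character elsewhere in the name is left alone; the helper name below is hypothetical, and 0 means "no remapping".]

    static __le16 sfm_remap_trailing_char(char c, bool end_of_string)
    {
            switch (c) {
            case '.':
                    return end_of_string ? cpu_to_le16(SFM_PERIOD) : 0;
            case ' ':
                    return end_of_string ? cpu_to_le16(SFM_SPACE) : 0;
            default:
                    return 0;       /* caller emits the character unchanged */
            }
    }
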
4598 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4599 +index 6f62ac821a84..34cbc582e8d7 100644
4600 +--- a/fs/cifs/connect.c
4601 ++++ b/fs/cifs/connect.c
4602 +@@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
4603 + * server->ops->need_neg() == true. Also, no need to ping if
4604 + * we got a response recently.
4605 + */
4606 +- if (!server->ops->need_neg || server->ops->need_neg(server) ||
4607 ++
4608 ++ if (server->tcpStatus == CifsNeedReconnect ||
4609 ++ server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
4610 + (server->ops->can_echo && !server->ops->can_echo(server)) ||
4611 + time_before(jiffies, server->lstrp + echo_interval - HZ))
4612 + goto requeue_echo;
4613 +diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
4614 +index 848249fa120f..3079b38f0afb 100644
4615 +--- a/fs/cifs/ntlmssp.h
4616 ++++ b/fs/cifs/ntlmssp.h
4617 +@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
4618 +
4619 + int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
4620 + void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
4621 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
4622 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
4623 + struct cifs_ses *ses,
4624 + const struct nls_table *nls_cp);
4625 +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
4626 +index af0ec2d5ad0e..e88ffe1da045 100644
4627 +--- a/fs/cifs/sess.c
4628 ++++ b/fs/cifs/sess.c
4629 +@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
4630 + sec_blob->DomainName.MaximumLength = 0;
4631 + }
4632 +
4633 +-/* We do not malloc the blob, it is passed in pbuffer, because its
4634 +- maximum possible size is fixed and small, making this approach cleaner.
4635 +- This function returns the length of the data in the blob */
4636 +-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4637 ++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
4638 ++{
4639 ++ int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
4640 ++ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
4641 ++
4642 ++ if (ses->domainName)
4643 ++ sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
4644 ++ else
4645 ++ sz += 2;
4646 ++
4647 ++ if (ses->user_name)
4648 ++ sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
4649 ++ else
4650 ++ sz += 2;
4651 ++
4652 ++ return sz;
4653 ++}
4654 ++
4655 ++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
4656 + u16 *buflen,
4657 + struct cifs_ses *ses,
4658 + const struct nls_table *nls_cp)
4659 + {
4660 + int rc;
4661 +- AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
4662 ++ AUTHENTICATE_MESSAGE *sec_blob;
4663 + __u32 flags;
4664 + unsigned char *tmp;
4665 +
4666 ++ rc = setup_ntlmv2_rsp(ses, nls_cp);
4667 ++ if (rc) {
4668 ++ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
4669 ++ *buflen = 0;
4670 ++ goto setup_ntlmv2_ret;
4671 ++ }
4672 ++ *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
4673 ++ sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
4674 ++
4675 + memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
4676 + sec_blob->MessageType = NtLmAuthenticate;
4677 +
4678 +@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4679 + flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
4680 + }
4681 +
4682 +- tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
4683 ++ tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
4684 + sec_blob->NegotiateFlags = cpu_to_le32(flags);
4685 +
4686 + sec_blob->LmChallengeResponse.BufferOffset =
4687 +@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4688 + sec_blob->LmChallengeResponse.Length = 0;
4689 + sec_blob->LmChallengeResponse.MaximumLength = 0;
4690 +
4691 +- sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
4692 ++ sec_blob->NtChallengeResponse.BufferOffset =
4693 ++ cpu_to_le32(tmp - *pbuffer);
4694 + if (ses->user_name != NULL) {
4695 +- rc = setup_ntlmv2_rsp(ses, nls_cp);
4696 +- if (rc) {
4697 +- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
4698 +- goto setup_ntlmv2_ret;
4699 +- }
4700 + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
4701 + ses->auth_key.len - CIFS_SESS_KEY_SIZE);
4702 + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
4703 +@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4704 + }
4705 +
4706 + if (ses->domainName == NULL) {
4707 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4708 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4709 + sec_blob->DomainName.Length = 0;
4710 + sec_blob->DomainName.MaximumLength = 0;
4711 + tmp += 2;
4712 +@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4713 + len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
4714 + CIFS_MAX_USERNAME_LEN, nls_cp);
4715 + len *= 2; /* unicode is 2 bytes each */
4716 +- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4717 ++ sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4718 + sec_blob->DomainName.Length = cpu_to_le16(len);
4719 + sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
4720 + tmp += len;
4721 + }
4722 +
4723 + if (ses->user_name == NULL) {
4724 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4725 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4726 + sec_blob->UserName.Length = 0;
4727 + sec_blob->UserName.MaximumLength = 0;
4728 + tmp += 2;
4729 +@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4730 + len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
4731 + CIFS_MAX_USERNAME_LEN, nls_cp);
4732 + len *= 2; /* unicode is 2 bytes each */
4733 +- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4734 ++ sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4735 + sec_blob->UserName.Length = cpu_to_le16(len);
4736 + sec_blob->UserName.MaximumLength = cpu_to_le16(len);
4737 + tmp += len;
4738 + }
4739 +
4740 +- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4741 ++ sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4742 + sec_blob->WorkstationName.Length = 0;
4743 + sec_blob->WorkstationName.MaximumLength = 0;
4744 + tmp += 2;
4745 +@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4746 + (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
4747 + && !calc_seckey(ses)) {
4748 + memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
4749 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
4750 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4751 + sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
4752 + sec_blob->SessionKey.MaximumLength =
4753 + cpu_to_le16(CIFS_CPHTXT_SIZE);
4754 + tmp += CIFS_CPHTXT_SIZE;
4755 + } else {
4756 +- sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
4757 ++ sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4758 + sec_blob->SessionKey.Length = 0;
4759 + sec_blob->SessionKey.MaximumLength = 0;
4760 + }
4761 +
4762 ++ *buflen = tmp - *pbuffer;
4763 + setup_ntlmv2_ret:
4764 +- *buflen = tmp - pbuffer;
4765 + return rc;
4766 + }
4767 +
4768 +@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
4769 + struct cifs_ses *ses = sess_data->ses;
4770 + __u16 bytes_remaining;
4771 + char *bcc_ptr;
4772 +- char *ntlmsspblob = NULL;
4773 ++ unsigned char *ntlmsspblob = NULL;
4774 + u16 blob_len;
4775 +
4776 + cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
4777 +@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
4778 + /* Build security blob before we assemble the request */
4779 + pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
4780 + smb_buf = (struct smb_hdr *)pSMB;
4781 +- /*
4782 +- * 5 is an empirical value, large enough to hold
4783 +- * authenticate message plus max 10 of av paris,
4784 +- * domain, user, workstation names, flags, etc.
4785 +- */
4786 +- ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
4787 +- GFP_KERNEL);
4788 +- if (!ntlmsspblob) {
4789 +- rc = -ENOMEM;
4790 +- goto out;
4791 +- }
4792 +-
4793 +- rc = build_ntlmssp_auth_blob(ntlmsspblob,
4794 ++ rc = build_ntlmssp_auth_blob(&ntlmsspblob,
4795 + &blob_len, ses, sess_data->nls_cp);
4796 + if (rc)
4797 + goto out_free_ntlmsspblob;
4798 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4799 +index 8f38e33d365b..29e06db5f187 100644
4800 +--- a/fs/cifs/smb2pdu.c
4801 ++++ b/fs/cifs/smb2pdu.c
4802 +@@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
4803 + u16 blob_length = 0;
4804 + struct key *spnego_key = NULL;
4805 + char *security_blob = NULL;
4806 +- char *ntlmssp_blob = NULL;
4807 ++ unsigned char *ntlmssp_blob = NULL;
4808 + bool use_spnego = false; /* else use raw ntlmssp */
4809 +
4810 + cifs_dbg(FYI, "Session Setup\n");
4811 +@@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
4812 + iov[1].iov_len = blob_length;
4813 + } else if (phase == NtLmAuthenticate) {
4814 + req->hdr.SessionId = ses->Suid;
4815 +- ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
4816 +- GFP_KERNEL);
4817 +- if (ntlmssp_blob == NULL) {
4818 +- rc = -ENOMEM;
4819 +- goto ssetup_exit;
4820 +- }
4821 +- rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
4822 ++ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
4823 + nls_cp);
4824 + if (rc) {
4825 + cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
4826 +@@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
4827 +
4828 + cifs_dbg(FYI, "In echo request\n");
4829 +
4830 ++ if (server->tcpStatus == CifsNeedNegotiate) {
4831 ++ struct list_head *tmp, *tmp2;
4832 ++ struct cifs_ses *ses;
4833 ++ struct cifs_tcon *tcon;
4834 ++
4835 ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
4836 ++ spin_lock(&cifs_tcp_ses_lock);
4837 ++ list_for_each(tmp, &server->smb_ses_list) {
4838 ++ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
4839 ++ list_for_each(tmp2, &ses->tcon_list) {
4840 ++ tcon = list_entry(tmp2, struct cifs_tcon,
4841 ++ tcon_list);
4842 ++ /* add check for persistent handle reconnect */
4843 ++ if (tcon && tcon->need_reconnect) {
4844 ++ spin_unlock(&cifs_tcp_ses_lock);
4845 ++ rc = smb2_reconnect(SMB2_ECHO, tcon);
4846 ++ spin_lock(&cifs_tcp_ses_lock);
4847 ++ }
4848 ++ }
4849 ++ }
4850 ++ spin_unlock(&cifs_tcp_ses_lock);
4851 ++ }
4852 ++
4853 ++ /* if no session, renegotiate failed above */
4854 ++ if (server->tcpStatus == CifsNeedNegotiate)
4855 ++ return -EIO;
4856 ++
4857 + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
4858 + if (rc)
4859 + return rc;
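
[Illustrative sketch, not part of the patch: both call sites above follow the same pattern. build_ntlmssp_auth_blob() now sizes and allocates the blob itself, so the caller passes a pointer-to-pointer and frees the result; the wrapper name is hypothetical and assumes the usual fs/cifs headers (slab.h is already included there). kfree() of a pointer that was never set is harmless.]

    static int demo_build_and_free_auth_blob(struct cifs_ses *ses,
                                             const struct nls_table *nls_cp)
    {
            unsigned char *blob = NULL;
            u16 blob_len = 0;
            int rc;

            rc = build_ntlmssp_auth_blob(&blob, &blob_len, ses, nls_cp);
            if (rc)
                    goto out;

            /* blob/blob_len would be attached to the session setup request here */

    out:
            kfree(blob);
            return rc;
    }
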
4860 +diff --git a/fs/namei.c b/fs/namei.c
4861 +index 30145f8f21ed..aaa3b693ec0b 100644
4862 +--- a/fs/namei.c
4863 ++++ b/fs/namei.c
4864 +@@ -3173,6 +3173,10 @@ retry_lookup:
4865 + got_write = false;
4866 + }
4867 +
4868 ++ error = follow_managed(&path, nd);
4869 ++ if (unlikely(error < 0))
4870 ++ return error;
4871 ++
4872 + if (unlikely(d_is_negative(path.dentry))) {
4873 + path_to_nameidata(&path, nd);
4874 + return -ENOENT;
4875 +@@ -3188,10 +3192,6 @@ retry_lookup:
4876 + return -EEXIST;
4877 + }
4878 +
4879 +- error = follow_managed(&path, nd);
4880 +- if (unlikely(error < 0))
4881 +- return error;
4882 +-
4883 + seq = 0; /* out of RCU mode, so the value doesn't matter */
4884 + inode = d_backing_inode(path.dentry);
4885 + finish_lookup:
4886 +diff --git a/fs/namespace.c b/fs/namespace.c
4887 +index 4fb1691b4355..783004af5707 100644
4888 +--- a/fs/namespace.c
4889 ++++ b/fs/namespace.c
4890 +@@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
4891 + mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
4892 + }
4893 + if (type->fs_flags & FS_USERNS_VISIBLE) {
4894 +- if (!fs_fully_visible(type, &mnt_flags))
4895 ++ if (!fs_fully_visible(type, &mnt_flags)) {
4896 ++ put_filesystem(type);
4897 + return -EPERM;
4898 ++ }
4899 + }
4900 + }
4901 +
4902 +@@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
4903 + if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
4904 + mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
4905 +
4906 ++ /* Don't miss readonly hidden in the superblock flags */
4907 ++ if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
4908 ++ mnt_flags |= MNT_LOCK_READONLY;
4909 ++
4910 + /* Verify the mount flags are equal to or more permissive
4911 + * than the proposed new mount.
4912 + */
4913 +@@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
4914 + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4915 + struct inode *inode = child->mnt_mountpoint->d_inode;
4916 + /* Only worry about locked mounts */
4917 +- if (!(mnt_flags & MNT_LOCKED))
4918 ++ if (!(child->mnt.mnt_flags & MNT_LOCKED))
4919 + continue;
4920 + /* Is the directory permanetly empty? */
4921 + if (!is_empty_dir_inode(inode))
4922 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4923 +index 33eb81738d03..a7dd1fee8f13 100644
4924 +--- a/fs/nfs/dir.c
4925 ++++ b/fs/nfs/dir.c
4926 +@@ -1527,9 +1527,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
4927 + err = PTR_ERR(inode);
4928 + trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
4929 + put_nfs_open_context(ctx);
4930 ++ d_drop(dentry);
4931 + switch (err) {
4932 + case -ENOENT:
4933 +- d_drop(dentry);
4934 + d_add(dentry, NULL);
4935 + nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
4936 + break;
4937 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4938 +index 327b8c34d360..de2523f5e7c9 100644
4939 +--- a/fs/nfs/nfs4proc.c
4940 ++++ b/fs/nfs/nfs4proc.c
4941 +@@ -2860,12 +2860,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
4942 + call_close |= is_wronly;
4943 + else if (is_wronly)
4944 + calldata->arg.fmode |= FMODE_WRITE;
4945 ++ if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
4946 ++ call_close |= is_rdwr;
4947 + } else if (is_rdwr)
4948 + calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
4949 +
4950 +- if (calldata->arg.fmode == 0)
4951 +- call_close |= is_rdwr;
4952 +-
4953 + if (!nfs4_valid_open_stateid(state))
4954 + call_close = 0;
4955 + spin_unlock(&state->owner->so_lock);
4956 +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
4957 +index 776dccbc306d..dcb70001ae2c 100644
4958 +--- a/fs/nfs/pnfs_nfs.c
4959 ++++ b/fs/nfs/pnfs_nfs.c
4960 +@@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
4961 + }
4962 +
4963 + /* Helper function for pnfs_generic_commit_pagelist to catch an empty
4964 +- * page list. This can happen when two commits race. */
4965 ++ * page list. This can happen when two commits race.
4966 ++ *
4967 ++ * This must be called instead of nfs_init_commit - call one or the other, but
4968 ++ * not both!
4969 ++ */
4970 + static bool
4971 + pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
4972 + struct nfs_commit_data *data,
4973 +@@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
4974 + if (list_empty(pages)) {
4975 + if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
4976 + wake_up_atomic_t(&cinfo->mds->rpcs_out);
4977 +- nfs_commitdata_release(data);
4978 ++ /* don't call nfs_commitdata_release - it tries to put
4979 ++ * the open_context which is not acquired until nfs_init_commit
4980 ++ * which has not been called on @data */
4981 ++ WARN_ON_ONCE(data->context);
4982 ++ nfs_commit_free(data);
4983 + return true;
4984 + }
4985 +
4986 +diff --git a/fs/nfs/read.c b/fs/nfs/read.c
4987 +index 6776d7a7839e..572e5b3b06f1 100644
4988 +--- a/fs/nfs/read.c
4989 ++++ b/fs/nfs/read.c
4990 +@@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
4991 + nfs_list_remove_request(new);
4992 + nfs_readpage_release(new);
4993 + error = desc->pgio->pg_error;
4994 +- goto out_unlock;
4995 ++ goto out;
4996 + }
4997 + return 0;
4998 + out_error:
4999 + error = PTR_ERR(new);
5000 +-out_unlock:
5001 + unlock_page(page);
5002 ++out:
5003 + return error;
5004 + }
5005 +
5006 +diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
5007 +index 1580ea6fd64d..d08cd88155c7 100644
5008 +--- a/fs/nfsd/nfs2acl.c
5009 ++++ b/fs/nfsd/nfs2acl.c
5010 +@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
5011 + goto out;
5012 +
5013 + inode = d_inode(fh->fh_dentry);
5014 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
5015 +- error = -EOPNOTSUPP;
5016 +- goto out_errno;
5017 +- }
5018 +
5019 + error = fh_want_write(fh);
5020 + if (error)
5021 + goto out_errno;
5022 +
5023 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
5024 ++ fh_lock(fh);
5025 ++
5026 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
5027 + if (error)
5028 +- goto out_drop_write;
5029 +- error = inode->i_op->set_acl(inode, argp->acl_default,
5030 +- ACL_TYPE_DEFAULT);
5031 ++ goto out_drop_lock;
5032 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
5033 + if (error)
5034 +- goto out_drop_write;
5035 ++ goto out_drop_lock;
5036 ++
5037 ++ fh_unlock(fh);
5038 +
5039 + fh_drop_write(fh);
5040 +
5041 +@@ -131,7 +130,8 @@ out:
5042 + posix_acl_release(argp->acl_access);
5043 + posix_acl_release(argp->acl_default);
5044 + return nfserr;
5045 +-out_drop_write:
5046 ++out_drop_lock:
5047 ++ fh_unlock(fh);
5048 + fh_drop_write(fh);
5049 + out_errno:
5050 + nfserr = nfserrno(error);
5051 +diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
5052 +index 01df4cd7c753..0c890347cde3 100644
5053 +--- a/fs/nfsd/nfs3acl.c
5054 ++++ b/fs/nfsd/nfs3acl.c
5055 +@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
5056 + goto out;
5057 +
5058 + inode = d_inode(fh->fh_dentry);
5059 +- if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
5060 +- error = -EOPNOTSUPP;
5061 +- goto out_errno;
5062 +- }
5063 +
5064 + error = fh_want_write(fh);
5065 + if (error)
5066 + goto out_errno;
5067 +
5068 +- error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
5069 ++ fh_lock(fh);
5070 ++
5071 ++ error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
5072 + if (error)
5073 +- goto out_drop_write;
5074 +- error = inode->i_op->set_acl(inode, argp->acl_default,
5075 +- ACL_TYPE_DEFAULT);
5076 ++ goto out_drop_lock;
5077 ++ error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
5078 +
5079 +-out_drop_write:
5080 ++out_drop_lock:
5081 ++ fh_unlock(fh);
5082 + fh_drop_write(fh);
5083 + out_errno:
5084 + nfserr = nfserrno(error);
5085 +diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
5086 +index 6adabd6049b7..71292a0d6f09 100644
5087 +--- a/fs/nfsd/nfs4acl.c
5088 ++++ b/fs/nfsd/nfs4acl.c
5089 +@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
5090 + dentry = fhp->fh_dentry;
5091 + inode = d_inode(dentry);
5092 +
5093 +- if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
5094 +- return nfserr_attrnotsupp;
5095 +-
5096 + if (S_ISDIR(inode->i_mode))
5097 + flags = NFS4_ACL_DIR;
5098 +
5099 +@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
5100 + if (host_error < 0)
5101 + goto out_nfserr;
5102 +
5103 +- host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
5104 ++ fh_lock(fhp);
5105 ++
5106 ++ host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
5107 + if (host_error < 0)
5108 +- goto out_release;
5109 ++ goto out_drop_lock;
5110 +
5111 + if (S_ISDIR(inode->i_mode)) {
5112 +- host_error = inode->i_op->set_acl(inode, dpacl,
5113 +- ACL_TYPE_DEFAULT);
5114 ++ host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
5115 + }
5116 +
5117 +-out_release:
5118 ++out_drop_lock:
5119 ++ fh_unlock(fhp);
5120 ++
5121 + posix_acl_release(pacl);
5122 + posix_acl_release(dpacl);
5123 + out_nfserr:
5124 +diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
5125 +index 7389cb1d7409..04c68d900324 100644
5126 +--- a/fs/nfsd/nfs4callback.c
5127 ++++ b/fs/nfsd/nfs4callback.c
5128 +@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
5129 + }
5130 + }
5131 +
5132 +-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
5133 +-{
5134 +- struct rpc_xprt *xprt;
5135 +-
5136 +- if (args->protocol != XPRT_TRANSPORT_BC_TCP)
5137 +- return rpc_create(args);
5138 +-
5139 +- xprt = args->bc_xprt->xpt_bc_xprt;
5140 +- if (xprt) {
5141 +- xprt_get(xprt);
5142 +- return rpc_create_xprt(args, xprt);
5143 +- }
5144 +-
5145 +- return rpc_create(args);
5146 +-}
5147 +-
5148 + static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
5149 + {
5150 + int maxtime = max_cb_time(clp->net);
5151 +@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
5152 + args.authflavor = ses->se_cb_sec.flavor;
5153 + }
5154 + /* Create RPC client */
5155 +- client = create_backchannel_client(&args);
5156 ++ client = rpc_create(&args);
5157 + if (IS_ERR(client)) {
5158 + dprintk("NFSD: couldn't create callback client: %ld\n",
5159 + PTR_ERR(client));
5160 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
5161 +index 0462eeddfff9..9e04e49df681 100644
5162 +--- a/fs/nfsd/nfs4state.c
5163 ++++ b/fs/nfsd/nfs4state.c
5164 +@@ -3487,6 +3487,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
5165 + struct nfs4_openowner *oo = open->op_openowner;
5166 + struct nfs4_ol_stateid *retstp = NULL;
5167 +
5168 ++ /* We are moving these outside of the spinlocks to avoid the warnings */
5169 ++ mutex_init(&stp->st_mutex);
5170 ++ mutex_lock(&stp->st_mutex);
5171 ++
5172 + spin_lock(&oo->oo_owner.so_client->cl_lock);
5173 + spin_lock(&fp->fi_lock);
5174 +
5175 +@@ -3502,13 +3506,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
5176 + stp->st_access_bmap = 0;
5177 + stp->st_deny_bmap = 0;
5178 + stp->st_openstp = NULL;
5179 +- init_rwsem(&stp->st_rwsem);
5180 + list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
5181 + list_add(&stp->st_perfile, &fp->fi_stateids);
5182 +
5183 + out_unlock:
5184 + spin_unlock(&fp->fi_lock);
5185 + spin_unlock(&oo->oo_owner.so_client->cl_lock);
5186 ++ if (retstp) {
5187 ++ mutex_lock(&retstp->st_mutex);
5188 ++ /* Not that we need to, just for neatness */
5189 ++ mutex_unlock(&stp->st_mutex);
5190 ++ }
5191 + return retstp;
5192 + }
5193 +
5194 +@@ -4335,32 +4343,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
5195 + */
5196 + if (stp) {
5197 + /* Stateid was found, this is an OPEN upgrade */
5198 +- down_read(&stp->st_rwsem);
5199 ++ mutex_lock(&stp->st_mutex);
5200 + status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5201 + if (status) {
5202 +- up_read(&stp->st_rwsem);
5203 ++ mutex_unlock(&stp->st_mutex);
5204 + goto out;
5205 + }
5206 + } else {
5207 + stp = open->op_stp;
5208 + open->op_stp = NULL;
5209 ++ /*
5210 ++ * init_open_stateid() either returns a locked stateid
5211 ++ * it found, or initializes and locks the new one we passed in
5212 ++ */
5213 + swapstp = init_open_stateid(stp, fp, open);
5214 + if (swapstp) {
5215 + nfs4_put_stid(&stp->st_stid);
5216 + stp = swapstp;
5217 +- down_read(&stp->st_rwsem);
5218 + status = nfs4_upgrade_open(rqstp, fp, current_fh,
5219 + stp, open);
5220 + if (status) {
5221 +- up_read(&stp->st_rwsem);
5222 ++ mutex_unlock(&stp->st_mutex);
5223 + goto out;
5224 + }
5225 + goto upgrade_out;
5226 + }
5227 +- down_read(&stp->st_rwsem);
5228 + status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5229 + if (status) {
5230 +- up_read(&stp->st_rwsem);
5231 ++ mutex_unlock(&stp->st_mutex);
5232 + release_open_stateid(stp);
5233 + goto out;
5234 + }
5235 +@@ -4372,7 +4382,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
5236 + }
5237 + upgrade_out:
5238 + nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5239 +- up_read(&stp->st_rwsem);
5240 ++ mutex_unlock(&stp->st_mutex);
5241 +
5242 + if (nfsd4_has_session(&resp->cstate)) {
5243 + if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5244 +@@ -4983,12 +4993,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
5245 + * revoked delegations are kept only for free_stateid.
5246 + */
5247 + return nfserr_bad_stateid;
5248 +- down_write(&stp->st_rwsem);
5249 ++ mutex_lock(&stp->st_mutex);
5250 + status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5251 + if (status == nfs_ok)
5252 + status = nfs4_check_fh(current_fh, &stp->st_stid);
5253 + if (status != nfs_ok)
5254 +- up_write(&stp->st_rwsem);
5255 ++ mutex_unlock(&stp->st_mutex);
5256 + return status;
5257 + }
5258 +
5259 +@@ -5036,7 +5046,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
5260 + return status;
5261 + oo = openowner(stp->st_stateowner);
5262 + if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5263 +- up_write(&stp->st_rwsem);
5264 ++ mutex_unlock(&stp->st_mutex);
5265 + nfs4_put_stid(&stp->st_stid);
5266 + return nfserr_bad_stateid;
5267 + }
5268 +@@ -5068,12 +5078,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5269 + oo = openowner(stp->st_stateowner);
5270 + status = nfserr_bad_stateid;
5271 + if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5272 +- up_write(&stp->st_rwsem);
5273 ++ mutex_unlock(&stp->st_mutex);
5274 + goto put_stateid;
5275 + }
5276 + oo->oo_flags |= NFS4_OO_CONFIRMED;
5277 + nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5278 +- up_write(&stp->st_rwsem);
5279 ++ mutex_unlock(&stp->st_mutex);
5280 + dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5281 + __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5282 +
5283 +@@ -5149,7 +5159,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
5284 + nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5285 + status = nfs_ok;
5286 + put_stateid:
5287 +- up_write(&stp->st_rwsem);
5288 ++ mutex_unlock(&stp->st_mutex);
5289 + nfs4_put_stid(&stp->st_stid);
5290 + out:
5291 + nfsd4_bump_seqid(cstate, status);
5292 +@@ -5202,7 +5212,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5293 + if (status)
5294 + goto out;
5295 + nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5296 +- up_write(&stp->st_rwsem);
5297 ++ mutex_unlock(&stp->st_mutex);
5298 +
5299 + nfsd4_close_open_stateid(stp);
5300 +
5301 +@@ -5428,7 +5438,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5302 + stp->st_access_bmap = 0;
5303 + stp->st_deny_bmap = open_stp->st_deny_bmap;
5304 + stp->st_openstp = open_stp;
5305 +- init_rwsem(&stp->st_rwsem);
5306 ++ mutex_init(&stp->st_mutex);
5307 + list_add(&stp->st_locks, &open_stp->st_locks);
5308 + list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5309 + spin_lock(&fp->fi_lock);
5310 +@@ -5597,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5311 + &open_stp, nn);
5312 + if (status)
5313 + goto out;
5314 +- up_write(&open_stp->st_rwsem);
5315 ++ mutex_unlock(&open_stp->st_mutex);
5316 + open_sop = openowner(open_stp->st_stateowner);
5317 + status = nfserr_bad_stateid;
5318 + if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5319 +@@ -5606,7 +5616,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5320 + status = lookup_or_create_lock_state(cstate, open_stp, lock,
5321 + &lock_stp, &new);
5322 + if (status == nfs_ok)
5323 +- down_write(&lock_stp->st_rwsem);
5324 ++ mutex_lock(&lock_stp->st_mutex);
5325 + } else {
5326 + status = nfs4_preprocess_seqid_op(cstate,
5327 + lock->lk_old_lock_seqid,
5328 +@@ -5710,7 +5720,7 @@ out:
5329 + seqid_mutating_err(ntohl(status)))
5330 + lock_sop->lo_owner.so_seqid++;
5331 +
5332 +- up_write(&lock_stp->st_rwsem);
5333 ++ mutex_unlock(&lock_stp->st_mutex);
5334 +
5335 + /*
5336 + * If this is a new, never-before-used stateid, and we are
5337 +@@ -5880,7 +5890,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5338 + fput:
5339 + fput(filp);
5340 + put_stateid:
5341 +- up_write(&stp->st_rwsem);
5342 ++ mutex_unlock(&stp->st_mutex);
5343 + nfs4_put_stid(&stp->st_stid);
5344 + out:
5345 + nfsd4_bump_seqid(cstate, status);
5346 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
5347 +index c050c53036a6..c89d7b55fb9a 100644
5348 +--- a/fs/nfsd/state.h
5349 ++++ b/fs/nfsd/state.h
5350 +@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
5351 + unsigned char st_access_bmap;
5352 + unsigned char st_deny_bmap;
5353 + struct nfs4_ol_stateid *st_openstp;
5354 +- struct rw_semaphore st_rwsem;
5355 ++ struct mutex st_mutex;
5356 + };
5357 +
5358 + static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
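
[Illustrative sketch, not from the patch: the conversion above replaces the per-stateid rw_semaphore with a mutex and makes init_open_stateid() hand the new stateid back already locked. A minimal sketch of that pattern, with hypothetical names:]

    #include <linux/mutex.h>

    struct demo_stateid {
            struct mutex st_mutex;
            /* remaining open/lock state elided */
    };

    /* Initialise and lock before the stateid is published under the
     * spinlocks, so every caller receives it locked and unlocks it only
     * after the open has been fully processed. */
    static void demo_init_stateid_locked(struct demo_stateid *stp)
    {
            mutex_init(&stp->st_mutex);
            mutex_lock(&stp->st_mutex);
    }
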
5359 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
5360 +index b3fc0a35bf62..fb35aa21b34b 100644
5361 +--- a/fs/overlayfs/dir.c
5362 ++++ b/fs/overlayfs/dir.c
5363 +@@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
5364 + struct dentry *upper;
5365 + struct dentry *opaquedir = NULL;
5366 + int err;
5367 ++ int flags = 0;
5368 +
5369 + if (WARN_ON(!workdir))
5370 + return -EROFS;
5371 +@@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
5372 + if (err)
5373 + goto out_dput;
5374 +
5375 +- whiteout = ovl_whiteout(workdir, dentry);
5376 +- err = PTR_ERR(whiteout);
5377 +- if (IS_ERR(whiteout))
5378 ++ upper = lookup_one_len(dentry->d_name.name, upperdir,
5379 ++ dentry->d_name.len);
5380 ++ err = PTR_ERR(upper);
5381 ++ if (IS_ERR(upper))
5382 + goto out_unlock;
5383 +
5384 +- upper = ovl_dentry_upper(dentry);
5385 +- if (!upper) {
5386 +- upper = lookup_one_len(dentry->d_name.name, upperdir,
5387 +- dentry->d_name.len);
5388 +- err = PTR_ERR(upper);
5389 +- if (IS_ERR(upper))
5390 +- goto kill_whiteout;
5391 +-
5392 +- err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
5393 +- dput(upper);
5394 +- if (err)
5395 +- goto kill_whiteout;
5396 +- } else {
5397 +- int flags = 0;
5398 ++ err = -ESTALE;
5399 ++ if ((opaquedir && upper != opaquedir) ||
5400 ++ (!opaquedir && ovl_dentry_upper(dentry) &&
5401 ++ upper != ovl_dentry_upper(dentry))) {
5402 ++ goto out_dput_upper;
5403 ++ }
5404 +
5405 +- if (opaquedir)
5406 +- upper = opaquedir;
5407 +- err = -ESTALE;
5408 +- if (upper->d_parent != upperdir)
5409 +- goto kill_whiteout;
5410 ++ whiteout = ovl_whiteout(workdir, dentry);
5411 ++ err = PTR_ERR(whiteout);
5412 ++ if (IS_ERR(whiteout))
5413 ++ goto out_dput_upper;
5414 +
5415 +- if (is_dir)
5416 +- flags |= RENAME_EXCHANGE;
5417 ++ if (d_is_dir(upper))
5418 ++ flags = RENAME_EXCHANGE;
5419 +
5420 +- err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
5421 +- if (err)
5422 +- goto kill_whiteout;
5423 ++ err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
5424 ++ if (err)
5425 ++ goto kill_whiteout;
5426 ++ if (flags)
5427 ++ ovl_cleanup(wdir, upper);
5428 +
5429 +- if (is_dir)
5430 +- ovl_cleanup(wdir, upper);
5431 +- }
5432 + ovl_dentry_version_inc(dentry->d_parent);
5433 + out_d_drop:
5434 + d_drop(dentry);
5435 + dput(whiteout);
5436 ++out_dput_upper:
5437 ++ dput(upper);
5438 + out_unlock:
5439 + unlock_rename(workdir, upperdir);
5440 + out_dput:
5441 +diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
5442 +index a4ff5d0d7db9..d46fa609e803 100644
5443 +--- a/fs/overlayfs/inode.c
5444 ++++ b/fs/overlayfs/inode.c
5445 +@@ -59,16 +59,40 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
5446 + if (err)
5447 + goto out;
5448 +
5449 ++ if (attr->ia_valid & ATTR_SIZE) {
5450 ++ struct inode *realinode = d_inode(ovl_dentry_real(dentry));
5451 ++
5452 ++ err = -ETXTBSY;
5453 ++ if (atomic_read(&realinode->i_writecount) < 0)
5454 ++ goto out_drop_write;
5455 ++ }
5456 ++
5457 + err = ovl_copy_up(dentry);
5458 + if (!err) {
5459 ++ struct inode *winode = NULL;
5460 ++
5461 + upperdentry = ovl_dentry_upper(dentry);
5462 +
5463 ++ if (attr->ia_valid & ATTR_SIZE) {
5464 ++ winode = d_inode(upperdentry);
5465 ++ err = get_write_access(winode);
5466 ++ if (err)
5467 ++ goto out_drop_write;
5468 ++ }
5469 ++
5470 ++ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
5471 ++ attr->ia_valid &= ~ATTR_MODE;
5472 ++
5473 + inode_lock(upperdentry->d_inode);
5474 + err = notify_change(upperdentry, attr, NULL);
5475 + if (!err)
5476 + ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
5477 + inode_unlock(upperdentry->d_inode);
5478 ++
5479 ++ if (winode)
5480 ++ put_write_access(winode);
5481 + }
5482 ++out_drop_write:
5483 + ovl_drop_write(dentry);
5484 + out:
5485 + return err;
5486 +@@ -121,16 +145,18 @@ int ovl_permission(struct inode *inode, int mask)
5487 +
5488 + err = vfs_getattr(&realpath, &stat);
5489 + if (err)
5490 +- return err;
5491 ++ goto out_dput;
5492 +
5493 ++ err = -ESTALE;
5494 + if ((stat.mode ^ inode->i_mode) & S_IFMT)
5495 +- return -ESTALE;
5496 ++ goto out_dput;
5497 +
5498 + inode->i_mode = stat.mode;
5499 + inode->i_uid = stat.uid;
5500 + inode->i_gid = stat.gid;
5501 +
5502 +- return generic_permission(inode, mask);
5503 ++ err = generic_permission(inode, mask);
5504 ++ goto out_dput;
5505 + }
5506 +
5507 + /* Careful in RCU walk mode */
5508 +@@ -400,12 +426,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
5509 + if (!inode)
5510 + return NULL;
5511 +
5512 +- mode &= S_IFMT;
5513 +-
5514 + inode->i_ino = get_next_ino();
5515 + inode->i_mode = mode;
5516 + inode->i_flags |= S_NOATIME | S_NOCMTIME;
5517 +
5518 ++ mode &= S_IFMT;
5519 + switch (mode) {
5520 + case S_IFDIR:
5521 + inode->i_private = oe;
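
[Illustrative sketch, not part of the patch: the ovl_setattr() hunk above guards size changes with the usual deny-write handshake. A condensed sketch of that pattern; the helper name is hypothetical, the VFS calls are real.]

    #include <linux/fs.h>

    static int demo_truncate_upper(struct dentry *upper, struct iattr *attr)
    {
            struct inode *inode = d_inode(upper);
            int err;

            /* A negative i_writecount means the file is mapped deny-write
             * (e.g. it is being executed); refuse to truncate it. */
            if (atomic_read(&inode->i_writecount) < 0)
                    return -ETXTBSY;

            err = get_write_access(inode);
            if (err)
                    return err;

            inode_lock(inode);
            err = notify_change(upper, attr, NULL);
            inode_unlock(inode);

            put_write_access(inode);
            return err;
    }
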
5522 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
5523 +index 6a7090f4a441..294ccc0c1fc7 100644
5524 +--- a/fs/overlayfs/overlayfs.h
5525 ++++ b/fs/overlayfs/overlayfs.h
5526 +@@ -185,6 +185,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
5527 + {
5528 + to->i_uid = from->i_uid;
5529 + to->i_gid = from->i_gid;
5530 ++ to->i_mode = from->i_mode;
5531 + }
5532 +
5533 + /* dir.c */
5534 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
5535 +index 791235e03d17..7952a50f0a72 100644
5536 +--- a/fs/overlayfs/super.c
5537 ++++ b/fs/overlayfs/super.c
5538 +@@ -1064,16 +1064,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
5539 + /*
5540 + * Upper should support d_type, else whiteouts are visible.
5541 + * Given workdir and upper are on same fs, we can do
5542 +- * iterate_dir() on workdir.
5543 ++ * iterate_dir() on workdir. This check requires successful
5544 ++ * creation of workdir in previous step.
5545 + */
5546 +- err = ovl_check_d_type_supported(&workpath);
5547 +- if (err < 0)
5548 +- goto out_put_workdir;
5549 ++ if (ufs->workdir) {
5550 ++ err = ovl_check_d_type_supported(&workpath);
5551 ++ if (err < 0)
5552 ++ goto out_put_workdir;
5553 +
5554 +- if (!err) {
5555 +- pr_err("overlayfs: upper fs needs to support d_type.\n");
5556 +- err = -EINVAL;
5557 +- goto out_put_workdir;
5558 ++ /*
5559 ++ * We allowed this configuration and don't want to
5560 ++ * break users over kernel upgrade. So warn instead
5561 ++ * of erroring out.
5562 ++ */
5563 ++ if (!err)
5564 ++ pr_warn("overlayfs: upper fs needs to support d_type.\n");
5565 + }
5566 + }
5567 +
5568 +diff --git a/fs/posix_acl.c b/fs/posix_acl.c
5569 +index 711dd5170376..e11ea5fb1bad 100644
5570 +--- a/fs/posix_acl.c
5571 ++++ b/fs/posix_acl.c
5572 +@@ -786,39 +786,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
5573 + return error;
5574 + }
5575 +
5576 +-static int
5577 +-posix_acl_xattr_set(const struct xattr_handler *handler,
5578 +- struct dentry *dentry, const char *name,
5579 +- const void *value, size_t size, int flags)
5580 ++int
5581 ++set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
5582 + {
5583 +- struct inode *inode = d_backing_inode(dentry);
5584 +- struct posix_acl *acl = NULL;
5585 +- int ret;
5586 +-
5587 + if (!IS_POSIXACL(inode))
5588 + return -EOPNOTSUPP;
5589 + if (!inode->i_op->set_acl)
5590 + return -EOPNOTSUPP;
5591 +
5592 +- if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
5593 +- return value ? -EACCES : 0;
5594 ++ if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
5595 ++ return acl ? -EACCES : 0;
5596 + if (!inode_owner_or_capable(inode))
5597 + return -EPERM;
5598 +
5599 ++ if (acl) {
5600 ++ int ret = posix_acl_valid(acl);
5601 ++ if (ret)
5602 ++ return ret;
5603 ++ }
5604 ++ return inode->i_op->set_acl(inode, acl, type);
5605 ++}
5606 ++EXPORT_SYMBOL(set_posix_acl);
5607 ++
5608 ++static int
5609 ++posix_acl_xattr_set(const struct xattr_handler *handler,
5610 ++ struct dentry *dentry, const char *name,
5611 ++ const void *value, size_t size, int flags)
5612 ++{
5613 ++ struct inode *inode = d_backing_inode(dentry);
5614 ++ struct posix_acl *acl = NULL;
5615 ++ int ret;
5616 ++
5617 + if (value) {
5618 + acl = posix_acl_from_xattr(&init_user_ns, value, size);
5619 + if (IS_ERR(acl))
5620 + return PTR_ERR(acl);
5621 +-
5622 +- if (acl) {
5623 +- ret = posix_acl_valid(acl);
5624 +- if (ret)
5625 +- goto out;
5626 +- }
5627 + }
5628 +-
5629 +- ret = inode->i_op->set_acl(inode, acl, handler->flags);
5630 +-out:
5631 ++ ret = set_posix_acl(inode, handler->flags, acl);
5632 + posix_acl_release(acl);
5633 + return ret;
5634 + }
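
[Illustrative sketch, not from the patch: the nfsd hunks above all switch to the newly exported helper. A minimal caller (hypothetical function name) that relies on set_posix_acl() to perform the IS_POSIXACL, ->set_acl presence, ownership and ACL-validity checks itself:]

    #include <linux/posix_acl.h>

    static int demo_apply_acls(struct inode *inode,
                               struct posix_acl *access_acl,
                               struct posix_acl *default_acl)
    {
            int err;

            err = set_posix_acl(inode, ACL_TYPE_ACCESS, access_acl);
            if (err)
                    return err;
            /* returns -EACCES if a default ACL is given for a non-directory */
            return set_posix_acl(inode, ACL_TYPE_DEFAULT, default_acl);
    }
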
5635 +diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
5636 +index 446753d8ac34..5b5ec8d0f324 100644
5637 +--- a/fs/ubifs/file.c
5638 ++++ b/fs/ubifs/file.c
5639 +@@ -52,6 +52,7 @@
5640 + #include "ubifs.h"
5641 + #include <linux/mount.h>
5642 + #include <linux/slab.h>
5643 ++#include <linux/migrate.h>
5644 +
5645 + static int read_block(struct inode *inode, void *addr, unsigned int block,
5646 + struct ubifs_data_node *dn)
5647 +@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
5648 + return ret;
5649 + }
5650 +
5651 ++#ifdef CONFIG_MIGRATION
5652 ++static int ubifs_migrate_page(struct address_space *mapping,
5653 ++ struct page *newpage, struct page *page, enum migrate_mode mode)
5654 ++{
5655 ++ int rc;
5656 ++
5657 ++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
5658 ++ if (rc != MIGRATEPAGE_SUCCESS)
5659 ++ return rc;
5660 ++
5661 ++ if (PagePrivate(page)) {
5662 ++ ClearPagePrivate(page);
5663 ++ SetPagePrivate(newpage);
5664 ++ }
5665 ++
5666 ++ migrate_page_copy(newpage, page);
5667 ++ return MIGRATEPAGE_SUCCESS;
5668 ++}
5669 ++#endif
5670 ++
5671 + static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
5672 + {
5673 + /*
5674 +@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
5675 + .write_end = ubifs_write_end,
5676 + .invalidatepage = ubifs_invalidatepage,
5677 + .set_page_dirty = ubifs_set_page_dirty,
5678 ++#ifdef CONFIG_MIGRATION
5679 ++ .migratepage = ubifs_migrate_page,
5680 ++#endif
5681 + .releasepage = ubifs_releasepage,
5682 + };
5683 +
5684 +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
5685 +index 6bd05700d8c9..05f05f17a7c2 100644
5686 +--- a/include/asm-generic/qspinlock.h
5687 ++++ b/include/asm-generic/qspinlock.h
5688 +@@ -22,37 +22,33 @@
5689 + #include <asm-generic/qspinlock_types.h>
5690 +
5691 + /**
5692 ++ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
5693 ++ * @lock : Pointer to queued spinlock structure
5694 ++ *
5695 ++ * There is a very slight possibility of live-lock if the lockers keep coming
5696 ++ * and the waiter is just unfortunate enough to not see any unlock state.
5697 ++ */
5698 ++#ifndef queued_spin_unlock_wait
5699 ++extern void queued_spin_unlock_wait(struct qspinlock *lock);
5700 ++#endif
5701 ++
5702 ++/**
5703 + * queued_spin_is_locked - is the spinlock locked?
5704 + * @lock: Pointer to queued spinlock structure
5705 + * Return: 1 if it is locked, 0 otherwise
5706 + */
5707 ++#ifndef queued_spin_is_locked
5708 + static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
5709 + {
5710 + /*
5711 +- * queued_spin_lock_slowpath() can ACQUIRE the lock before
5712 +- * issuing the unordered store that sets _Q_LOCKED_VAL.
5713 +- *
5714 +- * See both smp_cond_acquire() sites for more detail.
5715 +- *
5716 +- * This however means that in code like:
5717 +- *
5718 +- * spin_lock(A) spin_lock(B)
5719 +- * spin_unlock_wait(B) spin_is_locked(A)
5720 +- * do_something() do_something()
5721 +- *
5722 +- * Both CPUs can end up running do_something() because the store
5723 +- * setting _Q_LOCKED_VAL will pass through the loads in
5724 +- * spin_unlock_wait() and/or spin_is_locked().
5725 ++ * See queued_spin_unlock_wait().
5726 + *
5727 +- * Avoid this by issuing a full memory barrier between the spin_lock()
5728 +- * and the loads in spin_unlock_wait() and spin_is_locked().
5729 +- *
5730 +- * Note that regular mutual exclusion doesn't care about this
5731 +- * delayed store.
5732 ++ * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
5733 ++ * isn't immediately observable.
5734 + */
5735 +- smp_mb();
5736 +- return atomic_read(&lock->val) & _Q_LOCKED_MASK;
5737 ++ return atomic_read(&lock->val);
5738 + }
5739 ++#endif
5740 +
5741 + /**
5742 + * queued_spin_value_unlocked - is the spinlock structure unlocked?
5743 +@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
5744 + }
5745 + #endif
5746 +
5747 +-/**
5748 +- * queued_spin_unlock_wait - wait until current lock holder releases the lock
5749 +- * @lock : Pointer to queued spinlock structure
5750 +- *
5751 +- * There is a very slight possibility of live-lock if the lockers keep coming
5752 +- * and the waiter is just unfortunate enough to not see any unlock state.
5753 +- */
5754 +-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
5755 +-{
5756 +- /* See queued_spin_is_locked() */
5757 +- smp_mb();
5758 +- while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
5759 +- cpu_relax();
5760 +-}
5761 +-
5762 + #ifndef virt_spin_lock
5763 + static __always_inline bool virt_spin_lock(struct qspinlock *lock)
5764 + {
5765 +diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
5766 +index 055a08ddac02..a74c49d7c0fc 100644
5767 +--- a/include/drm/ttm/ttm_bo_api.h
5768 ++++ b/include/drm/ttm/ttm_bo_api.h
5769 +@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
5770 + */
5771 + extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
5772 + bool interruptible, bool no_wait);
5773 ++
5774 ++/**
5775 ++ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
5776 ++ *
5777 ++ * @placement: Return immediately if buffer is busy.
5778 ++ * @mem: The struct ttm_mem_reg indicating the region where the bo resides
5779 ++ * @new_flags: Describes compatible placement found
5780 ++ *
5781 ++ * Returns true if the placement is compatible
5782 ++ */
5783 ++extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
5784 ++ struct ttm_mem_reg *mem,
5785 ++ uint32_t *new_flags);
5786 ++
5787 + /**
5788 + * ttm_bo_validate
5789 + *
5790 +diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
5791 +index 786ad32631a6..07b83d32f66c 100644
5792 +--- a/include/linux/cpuidle.h
5793 ++++ b/include/linux/cpuidle.h
5794 +@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
5795 + extern int cpuidle_play_dead(void);
5796 +
5797 + extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
5798 ++static inline struct cpuidle_device *cpuidle_get_device(void)
5799 ++{return __this_cpu_read(cpuidle_devices); }
5800 + #else
5801 + static inline void disable_cpuidle(void) { }
5802 + static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
5803 +@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
5804 + static inline int cpuidle_play_dead(void) {return -ENODEV; }
5805 + static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
5806 + struct cpuidle_device *dev) {return NULL; }
5807 ++static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
5808 + #endif
5809 +
5810 + #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
5811 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
5812 +index 7e9422cb5989..ad5d582f9b14 100644
5813 +--- a/include/linux/dcache.h
5814 ++++ b/include/linux/dcache.h
5815 +@@ -576,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
5816 + return inode;
5817 + }
5818 +
5819 ++/**
5820 ++ * d_real_inode - Return the real inode
5821 ++ * @dentry: The dentry to query
5822 ++ *
5823 ++ * If dentry is on an union/overlay, then return the underlying, real inode.
5824 ++ * Otherwise return d_inode().
5825 ++ */
5826 ++static inline struct inode *d_real_inode(struct dentry *dentry)
5827 ++{
5828 ++ return d_backing_inode(d_real(dentry));
5829 ++}
5830 ++
5831 +
5832 + #endif /* __LINUX_DCACHE_H */
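
[Illustrative sketch, not from the patch: usage of the helper added above, with a hypothetical caller. On an overlay the real, underlying inode is returned; elsewhere it is equivalent to d_backing_inode().]

    #include <linux/dcache.h>

    static struct inode *demo_underlying_inode(struct dentry *dentry)
    {
            /* d_real() resolves the overlay; plain dentries return themselves */
            return d_real_inode(dentry);
    }
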
5833 +diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
5834 +index 0536524bb9eb..68904469fba1 100644
5835 +--- a/include/linux/jump_label.h
5836 ++++ b/include/linux/jump_label.h
5837 +@@ -117,13 +117,18 @@ struct module;
5838 +
5839 + #include <linux/atomic.h>
5840 +
5841 ++#ifdef HAVE_JUMP_LABEL
5842 ++
5843 + static inline int static_key_count(struct static_key *key)
5844 + {
5845 +- return atomic_read(&key->enabled);
5846 ++ /*
5847 ++ * -1 means the first static_key_slow_inc() is in progress.
5848 ++ * static_key_enabled() must return true, so return 1 here.
5849 ++ */
5850 ++ int n = atomic_read(&key->enabled);
5851 ++ return n >= 0 ? n : 1;
5852 + }
5853 +
5854 +-#ifdef HAVE_JUMP_LABEL
5855 +-
5856 + #define JUMP_TYPE_FALSE 0UL
5857 + #define JUMP_TYPE_TRUE 1UL
5858 + #define JUMP_TYPE_MASK 1UL
5859 +@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
5860 +
5861 + #else /* !HAVE_JUMP_LABEL */
5862 +
5863 ++static inline int static_key_count(struct static_key *key)
5864 ++{
5865 ++ return atomic_read(&key->enabled);
5866 ++}
5867 ++
5868 + static __always_inline void jump_label_init(void)
5869 + {
5870 + static_key_initialized = true;
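
[Illustrative sketch, not part of the patch: the point of the clamp above is that a key whose first static_key_slow_inc() is still in flight (enabled == -1) must already read as enabled. A tiny reader that depends on that, with a hypothetical name:]

    #include <linux/jump_label.h>

    /* Sketch only: never observes the transient -1 state as "disabled",
     * because static_key_count() maps it to 1 under HAVE_JUMP_LABEL. */
    static bool demo_feature_active(struct static_key *key)
    {
            return static_key_count(key) > 0;
    }
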
5871 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5872 +index 15d0df943466..794b924e9669 100644
5873 +--- a/include/linux/skbuff.h
5874 ++++ b/include/linux/skbuff.h
5875 +@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
5876 + }
5877 +
5878 + void __skb_get_hash(struct sk_buff *skb);
5879 ++u32 __skb_get_hash_symmetric(struct sk_buff *skb);
5880 + u32 skb_get_poff(const struct sk_buff *skb);
5881 + u32 __skb_get_poff(const struct sk_buff *skb, void *data,
5882 + const struct flow_keys *keys, int hlen);
5883 +@@ -2860,6 +2861,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
5884 + }
5885 +
5886 + /**
5887 ++ * skb_push_rcsum - push skb and update receive checksum
5888 ++ * @skb: buffer to update
5889 ++ * @len: length of data pulled
5890 ++ *
5891 ++ * This function performs an skb_push on the packet and updates
5892 ++ * the CHECKSUM_COMPLETE checksum. It should be used on
5893 ++ * receive path processing instead of skb_push unless you know
5894 ++ * that the checksum difference is zero (e.g., a valid IP header)
5895 ++ * or you are setting ip_summed to CHECKSUM_NONE.
5896 ++ */
5897 ++static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
5898 ++ unsigned int len)
5899 ++{
5900 ++ skb_push(skb, len);
5901 ++ skb_postpush_rcsum(skb, skb->data, len);
5902 ++ return skb->data;
5903 ++}
5904 ++
5905 ++/**
5906 + * pskb_trim_rcsum - trim received skb and update checksum
5907 + * @skb: buffer to trim
5908 + * @len: new length
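
[Illustrative sketch, not from the patch: usage of skb_push_rcsum() with a hypothetical caller. On a receive path it replaces a bare skb_push() followed by skb_postpush_rcsum(), keeping a CHECKSUM_COMPLETE value consistent while a header is pushed back.]

    #include <linux/skbuff.h>

    static void demo_restore_header(struct sk_buff *skb, unsigned int hdr_len)
    {
            /* equivalent to: skb_push(skb, hdr_len);
             *                skb_postpush_rcsum(skb, skb->data, hdr_len); */
            skb_push_rcsum(skb, hdr_len);
    }
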
5909 +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
5910 +index 9a7ddbaf116e..14d70f59f0c2 100644
5911 +--- a/include/linux/sunrpc/clnt.h
5912 ++++ b/include/linux/sunrpc/clnt.h
5913 +@@ -137,8 +137,6 @@ struct rpc_create_args {
5914 + #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
5915 +
5916 + struct rpc_clnt *rpc_create(struct rpc_create_args *args);
5917 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5918 +- struct rpc_xprt *xprt);
5919 + struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
5920 + const struct rpc_program *, u32);
5921 + struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
5922 +diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
5923 +index b7dabc4baafd..79ba50856707 100644
5924 +--- a/include/linux/sunrpc/svc_xprt.h
5925 ++++ b/include/linux/sunrpc/svc_xprt.h
5926 +@@ -84,6 +84,7 @@ struct svc_xprt {
5927 +
5928 + struct net *xpt_net;
5929 + struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
5930 ++ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
5931 + };
5932 +
5933 + static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
5934 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
5935 +index fb0d212e0d3a..9f51e1df3023 100644
5936 +--- a/include/linux/sunrpc/xprt.h
5937 ++++ b/include/linux/sunrpc/xprt.h
5938 +@@ -296,6 +296,7 @@ struct xprt_create {
5939 + size_t addrlen;
5940 + const char *servername;
5941 + struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
5942 ++ struct rpc_xprt_switch *bc_xps;
5943 + unsigned int flags;
5944 + };
5945 +
5946 +diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
5947 +index 966889a20ea3..e479033bd782 100644
5948 +--- a/include/linux/usb/ehci_def.h
5949 ++++ b/include/linux/usb/ehci_def.h
5950 +@@ -180,11 +180,11 @@ struct ehci_regs {
5951 + * PORTSCx
5952 + */
5953 + /* HOSTPC: offset 0x84 */
5954 +- u32 hostpc[1]; /* HOSTPC extension */
5955 ++ u32 hostpc[0]; /* HOSTPC extension */
5956 + #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
5957 + #define HOSTPC_PSPD (3<<25) /* Port speed detection */
5958 +
5959 +- u32 reserved5[16];
5960 ++ u32 reserved5[17];
5961 +
5962 + /* USBMODE_EX: offset 0xc8 */
5963 + u32 usbmode_ex; /* USB Device mode extension */
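
The ehci_def.h change trades hostpc[1]/reserved5[16] for hostpc[0]/reserved5[17]: hostpc becomes a per-port array that is indexed at runtime, while the overall register block keeps exactly the same size. A quick size check of that invariant (zero-length arrays are a GNU C extension, as used throughout the kernel; the struct names here are illustrative, not the real ehci_regs):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct layout_old {            /* hostpc[1] + reserved5[16] */
	uint32_t hostpc[1];
	uint32_t reserved5[16];
};

struct layout_new {            /* hostpc[0] + reserved5[17] */
	uint32_t hostpc[0];    /* zero-length, indexed per port */
	uint32_t reserved5[17];
};

int main(void)
{
	/* both layouts cover the same 68 bytes of register space */
	assert(sizeof(struct layout_old) == sizeof(struct layout_new));
	printf("old=%zu new=%zu\n",
	       sizeof(struct layout_old), sizeof(struct layout_new));
	return 0;
}
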
5964 +diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
5965 +index fb2cef4e9747..b8334a637095 100644
5966 +--- a/include/rdma/ib_verbs.h
5967 ++++ b/include/rdma/ib_verbs.h
5968 +@@ -217,7 +217,7 @@ enum ib_device_cap_flags {
5969 + IB_DEVICE_CROSS_CHANNEL = (1 << 27),
5970 + IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
5971 + IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
5972 +- IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
5973 ++ IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
5974 + IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
5975 + IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
5976 + };
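
The IB_DEVICE_ON_DEMAND_PAGING fix matters because the enum already holds 64-bit entries such as (1ULL << 32): a plain 1 << 31 is a signed 32-bit expression, and its (formally undefined) negative value sign-extends when widened to 64 bits, polluting the high word. A small demonstration of the difference, with typical output on common compilers noted in the comments:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t wrong = (uint64_t)(1 << 31);   /* int result, sign-extended */
	uint64_t right = 1ULL << 31;            /* unsigned 64-bit throughout */

	printf("1 << 31    -> 0x%016llx\n", (unsigned long long)wrong);
	printf("1ULL << 31 -> 0x%016llx\n", (unsigned long long)right);
	/* typical output:
	 * 1 << 31    -> 0xffffffff80000000
	 * 1ULL << 31 -> 0x0000000080000000
	 */
	return 0;
}
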
5977 +diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
5978 +index a8696551abb1..6ee9d97004d5 100644
5979 +--- a/include/rdma/rdma_vt.h
5980 ++++ b/include/rdma/rdma_vt.h
5981 +@@ -203,7 +203,9 @@ struct rvt_driver_provided {
5982 +
5983 + /*
5984 + * Allocate a private queue pair data structure for driver specific
5985 +- * information which is opaque to rdmavt.
5986 ++ * information which is opaque to rdmavt. Errors are returned via
5987 ++ * ERR_PTR(err). The driver is free to return NULL or a valid
5988 ++ * pointer.
5989 + */
5990 + void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
5991 + gfp_t gfp);
5992 +diff --git a/kernel/futex.c b/kernel/futex.c
5993 +index c20f06f38ef3..6555d5459e98 100644
5994 +--- a/kernel/futex.c
5995 ++++ b/kernel/futex.c
5996 +@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
5997 + {
5998 + unsigned long address = (unsigned long)uaddr;
5999 + struct mm_struct *mm = current->mm;
6000 +- struct page *page;
6001 ++ struct page *page, *tail;
6002 + struct address_space *mapping;
6003 + int err, ro = 0;
6004 +
6005 +@@ -530,7 +530,15 @@ again:
6006 + * considered here and page lock forces unnecessarily serialization
6007 + * From this point on, mapping will be re-verified if necessary and
6008 + * page lock will be acquired only if it is unavoidable
6009 +- */
6010 ++ *
6011 ++ * Mapping checks require the head page for any compound page so the
6012 ++ * head page and mapping is looked up now. For anonymous pages, it
6013 ++ * does not matter if the page splits in the future as the key is
6014 ++ * based on the address. For filesystem-backed pages, the tail is
6015 ++ * required as the index of the page determines the key. For
6016 ++ * base pages, there is no tail page and tail == page.
6017 ++ */
6018 ++ tail = page;
6019 + page = compound_head(page);
6020 + mapping = READ_ONCE(page->mapping);
6021 +
6022 +@@ -654,7 +662,7 @@ again:
6023 +
6024 + key->both.offset |= FUT_OFF_INODE; /* inode-based key */
6025 + key->shared.inode = inode;
6026 +- key->shared.pgoff = basepage_index(page);
6027 ++ key->shared.pgoff = basepage_index(tail);
6028 + rcu_read_unlock();
6029 + }
6030 +
6031 +diff --git a/kernel/jump_label.c b/kernel/jump_label.c
6032 +index 05254eeb4b4e..4b353e0be121 100644
6033 +--- a/kernel/jump_label.c
6034 ++++ b/kernel/jump_label.c
6035 +@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
6036 +
6037 + void static_key_slow_inc(struct static_key *key)
6038 + {
6039 ++ int v, v1;
6040 ++
6041 + STATIC_KEY_CHECK_USE();
6042 +- if (atomic_inc_not_zero(&key->enabled))
6043 +- return;
6044 ++
6045 ++ /*
6046 ++ * Careful if we get concurrent static_key_slow_inc() calls;
6047 ++ * later calls must wait for the first one to _finish_ the
6048 ++ * jump_label_update() process. At the same time, however,
6049 ++ * the jump_label_update() call below wants to see
6050 ++ * static_key_enabled(&key) for jumps to be updated properly.
6051 ++ *
6052 ++ * So give a special meaning to negative key->enabled: it sends
6053 ++ * static_key_slow_inc() down the slow path, and it is non-zero
6054 ++ * so it counts as "enabled" in jump_label_update(). Note that
6055 ++ * atomic_inc_unless_negative() checks >= 0, so roll our own.
6056 ++ */
6057 ++ for (v = atomic_read(&key->enabled); v > 0; v = v1) {
6058 ++ v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
6059 ++ if (likely(v1 == v))
6060 ++ return;
6061 ++ }
6062 +
6063 + jump_label_lock();
6064 +- if (atomic_inc_return(&key->enabled) == 1)
6065 ++ if (atomic_read(&key->enabled) == 0) {
6066 ++ atomic_set(&key->enabled, -1);
6067 + jump_label_update(key);
6068 ++ atomic_set(&key->enabled, 1);
6069 ++ } else {
6070 ++ atomic_inc(&key->enabled);
6071 ++ }
6072 + jump_label_unlock();
6073 + }
6074 + EXPORT_SYMBOL_GPL(static_key_slow_inc);
6075 +@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
6076 + static void __static_key_slow_dec(struct static_key *key,
6077 + unsigned long rate_limit, struct delayed_work *work)
6078 + {
6079 ++ /*
6080 ++ * The negative count check is valid even when a negative
6081 ++ * key->enabled is in use by static_key_slow_inc(); a
6082 ++ * __static_key_slow_dec() before the first static_key_slow_inc()
6083 ++ * returns is unbalanced, because all other static_key_slow_inc()
6084 ++ * instances block while the update is in progress.
6085 ++ */
6086 + if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
6087 + WARN(atomic_read(&key->enabled) < 0,
6088 + "jump label: negative count!\n");
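
The reworked static_key_slow_inc() gives a temporary meaning to a negative ->enabled: it marks "update in progress", so later callers fall off the fast path (which only increments strictly positive counts) and serialize behind jump_label_lock() until the first enabler has finished patching and stores 1. A user-space sketch of that protocol using C11 atomics; do_update() stands in for jump_label_update(), and this illustrates the ordering idea rather than the kernel code:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled = 0;
static pthread_mutex_t slow_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_update(void) { /* stands in for jump_label_update() */ }

static void key_slow_inc(void)
{
	int v = atomic_load(&enabled);

	/* fast path: only while the key is already fully enabled (> 0) */
	while (v > 0) {
		if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
			return;
	}

	pthread_mutex_lock(&slow_lock);
	if (atomic_load(&enabled) == 0) {
		atomic_store(&enabled, -1);   /* negative: update in progress */
		do_update();
		atomic_store(&enabled, 1);    /* now really enabled */
	} else {
		atomic_fetch_add(&enabled, 1);
	}
	pthread_mutex_unlock(&slow_lock);
}

int main(void)
{
	key_slow_inc();
	key_slow_inc();
	printf("enabled = %d\n", atomic_load(&enabled));   /* 2 */
	return 0;
}
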
6089 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
6090 +index e364b424b019..79d2d765a75f 100644
6091 +--- a/kernel/locking/mutex.c
6092 ++++ b/kernel/locking/mutex.c
6093 +@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
6094 + if (!hold_ctx)
6095 + return 0;
6096 +
6097 +- if (unlikely(ctx == hold_ctx))
6098 +- return -EALREADY;
6099 +-
6100 + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
6101 + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
6102 + #ifdef CONFIG_DEBUG_MUTEXES
6103 +@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
6104 + unsigned long flags;
6105 + int ret;
6106 +
6107 ++ if (use_ww_ctx) {
6108 ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
6109 ++ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
6110 ++ return -EALREADY;
6111 ++ }
6112 ++
6113 + preempt_disable();
6114 + mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
6115 +
6116 +diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
6117 +index ce2f75e32ae1..5fc8c311b8fe 100644
6118 +--- a/kernel/locking/qspinlock.c
6119 ++++ b/kernel/locking/qspinlock.c
6120 +@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
6121 + #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
6122 + #endif
6123 +
6124 ++/*
6125 ++ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
6126 ++ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
6127 ++ *
6128 ++ * This means that the store can be delayed, but no later than the
6129 ++ * store-release from the unlock. This means that simply observing
6130 ++ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
6131 ++ *
6132 ++ * There are two paths that can issue the unordered store:
6133 ++ *
6134 ++ * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
6135 ++ *
6136 ++ * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
6137 ++ * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
6138 ++ *
6139 ++ * However, in both cases we have other !0 state we've set before to queue
6140 ++ * ourselves:
6141 ++ *
6142 ++ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
6143 ++ * load is constrained by that ACQUIRE to not pass before that, and thus must
6144 ++ * observe the store.
6145 ++ *
6146 ++ * For (2) we have a more interesting scenario. We enqueue ourselves using
6147 ++ * xchg_tail(), which ends up being a RELEASE. This in itself is not
6148 ++ * sufficient, however that is followed by an smp_cond_acquire() on the same
6149 ++ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
6150 ++ * guarantees we must observe that store.
6151 ++ *
6152 ++ * Therefore both cases have other !0 state that is observable before the
6153 ++ * unordered locked byte store comes through. This means we can use that to
6154 ++ * wait for the lock store, and then wait for an unlock.
6155 ++ */
6156 ++#ifndef queued_spin_unlock_wait
6157 ++void queued_spin_unlock_wait(struct qspinlock *lock)
6158 ++{
6159 ++ u32 val;
6160 ++
6161 ++ for (;;) {
6162 ++ val = atomic_read(&lock->val);
6163 ++
6164 ++ if (!val) /* not locked, we're done */
6165 ++ goto done;
6166 ++
6167 ++ if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
6168 ++ break;
6169 ++
6170 ++ /* not locked, but pending, wait until we observe the lock */
6171 ++ cpu_relax();
6172 ++ }
6173 ++
6174 ++ /* any unlock is good */
6175 ++ while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
6176 ++ cpu_relax();
6177 ++
6178 ++done:
6179 ++ smp_rmb(); /* CTRL + RMB -> ACQUIRE */
6180 ++}
6181 ++EXPORT_SYMBOL(queued_spin_unlock_wait);
6182 ++#endif
6183 ++
6184 + #endif /* _GEN_PV_LOCK_SLOWPATH */
6185 +
6186 + /**
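
queued_spin_unlock_wait() above works in two phases: first spin until the lock word is either zero or shows the locked byte (so an owner whose locked-byte store is still in flight is not missed), then spin until the locked byte clears. A compact user-space rendering of the same loop structure over an atomic word whose bit 0 plays the role of _Q_LOCKED_MASK (illustrative only; the kernel version additionally relies on the barriers discussed in the comment above):

#include <stdatomic.h>

#define LOCKED_MASK 0x1u   /* stands in for _Q_LOCKED_MASK */

static void unlock_wait_sketch(atomic_uint *lock)
{
	unsigned int val;

	for (;;) {
		val = atomic_load(lock);
		if (!val)                 /* not locked, nothing to wait for */
			return;
		if (val & LOCKED_MASK)    /* locked: go wait for the unlock */
			break;
		/* non-zero but not locked yet (pending/tail set):
		 * keep spinning until the delayed locked-byte store
		 * becomes visible */
	}

	while (atomic_load(lock) & LOCKED_MASK)
		;                         /* any unlock is good */
}

int main(void)
{
	atomic_uint lock = 0;             /* currently unlocked */

	unlock_wait_sketch(&lock);        /* returns immediately */
	return 0;
}
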
6187 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6188 +index e7dd0ec169be..eeaf920f46b9 100644
6189 +--- a/kernel/sched/fair.c
6190 ++++ b/kernel/sched/fair.c
6191 +@@ -2821,6 +2821,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
6192 +
6193 + static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
6194 +
6195 ++/*
6196 ++ * Unsigned subtract and clamp on underflow.
6197 ++ *
6198 ++ * Explicitly do a load-store to ensure the intermediate value never hits
6199 ++ * memory. This allows lockless observations without ever seeing the negative
6200 ++ * values.
6201 ++ */
6202 ++#define sub_positive(_ptr, _val) do { \
6203 ++ typeof(_ptr) ptr = (_ptr); \
6204 ++ typeof(*ptr) val = (_val); \
6205 ++ typeof(*ptr) res, var = READ_ONCE(*ptr); \
6206 ++ res = var - val; \
6207 ++ if (res > var) \
6208 ++ res = 0; \
6209 ++ WRITE_ONCE(*ptr, res); \
6210 ++} while (0)
6211 ++
6212 + /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
6213 + static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
6214 + {
6215 +@@ -2829,15 +2846,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
6216 +
6217 + if (atomic_long_read(&cfs_rq->removed_load_avg)) {
6218 + s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
6219 +- sa->load_avg = max_t(long, sa->load_avg - r, 0);
6220 +- sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
6221 ++ sub_positive(&sa->load_avg, r);
6222 ++ sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
6223 + removed = 1;
6224 + }
6225 +
6226 + if (atomic_long_read(&cfs_rq->removed_util_avg)) {
6227 + long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
6228 +- sa->util_avg = max_t(long, sa->util_avg - r, 0);
6229 +- sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
6230 ++ sub_positive(&sa->util_avg, r);
6231 ++ sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
6232 + }
6233 +
6234 + decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
6235 +@@ -2927,10 +2944,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
6236 + &se->avg, se->on_rq * scale_load_down(se->load.weight),
6237 + cfs_rq->curr == se, NULL);
6238 +
6239 +- cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
6240 +- cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
6241 +- cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
6242 +- cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
6243 ++ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
6244 ++ sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
6245 ++ sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
6246 ++ sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
6247 + }
6248 +
6249 + /* Add the load generated by se into cfs_rq's load average */
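
sub_positive() is an unsigned subtraction that clamps to zero on underflow and funnels the result through a single store, so lockless readers of the load-tracking fields never observe a wrapped-around value. A standalone version of the clamp; READ_ONCE()/WRITE_ONCE() are reduced to plain accesses here, which drops the lockless-reader guarantee but keeps the arithmetic, and typeof is the same GNU C extension the kernel relies on:

#include <stdio.h>

#define sub_positive(_ptr, _val) do {				\
	typeof(_ptr) ptr = (_ptr);				\
	typeof(*ptr) val = (_val);				\
	typeof(*ptr) res, var = *ptr;				\
	res = var - val;					\
	if (res > var)	/* detect unsigned underflow */		\
		res = 0;					\
	*ptr = res;						\
} while (0)

int main(void)
{
	unsigned long avg = 3;

	sub_positive(&avg, 5);		/* plain subtraction would wrap */
	printf("clamped: %lu\n", avg);	/* 0 */

	avg = 10;
	sub_positive(&avg, 4);
	printf("normal:  %lu\n", avg);	/* 6 */
	return 0;
}
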
6250 +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
6251 +index bd12c6c714ec..c5aeedf4e93a 100644
6252 +--- a/kernel/sched/idle.c
6253 ++++ b/kernel/sched/idle.c
6254 +@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
6255 + */
6256 + static void cpuidle_idle_call(void)
6257 + {
6258 +- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
6259 ++ struct cpuidle_device *dev = cpuidle_get_device();
6260 + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
6261 + int next_state, entered_state;
6262 +
6263 +diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
6264 +index f96f0383f6c6..ad1d6164e946 100644
6265 +--- a/kernel/trace/trace_printk.c
6266 ++++ b/kernel/trace/trace_printk.c
6267 +@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
6268 + static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
6269 + {
6270 + struct trace_bprintk_fmt *pos;
6271 ++
6272 ++ if (!fmt)
6273 ++ return ERR_PTR(-EINVAL);
6274 ++
6275 + list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
6276 + if (!strcmp(pos->fmt, fmt))
6277 + return pos;
6278 +@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
6279 + for (iter = start; iter < end; iter++) {
6280 + struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
6281 + if (tb_fmt) {
6282 +- *iter = tb_fmt->fmt;
6283 ++ if (!IS_ERR(tb_fmt))
6284 ++ *iter = tb_fmt->fmt;
6285 + continue;
6286 + }
6287 +
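
The trace_printk fix leans on the kernel's ERR_PTR convention: a small negative errno encoded in the pointer value, letting a caller distinguish "not found" (NULL), "error" (IS_ERR) and "valid entry". A self-contained rendering of that three-way return; the MAX_ERRNO encoding mirrors include/linux/err.h, and lookup_fmt() is a made-up stand-in for lookup_format():

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static const char *known_fmt = "hello %s\n";

static const char *lookup_fmt(const char *fmt)
{
	if (!fmt)
		return ERR_PTR(-EINVAL);   /* invalid request */
	if (fmt == known_fmt)
		return known_fmt;          /* found */
	return NULL;                       /* not found */
}

int main(void)
{
	const char *hit = lookup_fmt(known_fmt);
	const char *bad = lookup_fmt(NULL);

	if (hit && !IS_ERR(hit))
		printf("found: %s", hit);
	if (IS_ERR(bad))
		printf("error: %ld\n", PTR_ERR(bad));
	return 0;
}
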
6288 +diff --git a/mm/migrate.c b/mm/migrate.c
6289 +index f9dfb18a4eba..bdf3410bb4fa 100644
6290 +--- a/mm/migrate.c
6291 ++++ b/mm/migrate.c
6292 +@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
6293 +
6294 + return MIGRATEPAGE_SUCCESS;
6295 + }
6296 ++EXPORT_SYMBOL(migrate_page_move_mapping);
6297 +
6298 + /*
6299 + * The expected number of remaining references is the same as that
6300 +@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
6301 +
6302 + mem_cgroup_migrate(page, newpage);
6303 + }
6304 ++EXPORT_SYMBOL(migrate_page_copy);
6305 +
6306 + /************************************************************
6307 + * Migration functions
6308 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
6309 +index bc5149d5ec38..e389f0a998f1 100644
6310 +--- a/mm/page-writeback.c
6311 ++++ b/mm/page-writeback.c
6312 +@@ -369,8 +369,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
6313 + struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
6314 + unsigned long bytes = vm_dirty_bytes;
6315 + unsigned long bg_bytes = dirty_background_bytes;
6316 +- unsigned long ratio = vm_dirty_ratio;
6317 +- unsigned long bg_ratio = dirty_background_ratio;
6318 ++ /* convert ratios to per-PAGE_SIZE for higher precision */
6319 ++ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
6320 ++ unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
6321 + unsigned long thresh;
6322 + unsigned long bg_thresh;
6323 + struct task_struct *tsk;
6324 +@@ -382,26 +383,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
6325 + /*
6326 + * The byte settings can't be applied directly to memcg
6327 + * domains. Convert them to ratios by scaling against
6328 +- * globally available memory.
6329 ++ * globally available memory. As the ratios are in
6330 ++ * per-PAGE_SIZE, they can be obtained by dividing bytes by
6331 ++ * number of pages.
6332 + */
6333 + if (bytes)
6334 +- ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
6335 +- global_avail, 100UL);
6336 ++ ratio = min(DIV_ROUND_UP(bytes, global_avail),
6337 ++ PAGE_SIZE);
6338 + if (bg_bytes)
6339 +- bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
6340 +- global_avail, 100UL);
6341 ++ bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
6342 ++ PAGE_SIZE);
6343 + bytes = bg_bytes = 0;
6344 + }
6345 +
6346 + if (bytes)
6347 + thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
6348 + else
6349 +- thresh = (ratio * available_memory) / 100;
6350 ++ thresh = (ratio * available_memory) / PAGE_SIZE;
6351 +
6352 + if (bg_bytes)
6353 + bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
6354 + else
6355 +- bg_thresh = (bg_ratio * available_memory) / 100;
6356 ++ bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
6357 +
6358 + if (bg_thresh >= thresh)
6359 + bg_thresh = thresh / 2;
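
The switch from percent to per-PAGE_SIZE units pays off when vm_dirty_bytes is converted to a ratio for a memcg domain: with /100 scaling anything below 1% of available memory truncates to a zero threshold, while per-4096 scaling keeps small byte limits meaningful. A quick arithmetic check with example numbers (the min() clamps from the real code are omitted for brevity):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long avail = 1000000;            /* ~3.8 GiB in 4 KiB pages */
	unsigned long bytes = 16UL << 20;         /* vm_dirty_bytes = 16 MiB */

	/* old: ratio in percent, truncates to 0 for anything under 1% */
	unsigned long old_ratio  = DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / avail;
	unsigned long old_thresh = old_ratio * avail / 100;

	/* new: ratio in units of 1/PAGE_SIZE */
	unsigned long new_ratio  = DIV_ROUND_UP(bytes, avail);
	unsigned long new_thresh = new_ratio * avail / PAGE_SIZE;

	printf("old: ratio=%lu%% thresh=%lu pages\n", old_ratio, old_thresh);
	printf("new: ratio=%lu/4096 thresh=%lu pages (~%lu MiB)\n",
	       new_ratio, new_thresh, new_thresh * PAGE_SIZE >> 20);
	/* old: ratio=0% thresh=0 pages
	 * new: ratio=17/4096 thresh=4150 pages (~16 MiB) */
	return 0;
}
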
6360 +diff --git a/mm/percpu.c b/mm/percpu.c
6361 +index 0c59684f1ff2..9903830aaebb 100644
6362 +--- a/mm/percpu.c
6363 ++++ b/mm/percpu.c
6364 +@@ -112,7 +112,7 @@ struct pcpu_chunk {
6365 + int map_used; /* # of map entries used before the sentry */
6366 + int map_alloc; /* # of map entries allocated */
6367 + int *map; /* allocation map */
6368 +- struct work_struct map_extend_work;/* async ->map[] extension */
6369 ++ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
6370 +
6371 + void *data; /* chunk data */
6372 + int first_free; /* no free below this */
6373 +@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
6374 + static int pcpu_reserved_chunk_limit;
6375 +
6376 + static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
6377 +-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
6378 ++static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
6379 +
6380 + static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
6381 +
6382 ++/* chunks which need their map areas extended, protected by pcpu_lock */
6383 ++static LIST_HEAD(pcpu_map_extend_chunks);
6384 ++
6385 + /*
6386 + * The number of empty populated pages, protected by pcpu_lock. The
6387 + * reserved chunk doesn't contribute to the count.
6388 +@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
6389 + {
6390 + int margin, new_alloc;
6391 +
6392 ++ lockdep_assert_held(&pcpu_lock);
6393 ++
6394 + if (is_atomic) {
6395 + margin = 3;
6396 +
6397 + if (chunk->map_alloc <
6398 +- chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
6399 +- pcpu_async_enabled)
6400 +- schedule_work(&chunk->map_extend_work);
6401 ++ chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
6402 ++ if (list_empty(&chunk->map_extend_list)) {
6403 ++ list_add_tail(&chunk->map_extend_list,
6404 ++ &pcpu_map_extend_chunks);
6405 ++ pcpu_schedule_balance_work();
6406 ++ }
6407 ++ }
6408 + } else {
6409 + margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
6410 + }
6411 +@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
6412 + size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
6413 + unsigned long flags;
6414 +
6415 ++ lockdep_assert_held(&pcpu_alloc_mutex);
6416 ++
6417 + new = pcpu_mem_zalloc(new_size);
6418 + if (!new)
6419 + return -ENOMEM;
6420 +@@ -467,20 +478,6 @@ out_unlock:
6421 + return 0;
6422 + }
6423 +
6424 +-static void pcpu_map_extend_workfn(struct work_struct *work)
6425 +-{
6426 +- struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
6427 +- map_extend_work);
6428 +- int new_alloc;
6429 +-
6430 +- spin_lock_irq(&pcpu_lock);
6431 +- new_alloc = pcpu_need_to_extend(chunk, false);
6432 +- spin_unlock_irq(&pcpu_lock);
6433 +-
6434 +- if (new_alloc)
6435 +- pcpu_extend_area_map(chunk, new_alloc);
6436 +-}
6437 +-
6438 + /**
6439 + * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
6440 + * @chunk: chunk the candidate area belongs to
6441 +@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
6442 + chunk->map_used = 1;
6443 +
6444 + INIT_LIST_HEAD(&chunk->list);
6445 +- INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
6446 ++ INIT_LIST_HEAD(&chunk->map_extend_list);
6447 + chunk->free_size = pcpu_unit_size;
6448 + chunk->contig_hint = pcpu_unit_size;
6449 +
6450 +@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
6451 + return NULL;
6452 + }
6453 +
6454 ++ if (!is_atomic)
6455 ++ mutex_lock(&pcpu_alloc_mutex);
6456 ++
6457 + spin_lock_irqsave(&pcpu_lock, flags);
6458 +
6459 + /* serve reserved allocations from the reserved chunk if available */
6460 +@@ -967,12 +967,9 @@ restart:
6461 + if (is_atomic)
6462 + goto fail;
6463 +
6464 +- mutex_lock(&pcpu_alloc_mutex);
6465 +-
6466 + if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
6467 + chunk = pcpu_create_chunk();
6468 + if (!chunk) {
6469 +- mutex_unlock(&pcpu_alloc_mutex);
6470 + err = "failed to allocate new chunk";
6471 + goto fail;
6472 + }
6473 +@@ -983,7 +980,6 @@ restart:
6474 + spin_lock_irqsave(&pcpu_lock, flags);
6475 + }
6476 +
6477 +- mutex_unlock(&pcpu_alloc_mutex);
6478 + goto restart;
6479 +
6480 + area_found:
6481 +@@ -993,8 +989,6 @@ area_found:
6482 + if (!is_atomic) {
6483 + int page_start, page_end, rs, re;
6484 +
6485 +- mutex_lock(&pcpu_alloc_mutex);
6486 +-
6487 + page_start = PFN_DOWN(off);
6488 + page_end = PFN_UP(off + size);
6489 +
6490 +@@ -1005,7 +999,6 @@ area_found:
6491 +
6492 + spin_lock_irqsave(&pcpu_lock, flags);
6493 + if (ret) {
6494 +- mutex_unlock(&pcpu_alloc_mutex);
6495 + pcpu_free_area(chunk, off, &occ_pages);
6496 + err = "failed to populate";
6497 + goto fail_unlock;
6498 +@@ -1045,6 +1038,8 @@ fail:
6499 + /* see the flag handling in pcpu_blance_workfn() */
6500 + pcpu_atomic_alloc_failed = true;
6501 + pcpu_schedule_balance_work();
6502 ++ } else {
6503 ++ mutex_unlock(&pcpu_alloc_mutex);
6504 + }
6505 + return NULL;
6506 + }
6507 +@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
6508 + if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
6509 + continue;
6510 +
6511 ++ list_del_init(&chunk->map_extend_list);
6512 + list_move(&chunk->list, &to_free);
6513 + }
6514 +
6515 +@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
6516 + pcpu_destroy_chunk(chunk);
6517 + }
6518 +
6519 ++ /* service chunks which requested async area map extension */
6520 ++ do {
6521 ++ int new_alloc = 0;
6522 ++
6523 ++ spin_lock_irq(&pcpu_lock);
6524 ++
6525 ++ chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
6526 ++ struct pcpu_chunk, map_extend_list);
6527 ++ if (chunk) {
6528 ++ list_del_init(&chunk->map_extend_list);
6529 ++ new_alloc = pcpu_need_to_extend(chunk, false);
6530 ++ }
6531 ++
6532 ++ spin_unlock_irq(&pcpu_lock);
6533 ++
6534 ++ if (new_alloc)
6535 ++ pcpu_extend_area_map(chunk, new_alloc);
6536 ++ } while (chunk);
6537 ++
6538 + /*
6539 + * Ensure there are certain number of free populated pages for
6540 + * atomic allocs. Fill up from the most packed so that atomic
6541 +@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6542 + */
6543 + schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6544 + INIT_LIST_HEAD(&schunk->list);
6545 +- INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
6546 ++ INIT_LIST_HEAD(&schunk->map_extend_list);
6547 + schunk->base_addr = base_addr;
6548 + schunk->map = smap;
6549 + schunk->map_alloc = ARRAY_SIZE(smap);
6550 +@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6551 + if (dyn_size) {
6552 + dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6553 + INIT_LIST_HEAD(&dchunk->list);
6554 +- INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
6555 ++ INIT_LIST_HEAD(&dchunk->map_extend_list);
6556 + dchunk->base_addr = base_addr;
6557 + dchunk->map = dmap;
6558 + dchunk->map_alloc = ARRAY_SIZE(dmap);
6559 +diff --git a/mm/shmem.c b/mm/shmem.c
6560 +index 719bd6b88d98..9ca09f52fef5 100644
6561 +--- a/mm/shmem.c
6562 ++++ b/mm/shmem.c
6563 +@@ -2236,9 +2236,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
6564 + NULL);
6565 + if (error) {
6566 + /* Remove the !PageUptodate pages we added */
6567 +- shmem_undo_range(inode,
6568 +- (loff_t)start << PAGE_SHIFT,
6569 +- (loff_t)index << PAGE_SHIFT, true);
6570 ++ if (index > start) {
6571 ++ shmem_undo_range(inode,
6572 ++ (loff_t)start << PAGE_SHIFT,
6573 ++ ((loff_t)index << PAGE_SHIFT) - 1, true);
6574 ++ }
6575 + goto undone;
6576 + }
6577 +
6578 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
6579 +index a669dea146c6..61ad43f61c5e 100644
6580 +--- a/net/core/flow_dissector.c
6581 ++++ b/net/core/flow_dissector.c
6582 +@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
6583 + }
6584 + EXPORT_SYMBOL(make_flow_keys_digest);
6585 +
6586 ++static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
6587 ++
6588 ++u32 __skb_get_hash_symmetric(struct sk_buff *skb)
6589 ++{
6590 ++ struct flow_keys keys;
6591 ++
6592 ++ __flow_hash_secret_init();
6593 ++
6594 ++ memset(&keys, 0, sizeof(keys));
6595 ++ __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
6596 ++ NULL, 0, 0, 0,
6597 ++ FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
6598 ++
6599 ++ return __flow_hash_from_keys(&keys, hashrnd);
6600 ++}
6601 ++EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
6602 ++
6603 + /**
6604 + * __skb_get_hash: calculate a flow hash
6605 + * @skb: sk_buff to calculate flow hash from
6606 +@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
6607 + },
6608 + };
6609 +
6610 ++static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
6611 ++ {
6612 ++ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
6613 ++ .offset = offsetof(struct flow_keys, control),
6614 ++ },
6615 ++ {
6616 ++ .key_id = FLOW_DISSECTOR_KEY_BASIC,
6617 ++ .offset = offsetof(struct flow_keys, basic),
6618 ++ },
6619 ++ {
6620 ++ .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
6621 ++ .offset = offsetof(struct flow_keys, addrs.v4addrs),
6622 ++ },
6623 ++ {
6624 ++ .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
6625 ++ .offset = offsetof(struct flow_keys, addrs.v6addrs),
6626 ++ },
6627 ++ {
6628 ++ .key_id = FLOW_DISSECTOR_KEY_PORTS,
6629 ++ .offset = offsetof(struct flow_keys, ports),
6630 ++ },
6631 ++};
6632 ++
6633 + static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
6634 + {
6635 + .key_id = FLOW_DISSECTOR_KEY_CONTROL,
6636 +@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
6637 + skb_flow_dissector_init(&flow_keys_dissector,
6638 + flow_keys_dissector_keys,
6639 + ARRAY_SIZE(flow_keys_dissector_keys));
6640 ++ skb_flow_dissector_init(&flow_keys_dissector_symmetric,
6641 ++ flow_keys_dissector_symmetric_keys,
6642 ++ ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
6643 + skb_flow_dissector_init(&flow_keys_buf_dissector,
6644 + flow_keys_buf_dissector_keys,
6645 + ARRAY_SIZE(flow_keys_buf_dissector_keys));
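
The symmetric dissector exists so that PACKET_FANOUT_HASH (see the af_packet hunk further down) steers both directions of a connection to the same socket: swapping source and destination must not change the hash. A minimal user-space illustration of one way to get that property, by canonicalising the endpoint order before hashing; this sketches the idea only, not the kernel's dissector or hash function:

#include <stdint.h>
#include <stdio.h>

struct endpoint { uint32_t addr; uint16_t port; };

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;
	h *= 0x9e3779b1u;           /* any decent mixing constant */
	return h;
}

static uint32_t symmetric_flow_hash(struct endpoint a, struct endpoint b)
{
	/* order the endpoints so (a, b) and (b, a) hash identically */
	uint64_t ka = ((uint64_t)a.addr << 16) | a.port;
	uint64_t kb = ((uint64_t)b.addr << 16) | b.port;
	struct endpoint lo = ka <= kb ? a : b;
	struct endpoint hi = ka <= kb ? b : a;
	uint32_t h = 0;

	h = mix(h, lo.addr); h = mix(h, lo.port);
	h = mix(h, hi.addr); h = mix(h, hi.port);
	return h;
}

int main(void)
{
	struct endpoint c = { 0x0a000001, 12345 };   /* 10.0.0.1:12345 */
	struct endpoint s = { 0x0a000002, 80 };      /* 10.0.0.2:80 */

	printf("c->s %08x\n", (unsigned)symmetric_flow_hash(c, s));
	printf("s->c %08x\n", (unsigned)symmetric_flow_hash(s, c)); /* same */
	return 0;
}
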
6646 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6647 +index e561f9f07d6d..59bf4d77154f 100644
6648 +--- a/net/core/skbuff.c
6649 ++++ b/net/core/skbuff.c
6650 +@@ -3016,24 +3016,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
6651 + EXPORT_SYMBOL_GPL(skb_append_pagefrags);
6652 +
6653 + /**
6654 +- * skb_push_rcsum - push skb and update receive checksum
6655 +- * @skb: buffer to update
6656 +- * @len: length of data pulled
6657 +- *
6658 +- * This function performs an skb_push on the packet and updates
6659 +- * the CHECKSUM_COMPLETE checksum. It should be used on
6660 +- * receive path processing instead of skb_push unless you know
6661 +- * that the checksum difference is zero (e.g., a valid IP header)
6662 +- * or you are setting ip_summed to CHECKSUM_NONE.
6663 +- */
6664 +-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
6665 +-{
6666 +- skb_push(skb, len);
6667 +- skb_postpush_rcsum(skb, skb->data, len);
6668 +- return skb->data;
6669 +-}
6670 +-
6671 +-/**
6672 + * skb_pull_rcsum - pull skb and update receive checksum
6673 + * @skb: buffer to update
6674 + * @len: length of data pulled
6675 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6676 +index ea071fad67a0..c26fac26b23c 100644
6677 +--- a/net/ipv6/ip6_fib.c
6678 ++++ b/net/ipv6/ip6_fib.c
6679 +@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
6680 + }
6681 + }
6682 +
6683 ++ free_percpu(non_pcpu_rt->rt6i_pcpu);
6684 + non_pcpu_rt->rt6i_pcpu = NULL;
6685 + }
6686 +
6687 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
6688 +index d32cefcb63b0..34a5712d467f 100644
6689 +--- a/net/mac80211/mesh.c
6690 ++++ b/net/mac80211/mesh.c
6691 +@@ -150,19 +150,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
6692 + void mesh_sta_cleanup(struct sta_info *sta)
6693 + {
6694 + struct ieee80211_sub_if_data *sdata = sta->sdata;
6695 +- u32 changed;
6696 ++ u32 changed = 0;
6697 +
6698 + /*
6699 + * maybe userspace handles peer allocation and peering, but in either
6700 + * case the beacon is still generated by the kernel and we might need
6701 + * an update.
6702 + */
6703 +- changed = mesh_accept_plinks_update(sdata);
6704 ++ if (sdata->u.mesh.user_mpm &&
6705 ++ sta->mesh->plink_state == NL80211_PLINK_ESTAB)
6706 ++ changed |= mesh_plink_dec_estab_count(sdata);
6707 ++ changed |= mesh_accept_plinks_update(sdata);
6708 + if (!sdata->u.mesh.user_mpm) {
6709 + changed |= mesh_plink_deactivate(sta);
6710 + del_timer_sync(&sta->mesh->plink_timer);
6711 + }
6712 +
6713 ++ /* make sure no readers can access nexthop sta from here on */
6714 ++ mesh_path_flush_by_nexthop(sta);
6715 ++ synchronize_net();
6716 ++
6717 + if (changed)
6718 + ieee80211_mbss_info_change_notify(sdata, changed);
6719 + }
6720 +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
6721 +index 62193f4bc37b..ba7ce53ec615 100644
6722 +--- a/net/mac80211/sta_info.h
6723 ++++ b/net/mac80211/sta_info.h
6724 +@@ -275,7 +275,7 @@ struct ieee80211_fast_tx {
6725 + u8 sa_offs, da_offs, pn_offs;
6726 + u8 band;
6727 + u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
6728 +- sizeof(rfc1042_header)];
6729 ++ sizeof(rfc1042_header)] __aligned(2);
6730 +
6731 + struct rcu_head rcu_head;
6732 + };
6733 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
6734 +index 18d0becbc46d..8012f67ca5ae 100644
6735 +--- a/net/packet/af_packet.c
6736 ++++ b/net/packet/af_packet.c
6737 +@@ -1340,7 +1340,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
6738 + struct sk_buff *skb,
6739 + unsigned int num)
6740 + {
6741 +- return reciprocal_scale(skb_get_hash(skb), num);
6742 ++ return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
6743 + }
6744 +
6745 + static unsigned int fanout_demux_lb(struct packet_fanout *f,
6746 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
6747 +index 8f3948dd38b8..934336e12a65 100644
6748 +--- a/net/sched/act_mirred.c
6749 ++++ b/net/sched/act_mirred.c
6750 +@@ -180,7 +180,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
6751 +
6752 + if (!(at & AT_EGRESS)) {
6753 + if (m->tcfm_ok_push)
6754 +- skb_push(skb2, skb->mac_len);
6755 ++ skb_push_rcsum(skb2, skb->mac_len);
6756 + }
6757 +
6758 + /* mirror is always swallowed */
6759 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
6760 +index 7e0c9bf22df8..837dd910a252 100644
6761 +--- a/net/sunrpc/clnt.c
6762 ++++ b/net/sunrpc/clnt.c
6763 +@@ -446,16 +446,27 @@ out_no_rpciod:
6764 + return ERR_PTR(err);
6765 + }
6766 +
6767 +-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6768 ++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6769 + struct rpc_xprt *xprt)
6770 + {
6771 + struct rpc_clnt *clnt = NULL;
6772 + struct rpc_xprt_switch *xps;
6773 +
6774 +- xps = xprt_switch_alloc(xprt, GFP_KERNEL);
6775 +- if (xps == NULL)
6776 +- return ERR_PTR(-ENOMEM);
6777 +-
6778 ++ if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
6779 ++ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
6780 ++ xps = args->bc_xprt->xpt_bc_xps;
6781 ++ xprt_switch_get(xps);
6782 ++ } else {
6783 ++ xps = xprt_switch_alloc(xprt, GFP_KERNEL);
6784 ++ if (xps == NULL) {
6785 ++ xprt_put(xprt);
6786 ++ return ERR_PTR(-ENOMEM);
6787 ++ }
6788 ++ if (xprt->bc_xprt) {
6789 ++ xprt_switch_get(xps);
6790 ++ xprt->bc_xprt->xpt_bc_xps = xps;
6791 ++ }
6792 ++ }
6793 + clnt = rpc_new_client(args, xps, xprt, NULL);
6794 + if (IS_ERR(clnt))
6795 + return clnt;
6796 +@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6797 +
6798 + return clnt;
6799 + }
6800 +-EXPORT_SYMBOL_GPL(rpc_create_xprt);
6801 +
6802 + /**
6803 + * rpc_create - create an RPC client and transport with one call
6804 +@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
6805 + };
6806 + char servername[48];
6807 +
6808 ++ if (args->bc_xprt) {
6809 ++ WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
6810 ++ xprt = args->bc_xprt->xpt_bc_xprt;
6811 ++ if (xprt) {
6812 ++ xprt_get(xprt);
6813 ++ return rpc_create_xprt(args, xprt);
6814 ++ }
6815 ++ }
6816 ++
6817 + if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
6818 + xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
6819 + if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
6820 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
6821 +index 7422f28818b2..7231cb413a2c 100644
6822 +--- a/net/sunrpc/svc_xprt.c
6823 ++++ b/net/sunrpc/svc_xprt.c
6824 +@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
6825 + /* See comment on corresponding get in xs_setup_bc_tcp(): */
6826 + if (xprt->xpt_bc_xprt)
6827 + xprt_put(xprt->xpt_bc_xprt);
6828 ++ if (xprt->xpt_bc_xps)
6829 ++ xprt_switch_put(xprt->xpt_bc_xps);
6830 + xprt->xpt_ops->xpo_free(xprt);
6831 + module_put(owner);
6832 + }
6833 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
6834 +index 65e759569e48..e9e5dd0dc8f4 100644
6835 +--- a/net/sunrpc/xprtsock.c
6836 ++++ b/net/sunrpc/xprtsock.c
6837 +@@ -3050,6 +3050,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
6838 + return xprt;
6839 +
6840 + args->bc_xprt->xpt_bc_xprt = NULL;
6841 ++ args->bc_xprt->xpt_bc_xps = NULL;
6842 + xprt_put(xprt);
6843 + ret = ERR_PTR(-EINVAL);
6844 + out_err:
6845 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6846 +index 8269da73e9e5..7748199b3568 100644
6847 +--- a/net/unix/af_unix.c
6848 ++++ b/net/unix/af_unix.c
6849 +@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
6850 + &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
6851 + struct dentry *dentry = unix_sk(s)->path.dentry;
6852 +
6853 +- if (dentry && d_backing_inode(dentry) == i) {
6854 ++ if (dentry && d_real_inode(dentry) == i) {
6855 + sock_hold(s);
6856 + goto found;
6857 + }
6858 +@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
6859 + err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
6860 + if (err)
6861 + goto fail;
6862 +- inode = d_backing_inode(path.dentry);
6863 ++ inode = d_real_inode(path.dentry);
6864 + err = inode_permission(inode, MAY_WRITE);
6865 + if (err)
6866 + goto put_fail;
6867 +@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
6868 + goto out_up;
6869 + }
6870 + addr->hash = UNIX_HASH_SIZE;
6871 +- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
6872 ++ hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
6873 + spin_lock(&unix_table_lock);
6874 + u->path = u_path;
6875 + list = &unix_socket_table[hash];
6876 +diff --git a/net/wireless/core.c b/net/wireless/core.c
6877 +index 9f1c4aa851ef..c878045d146a 100644
6878 +--- a/net/wireless/core.c
6879 ++++ b/net/wireless/core.c
6880 +@@ -360,8 +360,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
6881 + WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
6882 + WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
6883 + WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
6884 +- WARN_ON(ops->set_tx_power && !ops->get_tx_power);
6885 +- WARN_ON(ops->set_antenna && !ops->get_antenna);
6886 +
6887 + alloc_size = sizeof(*rdev) + sizeof_priv;
6888 +
6889 +diff --git a/net/wireless/util.c b/net/wireless/util.c
6890 +index 9f440a9de63b..47b917841623 100644
6891 +--- a/net/wireless/util.c
6892 ++++ b/net/wireless/util.c
6893 +@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
6894 + * replace EtherType */
6895 + hdrlen += ETH_ALEN + 2;
6896 + else
6897 +- tmp.h_proto = htons(skb->len);
6898 ++ tmp.h_proto = htons(skb->len - hdrlen);
6899 +
6900 + pskb_pull(skb, hdrlen);
6901 +
6902 +diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
6903 +index a9155077feef..fec75786f75b 100644
6904 +--- a/scripts/mod/file2alias.c
6905 ++++ b/scripts/mod/file2alias.c
6906 +@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
6907 + len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
6908 + (*type)[0] ? *type : "*");
6909 +
6910 +- if (compatible[0])
6911 ++ if ((*compatible)[0])
6912 + sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
6913 + *compatible);
6914 +
6915 +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
6916 +index dec607c17b64..5ee820111027 100644
6917 +--- a/security/apparmor/lsm.c
6918 ++++ b/security/apparmor/lsm.c
6919 +@@ -523,34 +523,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
6920 + {
6921 + struct common_audit_data sa;
6922 + struct apparmor_audit_data aad = {0,};
6923 +- char *command, *args = value;
6924 ++ char *command, *largs = NULL, *args = value;
6925 + size_t arg_size;
6926 + int error;
6927 +
6928 + if (size == 0)
6929 + return -EINVAL;
6930 +- /* args points to a PAGE_SIZE buffer, AppArmor requires that
6931 +- * the buffer must be null terminated or have size <= PAGE_SIZE -1
6932 +- * so that AppArmor can null terminate them
6933 +- */
6934 +- if (args[size - 1] != '\0') {
6935 +- if (size == PAGE_SIZE)
6936 +- return -EINVAL;
6937 +- args[size] = '\0';
6938 +- }
6939 +-
6940 + /* task can only write its own attributes */
6941 + if (current != task)
6942 + return -EACCES;
6943 +
6944 +- args = value;
6945 ++ /* AppArmor requires that the buffer must be null terminated atm */
6946 ++ if (args[size - 1] != '\0') {
6947 ++ /* null terminate */
6948 ++ largs = args = kmalloc(size + 1, GFP_KERNEL);
6949 ++ if (!args)
6950 ++ return -ENOMEM;
6951 ++ memcpy(args, value, size);
6952 ++ args[size] = '\0';
6953 ++ }
6954 ++
6955 ++ error = -EINVAL;
6956 + args = strim(args);
6957 + command = strsep(&args, " ");
6958 + if (!args)
6959 +- return -EINVAL;
6960 ++ goto out;
6961 + args = skip_spaces(args);
6962 + if (!*args)
6963 +- return -EINVAL;
6964 ++ goto out;
6965 +
6966 + arg_size = size - (args - (char *) value);
6967 + if (strcmp(name, "current") == 0) {
6968 +@@ -576,10 +576,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
6969 + goto fail;
6970 + } else
6971 + /* only support the "current" and "exec" process attributes */
6972 +- return -EINVAL;
6973 ++ goto fail;
6974 +
6975 + if (!error)
6976 + error = size;
6977 ++out:
6978 ++ kfree(largs);
6979 + return error;
6980 +
6981 + fail:
6982 +@@ -588,9 +590,9 @@ fail:
6983 + aad.profile = aa_current_profile();
6984 + aad.op = OP_SETPROCATTR;
6985 + aad.info = name;
6986 +- aad.error = -EINVAL;
6987 ++ aad.error = error = -EINVAL;
6988 + aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
6989 +- return -EINVAL;
6990 ++ goto out;
6991 + }
6992 +
6993 + static int apparmor_task_setrlimit(struct task_struct *task,
6994 +diff --git a/security/keys/key.c b/security/keys/key.c
6995 +index b28755131687..af7f6821d26b 100644
6996 +--- a/security/keys/key.c
6997 ++++ b/security/keys/key.c
6998 +@@ -584,7 +584,7 @@ int key_reject_and_link(struct key *key,
6999 +
7000 + mutex_unlock(&key_construction_mutex);
7001 +
7002 +- if (keyring)
7003 ++ if (keyring && link_ret == 0)
7004 + __key_link_end(keyring, &key->index_key, edit);
7005 +
7006 + /* wake up anyone waiting for a key to be constructed */
7007 +diff --git a/sound/core/control.c b/sound/core/control.c
7008 +index a85d45595d02..b4fe9b002512 100644
7009 +--- a/sound/core/control.c
7010 ++++ b/sound/core/control.c
7011 +@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
7012 +
7013 + if (snd_BUG_ON(!card || !id))
7014 + return;
7015 ++ if (card->shutdown)
7016 ++ return;
7017 + read_lock(&card->ctl_files_rwlock);
7018 + #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
7019 + card->mixer_oss_change_count++;
7020 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
7021 +index 308c9ecf73db..8e980aa678d0 100644
7022 +--- a/sound/core/pcm.c
7023 ++++ b/sound/core/pcm.c
7024 +@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
7025 + }
7026 + EXPORT_SYMBOL(snd_pcm_new_internal);
7027 +
7028 ++static void free_chmap(struct snd_pcm_str *pstr)
7029 ++{
7030 ++ if (pstr->chmap_kctl) {
7031 ++ snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
7032 ++ pstr->chmap_kctl = NULL;
7033 ++ }
7034 ++}
7035 ++
7036 + static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
7037 + {
7038 + struct snd_pcm_substream *substream, *substream_next;
7039 +@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
7040 + kfree(setup);
7041 + }
7042 + #endif
7043 ++ free_chmap(pstr);
7044 + if (pstr->substream_count)
7045 + put_device(&pstr->dev);
7046 + }
7047 +@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
7048 + for (cidx = 0; cidx < 2; cidx++) {
7049 + if (!pcm->internal)
7050 + snd_unregister_device(&pcm->streams[cidx].dev);
7051 +- if (pcm->streams[cidx].chmap_kctl) {
7052 +- snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
7053 +- pcm->streams[cidx].chmap_kctl = NULL;
7054 +- }
7055 ++ free_chmap(&pcm->streams[cidx]);
7056 + }
7057 + mutex_unlock(&pcm->open_mutex);
7058 + mutex_unlock(&register_mutex);
7059 +diff --git a/sound/core/timer.c b/sound/core/timer.c
7060 +index 6469bedda2f3..23b73f6ac040 100644
7061 +--- a/sound/core/timer.c
7062 ++++ b/sound/core/timer.c
7063 +@@ -1954,6 +1954,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7064 +
7065 + qhead = tu->qhead++;
7066 + tu->qhead %= tu->queue_size;
7067 ++ tu->qused--;
7068 + spin_unlock_irq(&tu->qlock);
7069 +
7070 + if (tu->tread) {
7071 +@@ -1967,7 +1968,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7072 + }
7073 +
7074 + spin_lock_irq(&tu->qlock);
7075 +- tu->qused--;
7076 + if (err < 0)
7077 + goto _error;
7078 + result += unit;
7079 +diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
7080 +index c0f8f613f1f1..172dacd925f5 100644
7081 +--- a/sound/drivers/dummy.c
7082 ++++ b/sound/drivers/dummy.c
7083 +@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
7084 +
7085 + static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
7086 + {
7087 ++ hrtimer_cancel(&dpcm->timer);
7088 + tasklet_kill(&dpcm->tasklet);
7089 + }
7090 +
7091 +diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
7092 +index 87041ddd29cb..47a358fab132 100644
7093 +--- a/sound/hda/hdac_regmap.c
7094 ++++ b/sound/hda/hdac_regmap.c
7095 +@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
7096 + err = reg_raw_write(codec, reg, val);
7097 + if (err == -EAGAIN) {
7098 + err = snd_hdac_power_up_pm(codec);
7099 +- if (!err)
7100 ++ if (err >= 0)
7101 + err = reg_raw_write(codec, reg, val);
7102 + snd_hdac_power_down_pm(codec);
7103 + }
7104 +@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
7105 + err = reg_raw_read(codec, reg, val, uncached);
7106 + if (err == -EAGAIN) {
7107 + err = snd_hdac_power_up_pm(codec);
7108 +- if (!err)
7109 ++ if (err >= 0)
7110 + err = reg_raw_read(codec, reg, val, uncached);
7111 + snd_hdac_power_down_pm(codec);
7112 + }
7113 +diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
7114 +index 4667c3232b7f..74177189063c 100644
7115 +--- a/sound/pci/au88x0/au88x0_core.c
7116 ++++ b/sound/pci/au88x0/au88x0_core.c
7117 +@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
7118 + int page, p, pp, delta, i;
7119 +
7120 + page =
7121 +- (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
7122 +- WT_SUBBUF_MASK)
7123 +- >> WT_SUBBUF_SHIFT;
7124 ++ (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
7125 ++ >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
7126 + if (dma->nr_periods >= 4)
7127 + delta = (page - dma->period_real) & 3;
7128 + else {
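
The au88x0 hunk reorders a classic bit-field extraction: when the mask is expressed in field units (for example 0x3 for a two-bit field), it must be applied after the shift; masking first and then shifting discards the field entirely. The constants below are hypothetical stand-ins for WT_SUBBUF_MASK/WT_SUBBUF_SHIFT, chosen only to show the difference:

#include <stdio.h>

#define SUBBUF_SHIFT 12
#define SUBBUF_MASK  0x3u      /* 2-bit field, expressed post-shift */

int main(void)
{
	unsigned int stat = 0x3000;   /* field value 3 at bits 13:12 */

	unsigned int wrong = (stat & SUBBUF_MASK) >> SUBBUF_SHIFT;  /* 0 */
	unsigned int right = (stat >> SUBBUF_SHIFT) & SUBBUF_MASK;  /* 3 */

	printf("mask-then-shift: %u\n", wrong);
	printf("shift-then-mask: %u\n", right);
	return 0;
}
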
7129 +diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
7130 +index 1cb85aeb0cea..286f5e3686a3 100644
7131 +--- a/sound/pci/echoaudio/echoaudio.c
7132 ++++ b/sound/pci/echoaudio/echoaudio.c
7133 +@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
7134 + u32 pipe_alloc_mask;
7135 + int err;
7136 +
7137 +- commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
7138 ++ commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
7139 + if (commpage_bak == NULL)
7140 + return -ENOMEM;
7141 + commpage = chip->comm_page;
7142 +- memcpy(commpage_bak, commpage, sizeof(struct comm_page));
7143 ++ memcpy(commpage_bak, commpage, sizeof(*commpage));
7144 +
7145 + err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
7146 + if (err < 0) {
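
The echoaudio fix applies the usual sizeof(*ptr) idiom: size both the allocation and the copy from the object the pointer refers to, so the two cannot drift apart the way sizeof(struct echoaudio) versus sizeof(struct comm_page) did. A tiny sketch of the idiom with a placeholder struct:

#include <stdlib.h>
#include <string.h>

struct comm_page { char regs[4096]; };

static struct comm_page *backup_commpage(const struct comm_page *live)
{
	/* sizeof(*bak) always matches the pointed-to type */
	struct comm_page *bak = malloc(sizeof(*bak));

	if (bak)
		memcpy(bak, live, sizeof(*bak));
	return bak;
}

int main(void)
{
	struct comm_page live = { { 0 } };
	struct comm_page *bak = backup_commpage(&live);

	free(bak);
	return 0;
}
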
7147 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
7148 +index dfaf1a93fb8a..d77cc76aadab 100644
7149 +--- a/sound/pci/hda/hda_generic.c
7150 ++++ b/sound/pci/hda/hda_generic.c
7151 +@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
7152 +
7153 + for (n = 0; n < spec->paths.used; n++) {
7154 + path = snd_array_elem(&spec->paths, n);
7155 ++ if (!path->depth)
7156 ++ continue;
7157 + if (path->path[0] == nid ||
7158 + path->path[path->depth - 1] == nid) {
7159 + bool pin_old = path->pin_enabled;
7160 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7161 +index 94089fc71884..6f8ea13323c1 100644
7162 +--- a/sound/pci/hda/hda_intel.c
7163 ++++ b/sound/pci/hda/hda_intel.c
7164 +@@ -367,9 +367,10 @@ enum {
7165 + #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
7166 + #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
7167 + #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
7168 ++#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
7169 + #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
7170 + #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
7171 +- IS_KBL(pci) || IS_KBL_LP(pci)
7172 ++ IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
7173 +
7174 + static char *driver_short_names[] = {
7175 + [AZX_DRIVER_ICH] = "HDA Intel",
7176 +@@ -1217,8 +1218,10 @@ static int azx_free(struct azx *chip)
7177 + if (use_vga_switcheroo(hda)) {
7178 + if (chip->disabled && hda->probe_continued)
7179 + snd_hda_unlock_devices(&chip->bus);
7180 +- if (hda->vga_switcheroo_registered)
7181 ++ if (hda->vga_switcheroo_registered) {
7182 + vga_switcheroo_unregister_client(chip->pci);
7183 ++ vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
7184 ++ }
7185 + }
7186 +
7187 + if (bus->chip_init) {
7188 +@@ -2190,6 +2193,9 @@ static const struct pci_device_id azx_ids[] = {
7189 + /* Kabylake-LP */
7190 + { PCI_DEVICE(0x8086, 0x9d71),
7191 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
7192 ++ /* Kabylake-H */
7193 ++ { PCI_DEVICE(0x8086, 0xa2f0),
7194 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
7195 + /* Broxton-P(Apollolake) */
7196 + { PCI_DEVICE(0x8086, 0x5a98),
7197 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
7198 +@@ -2263,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
7199 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7200 + { PCI_DEVICE(0x1002, 0x157a),
7201 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7202 ++ { PCI_DEVICE(0x1002, 0x15b3),
7203 ++ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7204 + { PCI_DEVICE(0x1002, 0x793b),
7205 + .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
7206 + { PCI_DEVICE(0x1002, 0x7919),
7207 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7208 +index 0fe18ede3e85..abcb5a6a1cd9 100644
7209 +--- a/sound/pci/hda/patch_realtek.c
7210 ++++ b/sound/pci/hda/patch_realtek.c
7211 +@@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7212 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
7213 + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
7214 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
7215 ++ SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
7216 ++ SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
7217 ++ SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
7218 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7219 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
7220 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
7221 +@@ -5735,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7222 + {}
7223 + };
7224 + #define ALC225_STANDARD_PINS \
7225 +- {0x12, 0xb7a60130}, \
7226 + {0x21, 0x04211020}
7227 +
7228 + #define ALC256_STANDARD_PINS \
7229 +@@ -5760,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7230 + static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7231 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7232 + ALC225_STANDARD_PINS,
7233 ++ {0x12, 0xb7a60130},
7234 + {0x14, 0x901701a0}),
7235 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7236 + ALC225_STANDARD_PINS,
7237 ++ {0x12, 0xb7a60130},
7238 + {0x14, 0x901701b0}),
7239 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7240 ++ ALC225_STANDARD_PINS,
7241 ++ {0x12, 0xb7a60150},
7242 ++ {0x14, 0x901701a0}),
7243 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7244 ++ ALC225_STANDARD_PINS,
7245 ++ {0x12, 0xb7a60150},
7246 ++ {0x14, 0x901701b0}),
7247 ++ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7248 ++ ALC225_STANDARD_PINS,
7249 ++ {0x12, 0xb7a60130},
7250 ++ {0x1b, 0x90170110}),
7251 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
7252 + {0x14, 0x90170110},
7253 + {0x21, 0x02211020}),
7254 +@@ -5832,6 +5848,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7255 + {0x14, 0x90170120},
7256 + {0x21, 0x02211030}),
7257 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7258 ++ {0x12, 0x90a60170},
7259 ++ {0x14, 0x90170120},
7260 ++ {0x21, 0x02211030}),
7261 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7262 + ALC256_STANDARD_PINS),
7263 + SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
7264 + {0x12, 0x90a60130},
7265 +diff --git a/sound/usb/card.c b/sound/usb/card.c
7266 +index 3fc63583a537..2d493501b7f6 100644
7267 +--- a/sound/usb/card.c
7268 ++++ b/sound/usb/card.c
7269 +@@ -552,7 +552,6 @@ static int usb_audio_probe(struct usb_interface *intf,
7270 + goto __error;
7271 + }
7272 + chip = usb_chip[i];
7273 +- dev_set_drvdata(&dev->dev, chip);
7274 + atomic_inc(&chip->active); /* avoid autopm */
7275 + break;
7276 + }
7277 +@@ -578,6 +577,7 @@ static int usb_audio_probe(struct usb_interface *intf,
7278 + goto __error;
7279 + }
7280 + }
7281 ++ dev_set_drvdata(&dev->dev, chip);
7282 +
7283 + /*
7284 + * For devices with more than one control interface, we assume the
7285 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
7286 +index 4fd482fb9260..7cb12249baa5 100644
7287 +--- a/virt/kvm/kvm_main.c
7288 ++++ b/virt/kvm/kvm_main.c
7289 +@@ -2868,7 +2868,7 @@ static long kvm_vm_ioctl(struct file *filp,
7290 + if (copy_from_user(&routing, argp, sizeof(routing)))
7291 + goto out;
7292 + r = -EINVAL;
7293 +- if (routing.nr >= KVM_MAX_IRQ_ROUTES)
7294 ++ if (routing.nr > KVM_MAX_IRQ_ROUTES)
7295 + goto out;
7296 + if (routing.flags)
7297 + goto out;