From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.5 commit in: /
Date: Wed, 04 May 2016 23:56:27
Message-Id: 1462406172.aced38b03d4a5900cd1aadb745ab0d02ff4151db.mpagano@gentoo
commit: aced38b03d4a5900cd1aadb745ab0d02ff4151db
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 4 23:56:12 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 4 23:56:12 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=aced38b0

Linux patch 4.5.3

0000_README | 4 +
1002_linux-4.5.3.patch | 7456 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7460 insertions(+)

diff --git a/0000_README b/0000_README
index 0fa777f..0147ad9 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-4.5.2.patch
From: http://www.kernel.org
Desc: Linux 4.5.2

+Patch: 1002_linux-4.5.3.patch
+From: http://www.kernel.org
+Desc: Linux 4.5.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.5.3.patch b/1002_linux-4.5.3.patch
new file mode 100644
index 0000000..6401e8f
--- /dev/null
+++ b/1002_linux-4.5.3.patch
@@ -0,0 +1,7456 @@
+diff --git a/Makefile b/Makefile
+index 1ecaaeb7791d..9b56a6c5e36f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 5
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index 1fafaad516ba..97471d62d5e4 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -860,7 +860,7 @@
+ ti,no-idle-on-init;
+ reg = <0x50000000 0x2000>;
+ interrupts = <100>;
+- dmas = <&edma 52>;
++ dmas = <&edma 52 0>;
+ dma-names = "rxtx";
+ gpmc,num-cs = <7>;
+ gpmc,num-waitpins = <2>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 92068fbf8b57..6bd38a28e26c 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -207,7 +207,7 @@
+ ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+ <&edma_tptc2 0>;
+
+- ti,edma-memcpy-channels = <32 33>;
++ ti,edma-memcpy-channels = <58 59>;
+ };
+
+ edma_tptc0: tptc@49800000 {
+@@ -884,7 +884,7 @@
+ gpmc: gpmc@50000000 {
+ compatible = "ti,am3352-gpmc";
+ ti,hwmods = "gpmc";
+- dmas = <&edma 52>;
++ dmas = <&edma 52 0>;
+ dma-names = "rxtx";
+ clocks = <&l3s_gclk>;
+ clock-names = "fck";
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index d580e2b70f9a..637dc5dbc8ac 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++ assigned-clocks = <&mux_synctimer32k_ck>;
++ assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ };
+
+ sata@a0000 {
+- compatible = "marvell,orion-sata";
++ compatible = "marvell,armada-370-sata";
+ reg = <0xa0000 0x5000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ };
+
+ /* USB part of the eSATA/USB 2.0 port */
+- usb@50000 {
++ usb@58000 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ clocks = <&clks CLK_NAND>;
+- dmas = <&pdma 97>;
++ dmas = <&pdma 97 3>;
+ dma-names = "data";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 652a0bb11578..5189bcecad12 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -27,6 +27,7 @@ menuconfig ARCH_EXYNOS
+ select S5P_DEV_MFC
+ select SRAM
+ select THERMAL
++ select THERMAL_OF
+ select MFD_SYSCON
+ select CLKSRC_EXYNOS_MCT
+ select POWER_RESET
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ .safe_state_index = 0,
+ };
+
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++ .name = "omap3430_idle",
++ .owner = THIS_MODULE,
++ .states = {
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 110 + 162,
++ .target_residency = 5,
++ .name = "C1",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 106 + 180,
++ .target_residency = 309,
++ .name = "C2",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 107 + 410,
++ .target_residency = 46057,
++ .name = "C3",
++ .desc = "MPU RET + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 121 + 3374,
++ .target_residency = 46057,
++ .name = "C4",
++ .desc = "MPU OFF + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 855 + 1146,
++ .target_residency = 46057,
++ .name = "C5",
++ .desc = "MPU RET + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7580 + 4134,
++ .target_residency = 484329,
++ .name = "C6",
++ .desc = "MPU OFF + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7505 + 15274,
++ .target_residency = 484329,
++ .name = "C7",
++ .desc = "MPU OFF + CORE OFF",
++ },
++ },
++ .state_count = ARRAY_SIZE(omap3_idle_data),
++ .safe_state_index = 0,
++};
++
+ /* Public functions */
+
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ return -ENODEV;
+
+- return cpuidle_register(&omap3_idle_driver, NULL);
++ if (cpu_is_omap3430())
++ return cpuidle_register(&omap3430_idle_driver, NULL);
++ else
++ return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3c87e40650cf..9821be6dfd5e 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++ omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index b6d62e4cdfdd..2af6ff63e3b4 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ (sf & SYSC_HAS_CLOCKACTIVITY))
+ _set_clockactivity(oh, oh->class->sysc->clockact, &v);
+
+- /* If the cached value is the same as the new value, skip the write */
+- if (oh->_sysc_cache != v)
+- _write_sysconfig(v, oh);
++ _write_sysconfig(v, oh);
+
+ /*
+ * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ _set_master_standbymode(oh, idlemode, &v);
+ }
+
+- _write_sysconfig(v, oh);
++ /* If the cached value is the same as the new value, skip the write */
++ if (oh->_sysc_cache != v)
++ _write_sysconfig(v, oh);
+ }
+
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index f998eb1c698e..0cf4426183cf 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -2,6 +2,7 @@ menuconfig ARCH_SIRF
+ bool "CSR SiRF"
+ depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
++ select RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_IRQ_CHIP
+ select NO_IOPORT_MAP
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 8dde19962a5b..f63c96cd3608 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 0x00000040
+
++/* Reserved - do not use 0x00000004 */
+ #define PPC_FEATURE_TRUE_LE 0x00000002
+ #define PPC_FEATURE_PPC_LE 0x00000001
+
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 7030b035905d..a15fe1d4e84a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
+ unsigned long cpu_features; /* CPU_FTR_xxx bit */
+ unsigned long mmu_features; /* MMU_FTR_xxx bit */
+ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
++ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
+ unsigned char pabyte; /* byte number in ibm,pa-features */
+ unsigned char pabit; /* bit number (big-endian) */
+ unsigned char invert; /* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
++ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
++ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
++ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
++ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
++ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ /*
+- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+- * which is 0 if the kernel doesn't support TM.
++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++ * we don't want to turn on TM here, so we use the *_COMP versions
++ * which are 0 if the kernel doesn't support TM.
+ */
+- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
++ {CPU_FTR_TM_COMP, 0, 0,
++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ if (bit ^ fp->invert) {
+ cur_cpu_spec->cpu_features |= fp->cpu_features;
+ cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features |= fp->mmu_features;
+ } else {
+ cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ }
+ }
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 2b2ced9dc00a..6dafabb6ae1a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+-} __packed __aligned(64);
++ u64 pad[2];
++} __packed __aligned(128);
+
+ enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index a841e9765bd6..8381c09d2870 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+
+ req = cast_mcryptd_ctx_to_req(req_ctx);
+ if (irqs_disabled())
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ else {
+ local_bh_disable();
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ local_bh_enable();
+ }
+ }
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index f8a29d2c97b0..e6a8613fbfb0 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+
++#define hugepages_supported() cpu_has_pse
+
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index ad59d70bcb1a..ef495511f019 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ struct irq_desc *desc;
+ int cpu, vector;
+
+- BUG_ON(!data->cfg.vector);
++ if (!data->cfg.vector)
++ return;
+
+ vector = data->cfg.vector;
+ for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+index 0a850100c594..2658e2af74ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
+ void mce_gen_pool_process(void)
+ {
+ struct llist_node *head;
+- struct mce_evt_llist *node;
++ struct mce_evt_llist *node, *tmp;
+ struct mce *mce;
+
+ head = llist_del_all(&mce_event_llist);
+@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
+ return;
+
+ head = llist_reverse_order(head);
+- llist_for_each_entry(node, head, llnode) {
++ llist_for_each_entry_safe(node, tmp, head, llnode) {
+ mce = &node->mce;
+ atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index eca5bd9f0e47..ac4963c38aa3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
+ return 1;
+ }
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->arch.xcr0 = xcr0;
+
+ if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
+@@ -6569,8 +6568,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->prepare_guest_switch(vcpu);
+ if (vcpu->fpu_active)
+ kvm_load_guest_fpu(vcpu);
+- kvm_load_guest_xcr0(vcpu);
+-
+ vcpu->mode = IN_GUEST_MODE;
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6593,6 +6590,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ goto cancel_injection;
+ }
+
++ kvm_load_guest_xcr0(vcpu);
++
+ if (req_immediate_exit)
+ smp_send_reschedule(vcpu->cpu);
+
+@@ -6642,6 +6641,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ smp_wmb();
+
++ kvm_put_guest_xcr0(vcpu);
++
+ /* Interrupt is enabled by handle_external_intr() */
+ kvm_x86_ops->handle_external_intr(vcpu);
+
+@@ -7289,7 +7290,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ * and assume host would use all available bits.
+ * Guest xcr0 would be loaded later.
+ */
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+@@ -7298,8 +7298,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+- kvm_put_guest_xcr0(vcpu);
+-
+ if (!vcpu->guest_fpu_loaded) {
+ vcpu->fpu_counter = 0;
+ return;
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ struct list_head list;
+ struct kmmio_fault_page *release_next;
+- unsigned long page; /* location of the fault page */
++ unsigned long addr; /* the requested address */
+ pteval_t old_presence; /* page presence prior to arming */
+ bool armed;
+
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++
++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ struct list_head *head;
+ struct kmmio_fault_page *f;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
+
+- page &= PAGE_MASK;
+- head = kmmio_page_list(page);
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++ head = kmmio_page_list(addr);
+ list_for_each_entry_rcu(f, head, list) {
+- if (f->page == page)
++ if (f->addr == addr)
+ return f;
+ }
+ return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ unsigned int level;
+- pte_t *pte = lookup_address(f->page, &level);
++ pte_t *pte = lookup_address(f->addr, &level);
+
+ if (!pte) {
+- pr_err("no pte for page 0x%08lx\n", f->page);
++ pr_err("no pte for addr 0x%08lx\n", f->addr);
+ return -1;
+ }
+
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ return -1;
+ }
+
+- __flush_tlb_one(f->page);
++ __flush_tlb_one(f->addr);
+ return 0;
+ }
+
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ int ret;
+ WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ if (f->armed) {
+- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+- f->page, f->count, !!f->old_presence);
++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++ f->addr, f->count, !!f->old_presence);
+ }
+ ret = clear_page_presence(f, true);
+- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+- f->page);
++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++ f->addr);
+ f->armed = true;
+ return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ int ret = clear_page_presence(f, false);
+ WARN_ONCE(ret < 0,
+- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ f->armed = false;
+ }
+
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ struct kmmio_context *ctx;
+ struct kmmio_fault_page *faultpage;
+ int ret = 0; /* default to fault not handled */
++ unsigned long page_base = addr;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++ if (!pte)
++ return -EINVAL;
++ page_base &= page_level_mask(l);
+
+ /*
+ * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ preempt_disable();
+ rcu_read_lock();
+
+- faultpage = get_kmmio_fault_page(addr);
++ faultpage = get_kmmio_fault_page(page_base);
+ if (!faultpage) {
+ /*
+ * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+
+ ctx = &get_cpu_var(kmmio_ctx);
+ if (ctx->active) {
+- if (addr == ctx->addr) {
++ if (page_base == ctx->addr) {
+ /*
+ * A second fault on the same page means some other
+ * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ ctx->active++;
+
+ ctx->fpage = faultpage;
+- ctx->probe = get_kmmio_probe(addr);
++ ctx->probe = get_kmmio_probe(page_base);
+ ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+- ctx->addr = addr;
++ ctx->addr = page_base;
+
+ if (ctx->probe && ctx->probe->pre_handler)
+ ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (f) {
+ if (!f->count)
+ arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ return -1;
+
+ f->count = 1;
+- f->page = page;
++ f->addr = addr;
+
+ if (arm_kmmio_fault_page(f)) {
+ kfree(f);
+ return -1;
+ }
+
+- list_add_rcu(&f->list, kmmio_page_list(f->page));
++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
+
+ return 0;
+ }
+
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ struct kmmio_fault_page **release_list)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (!f)
+ return;
+
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ int ret = 0;
+ unsigned long size = 0;
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++ unsigned int l;
++ pte_t *pte;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ if (get_kmmio_probe(p->addr)) {
+ ret = -EEXIST;
+ goto out;
+ }
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ kmmio_count++;
+ list_add_rcu(&p->list, &kmmio_probes);
+ while (size < size_lim) {
+ if (add_kmmio_fault_page(p->addr + size))
+ pr_err("Unable to set page fault.\n");
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ out:
+ spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ struct kmmio_fault_page *release_list = NULL;
+ struct kmmio_delayed_release *drelease;
++ unsigned int l;
++ pte_t *pte;
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte)
++ return;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ while (size < size_lim) {
+ release_kmmio_fault_page(p->addr + size, &release_list);
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ list_del_rcu(&p->list);
+ kmmio_count--;
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index fefd01b496a0..cfcfe1b0ecbc 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -350,15 +350,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ goto out_del;
+ }
+
++ err = hd_ref_init(p);
++ if (err) {
++ if (flags & ADDPART_FLAG_WHOLEDISK)
++ goto out_remove_file;
++ goto out_del;
++ }
++
+ /* everything is up and running, commence */
+ rcu_assign_pointer(ptbl->part[partno], p);
+
+ /* suppress uevent if the disk suppresses it */
+ if (!dev_get_uevent_suppress(ddev))
+ kobject_uevent(&pdev->kobj, KOBJ_ADD);
+-
+- if (!hd_ref_init(p))
+- return p;
++ return p;
+
+ out_free_info:
+ free_part_info(p);
+@@ -367,6 +372,8 @@ out_free_stats:
+ out_free:
+ kfree(p);
+ return ERR_PTR(err);
++out_remove_file:
++ device_remove_file(pdev, &dev_attr_whole_disk);
+ out_del:
+ kobject_put(p->holder_dir);
+ device_del(pdev);
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 50f5c97e1087..0cbc5a5025c2 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -310,16 +310,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+- req_ctx->child_req.dst_len = ctx->key_size - 1;
++ req_ctx->child_req.dst_len = ctx->key_size ;
+
+- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+- ctx->key_size - 1, NULL);
++ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+@@ -491,16 +491,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
+ req_ctx->child_req.src = req->src;
+ req_ctx->child_req.src_len = req->src_len;
+ req_ctx->child_req.dst = req_ctx->out_sg;
+- req_ctx->child_req.dst_len = ctx->key_size - 1;
++ req_ctx->child_req.dst_len = ctx->key_size;
+
+- req_ctx->out_buf = kmalloc(ctx->key_size - 1,
++ req_ctx->out_buf = kmalloc(ctx->key_size,
+ (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req_ctx->out_buf)
+ return -ENOMEM;
+
+ pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
+- ctx->key_size - 1, NULL);
++ ctx->key_size, NULL);
+
+ akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
+ akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
+diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
+index bd75d46234a4..ddb436f86415 100644
+--- a/drivers/acpi/acpica/nsinit.c
++++ b/drivers/acpi/acpica/nsinit.c
+@@ -83,6 +83,8 @@ acpi_status acpi_ns_initialize_objects(void)
+
+ ACPI_FUNCTION_TRACE(ns_initialize_objects);
+
++ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
++ "[Init] Completing Initialization of ACPI Objects\n"));
+ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+ "**** Starting initialization of namespace objects ****\n"));
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
+index 278666e39563..c37d47982fbe 100644
+--- a/drivers/acpi/acpica/tbxfload.c
++++ b/drivers/acpi/acpica/tbxfload.c
+@@ -83,6 +83,20 @@ acpi_status __init acpi_load_tables(void)
+ "While loading namespace from ACPI tables"));
+ }
+
++ if (!acpi_gbl_group_module_level_code) {
++ /*
++ * Initialize the objects that remain uninitialized. This
++ * runs the executable AML that may be part of the
++ * declaration of these objects:
++ * operation_regions, buffer_fields, Buffers, and Packages.
++ */
++ status = acpi_ns_initialize_objects();
++ if (ACPI_FAILURE(status)) {
++ return_ACPI_STATUS(status);
++ }
++ }
++
++ acpi_gbl_reg_methods_enabled = TRUE;
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
+index 721b87cce908..638fbd4ad72b 100644
+--- a/drivers/acpi/acpica/utxfinit.c
++++ b/drivers/acpi/acpica/utxfinit.c
+@@ -267,7 +267,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
+ */
+- acpi_gbl_reg_methods_enabled = TRUE;
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
+@@ -299,20 +298,18 @@ acpi_status __init acpi_initialize_objects(u32 flags)
+ */
+ if (acpi_gbl_group_module_level_code) {
+ acpi_ns_exec_module_code_list();
+- }
+
+- /*
+- * Initialize the objects that remain uninitialized. This runs the
+- * executable AML that may be part of the declaration of these objects:
+- * operation_regions, buffer_fields, Buffers, and Packages.
+- */
+- if (!(flags & ACPI_NO_OBJECT_INIT)) {
+- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+- "[Init] Completing Initialization of ACPI Objects\n"));
+-
+- status = acpi_ns_initialize_objects();
+- if (ACPI_FAILURE(status)) {
+- return_ACPI_STATUS(status);
++ /*
++ * Initialize the objects that remain uninitialized. This
++ * runs the executable AML that may be part of the
++ * declaration of these objects:
++ * operation_regions, buffer_fields, Buffers, and Packages.
++ */
++ if (!(flags & ACPI_NO_OBJECT_INIT)) {
++ status = acpi_ns_initialize_objects();
++ if (ACPI_FAILURE(status)) {
++ return_ACPI_STATUS(status);
++ }
+ }
+ }
+
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 301b785f9f56..0caf92ae25f3 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1378,7 +1378,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ mutex_lock(&subdomain->lock);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
++ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index cf351d3dab1c..0708f301ad97 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -844,8 +844,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ }
+
+ opp->u_volt = microvolt[0];
+- opp->u_volt_min = microvolt[1];
+- opp->u_volt_max = microvolt[2];
++
++ if (count == 1) {
++ opp->u_volt_min = opp->u_volt;
++ opp->u_volt_max = opp->u_volt;
++ } else {
++ opp->u_volt_min = microvolt[1];
++ opp->u_volt_max = microvolt[2];
++ }
+
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 423f4ca7d712..80cf8add46ff 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+ bio_segments(bio), blk_rq_bytes(cmd->rq));
++ /*
++ * This bio may be started from the middle of the 'bvec'
++ * because of bio splitting, so offset from the bvec must
++ * be passed to iov iterator
++ */
++ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+
+ cmd->iocb.ki_pos = pos;
+ cmd->iocb.ki_filp = file;
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 562b5a4ca7b7..78a39f736c64 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 1740d75e8a32..216a94fed5b4 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+
+ */
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+
+ #include <asm/uaccess.h>
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ return ret;
+ }
+
+- for_each_child_of_node(pdev->dev.of_node, child) {
++ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ if (!child->name)
+ continue;
+
+diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
+index 834a2aeaf27a..350b7309c26d 100644
+--- a/drivers/bus/uniphier-system-bus.c
++++ b/drivers/bus/uniphier-system-bus.c
+@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
+
+ for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
+ for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
+- if (priv->bank[i].end > priv->bank[j].base ||
++ if (priv->bank[i].end > priv->bank[j].base &&
+ priv->bank[i].base < priv->bank[j].end) {
+ dev_err(priv->dev,
+ "region overlap between bank%d and bank%d\n",
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 45a634016f95..b28e4da3d2cf 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -20,7 +20,11 @@
+ #include <keys/trusted-type.h>
+
+ enum tpm2_object_attributes {
+- TPM2_ATTR_USER_WITH_AUTH = BIT(6),
++ TPM2_OA_USER_WITH_AUTH = BIT(6),
++};
++
++enum tpm2_session_attributes {
++ TPM2_SA_CONTINUE_SESSION = BIT(0),
+ };
+
+ struct tpm2_startup_in {
+@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
+ tpm_buf_append_u8(&buf, payload->migratable);
+
+ /* public */
+- if (options->policydigest)
+- tpm_buf_append_u16(&buf, 14 + options->digest_len);
+- else
+- tpm_buf_append_u16(&buf, 14);
+-
++ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* policy */
+- if (options->policydigest) {
++ if (options->policydigest_len) {
+ tpm_buf_append_u32(&buf, 0);
+- tpm_buf_append_u16(&buf, options->digest_len);
++ tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append(&buf, options->policydigest,
+- options->digest_len);
++ options->policydigest_len);
+ } else {
+- tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
++ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0);
+ }
+
+@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+- 0 /* session_attributes */,
++ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index cd83d477e32d..e89512383c3c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -673,6 +673,11 @@ static int core_get_max_pstate(void)
+ if (err)
+ goto skip_tar;
+
++ /* For level 1 and 2, bits[23:16] contain the ratio */
++ if (tdp_ctrl)
++ tdp_ratio >>= 16;
++
++ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ if (tdp_ratio - 1 == tar) {
+ max_pstate = tar;
+ pr_debug("max_pstate=TAC %x\n", max_pstate);
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 3d9acc53d247..60fc0fa26fd3 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8ef06fad8b14..ab9945f2cb7a 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index a0d4a08313ae..aae05547b924 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+ ptr->eptr = upper_32_bits(dma_addr);
+ }
+
++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
++ struct talitos_ptr *src_ptr, bool is_sec1)
++{
++ dst_ptr->ptr = src_ptr->ptr;
++ if (!is_sec1)
++ dst_ptr->eptr = src_ptr->eptr;
++}
++
+ static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
+ bool is_sec1)
+ {
+@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);
+-
+ /* hmac data */
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ areq->assoclen,
+ &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
+-
+ to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ sizeof(struct talitos_ptr), 0);
+ desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
++
++ tbl_off += ret;
+ } else {
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
+ desc->ptr[1].j_extent = 0;
+@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
+
+- if (sg_count > 1 &&
+- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+- sg_link_tbl_len,
+- &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
++ areq->assoclen, 0);
++ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
++ areq->assoclen, sg_link_tbl_len,
++ &edesc->link_tbl[tbl_off])) >
++ 1) {
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ tbl_off *
+@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+- } else
+- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
++ tbl_off += ret;
++ } else {
++ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* cipher out */
+ desc->ptr[5].len = cpu_to_be16(cryptlen);
+@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ edesc->icv_ool = false;
+
+- if (sg_count > 1 &&
+- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
++ areq->assoclen, 0);
++ } else if ((sg_count =
++ sg_to_link_tbl_offset(areq->dst, sg_count,
+ areq->assoclen, cryptlen,
+- &edesc->link_tbl[tbl_off])) >
+- 1) {
++ &edesc->link_tbl[tbl_off])) > 1) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ edesc->icv_ool = true;
+- } else
+- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
++ } else {
++ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* iv out */
+ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
+ struct talitos_alg_template algt;
+ };
+
+-static int talitos_cra_init(struct crypto_tfm *tfm)
++static int talitos_init_common(struct talitos_ctx *ctx,
++ struct talitos_crypto_alg *talitos_alg)
+ {
+- struct crypto_alg *alg = tfm->__crt_alg;
+- struct talitos_crypto_alg *talitos_alg;
+- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_private *priv;
+
+- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+- talitos_alg = container_of(__crypto_ahash_alg(alg),
+- struct talitos_crypto_alg,
+- algt.alg.hash);
+- else
+- talitos_alg = container_of(alg, struct talitos_crypto_alg,
+- algt.alg.crypto);
+-
+ /* update context with ptr to dev */
+ ctx->dev = talitos_alg->dev;
+
+@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
+ return 0;
+ }
+
++static int talitos_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *alg = tfm->__crt_alg;
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
++ talitos_alg = container_of(__crypto_ahash_alg(alg),
++ struct talitos_crypto_alg,
++ algt.alg.hash);
++ else
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.crypto);
++
++ return talitos_init_common(ctx, talitos_alg);
++}
++
+ static int talitos_cra_init_aead(struct crypto_aead *tfm)
+ {
+- talitos_cra_init(crypto_aead_tfm(tfm));
+- return 0;
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
++
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.aead);
++
++ return talitos_init_common(ctx, talitos_alg);
+ }
+
+ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 5ad0ec1f0e29..97199b3c25a2 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+- if (dws) {
+- /*
+- * We need controller-specific data to set up slave
+- * transfers.
+- */
+- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+- } else {
+- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+- }
++ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = param;
+
+- if (!dws || dws->dma_dev != chan->device->dev)
++ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ /* We have to copy data since dws can be temporary storage */
+@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ * doesn't mean what you think it means), and status writeback.
+ */
+
++ /*
++ * We need controller-specific data to set up slave transfers.
++ */
++ if (chan->private && !dw_dma_filter(chan, chan->private)) {
++ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++ return -EINVAL;
++ }
++
+ /* Enable controller here if needed */
+ if (!dw->in_use)
+ dw_dma_on(dw);
+@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ spin_lock_irqsave(&dwc->lock, flags);
+ list_splice_init(&dwc->free_list, &list);
+ dwc->descs_allocated = 0;
++
++ /* Clear custom channel configuration */
++ dwc->src_id = 0;
++ dwc->dst_id = 0;
++
++ dwc->src_master = 0;
++ dwc->dst_master = 0;
++
+ dwc->initialized = false;
+
+ /* Disable interrupts */
+diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
+index e3d7fcb69b4c..2dac314a2d7a 100644
+--- a/drivers/dma/edma.c
++++ b/drivers/dma/edma.c
+@@ -1563,32 +1563,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
+-{
+- struct platform_device *tc_pdev;
+- int ret;
+-
+- if (!IS_ENABLED(CONFIG_OF) || !tc)
+- return;
+-
+- tc_pdev = of_find_device_by_node(tc->node);
+- if (!tc_pdev) {
+- pr_err("%s: TPTC device is not found\n", __func__);
+- return;
+- }
+- if (!pm_runtime_enabled(&tc_pdev->dev))
+- pm_runtime_enable(&tc_pdev->dev);
+-
+- if (enable)
+- ret = pm_runtime_get_sync(&tc_pdev->dev);
+- else
+- ret = pm_runtime_put_sync(&tc_pdev->dev);
+-
+- if (ret < 0)
+- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
+- enable ? "get" : "put", dev_name(&tc_pdev->dev));
+-}
+-
+ /* Alloc channel resources */
+ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ {
+@@ -1625,8 +1599,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
+ EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
+ echan->hw_triggered ? "HW" : "SW");
+
+- edma_tc_set_pm_state(echan->tc, true);
+-
+ return 0;
+
+ err_slot:
+@@ -1663,7 +1635,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
+ echan->alloced = false;
+ }
+
+- edma_tc_set_pm_state(echan->tc, false);
+ echan->tc = NULL;
+ echan->hw_triggered = false;
+
+@@ -2408,10 +2379,8 @@ static int edma_pm_suspend(struct device *dev)
+ int i;
+
+ for (i = 0; i < ecc->num_channels; i++) {
+- if (echan[i].alloced) {
++ if (echan[i].alloced)
+ edma_setup_interrupt(&echan[i], false);
+- edma_tc_set_pm_state(echan[i].tc, false);
+- }
+ }
+
+ return 0;
+@@ -2441,8 +2410,6 @@ static int edma_pm_resume(struct device *dev)
+
+ /* Set up channel -> slot mapping for the entry slot */
+ edma_set_chmap(&echan[i], echan[i].slot[0]);
+-
+- edma_tc_set_pm_state(echan[i].tc, true);
+ }
+ }
+
+@@ -2466,7 +2433,8 @@ static struct platform_driver edma_driver = {
+
+ static int edma_tptc_probe(struct platform_device *pdev)
+ {
+- return 0;
++ pm_runtime_enable(&pdev->dev);
++ return pm_runtime_get_sync(&pdev->dev);
+ }
+
+ static struct platform_driver edma_tptc_driver = {
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index eef145edb936..025d375fc3d7 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+- return sr;
++ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
+ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
+ {
+ struct hsu_dma_desc *desc = hsuc->desc;
+- size_t bytes = desc->length;
++ size_t bytes = 0;
+ int i;
+
+- i = desc->active % HSU_DMA_CHAN_NR_DESC;
++ for (i = desc->active; i < desc->nents; i++)
++ bytes += desc->sg[i].len;
++
++ i = HSU_DMA_CHAN_NR_DESC - 1;
+ do {
+ bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
+ } while (--i >= 0);
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index 578a8ee8cd05..6b070c22b1df 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE BIT(15)
++#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA BIT(0)
+diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
+index 9794b073d7d7..a5ed9407c51b 100644
+--- a/drivers/dma/omap-dma.c
++++ b/drivers/dma/omap-dma.c
+@@ -48,6 +48,7 @@ struct omap_chan {
+ unsigned dma_sig;
+ bool cyclic;
+ bool paused;
++ bool running;
+
+ int dma_ch;
+ struct omap_desc *desc;
+@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
+
+ /* Enable channel */
+ omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
++
++ c->running = true;
+ }
+
+ static void omap_dma_stop(struct omap_chan *c)
+@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
+
+ omap_dma_chan_write(c, CLNK_CTRL, val);
+ }
++
++ c->running = false;
+ }
+
+ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+- uint32_t ccr;
+ unsigned long flags;
+
+- ccr = omap_dma_chan_read(c, CCR);
+- /* The channel is no longer active, handle the completion right away */
+- if (!(ccr & CCR_ENABLE))
+- omap_dma_callback(c->dma_ch, 0, c);
+-
+ ret = dma_cookie_status(chan, cookie, txstate);
++
++ if (!c->paused && c->running) {
++ uint32_t ccr = omap_dma_chan_read(c, CCR);
++ /*
++ * The channel is no longer active, set the return value
++ * accordingly
++ */
++ if (!(ccr & CCR_ENABLE))
++ ret = DMA_COMPLETE;
++ }
++
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
1535 +diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
1536 +index debca824bed6..77c1c44009d8 100644
1537 +--- a/drivers/dma/pxa_dma.c
1538 ++++ b/drivers/dma/pxa_dma.c
1539 +@@ -122,6 +122,7 @@ struct pxad_chan {
1540 + struct pxad_device {
1541 + struct dma_device slave;
1542 + int nr_chans;
1543 ++ int nr_requestors;
1544 + void __iomem *base;
1545 + struct pxad_phy *phys;
1546 + spinlock_t phy_lock; /* Phy association */
1547 +@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
1548 + return;
1549 +
1550 + /* clear the channel mapping in DRCMR */
1551 +- if (chan->drcmr <= DRCMR_CHLNUM) {
1552 ++ if (chan->drcmr <= pdev->nr_requestors) {
1553 + reg = pxad_drcmr(chan->drcmr);
1554 + writel_relaxed(0, chan->phy->base + reg);
1555 + }
1556 +@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
1557 +
1558 + static void phy_enable(struct pxad_phy *phy, bool misaligned)
1559 + {
1560 ++ struct pxad_device *pdev;
1561 + u32 reg, dalgn;
1562 +
1563 + if (!phy->vchan)
1564 +@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
1565 + "%s(); phy=%p(%d) misaligned=%d\n", __func__,
1566 + phy, phy->idx, misaligned);
1567 +
1568 +- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
1569 ++ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
1570 ++ if (phy->vchan->drcmr <= pdev->nr_requestors) {
1571 + reg = pxad_drcmr(phy->vchan->drcmr);
1572 + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
1573 + }
1574 +@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1575 + {
1576 + u32 maxburst = 0, dev_addr = 0;
1577 + enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
1578 ++ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
1579 +
1580 + *dcmd = 0;
1581 + if (dir == DMA_DEV_TO_MEM) {
1582 +@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1583 + dev_addr = chan->cfg.src_addr;
1584 + *dev_src = dev_addr;
1585 + *dcmd |= PXA_DCMD_INCTRGADDR;
1586 +- if (chan->drcmr <= DRCMR_CHLNUM)
1587 ++ if (chan->drcmr <= pdev->nr_requestors)
1588 + *dcmd |= PXA_DCMD_FLOWSRC;
1589 + }
1590 + if (dir == DMA_MEM_TO_DEV) {
1591 +@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
1592 + dev_addr = chan->cfg.dst_addr;
1593 + *dev_dst = dev_addr;
1594 + *dcmd |= PXA_DCMD_INCSRCADDR;
1595 +- if (chan->drcmr <= DRCMR_CHLNUM)
1596 ++ if (chan->drcmr <= pdev->nr_requestors)
1597 + *dcmd |= PXA_DCMD_FLOWTRG;
1598 + }
1599 + if (dir == DMA_MEM_TO_MEM)
1600 +@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
1601 +
1602 + static int pxad_init_dmadev(struct platform_device *op,
1603 + struct pxad_device *pdev,
1604 +- unsigned int nr_phy_chans)
1605 ++ unsigned int nr_phy_chans,
1606 ++ unsigned int nr_requestors)
1607 + {
1608 + int ret;
1609 + unsigned int i;
1610 + struct pxad_chan *c;
1611 +
1612 + pdev->nr_chans = nr_phy_chans;
1613 ++ pdev->nr_requestors = nr_requestors;
1614 + INIT_LIST_HEAD(&pdev->slave.channels);
1615 + pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
1616 + pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
1617 +@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
1618 + const struct of_device_id *of_id;
1619 + struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1620 + struct resource *iores;
1621 +- int ret, dma_channels = 0;
1622 ++ int ret, dma_channels = 0, nb_requestors = 0;
1623 + const enum dma_slave_buswidth widths =
1624 + DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1625 + DMA_SLAVE_BUSWIDTH_4_BYTES;
1626 +@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
1627 + return PTR_ERR(pdev->base);
1628 +
1629 + of_id = of_match_device(pxad_dt_ids, &op->dev);
1630 +- if (of_id)
1631 ++ if (of_id) {
1632 + of_property_read_u32(op->dev.of_node, "#dma-channels",
1633 + &dma_channels);
1634 +- else if (pdata && pdata->dma_channels)
1635 ++ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
1636 ++ &nb_requestors);
1637 ++ if (ret) {
1638 ++ dev_warn(pdev->slave.dev,
1639 ++ "#dma-requests set to default 32 as missing in OF: %d",
1640 ++ ret);
1641 ++ nb_requestors = 32;
1642 ++ }
1643 ++ } else if (pdata && pdata->dma_channels) {
1644 + dma_channels = pdata->dma_channels;
1645 +- else
1646 ++ nb_requestors = pdata->nb_requestors;
1647 ++ } else {
1648 + dma_channels = 32; /* default 32 channel */
1649 ++ }
1650 +
1651 + dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
1652 + dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
1653 +@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
1654 + pdev->slave.descriptor_reuse = true;
1655 +
1656 + pdev->slave.dev = &op->dev;
1657 +- ret = pxad_init_dmadev(op, pdev, dma_channels);
1658 ++ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
1659 + if (ret) {
1660 + dev_err(pdev->slave.dev, "unable to register\n");
1661 + return ret;
1662 +@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
1663 +
1664 + platform_set_drvdata(op, pdev);
1665 + pxad_init_debugfs(pdev);
1666 +- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
1667 ++ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
1668 ++ dma_channels, nb_requestors);
1669 + return 0;
1670 + }
1671 +
1672 +diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1673 +index 01087a38da22..792bdae2b91d 100644
1674 +--- a/drivers/edac/i7core_edac.c
1675 ++++ b/drivers/edac/i7core_edac.c
1676 +@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1677 +
1678 + i7_dev = get_i7core_dev(mce->socketid);
1679 + if (!i7_dev)
1680 +- return NOTIFY_BAD;
1681 ++ return NOTIFY_DONE;
1682 +
1683 + mci = i7_dev->mci;
1684 + pvt = mci->pvt_info;
1685 +diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1686 +index 93f0d4120289..8bf745d2da7e 100644
1687 +--- a/drivers/edac/sb_edac.c
1688 ++++ b/drivers/edac/sb_edac.c
1689 +@@ -362,6 +362,7 @@ struct sbridge_pvt {
1690 +
1691 + /* Memory type detection */
1692 + bool is_mirrored, is_lockstep, is_close_pg;
1693 ++ bool is_chan_hash;
1694 +
1695 + /* Fifo double buffers */
1696 + struct mce mce_entry[MCE_LOG_LEN];
1697 +@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
1698 + return (pkg >> 2) & 0x1;
1699 + }
1700 +
1701 ++static int haswell_chan_hash(int idx, u64 addr)
1702 ++{
1703 ++ int i;
1704 ++
1705 ++ /*
1706 ++ * XOR even bits from 12:26 to bit0 of idx,
1707 ++ * odd bits from 13:27 to bit1
1708 ++ */
1709 ++ for (i = 12; i < 28; i += 2)
1710 ++ idx ^= (addr >> i) & 3;
1711 ++
1712 ++ return idx;
1713 ++}
1714 ++
1715 + /****************************************************************************
1716 + Memory check routines
1717 + ****************************************************************************/
1718 +@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
1719 + KNL_MAX_CHANNELS : NUM_CHANNELS;
1720 + u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1721 +
1722 ++ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1723 ++ pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
1724 ++ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1725 ++ }
1726 + if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1727 + pvt->info.type == KNIGHTS_LANDING)
1728 + pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1729 +@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1730 + }
1731 +
1732 + ch_way = TAD_CH(reg) + 1;
1733 +- sck_way = 1 << TAD_SOCK(reg);
1734 ++ sck_way = TAD_SOCK(reg);
1735 +
1736 + if (ch_way == 3)
1737 + idx = addr >> 6;
1738 +- else
1739 ++ else {
1740 + idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
1741 ++ if (pvt->is_chan_hash)
1742 ++ idx = haswell_chan_hash(idx, addr);
1743 ++ }
1744 + idx = idx % ch_way;
1745 +
1746 + /*
1747 +@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1748 + switch(ch_way) {
1749 + case 2:
1750 + case 4:
1751 +- sck_xch = 1 << sck_way * (ch_way >> 1);
1752 ++ sck_xch = (1 << sck_way) * (ch_way >> 1);
1753 + break;
1754 + default:
1755 + sprintf(msg, "Invalid mirror set. Can't decode addr");
1756 +@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1757 +
1758 + ch_addr = addr - offset;
1759 + ch_addr >>= (6 + shiftup);
1760 +- ch_addr /= ch_way * sck_way;
1761 ++ ch_addr /= sck_xch;
1762 + ch_addr <<= (6 + shiftup);
1763 + ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
1764 +
1765 +@@ -3146,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
1766 +
1767 + mci = get_mci_for_node_id(mce->socketid);
1768 + if (!mci)
1769 +- return NOTIFY_BAD;
1770 ++ return NOTIFY_DONE;
1771 + pvt = mci->pvt_info;
1772 +
1773 + /*
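The new hash only kicks in on Haswell/Broadwell parts with channel hashing enabled (HASYSDEFEATURE2 bit 21, read above): even bits of addr[12:26] fold into bit 0 of the channel index and odd bits of addr[13:27] into bit 1. The folding can be checked standalone (same loop body as haswell_chan_hash() above, userspace types):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors haswell_chan_hash() above: XOR addr bits two at a time
     * (bits 12/13, 14/15, ... 26/27) into the low two bits of idx. */
    static int chan_hash(int idx, uint64_t addr)
    {
        int i;

        for (i = 12; i < 28; i += 2)
            idx ^= (addr >> i) & 3;

        return idx;
    }

    int main(void)
    {
        /* two addresses differing only in bit 12 hash to different idx */
        printf("%d %d\n", chan_hash(0, 0x100000), chan_hash(0, 0x101000));
        return 0;
    }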
1774 +diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
1775 +index 74dfb7f4f277..d8cac4661cfe 100644
1776 +--- a/drivers/extcon/extcon-max77843.c
1777 ++++ b/drivers/extcon/extcon-max77843.c
1778 +@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
1779 + /* Clear IRQ bits before request IRQs */
1780 + ret = regmap_bulk_read(max77843->regmap_muic,
1781 + MAX77843_MUIC_REG_INT1, info->status,
1782 +- MAX77843_MUIC_IRQ_NUM);
1783 ++ MAX77843_MUIC_STATUS_NUM);
1784 + if (ret) {
1785 + dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
1786 + goto err_muic_irq;
1787 +diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
1788 +index 9e15d571b53c..a76c35fc0b92 100644
1789 +--- a/drivers/firmware/efi/arm-init.c
1790 ++++ b/drivers/firmware/efi/arm-init.c
1791 +@@ -203,7 +203,19 @@ void __init efi_init(void)
1792 +
1793 + reserve_regions();
1794 + early_memunmap(memmap.map, params.mmap_size);
1795 +- memblock_mark_nomap(params.mmap & PAGE_MASK,
1796 +- PAGE_ALIGN(params.mmap_size +
1797 +- (params.mmap & ~PAGE_MASK)));
1798 ++
1799 ++ if (IS_ENABLED(CONFIG_ARM)) {
1800 ++ /*
1801 ++ * ARM currently does not allow ioremap_cache() to be called on
1802 ++ * memory regions that are covered by struct page. So remove the
1803 ++ * UEFI memory map from the linear mapping.
1804 ++ */
1805 ++ memblock_mark_nomap(params.mmap & PAGE_MASK,
1806 ++ PAGE_ALIGN(params.mmap_size +
1807 ++ (params.mmap & ~PAGE_MASK)));
1808 ++ } else {
1809 ++ memblock_reserve(params.mmap & PAGE_MASK,
1810 ++ PAGE_ALIGN(params.mmap_size +
1811 ++ (params.mmap & ~PAGE_MASK)));
1812 ++ }
1813 + }
1814 +diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1815 +index 2cd37dad67a6..c51f3b2fe3c0 100644
1816 +--- a/drivers/firmware/efi/efi.c
1817 ++++ b/drivers/firmware/efi/efi.c
1818 +@@ -182,6 +182,7 @@ static int generic_ops_register(void)
1819 + {
1820 + generic_ops.get_variable = efi.get_variable;
1821 + generic_ops.set_variable = efi.set_variable;
1822 ++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
1823 + generic_ops.get_next_variable = efi.get_next_variable;
1824 + generic_ops.query_variable_store = efi_query_variable_store;
1825 +
1826 +diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1827 +index 7f2ea21c730d..6f182fd91a6d 100644
1828 +--- a/drivers/firmware/efi/vars.c
1829 ++++ b/drivers/firmware/efi/vars.c
1830 +@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
1831 + { NULL_GUID, "", NULL },
1832 + };
1833 +
1834 ++/*
1835 ++ * Check if @var_name matches the pattern given in @match_name.
1836 ++ *
1837 ++ * @var_name: an array of @len non-NUL characters.
1838 ++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
1839 ++ * final "*" character matches any trailing characters @var_name,
1840 ++ * including the case when there are none left in @var_name.
1841 ++ * @match: on output, the number of non-wildcard characters in @match_name
1842 ++ * that @var_name matches, regardless of the return value.
1843 ++ * @return: whether @var_name fully matches @match_name.
1844 ++ */
1845 + static bool
1846 + variable_matches(const char *var_name, size_t len, const char *match_name,
1847 + int *match)
1848 + {
1849 + for (*match = 0; ; (*match)++) {
1850 + char c = match_name[*match];
1851 +- char u = var_name[*match];
1852 +
1853 +- /* Wildcard in the matching name means we've matched */
1854 +- if (c == '*')
1855 ++ switch (c) {
1856 ++ case '*':
1857 ++ /* Wildcard in @match_name means we've matched. */
1858 + return true;
1859 +
1860 +- /* Case sensitive match */
1861 +- if (!c && *match == len)
1862 +- return true;
1863 ++ case '\0':
1864 ++ /* @match_name has ended. Has @var_name too? */
1865 ++ return (*match == len);
1866 +
1867 +- if (c != u)
1868 ++ default:
1869 ++ /*
1870 ++ * We've reached a non-wildcard char in @match_name.
1871 ++ * Continue only if there's an identical character in
1872 ++ * @var_name.
1873 ++ */
1874 ++ if (*match < len && c == var_name[*match])
1875 ++ continue;
1876 + return false;
1877 +-
1878 +- if (!c)
1879 +- return true;
1880 ++ }
1881 + }
1882 +- return true;
1883 + }
1884 +
1885 + bool
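The rewrite gives variable_matches() a precise, documented contract: @var_name is length-delimited rather than NUL-terminated, and a trailing "*" in @match_name matches any tail, even an empty one. A userspace restatement of the same logic, handy for quick testing (demo only, not the kernel build):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Same logic as the rewritten variable_matches() above, with
     * userspace types: var_name is len non-NUL chars, match_name is
     * NUL-terminated and may end in a '*' wildcard. */
    static bool matches(const char *var_name, size_t len,
                        const char *match_name, int *match)
    {
        for (*match = 0; ; (*match)++) {
            char c = match_name[*match];

            switch (c) {
            case '*':
                return true;                  /* wildcard: matched */
            case '\0':
                return *match == (int)len;    /* both names ended together? */
            default:
                if ((size_t)*match < len && c == var_name[*match])
                    continue;
                return false;
            }
        }
    }

    int main(void)
    {
        int m;

        printf("%d\n", matches("Boot0000", 8, "Boot*", &m));         /* 1 */
        printf("%d\n", matches("BootCurrent", 11, "BootOrder", &m)); /* 0 */
        return 0;
    }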
1886 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1887 +index 5e7770f9a415..ff299752d5e6 100644
1888 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1889 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1890 +@@ -1619,6 +1619,7 @@ struct amdgpu_uvd {
1891 + struct amdgpu_bo *vcpu_bo;
1892 + void *cpu_addr;
1893 + uint64_t gpu_addr;
1894 ++ unsigned fw_version;
1895 + atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1896 + struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1897 + struct delayed_work idle_work;
1898 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1899 +index 81dc6b65436f..3c895863fcf5 100644
1900 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1901 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1902 +@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
1903 + return amdgpu_atpx_priv.atpx_detected;
1904 + }
1905 +
1906 +-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
1907 +- return amdgpu_atpx_priv.atpx.functions.power_cntl;
1908 +-}
1909 +-
1910 + /**
1911 + * amdgpu_atpx_call - call an ATPX method
1912 + *
1913 +@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
1914 + */
1915 + static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
1916 + {
1917 ++ /* make sure required functions are enabled */
1918 ++ /* dGPU power control is required */
1919 ++ atpx->functions.power_cntl = true;
1920 ++
1921 + if (atpx->functions.px_params) {
1922 + union acpi_object *info;
1923 + struct atpx_px_params output;
1924 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1925 +index d6c68d00cbb0..51bfc114584e 100644
1926 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1927 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1928 +@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
1929 + "LAST",
1930 + };
1931 +
1932 +-#if defined(CONFIG_VGA_SWITCHEROO)
1933 +-bool amdgpu_has_atpx_dgpu_power_cntl(void);
1934 +-#else
1935 +-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1936 +-#endif
1937 +-
1938 + bool amdgpu_device_is_px(struct drm_device *dev)
1939 + {
1940 + struct amdgpu_device *adev = dev->dev_private;
1941 +@@ -1517,7 +1511,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1942 +
1943 + if (amdgpu_runtime_pm == 1)
1944 + runtime = true;
1945 +- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
1946 ++ if (amdgpu_device_is_px(ddev))
1947 + runtime = true;
1948 + vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1949 + if (runtime)
1950 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1951 +index e23843f4d877..4488e82f87b0 100644
1952 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1953 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1954 +@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1955 + fw_info.feature = adev->vce.fb_version;
1956 + break;
1957 + case AMDGPU_INFO_FW_UVD:
1958 +- fw_info.ver = 0;
1959 ++ fw_info.ver = adev->uvd.fw_version;
1960 + fw_info.feature = 0;
1961 + break;
1962 + case AMDGPU_INFO_FW_GMC:
1963 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1964 +index fdc1be8550da..3b2d75d96ea0 100644
1965 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1966 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1967 +@@ -53,7 +53,7 @@ struct amdgpu_hpd;
1968 +
1969 + #define AMDGPU_MAX_HPD_PINS 6
1970 + #define AMDGPU_MAX_CRTCS 6
1971 +-#define AMDGPU_MAX_AFMT_BLOCKS 7
1972 ++#define AMDGPU_MAX_AFMT_BLOCKS 9
1973 +
1974 + enum amdgpu_rmx_type {
1975 + RMX_OFF,
1976 +@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
1977 + struct atom_context *atom_context;
1978 + struct card_info *atom_card_info;
1979 + bool mode_config_initialized;
1980 +- struct amdgpu_crtc *crtcs[6];
1981 +- struct amdgpu_afmt *afmt[7];
1982 ++ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
1983 ++ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
1984 + /* DVI-I properties */
1985 + struct drm_property *coherent_mode_property;
1986 + /* DAC enable load detect */
1987 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1988 +index 53f987aeeacf..3b35ad83867c 100644
1989 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1990 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1991 +@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
1992 + DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
1993 + version_major, version_minor, family_id);
1994 +
1995 ++ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
1996 ++ (family_id << 8));
1997 ++
1998 + bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
1999 + + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
2000 + r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
2001 +@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
2002 + memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
2003 + (adev->uvd.fw->size) - offset);
2004 +
2005 ++ cancel_delayed_work_sync(&adev->uvd.idle_work);
2006 ++
2007 + size = amdgpu_bo_size(adev->uvd.vcpu_bo);
2008 + size -= le32_to_cpu(hdr->ucode_size_bytes);
2009 + ptr = adev->uvd.cpu_addr;
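With this change, userspace asking for AMDGPU_INFO_FW_UVD gets the real firmware version instead of 0, packed as (major << 24) | (minor << 16) | (family << 8). A small demo of the packing and the matching decode (demo helpers, not driver API):

    #include <stdio.h>

    /* The shifts mirror the fw_version assignment in the hunk above. */
    static unsigned pack_fw_version(unsigned major, unsigned minor,
                                    unsigned family)
    {
        return (major << 24) | (minor << 16) | (family << 8);
    }

    int main(void)
    {
        unsigned v = pack_fw_version(1, 64, 8);

        printf("major=%u minor=%u family=%u\n",
               (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
        return 0;
    }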
2010 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2011 +index a745eeeb5d82..bb0da76051a1 100644
2012 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2013 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2014 +@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
2015 + if (i == AMDGPU_MAX_VCE_HANDLES)
2016 + return 0;
2017 +
2018 ++ cancel_delayed_work_sync(&adev->vce.idle_work);
2019 + /* TODO: suspending running encoding sessions isn't supported */
2020 + return -EINVAL;
2021 + }
2022 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2023 +index 06602df707f8..9b1c43005c80 100644
2024 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2025 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
2026 +@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
2027 + unsigned vm_id, uint64_t pd_addr)
2028 + {
2029 + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
2030 +- uint32_t seq = ring->fence_drv.sync_seq;
2031 ++ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
2032 + uint64_t addr = ring->fence_drv.gpu_addr;
2033 +
2034 + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2035 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2036 +index c34c393e9aea..d5e19b5fbbfb 100644
2037 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2038 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
2039 +@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
2040 + union SQ_CMD_BITS *in_reg_sq_cmd,
2041 + union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
2042 + {
2043 +- int status;
2044 ++ int status = 0;
2045 + union SQ_CMD_BITS reg_sq_cmd;
2046 + union GRBM_GFX_INDEX_BITS reg_gfx_index;
2047 + struct HsaDbgWaveMsgAMDGen2 *pMsg;
2048 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2049 +index 27fbd79d0daf..71ea0521ea96 100644
2050 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
2051 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2052 +@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2053 + u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2054 + int i;
2055 +
2056 ++ port = drm_dp_get_validated_port_ref(mgr, port);
2057 ++ if (!port)
2058 ++ return -EINVAL;
2059 ++
2060 + port_num = port->port_num;
2061 + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2062 + if (!mstb) {
2063 + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
2064 +
2065 +- if (!mstb)
2066 ++ if (!mstb) {
2067 ++ drm_dp_put_port(port);
2068 + return -EINVAL;
2069 ++ }
2070 + }
2071 +
2072 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2073 +@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2074 + kfree(txmsg);
2075 + fail_put:
2076 + drm_dp_put_mst_branch_device(mstb);
2077 ++ drm_dp_put_port(port);
2078 + return ret;
2079 + }
2080 +
2081 +@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2082 + req_payload.start_slot = cur_slots;
2083 + if (mgr->proposed_vcpis[i]) {
2084 + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2085 ++ port = drm_dp_get_validated_port_ref(mgr, port);
2086 ++ if (!port) {
2087 ++ mutex_unlock(&mgr->payload_lock);
2088 ++ return -EINVAL;
2089 ++ }
2090 + req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
2091 + req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
2092 + } else {
2093 +@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2094 + mgr->payloads[i].payload_state = req_payload.payload_state;
2095 + }
2096 + cur_slots += req_payload.num_slots;
2097 ++
2098 ++ if (port)
2099 ++ drm_dp_put_port(port);
2100 + }
2101 +
2102 + for (i = 0; i < mgr->max_payloads; i++) {
2103 +@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2104 +
2105 + if (mgr->mst_primary) {
2106 + int sret;
2107 ++ u8 guid[16];
2108 ++
2109 + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2110 + if (sret != DP_RECEIVER_CAP_SIZE) {
2111 + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2112 +@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2113 + ret = -1;
2114 + goto out_unlock;
2115 + }
2116 ++
2117 ++ /* Some hubs forget their guids after they resume */
2118 ++ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2119 ++ if (sret != 16) {
2120 ++ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2121 ++ ret = -1;
2122 ++ goto out_unlock;
2123 ++ }
2124 ++ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2125 ++
2126 + ret = 0;
2127 + } else
2128 + ret = -1;
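All three topology hunks enforce one discipline: take a validated reference on the port before dereferencing it, and drop that reference on every exit path, error paths included, so a port unplugged mid-operation cannot be used after free. The pattern reduced to stubs (not the drm_dp API):

    #include <stdio.h>

    /* Stubbed refcount discipline matching the fix above: validate-and-get
     * before use, put on every path out. */
    struct port { int refs; int valid; };

    static struct port *get_validated_port(struct port *p)
    {
        if (!p || !p->valid)
            return NULL;        /* port vanished: caller must bail out */
        p->refs++;
        return p;
    }

    static void put_port(struct port *p)
    {
        p->refs--;
    }

    static int send_payload_msg(struct port *p)
    {
        p = get_validated_port(p);
        if (!p)
            return -22;         /* -EINVAL, as in the hunk above */
        /* ... build and queue the sideband message ... */
        put_port(p);            /* matched even on the success path */
        return 0;
    }

    int main(void)
    {
        struct port gone = { 0, 0 }, live = { 0, 1 };

        printf("%d %d\n", send_payload_msg(&gone), send_payload_msg(&live));
        return 0;
    }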
2129 +diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
2130 +index 647d85e77c2f..597cfb5ca847 100644
2131 +--- a/drivers/gpu/drm/i915/intel_csr.c
2132 ++++ b/drivers/gpu/drm/i915/intel_csr.c
2133 +@@ -177,7 +177,8 @@ static const struct stepping_info kbl_stepping_info[] = {
2134 + static const struct stepping_info skl_stepping_info[] = {
2135 + {'A', '0'}, {'B', '0'}, {'C', '0'},
2136 + {'D', '0'}, {'E', '0'}, {'F', '0'},
2137 +- {'G', '0'}, {'H', '0'}, {'I', '0'}
2138 ++ {'G', '0'}, {'H', '0'}, {'I', '0'},
2139 ++ {'J', '0'}, {'K', '0'}
2140 + };
2141 +
2142 + static const struct stepping_info bxt_stepping_info[] = {
2143 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2144 +index 46947fffd599..a9c35134f2e2 100644
2145 +--- a/drivers/gpu/drm/i915/intel_display.c
2146 ++++ b/drivers/gpu/drm/i915/intel_display.c
2147 +@@ -4455,7 +4455,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
2148 + intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
2149 +
2150 + return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
2151 +- &state->scaler_state.scaler_id, DRM_ROTATE_0,
2152 ++ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
2153 + state->pipe_src_w, state->pipe_src_h,
2154 + adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
2155 + }
2156 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
2157 +index fa0dabf578dc..db6361b5a6ab 100644
2158 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
2159 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
2160 +@@ -184,7 +184,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
2161 + intel_mst->port = found->port;
2162 +
2163 + if (intel_dp->active_mst_links == 0) {
2164 +- intel_ddi_clk_select(encoder, intel_crtc->config);
2165 ++ intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
2166 +
2167 + intel_dp_set_link_params(intel_dp, intel_crtc->config);
2168 +
2169 +@@ -499,6 +499,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2170 + struct intel_connector *intel_connector = to_intel_connector(connector);
2171 + struct drm_device *dev = connector->dev;
2172 +
2173 ++ intel_connector->unregister(intel_connector);
2174 ++
2175 + /* need to nuke the connector */
2176 + drm_modeset_lock_all(dev);
2177 + if (connector->state->crtc) {
2178 +@@ -512,11 +514,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2179 +
2180 + WARN(ret, "Disabling mst crtc failed with %i\n", ret);
2181 + }
2182 +- drm_modeset_unlock_all(dev);
2183 +
2184 +- intel_connector->unregister(intel_connector);
2185 +-
2186 +- drm_modeset_lock_all(dev);
2187 + intel_connector_remove_from_fbdev(intel_connector);
2188 + drm_connector_cleanup(connector);
2189 + drm_modeset_unlock_all(dev);
2190 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
2191 +index f1fa756c5d5d..cfd5f9fff2f4 100644
2192 +--- a/drivers/gpu/drm/i915/intel_lrc.c
2193 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
2194 +@@ -781,11 +781,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
2195 + if (unlikely(total_bytes > remain_usable)) {
2196 + /*
2197 + * The base request will fit but the reserved space
2198 +- * falls off the end. So only need to to wait for the
2199 +- * reserved size after flushing out the remainder.
2200 ++ * falls off the end. So don't need an immediate wrap
2201 ++ * and only need to effectively wait for the reserved
2202 ++ * size space from the start of ringbuffer.
2203 + */
2204 + wait_bytes = remain_actual + ringbuf->reserved_size;
2205 +- need_wrap = true;
2206 + } else if (total_bytes > ringbuf->space) {
2207 + /* No wrapping required, just waiting. */
2208 + wait_bytes = total_bytes;
2209 +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
2210 +index b28c29f20e75..7e4a9842b9ea 100644
2211 +--- a/drivers/gpu/drm/i915/intel_pm.c
2212 ++++ b/drivers/gpu/drm/i915/intel_pm.c
2213 +@@ -2281,6 +2281,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
2214 + return PTR_ERR(cstate);
2215 +
2216 + pipe_wm = &cstate->wm.optimal.ilk;
2217 ++ memset(pipe_wm, 0, sizeof(*pipe_wm));
2218 +
2219 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2220 + ps = drm_atomic_get_plane_state(state,
2221 +@@ -3606,23 +3607,43 @@ static void skl_update_wm(struct drm_crtc *crtc)
2222 + dev_priv->wm.skl_hw = *results;
2223 + }
2224 +
2225 ++static void ilk_compute_wm_config(struct drm_device *dev,
2226 ++ struct intel_wm_config *config)
2227 ++{
2228 ++ struct intel_crtc *crtc;
2229 ++
2230 ++ /* Compute the currently _active_ config */
2231 ++ for_each_intel_crtc(dev, crtc) {
2232 ++ const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
2233 ++
2234 ++ if (!wm->pipe_enabled)
2235 ++ continue;
2236 ++
2237 ++ config->sprites_enabled |= wm->sprites_enabled;
2238 ++ config->sprites_scaled |= wm->sprites_scaled;
2239 ++ config->num_pipes_active++;
2240 ++ }
2241 ++}
2242 ++
2243 + static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
2244 + {
2245 + struct drm_device *dev = dev_priv->dev;
2246 + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2247 + struct ilk_wm_maximums max;
2248 +- struct intel_wm_config *config = &dev_priv->wm.config;
2249 ++ struct intel_wm_config config = {};
2250 + struct ilk_wm_values results = {};
2251 + enum intel_ddb_partitioning partitioning;
2252 +
2253 +- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
2254 +- ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
2255 ++ ilk_compute_wm_config(dev, &config);
2256 ++
2257 ++ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2258 ++ ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2259 +
2260 + /* 5/6 split only in single pipe config on IVB+ */
2261 + if (INTEL_INFO(dev)->gen >= 7 &&
2262 +- config->num_pipes_active == 1 && config->sprites_enabled) {
2263 +- ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
2264 +- ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
2265 ++ config.num_pipes_active == 1 && config.sprites_enabled) {
2266 ++ ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2267 ++ ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2268 +
2269 + best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2270 + } else {
2271 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
2272 +index 40c6aff57256..549afa7bc75f 100644
2273 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
2274 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
2275 +@@ -951,7 +951,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
2276 +
2277 + /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
2278 + tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
2279 +- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
2280 ++ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
2281 + IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
2282 + tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
2283 + WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
2284 +@@ -1044,7 +1044,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
2285 + WA_SET_BIT_MASKED(HIZ_CHICKEN,
2286 + BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
2287 +
2288 +- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
2289 ++ /* This is tied to WaForceContextSaveRestoreNonCoherent */
2290 ++ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
2291 + /*
2292 + *Use Force Non-Coherent whenever executing a 3D context. This
2293 + * is a workaround for a possible hang in the unlikely event
2294 +@@ -1901,6 +1902,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
2295 + return 0;
2296 + }
2297 +
2298 ++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
2299 ++{
2300 ++ struct drm_i915_private *dev_priv = to_i915(ring->dev);
2301 ++
2302 ++ if (!dev_priv->status_page_dmah)
2303 ++ return;
2304 ++
2305 ++ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
2306 ++ ring->status_page.page_addr = NULL;
2307 ++}
2308 ++
2309 + static void cleanup_status_page(struct intel_engine_cs *ring)
2310 + {
2311 + struct drm_i915_gem_object *obj;
2312 +@@ -1917,9 +1929,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
2313 +
2314 + static int init_status_page(struct intel_engine_cs *ring)
2315 + {
2316 +- struct drm_i915_gem_object *obj;
2317 ++ struct drm_i915_gem_object *obj = ring->status_page.obj;
2318 +
2319 +- if ((obj = ring->status_page.obj) == NULL) {
2320 ++ if (obj == NULL) {
2321 + unsigned flags;
2322 + int ret;
2323 +
2324 +@@ -2019,10 +2031,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2325 + {
2326 + struct drm_i915_private *dev_priv = to_i915(dev);
2327 + struct drm_i915_gem_object *obj = ringbuf->obj;
2328 ++ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2329 ++ unsigned flags = PIN_OFFSET_BIAS | 4096;
2330 + int ret;
2331 +
2332 + if (HAS_LLC(dev_priv) && !obj->stolen) {
2333 +- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
2334 ++ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
2335 + if (ret)
2336 + return ret;
2337 +
2338 +@@ -2038,7 +2052,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2339 + return -ENOMEM;
2340 + }
2341 + } else {
2342 +- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
2343 ++ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2344 ++ flags | PIN_MAPPABLE);
2345 + if (ret)
2346 + return ret;
2347 +
2348 +@@ -2164,7 +2179,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2349 + if (ret)
2350 + goto error;
2351 + } else {
2352 +- BUG_ON(ring->id != RCS);
2353 ++ WARN_ON(ring->id != RCS);
2354 + ret = init_phys_status_page(ring);
2355 + if (ret)
2356 + goto error;
2357 +@@ -2210,7 +2225,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
2358 + if (ring->cleanup)
2359 + ring->cleanup(ring);
2360 +
2361 +- cleanup_status_page(ring);
2362 ++ if (I915_NEED_GFX_HWS(ring->dev)) {
2363 ++ cleanup_status_page(ring);
2364 ++ } else {
2365 ++ WARN_ON(ring->id != RCS);
2366 ++ cleanup_phys_status_page(ring);
2367 ++ }
2368 +
2369 + i915_cmd_parser_fini_ring(ring);
2370 + i915_gem_batch_pool_fini(&ring->batch_pool);
2371 +@@ -2373,11 +2393,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2372 + if (unlikely(total_bytes > remain_usable)) {
2373 + /*
2374 + * The base request will fit but the reserved space
2375 +- * falls off the end. So only need to to wait for the
2376 +- * reserved size after flushing out the remainder.
2377 ++ * falls off the end. So no immediate wrap is needed; we only
2378 ++ * need to wait for the reserved size, measured from the
2379 ++ * start of the ringbuffer.
2380 + */
2381 + wait_bytes = remain_actual + ringbuf->reserved_size;
2382 +- need_wrap = true;
2383 + } else if (total_bytes > ringbuf->space) {
2384 + /* No wrapping required, just waiting. */
2385 + wait_bytes = total_bytes;
2386 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
2387 +index 277e60ae0e47..08961f7d151c 100644
2388 +--- a/drivers/gpu/drm/i915/intel_uncore.c
2389 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
2390 +@@ -1155,7 +1155,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
2391 + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2392 + dev_priv->uncore.funcs.force_wake_get =
2393 + fw_domains_get_with_thread_status;
2394 +- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
2395 ++ if (IS_HASWELL(dev))
2396 ++ dev_priv->uncore.funcs.force_wake_put =
2397 ++ fw_domains_put_with_fifo;
2398 ++ else
2399 ++ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
2400 + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
2401 + FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2402 + } else if (IS_IVYBRIDGE(dev)) {
2403 +diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2404 +index 3216e157a8a0..89da47234016 100644
2405 +--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2406 ++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
2407 +@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
2408 + struct nvkm_ramht *ramht = *pramht;
2409 + if (ramht) {
2410 + nvkm_gpuobj_del(&ramht->gpuobj);
2411 +- kfree(*pramht);
2412 ++ vfree(*pramht);
2413 + *pramht = NULL;
2414 + }
2415 + }
2416 +@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
2417 + struct nvkm_ramht *ramht;
2418 + int ret, i;
2419 +
2420 +- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
2421 +- sizeof(*ramht->data), GFP_KERNEL)))
2422 ++ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
2423 ++ (size >> 3) * sizeof(*ramht->data))))
2424 + return -ENOMEM;
2425 +
2426 + ramht->device = device;
2427 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2428 +index 1f81069edc58..332b5fe687fe 100644
2429 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2430 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2431 +@@ -1807,6 +1807,8 @@ gf100_gr_init(struct gf100_gr *gr)
2432 +
2433 + gf100_gr_mmio(gr, gr->func->mmio);
2434 +
2435 ++ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
2436 ++
2437 + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
2438 + for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
2439 + do {
2440 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
2441 +index 86276519b2ef..47e52647c9e5 100644
2442 +--- a/drivers/gpu/drm/qxl/qxl_display.c
2443 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
2444 +@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
2445 +
2446 + qxl_bo_kunmap(user_bo);
2447 +
2448 ++ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
2449 ++ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
2450 ++ qcrtc->hot_spot_x = hot_x;
2451 ++ qcrtc->hot_spot_y = hot_y;
2452 ++
2453 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
2454 + cmd->type = QXL_CURSOR_SET;
2455 +- cmd->u.set.position.x = qcrtc->cur_x;
2456 +- cmd->u.set.position.y = qcrtc->cur_y;
2457 ++ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
2458 ++ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
2459 +
2460 + cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
2461 +
2462 +@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
2463 +
2464 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
2465 + cmd->type = QXL_CURSOR_MOVE;
2466 +- cmd->u.position.x = qcrtc->cur_x;
2467 +- cmd->u.position.y = qcrtc->cur_y;
2468 ++ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
2469 ++ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
2470 + qxl_release_unmap(qdev, release, &cmd->release_info);
2471 +
2472 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
2473 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
2474 +index 6e6b9b1519b8..3f3897eb458c 100644
2475 +--- a/drivers/gpu/drm/qxl/qxl_drv.h
2476 ++++ b/drivers/gpu/drm/qxl/qxl_drv.h
2477 +@@ -135,6 +135,8 @@ struct qxl_crtc {
2478 + int index;
2479 + int cur_x;
2480 + int cur_y;
2481 ++ int hot_spot_x;
2482 ++ int hot_spot_y;
2483 + };
2484 +
2485 + struct qxl_output {
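The hotspot bookkeeping keeps the visible cursor stationary when the hotspot changes: the stored position absorbs the hotspot delta, and the device is always handed position plus hotspot. A toy check of that arithmetic (plain integers):

    #include <stdio.h>

    int main(void)
    {
        /* toy check of the qxl hotspot compensation above */
        int cur_x = 100, hot_spot_x = 0;   /* device sees cur_x + hot_spot_x */
        int new_hot_x = 4;                 /* cursor image hotspot moved */

        cur_x += hot_spot_x - new_hot_x;   /* absorb the delta */
        hot_spot_x = new_hot_x;

        /* the point the user sees stays at 100 */
        printf("device position = %d\n", cur_x + hot_spot_x);
        return 0;
    }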
2486 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
2487 +index 2ad462896896..32491355a1d4 100644
2488 +--- a/drivers/gpu/drm/radeon/evergreen.c
2489 ++++ b/drivers/gpu/drm/radeon/evergreen.c
2490 +@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2491 + WREG32(VM_CONTEXT1_CNTL, 0);
2492 + }
2493 +
2494 ++static const unsigned ni_dig_offsets[] =
2495 ++{
2496 ++ NI_DIG0_REGISTER_OFFSET,
2497 ++ NI_DIG1_REGISTER_OFFSET,
2498 ++ NI_DIG2_REGISTER_OFFSET,
2499 ++ NI_DIG3_REGISTER_OFFSET,
2500 ++ NI_DIG4_REGISTER_OFFSET,
2501 ++ NI_DIG5_REGISTER_OFFSET
2502 ++};
2503 ++
2504 ++static const unsigned ni_tx_offsets[] =
2505 ++{
2506 ++ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2507 ++ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2508 ++ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2509 ++ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2510 ++ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2511 ++ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2512 ++};
2513 ++
2514 ++static const unsigned evergreen_dp_offsets[] =
2515 ++{
2516 ++ EVERGREEN_DP0_REGISTER_OFFSET,
2517 ++ EVERGREEN_DP1_REGISTER_OFFSET,
2518 ++ EVERGREEN_DP2_REGISTER_OFFSET,
2519 ++ EVERGREEN_DP3_REGISTER_OFFSET,
2520 ++ EVERGREEN_DP4_REGISTER_OFFSET,
2521 ++ EVERGREEN_DP5_REGISTER_OFFSET
2522 ++};
2523 ++
2524 ++
2525 ++/*
2526 ++ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested
2527 ++ * crtc. We go from crtc to connector, which is not reliable since it
2528 ++ * should be the opposite direction. If the crtc is enabled, then
2529 ++ * find the dig_fe which selects this crtc and ensure that it is enabled.
2530 ++ * If such a dig_fe is found, then find the dig_be which selects the found
2531 ++ * dig_fe and ensure that it is enabled and in DP_SST mode.
2532 ++ * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the timing
2533 ++ * from the dp symbol clocks.
2534 ++ */
2535 ++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2536 ++ unsigned crtc_id, unsigned *ret_dig_fe)
2537 ++{
2538 ++ unsigned i;
2539 ++ unsigned dig_fe;
2540 ++ unsigned dig_be;
2541 ++ unsigned dig_en_be;
2542 ++ unsigned uniphy_pll;
2543 ++ unsigned digs_fe_selected;
2544 ++ unsigned dig_be_mode;
2545 ++ unsigned dig_fe_mask;
2546 ++ bool is_enabled = false;
2547 ++ bool found_crtc = false;
2548 ++
2549 ++ /* loop through all running dig_fe to find selected crtc */
2550 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2551 ++ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2552 ++ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2553 ++ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2554 ++ /* found running pipe */
2555 ++ found_crtc = true;
2556 ++ dig_fe_mask = 1 << i;
2557 ++ dig_fe = i;
2558 ++ break;
2559 ++ }
2560 ++ }
2561 ++
2562 ++ if (found_crtc) {
2563 ++ /* loop through all running dig_be to find selected dig_fe */
2564 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2565 ++ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2566 ++ /* is dig_fe selected by this dig_be? */
2567 ++ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2568 ++ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2569 ++ if (dig_fe_mask & digs_fe_selected &&
2570 ++ /* is dig_be in sst mode? */
2571 ++ dig_be_mode == NI_DIG_BE_DPSST) {
2572 ++ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2573 ++ ni_dig_offsets[i]);
2574 ++ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2575 ++ ni_tx_offsets[i]);
2576 ++ /* dig_be is enabled and tx is running */
2577 ++ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2578 ++ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2579 ++ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2580 ++ is_enabled = true;
2581 ++ *ret_dig_fe = dig_fe;
2582 ++ break;
2583 ++ }
2584 ++ }
2585 ++ }
2586 ++ }
2587 ++
2588 ++ return is_enabled;
2589 ++}
2590 ++
2591 ++/*
2592 ++ * Blank dig when in dp sst mode
2593 ++ * Dig ignores crtc timing
2594 ++ */
2595 ++static void evergreen_blank_dp_output(struct radeon_device *rdev,
2596 ++ unsigned dig_fe)
2597 ++{
2598 ++ unsigned stream_ctrl;
2599 ++ unsigned fifo_ctrl;
2600 ++ unsigned counter = 0;
2601 ++
2602 ++ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2603 ++ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2604 ++ return;
2605 ++ }
2606 ++
2607 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2608 ++ evergreen_dp_offsets[dig_fe]);
2609 ++ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2610 ++ DRM_ERROR("dig %d , should be enable\n", dig_fe);
2611 ++ return;
2612 ++ }
2613 ++
2614 ++ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2615 ++ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2616 ++ evergreen_dp_offsets[dig_fe], stream_ctrl);
2617 ++
2618 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2619 ++ evergreen_dp_offsets[dig_fe]);
2620 ++ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2621 ++ msleep(1);
2622 ++ counter++;
2623 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2624 ++ evergreen_dp_offsets[dig_fe]);
2625 ++ }
2626 ++ if (counter >= 32)
2627 ++ DRM_ERROR("counter exceeds %d\n", counter);
2628 ++
2629 ++ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2630 ++ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2631 ++ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2632 ++
2633 ++}
2634 ++
2635 + void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2636 + {
2637 + u32 crtc_enabled, tmp, frame_count, blackout;
2638 + int i, j;
2639 ++ unsigned dig_fe;
2640 +
2641 + if (!ASIC_IS_NODCE(rdev)) {
2642 + save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2643 +@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2644 + break;
2645 + udelay(1);
2646 + }
2647 +-
2648 ++ /*
2649 ++ * We should disable the dig if it drives dp sst, but we are in
2650 ++ * radeon_device_init and the topology is unknown; it only becomes
2651 ++ * available after radeon_modeset_init. The method
2652 ++ * radeon_atom_encoder_dpms_dig would do the job if we initialized
2653 ++ * it properly; for now we do it manually.
2654 ++ */
2655 ++ if (ASIC_IS_DCE5(rdev) &&
2656 ++ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2657 ++ evergreen_blank_dp_output(rdev, dig_fe);
2658 ++ /* we could remove the 6 lines below */
2659 + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2660 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2661 + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2662 +diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
2663 +index aa939dfed3a3..b436badf9efa 100644
2664 +--- a/drivers/gpu/drm/radeon/evergreen_reg.h
2665 ++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
2666 +@@ -250,8 +250,43 @@
2667 +
2668 + /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
2669 + #define EVERGREEN_HDMI_BASE 0x7030
2670 ++/* DIG block */
2671 ++#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
2672 ++#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
2673 ++#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
2674 ++#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
2675 ++#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
2676 ++#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
2677 ++
2678 ++
2679 ++#define NI_DIG_FE_CNTL 0x7000
2680 ++# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
2681 ++# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
2682 ++
2683 ++
2684 ++#define NI_DIG_BE_CNTL 0x7140
2685 ++# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
2686 ++# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
2687 ++
2688 ++#define NI_DIG_BE_EN_CNTL 0x7144
2689 ++# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
2690 ++# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
2691 ++# define NI_DIG_BE_DPSST 0
2692 +
2693 + /* Display Port block */
2694 ++#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
2695 ++#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
2696 ++#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
2697 ++#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
2698 ++#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
2699 ++#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
2700 ++
2701 ++
2702 ++#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
2703 ++# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
2704 ++# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
2705 ++#define EVERGREEN_DP_STEER_FIFO 0x7310
2706 ++# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
2707 + #define EVERGREEN_DP_SEC_CNTL 0x7280
2708 + # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
2709 + # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
2710 +@@ -266,4 +301,15 @@
2711 + # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
2712 + # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
2713 +
2714 ++/* DCIO_UNIPHY block */
2715 ++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
2716 ++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
2717 ++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
2718 ++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
2719 ++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
2720 ++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
2721 ++
2722 ++#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
2723 ++# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
2724 ++
2725 + #endif
2726 +diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2727 +index 9bc408c9f9f6..c4b4f298a283 100644
2728 +--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2729 ++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2730 +@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
2731 + return radeon_atpx_priv.atpx_detected;
2732 + }
2733 +
2734 +-bool radeon_has_atpx_dgpu_power_cntl(void) {
2735 +- return radeon_atpx_priv.atpx.functions.power_cntl;
2736 +-}
2737 +-
2738 + /**
2739 + * radeon_atpx_call - call an ATPX method
2740 + *
2741 +@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
2742 + */
2743 + static int radeon_atpx_validate(struct radeon_atpx *atpx)
2744 + {
2745 ++ /* make sure required functions are enabled */
2746 ++ /* dGPU power control is required */
2747 ++ atpx->functions.power_cntl = true;
2748 ++
2749 + if (atpx->functions.px_params) {
2750 + union acpi_object *info;
2751 + struct atpx_px_params output;
2752 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2753 +index 340f3f549f29..9cfc1c3e1965 100644
2754 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
2755 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2756 +@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2757 + rdev->mode_info.dither_property,
2758 + RADEON_FMT_DITHER_DISABLE);
2759 +
2760 +- if (radeon_audio != 0)
2761 ++ if (radeon_audio != 0) {
2762 + drm_object_attach_property(&radeon_connector->base.base,
2763 + rdev->mode_info.audio_property,
2764 + RADEON_AUDIO_AUTO);
2765 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2766 ++ }
2767 + if (ASIC_IS_DCE5(rdev))
2768 + drm_object_attach_property(&radeon_connector->base.base,
2769 + rdev->mode_info.output_csc_property,
2770 +@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2771 + drm_object_attach_property(&radeon_connector->base.base,
2772 + rdev->mode_info.audio_property,
2773 + RADEON_AUDIO_AUTO);
2774 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2775 + }
2776 + if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2777 + radeon_connector->dac_load_detect = true;
2778 +@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2779 + drm_object_attach_property(&radeon_connector->base.base,
2780 + rdev->mode_info.audio_property,
2781 + RADEON_AUDIO_AUTO);
2782 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2783 + }
2784 + if (ASIC_IS_DCE5(rdev))
2785 + drm_object_attach_property(&radeon_connector->base.base,
2786 +@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2787 + drm_object_attach_property(&radeon_connector->base.base,
2788 + rdev->mode_info.audio_property,
2789 + RADEON_AUDIO_AUTO);
2790 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2791 + }
2792 + if (ASIC_IS_DCE5(rdev))
2793 + drm_object_attach_property(&radeon_connector->base.base,
2794 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2795 +index e2396336f9e8..4197ca1bb1e4 100644
2796 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2797 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2798 +@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
2799 + "LAST",
2800 + };
2801 +
2802 +-#if defined(CONFIG_VGA_SWITCHEROO)
2803 +-bool radeon_has_atpx_dgpu_power_cntl(void);
2804 +-#else
2805 +-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
2806 +-#endif
2807 +-
2808 + #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
2809 + #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
2810 +
2811 +@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
2812 + * ignore it */
2813 + vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
2814 +
2815 +- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
2816 ++ if (rdev->flags & RADEON_IS_PX)
2817 + runtime = true;
2818 + vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
2819 + if (runtime)
2820 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
2821 +index e06ac546a90f..f342aad79cc6 100644
2822 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
2823 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
2824 +@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
2825 + {
2826 + struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
2827 +
2828 ++ if (radeon_ttm_tt_has_userptr(bo->ttm))
2829 ++ return -EPERM;
2830 + return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
2831 + }
2832 +
2833 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2834 +index 7285adb27099..caa73de584a5 100644
2835 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2836 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2837 +@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2838 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2839 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2840 + { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2841 ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2842 + { 0, 0, 0, 0 },
2843 + };
2844 +
2845 +diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2846 +index 4cbf26555093..e3daafa1be13 100644
2847 +--- a/drivers/gpu/drm/ttm/ttm_bo.c
2848 ++++ b/drivers/gpu/drm/ttm/ttm_bo.c
2849 +@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
2850 +
2851 + void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
2852 + {
2853 +- struct ttm_bo_device *bdev = bo->bdev;
2854 +- struct ttm_mem_type_manager *man;
2855 ++ int put_count = 0;
2856 +
2857 + lockdep_assert_held(&bo->resv->lock.base);
2858 +
2859 +- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
2860 +- list_del_init(&bo->swap);
2861 +- list_del_init(&bo->lru);
2862 +-
2863 +- } else {
2864 +- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
2865 +- list_move_tail(&bo->swap, &bo->glob->swap_lru);
2866 +-
2867 +- man = &bdev->man[bo->mem.mem_type];
2868 +- list_move_tail(&bo->lru, &man->lru);
2869 +- }
2870 ++ put_count = ttm_bo_del_from_lru(bo);
2871 ++ ttm_bo_list_ref_sub(bo, put_count, true);
2872 ++ ttm_bo_add_to_lru(bo);
2873 + }
2874 + EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
2875 +
2876 +diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
2877 +index 83e9f591a54b..e7a348807f0c 100644
2878 +--- a/drivers/hwtracing/stm/Kconfig
2879 ++++ b/drivers/hwtracing/stm/Kconfig
2880 +@@ -1,6 +1,7 @@
2881 + config STM
2882 + tristate "System Trace Module devices"
2883 + select CONFIGFS_FS
2884 ++ select SRCU
2885 + help
2886 + A System Trace Module (STM) is a device exporting data in System
2887 + Trace Protocol (STP) format as defined by MIPI STP standards.
2888 +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
2889 +index 714bdc837769..b167ab25310a 100644
2890 +--- a/drivers/i2c/busses/i2c-cpm.c
2891 ++++ b/drivers/i2c/busses/i2c-cpm.c
2892 +@@ -116,8 +116,8 @@ struct cpm_i2c {
2893 + cbd_t __iomem *rbase;
2894 + u_char *txbuf[CPM_MAXBD];
2895 + u_char *rxbuf[CPM_MAXBD];
2896 +- u32 txdma[CPM_MAXBD];
2897 +- u32 rxdma[CPM_MAXBD];
2898 ++ dma_addr_t txdma[CPM_MAXBD];
2899 ++ dma_addr_t rxdma[CPM_MAXBD];
2900 + };
2901 +
2902 + static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
2903 +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
2904 +index b29c7500461a..f54ece8fce78 100644
2905 +--- a/drivers/i2c/busses/i2c-exynos5.c
2906 ++++ b/drivers/i2c/busses/i2c-exynos5.c
2907 +@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2908 + return -EIO;
2909 + }
2910 +
2911 +- clk_prepare_enable(i2c->clk);
2912 ++ ret = clk_enable(i2c->clk);
2913 ++ if (ret)
2914 ++ return ret;
2915 +
2916 + for (i = 0; i < num; i++, msgs++) {
2917 + stop = (i == num - 1);
2918 +@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2919 + }
2920 +
2921 + out:
2922 +- clk_disable_unprepare(i2c->clk);
2923 ++ clk_disable(i2c->clk);
2924 + return ret;
2925 + }
2926 +
2927 +@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2928 + return -ENOENT;
2929 + }
2930 +
2931 +- clk_prepare_enable(i2c->clk);
2932 ++ ret = clk_prepare_enable(i2c->clk);
2933 ++ if (ret)
2934 ++ return ret;
2935 +
2936 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2937 + i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
2938 +@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2939 +
2940 + platform_set_drvdata(pdev, i2c);
2941 +
2942 ++ clk_disable(i2c->clk);
2943 ++
2944 ++ return 0;
2945 ++
2946 + err_clk:
2947 + clk_disable_unprepare(i2c->clk);
2948 + return ret;
2949 +@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
2950 +
2951 + i2c_del_adapter(&i2c->adap);
2952 +
2953 ++ clk_unprepare(i2c->clk);
2954 ++
2955 + return 0;
2956 + }
2957 +
2958 +@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
2959 +
2960 + i2c->suspended = 1;
2961 +
2962 ++ clk_unprepare(i2c->clk);
2963 ++
2964 + return 0;
2965 + }
2966 +
2967 +@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2968 + struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
2969 + int ret = 0;
2970 +
2971 +- clk_prepare_enable(i2c->clk);
2972 ++ ret = clk_prepare_enable(i2c->clk);
2973 ++ if (ret)
2974 ++ return ret;
2975 +
2976 + ret = exynos5_hsi2c_clock_setup(i2c);
2977 + if (ret) {
2978 +@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2979 + }
2980 +
2981 + exynos5_i2c_init(i2c);
2982 +- clk_disable_unprepare(i2c->clk);
2983 ++ clk_disable(i2c->clk);
2984 + i2c->suspended = 0;
2985 +
2986 + return 0;
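The clock handling is restructured so the sleeping half (clk_prepare) runs once at probe/resume while the per-transfer path only toggles the cheap clk_enable/clk_disable pair. A stub-based sketch of that lifecycle (stand-ins, not the kernel clk API):

    #include <stdio.h>

    /* Stand-ins for the kernel clk API so the lifecycle is runnable as
     * a demo; clk_prepare() is the half that may sleep. */
    static int prepared, enabled;

    static int  clk_prepare(void)   { prepared = 1; return 0; }
    static void clk_unprepare(void) { prepared = 0; }
    static int  clk_enable(void)    { if (!prepared) return -1; enabled = 1; return 0; }
    static void clk_disable(void)   { enabled = 0; }

    int main(void)
    {
        /* probe: prepare once (may sleep), gate the clock until needed */
        clk_prepare();
        clk_enable();    /* hardware init */
        clk_disable();

        /* per transfer: only the fast, non-sleeping half toggles */
        clk_enable();    /* ... i2c transfer ... */
        clk_disable();

        /* suspend / remove: drop the prepared state as well */
        clk_unprepare();

        printf("prepared=%d enabled=%d\n", prepared, enabled);  /* 0 0 */
        return 0;
    }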
2987 +diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
2988 +index 53343ffbff7a..1b109b2a235e 100644
2989 +--- a/drivers/infiniband/core/cache.c
2990 ++++ b/drivers/infiniband/core/cache.c
2991 +@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
2992 + NULL);
2993 +
2994 + /* Couldn't find default GID location */
2995 +- WARN_ON(ix < 0);
2996 ++ if (WARN_ON(ix < 0))
2997 ++ goto release;
2998 +
2999 + zattr_type.gid_type = gid_type;
3000 +
3001 +diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
3002 +index 6b4e8a008bc0..564adf3116e8 100644
3003 +--- a/drivers/infiniband/core/ucm.c
3004 ++++ b/drivers/infiniband/core/ucm.c
3005 +@@ -48,6 +48,7 @@
3006 +
3007 + #include <asm/uaccess.h>
3008 +
3009 ++#include <rdma/ib.h>
3010 + #include <rdma/ib_cm.h>
3011 + #include <rdma/ib_user_cm.h>
3012 + #include <rdma/ib_marshall.h>
3013 +@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
3014 + struct ib_ucm_cmd_hdr hdr;
3015 + ssize_t result;
3016 +
3017 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
3018 ++ return -EACCES;
3019 ++
3020 + if (len < sizeof(hdr))
3021 + return -EINVAL;
3022 +
3023 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
3024 +index 8b5a934e1133..886f61ea6cc7 100644
3025 +--- a/drivers/infiniband/core/ucma.c
3026 ++++ b/drivers/infiniband/core/ucma.c
3027 +@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
3028 + struct rdma_ucm_cmd_hdr hdr;
3029 + ssize_t ret;
3030 +
3031 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
3032 ++ return -EACCES;
3033 ++
3034 + if (len < sizeof(hdr))
3035 + return -EINVAL;
3036 +
3037 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
3038 +index 39680aed99dd..d3fb8aa46c59 100644
3039 +--- a/drivers/infiniband/core/uverbs_main.c
3040 ++++ b/drivers/infiniband/core/uverbs_main.c
3041 +@@ -48,6 +48,8 @@
3042 +
3043 + #include <asm/uaccess.h>
3044 +
3045 ++#include <rdma/ib.h>
3046 ++
3047 + #include "uverbs.h"
3048 +
3049 + MODULE_AUTHOR("Roland Dreier");
3050 +@@ -693,6 +695,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
3051 + int srcu_key;
3052 + ssize_t ret;
3053 +
3054 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
3055 ++ return -EACCES;
3056 ++
3057 + if (count < sizeof hdr)
3058 + return -EINVAL;
3059 +
3060 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
3061 +index 03c418ccbc98..ed9cefa1f6f1 100644
3062 +--- a/drivers/infiniband/hw/mlx5/main.c
3063 ++++ b/drivers/infiniband/hw/mlx5/main.c
3064 +@@ -517,7 +517,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
3065 + sizeof(struct mlx5_wqe_ctrl_seg)) /
3066 + sizeof(struct mlx5_wqe_data_seg);
3067 + props->max_sge = min(max_rq_sg, max_sq_sg);
3068 +- props->max_sge_rd = props->max_sge;
3069 ++ props->max_sge_rd = MLX5_MAX_SGE_RD;
3070 + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
3071 + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
3072 + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
3073 +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
3074 +index e449e394963f..24f4a782e0f4 100644
3075 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c
3076 ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
3077 +@@ -45,6 +45,8 @@
3078 + #include <linux/export.h>
3079 + #include <linux/uio.h>
3080 +
3081 ++#include <rdma/ib.h>
3082 ++
3083 + #include "qib.h"
3084 + #include "qib_common.h"
3085 + #include "qib_user_sdma.h"
3086 +@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
3087 + ssize_t ret = 0;
3088 + void *dest;
3089 +
3090 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
3091 ++ return -EACCES;
3092 ++
3093 + if (count < sizeof(cmd.type)) {
3094 + ret = -EINVAL;
3095 + goto bail;
3096 +diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
3097 +index 3f02e0e03d12..67aab86048ad 100644
3098 +--- a/drivers/input/misc/pmic8xxx-pwrkey.c
3099 ++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
3100 +@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3101 + if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
3102 + kpd_delay = 15625;
3103 +
3104 +- if (kpd_delay > 62500 || kpd_delay == 0) {
3105 ++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
3106 ++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
3107 + dev_err(&pdev->dev, "invalid power key trigger delay\n");
3108 + return -EINVAL;
3109 + }
3110 +@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
3111 + pwr->name = "pmic8xxx_pwrkey";
3112 + pwr->phys = "pmic8xxx_pwrkey/input0";
3113 +
3114 +- delay = (kpd_delay << 10) / USEC_PER_SEC;
3115 +- delay = 1 + ilog2(delay);
3116 ++ delay = (kpd_delay << 6) / USEC_PER_SEC;
3117 ++ delay = ilog2(delay);
3118 +
3119 + err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
3120 + if (err < 0) {
3121 +diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
3122 +index 3a7f3a4a4396..7c18249d6c8e 100644
3123 +--- a/drivers/input/tablet/gtco.c
3124 ++++ b/drivers/input/tablet/gtco.c
3125 +@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
3126 + goto err_free_buf;
3127 + }
3128 +
3129 ++ /* Sanity check that a device has an endpoint */
3130 ++ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
3131 ++ dev_err(&usbinterface->dev,
3132 ++ "Invalid number of endpoints\n");
3133 ++ error = -EINVAL;
3134 ++ goto err_free_urb;
3135 ++ }
3136 ++
3137 + /*
3138 + * The endpoint is always altsetting 0, we know this since we know
3139 + * this device only has one interrupt endpoint
3140 +@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
3141 + * HID report descriptor
3142 + */
3143 + if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
3144 +- HID_DEVICE_TYPE, &hid_desc) != 0){
3145 ++ HID_DEVICE_TYPE, &hid_desc) != 0) {
3146 + dev_err(&usbinterface->dev,
3147 + "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
3148 + error = -EIO;
3149 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
3150 +index 374c129219ef..5efadad4615b 100644
3151 +--- a/drivers/iommu/amd_iommu.c
3152 ++++ b/drivers/iommu/amd_iommu.c
3153 +@@ -92,6 +92,7 @@ struct iommu_dev_data {
3154 + struct list_head dev_data_list; /* For global dev_data_list */
3155 + struct protection_domain *domain; /* Domain the device is bound to */
3156 + u16 devid; /* PCI Device ID */
3157 ++ u16 alias; /* Alias Device ID */
3158 + bool iommu_v2; /* Device can make use of IOMMUv2 */
3159 + bool passthrough; /* Device is identity mapped */
3160 + struct {
3161 +@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
3162 + return container_of(dom, struct protection_domain, domain);
3163 + }
3164 +
3165 ++static inline u16 get_device_id(struct device *dev)
3166 ++{
3167 ++ struct pci_dev *pdev = to_pci_dev(dev);
3168 ++
3169 ++ return PCI_DEVID(pdev->bus->number, pdev->devfn);
3170 ++}
3171 ++
3172 + static struct iommu_dev_data *alloc_dev_data(u16 devid)
3173 + {
3174 + struct iommu_dev_data *dev_data;
3175 +@@ -203,6 +211,68 @@ out_unlock:
3176 + return dev_data;
3177 + }
3178 +
3179 ++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
3180 ++{
3181 ++ *(u16 *)data = alias;
3182 ++ return 0;
3183 ++}
3184 ++
3185 ++static u16 get_alias(struct device *dev)
3186 ++{
3187 ++ struct pci_dev *pdev = to_pci_dev(dev);
3188 ++ u16 devid, ivrs_alias, pci_alias;
3189 ++
3190 ++ devid = get_device_id(dev);
3191 ++ ivrs_alias = amd_iommu_alias_table[devid];
3192 ++ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
3193 ++
3194 ++ if (ivrs_alias == pci_alias)
3195 ++ return ivrs_alias;
3196 ++
3197 ++ /*
3198 ++ * DMA alias showdown
3199 ++ *
3200 ++ * The IVRS is fairly reliable in telling us about aliases, but it
3201 ++ * can't know about every screwy device. If we don't have an IVRS
3202 ++ * reported alias, use the PCI reported alias. In that case we may
3203 ++ * still need to initialize the rlookup and dev_table entries if the
3204 ++ * alias is to a non-existent device.
3205 ++ */
3206 ++ if (ivrs_alias == devid) {
3207 ++ if (!amd_iommu_rlookup_table[pci_alias]) {
3208 ++ amd_iommu_rlookup_table[pci_alias] =
3209 ++ amd_iommu_rlookup_table[devid];
3210 ++ memcpy(amd_iommu_dev_table[pci_alias].data,
3211 ++ amd_iommu_dev_table[devid].data,
3212 ++ sizeof(amd_iommu_dev_table[pci_alias].data));
3213 ++ }
3214 ++
3215 ++ return pci_alias;
3216 ++ }
3217 ++
3218 ++ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
3219 ++ "for device %s[%04x:%04x], kernel reported alias "
3220 ++ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
3221 ++ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
3222 ++ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
3223 ++ PCI_FUNC(pci_alias));
3224 ++
3225 ++ /*
3226 ++ * If we don't have a PCI DMA alias and the IVRS alias is on the same
3227 ++ * bus, then the IVRS table may know about a quirk that we don't.
3228 ++ */
3229 ++ if (pci_alias == devid &&
3230 ++ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
3231 ++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3232 ++ pdev->dma_alias_devfn = ivrs_alias & 0xff;
3233 ++ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
3234 ++ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
3235 ++ dev_name(dev));
3236 ++ }
3237 ++
3238 ++ return ivrs_alias;
3239 ++}
3240 ++
3241 + static struct iommu_dev_data *find_dev_data(u16 devid)
3242 + {
3243 + struct iommu_dev_data *dev_data;
3244 +@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
3245 + return dev_data;
3246 + }
3247 +
3248 +-static inline u16 get_device_id(struct device *dev)
3249 +-{
3250 +- struct pci_dev *pdev = to_pci_dev(dev);
3251 +-
3252 +- return PCI_DEVID(pdev->bus->number, pdev->devfn);
3253 +-}
3254 +-
3255 + static struct iommu_dev_data *get_dev_data(struct device *dev)
3256 + {
3257 + return dev->archdata.iommu;
3258 +@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
3259 + if (!dev_data)
3260 + return -ENOMEM;
3261 +
3262 ++ dev_data->alias = get_alias(dev);
3263 ++
3264 + if (pci_iommuv2_capable(pdev)) {
3265 + struct amd_iommu *iommu;
3266 +
3267 +@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
3268 + u16 devid, alias;
3269 +
3270 + devid = get_device_id(dev);
3271 +- alias = amd_iommu_alias_table[devid];
3272 ++ alias = get_alias(dev);
3273 +
3274 + memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
3275 + memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
3276 +@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
3277 + int ret;
3278 +
3279 + iommu = amd_iommu_rlookup_table[dev_data->devid];
3280 +- alias = amd_iommu_alias_table[dev_data->devid];
3281 ++ alias = dev_data->alias;
3282 +
3283 + ret = iommu_flush_dte(iommu, dev_data->devid);
3284 + if (!ret && alias != dev_data->devid)
3285 +@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
3286 + bool ats;
3287 +
3288 + iommu = amd_iommu_rlookup_table[dev_data->devid];
3289 +- alias = amd_iommu_alias_table[dev_data->devid];
3290 ++ alias = dev_data->alias;
3291 + ats = dev_data->ats.enabled;
3292 +
3293 + /* Update data structures */
3294 +@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
3295 + return;
3296 +
3297 + iommu = amd_iommu_rlookup_table[dev_data->devid];
3298 +- alias = amd_iommu_alias_table[dev_data->devid];
3299 ++ alias = dev_data->alias;
3300 +
3301 + /* decrease reference counters */
3302 + dev_data->domain->dev_iommu[iommu->index] -= 1;
3303 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
3304 +index 72d6182666cb..58f2fe687a24 100644
3305 +--- a/drivers/iommu/dma-iommu.c
3306 ++++ b/drivers/iommu/dma-iommu.c
3307 +@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
3308 + unsigned int s_length = sg_dma_len(s);
3309 + unsigned int s_dma_len = s->length;
3310 +
3311 +- s->offset = s_offset;
3312 ++ s->offset += s_offset;
3313 + s->length = s_length;
3314 + sg_dma_address(s) = dma_addr + s_offset;
3315 + dma_addr += s_dma_len;
3316 +@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
3317 +
3318 + for_each_sg(sg, s, nents, i) {
3319 + if (sg_dma_address(s) != DMA_ERROR_CODE)
3320 +- s->offset = sg_dma_address(s);
3321 ++ s->offset += sg_dma_address(s);
3322 + if (sg_dma_len(s))
3323 + s->length = sg_dma_len(s);
3324 + sg_dma_address(s) = DMA_ERROR_CODE;
3325 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
3326 +index efe50845939d..17304705f2cf 100644
3327 +--- a/drivers/irqchip/irq-mxs.c
3328 ++++ b/drivers/irqchip/irq-mxs.c
3329 +@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
3330 + void __iomem *icoll_base;
3331 +
3332 + icoll_base = of_io_request_and_map(np, 0, np->name);
3333 +- if (!icoll_base)
3334 ++ if (IS_ERR(icoll_base))
3335 + panic("%s: unable to map resource", np->full_name);
3336 + return icoll_base;
3337 + }
3338 +diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
3339 +index 0820f67cc9a7..668730c5cb66 100644
3340 +--- a/drivers/irqchip/irq-sunxi-nmi.c
3341 ++++ b/drivers/irqchip/irq-sunxi-nmi.c
3342 +@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
3343 +
3344 + gc = irq_get_domain_generic_chip(domain, 0);
3345 + gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
3346 +- if (!gc->reg_base) {
3347 ++ if (IS_ERR(gc->reg_base)) {
3348 + pr_err("unable to map resource\n");
3349 +- ret = -ENOMEM;
3350 ++ ret = PTR_ERR(gc->reg_base);
3351 + goto fail_irqd_remove;
3352 + }
3353 +
3354 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
3355 +index 27f2ef300f8b..3970cda10080 100644
3356 +--- a/drivers/md/dm-cache-metadata.c
3357 ++++ b/drivers/md/dm-cache-metadata.c
3358 +@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
3359 + return 0;
3360 + }
3361 +
3362 +-#define WRITE_LOCK(cmd) \
3363 +- down_write(&cmd->root_lock); \
3364 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3365 +- up_write(&cmd->root_lock); \
3366 +- return -EINVAL; \
3367 ++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
3368 ++{
3369 ++ down_write(&cmd->root_lock);
3370 ++ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
3371 ++ up_write(&cmd->root_lock);
3372 ++ return false;
3373 + }
3374 ++ return true;
3375 ++}
3376 +
3377 +-#define WRITE_LOCK_VOID(cmd) \
3378 +- down_write(&cmd->root_lock); \
3379 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3380 +- up_write(&cmd->root_lock); \
3381 +- return; \
3382 +- }
3383 ++#define WRITE_LOCK(cmd) \
3384 ++ do { \
3385 ++ if (!cmd_write_lock((cmd))) \
3386 ++ return -EINVAL; \
3387 ++ } while(0)
3388 ++
3389 ++#define WRITE_LOCK_VOID(cmd) \
3390 ++ do { \
3391 ++ if (!cmd_write_lock((cmd))) \
3392 ++ return; \
3393 ++ } while(0)
3394 +
3395 + #define WRITE_UNLOCK(cmd) \
3396 +- up_write(&cmd->root_lock)
3397 ++ up_write(&(cmd)->root_lock)
3398 +
3399 +-#define READ_LOCK(cmd) \
3400 +- down_read(&cmd->root_lock); \
3401 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3402 +- up_read(&cmd->root_lock); \
3403 +- return -EINVAL; \
3404 ++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
3405 ++{
3406 ++ down_read(&cmd->root_lock);
3407 ++ if (cmd->fail_io) {
3408 ++ up_read(&cmd->root_lock);
3409 ++ return false;
3410 + }
3411 ++ return true;
3412 ++}
3413 +
3414 +-#define READ_LOCK_VOID(cmd) \
3415 +- down_read(&cmd->root_lock); \
3416 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
3417 +- up_read(&cmd->root_lock); \
3418 +- return; \
3419 +- }
3420 ++#define READ_LOCK(cmd) \
3421 ++ do { \
3422 ++ if (!cmd_read_lock((cmd))) \
3423 ++ return -EINVAL; \
3424 ++ } while(0)
3425 ++
3426 ++#define READ_LOCK_VOID(cmd) \
3427 ++ do { \
3428 ++ if (!cmd_read_lock((cmd))) \
3429 ++ return; \
3430 ++ } while(0)
3431 +
3432 + #define READ_UNLOCK(cmd) \
3433 +- up_read(&cmd->root_lock)
3434 ++ up_read(&(cmd)->root_lock)
3435 +
3436 + int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
3437 + {
3438 +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
3439 +index de9ff3bb8edd..6996ab8db108 100644
3440 +--- a/drivers/media/usb/usbvision/usbvision-video.c
3441 ++++ b/drivers/media/usb/usbvision/usbvision-video.c
3442 +@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
3443 + printk(KERN_INFO "%s: %s found\n", __func__,
3444 + usbvision_device_data[model].model_string);
3445 +
3446 +- /*
3447 +- * this is a security check.
3448 +- * an exploit using an incorrect bInterfaceNumber is known
3449 +- */
3450 +- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
3451 +- return -ENODEV;
3452 +-
3453 + if (usbvision_device_data[model].interface >= 0)
3454 + interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
3455 + else if (ifnum < dev->actconfig->desc.bNumInterfaces)
3456 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
3457 +index ff8953ae52d1..d7d7c52a3060 100644
3458 +--- a/drivers/media/v4l2-core/videobuf2-core.c
3459 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
3460 +@@ -1643,7 +1643,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
3461 + * Will sleep if required for nonblocking == false.
3462 + */
3463 + static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3464 +- int nonblocking)
3465 ++ void *pb, int nonblocking)
3466 + {
3467 + unsigned long flags;
3468 + int ret;
3469 +@@ -1664,10 +1664,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
3470 + /*
3471 + * Only remove the buffer from done_list if v4l2_buffer can handle all
3472 + * the planes.
3473 +- * Verifying planes is NOT necessary since it already has been checked
3474 +- * before the buffer is queued/prepared. So it can never fail.
3475 + */
3476 +- list_del(&(*vb)->done_entry);
3477 ++ ret = call_bufop(q, verify_planes_array, *vb, pb);
3478 ++ if (!ret)
3479 ++ list_del(&(*vb)->done_entry);
3480 + spin_unlock_irqrestore(&q->done_lock, flags);
3481 +
3482 + return ret;
3483 +@@ -1746,7 +1746,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
3484 + struct vb2_buffer *vb = NULL;
3485 + int ret;
3486 +
3487 +- ret = __vb2_get_done_vb(q, &vb, nonblocking);
3488 ++ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
3489 + if (ret < 0)
3490 + return ret;
3491 +
3492 +@@ -2293,6 +2293,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
3493 + return POLLERR;
3494 +
3495 + /*
3496 ++ * If this quirk is set and QBUF hasn't been called yet then
3497 ++ * return POLLERR as well. This only affects capture queues, output
3498 ++ * queues will always initialize waiting_for_buffers to false.
3499 ++ * This quirk is set by V4L2 for backwards compatibility reasons.
3500 ++ */
3501 ++ if (q->quirk_poll_must_check_waiting_for_buffers &&
3502 ++ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
3503 ++ return POLLERR;
3504 ++
3505 ++ /*
3506 + * For output streams you can call write() as long as there are fewer
3507 + * buffers queued than there are buffers available.
3508 + */
3509 +diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
3510 +index dbec5923fcf0..3c3b517f1d1c 100644
3511 +--- a/drivers/media/v4l2-core/videobuf2-memops.c
3512 ++++ b/drivers/media/v4l2-core/videobuf2-memops.c
3513 +@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
3514 + vec = frame_vector_create(nr);
3515 + if (!vec)
3516 + return ERR_PTR(-ENOMEM);
3517 +- ret = get_vaddr_frames(start, nr, write, 1, vec);
3518 ++ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
3519 + if (ret < 0)
3520 + goto out_destroy;
3521 + /* We accept only complete set of PFNs */
3522 +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
3523 +index 91f552124050..7f366f1b0377 100644
3524 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
3525 ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
3526 +@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
3527 + return 0;
3528 + }
3529 +
3530 ++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
3531 ++{
3532 ++ return __verify_planes_array(vb, pb);
3533 ++}
3534 ++
3535 + /**
3536 + * __verify_length() - Verify that the bytesused value for each plane fits in
3537 + * the plane length and that the data offset doesn't exceed the bytesused value.
3538 +@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
3539 + }
3540 +
3541 + static const struct vb2_buf_ops v4l2_buf_ops = {
3542 ++ .verify_planes_array = __verify_planes_array_core,
3543 + .fill_user_buffer = __fill_v4l2_buffer,
3544 + .fill_vb2_buffer = __fill_vb2_buffer,
3545 + .copy_timestamp = __copy_timestamp,
3546 +@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
3547 + q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
3548 + q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
3549 + == V4L2_BUF_FLAG_TIMESTAMP_COPY;
3550 ++ /*
3551 ++ * For compatibility with vb1: if QBUF hasn't been called yet, then
3552 ++ * return POLLERR as well. This only affects capture queues, output
3553 ++ * queues will always initialize waiting_for_buffers to false.
3554 ++ */
3555 ++ q->quirk_poll_must_check_waiting_for_buffers = true;
3556 +
3557 + return vb2_core_queue_init(q);
3558 + }
3559 +@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
3560 + poll_wait(file, &fh->wait, wait);
3561 + }
3562 +
3563 +- /*
3564 +- * For compatibility with vb1: if QBUF hasn't been called yet, then
3565 +- * return POLLERR as well. This only affects capture queues, output
3566 +- * queues will always initialize waiting_for_buffers to false.
3567 +- */
3568 +- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
3569 +- return POLLERR;
3570 +-
3571 + return res | vb2_core_poll(q, file, wait);
3572 + }
3573 + EXPORT_SYMBOL_GPL(vb2_poll);
3574 +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
3575 +index 054fc10cb3b6..b22c03264270 100644
3576 +--- a/drivers/misc/Kconfig
3577 ++++ b/drivers/misc/Kconfig
3578 +@@ -440,7 +440,7 @@ config ARM_CHARLCD
3579 + still useful.
3580 +
3581 + config BMP085
3582 +- bool
3583 ++ tristate
3584 + depends on SYSFS
3585 +
3586 + config BMP085_I2C
3587 +diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
3588 +index 15e88078ba1e..f1a0b99f5a9a 100644
3589 +--- a/drivers/misc/ad525x_dpot.c
3590 ++++ b/drivers/misc/ad525x_dpot.c
3591 +@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
3592 + */
3593 + value = swab16(value);
3594 +
3595 +- if (dpot->uid == DPOT_UID(AD5271_ID))
3596 ++ if (dpot->uid == DPOT_UID(AD5274_ID))
3597 + value = value >> 2;
3598 + return value;
3599 + default:
3600 +diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
3601 +index 09a406058c46..efbb6945eb18 100644
3602 +--- a/drivers/misc/cxl/irq.c
3603 ++++ b/drivers/misc/cxl/irq.c
3604 +@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
3605 + void cxl_unmap_irq(unsigned int virq, void *cookie)
3606 + {
3607 + free_irq(virq, cookie);
3608 +- irq_dispose_mapping(virq);
3609 + }
3610 +
3611 + static int cxl_register_one_irq(struct cxl *adapter,
3612 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
3613 +index 8310b4dbff06..6a451bd65bf3 100644
3614 +--- a/drivers/misc/mic/scif/scif_rma.c
3615 ++++ b/drivers/misc/mic/scif/scif_rma.c
3616 +@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
3617 + if ((map_flags & SCIF_MAP_FIXED) &&
3618 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
3619 + (offset < 0) ||
3620 +- (offset + (off_t)len < offset)))
3621 ++ (len > LONG_MAX - offset)))
3622 + return -EINVAL;
3623 +
3624 + might_sleep();
3625 +@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
3626 + if ((map_flags & SCIF_MAP_FIXED) &&
3627 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
3628 + (offset < 0) ||
3629 +- (offset + (off_t)len < offset)))
3630 ++ (len > LONG_MAX - offset)))
3631 + return -EINVAL;
3632 +
3633 + /* Unsupported protection requested */
3634 +@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
3635 +
3636 + /* Offset is not page aligned or offset+len wraps around */
3637 + if ((ALIGN(offset, PAGE_SIZE) != offset) ||
3638 +- (offset + (off_t)len < offset))
3639 ++ (offset < 0) ||
3640 ++ (len > LONG_MAX - offset))
3641 + return -EINVAL;
3642 +
3643 + err = scif_verify_epd(ep);
3644 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
3645 +index 5fbffdb6b854..c6f36f3ca5d2 100644
3646 +--- a/drivers/mmc/card/block.c
3647 ++++ b/drivers/mmc/card/block.c
3648 +@@ -86,7 +86,6 @@ static int max_devices;
3649 +
3650 + /* TODO: Replace these with struct ida */
3651 + static DECLARE_BITMAP(dev_use, MAX_DEVICES);
3652 +-static DECLARE_BITMAP(name_use, MAX_DEVICES);
3653 +
3654 + /*
3655 + * There is one mmc_blk_data per slot.
3656 +@@ -105,7 +104,6 @@ struct mmc_blk_data {
3657 + unsigned int usage;
3658 + unsigned int read_only;
3659 + unsigned int part_type;
3660 +- unsigned int name_idx;
3661 + unsigned int reset_done;
3662 + #define MMC_BLK_READ BIT(0)
3663 + #define MMC_BLK_WRITE BIT(1)
3664 +@@ -2202,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3665 + goto out;
3666 + }
3667 +
3668 +- /*
3669 +- * !subname implies we are creating main mmc_blk_data that will be
3670 +- * associated with mmc_card with dev_set_drvdata. Due to device
3671 +- * partitions, devidx will not coincide with a per-physical card
3672 +- * index anymore so we keep track of a name index.
3673 +- */
3674 +- if (!subname) {
3675 +- md->name_idx = find_first_zero_bit(name_use, max_devices);
3676 +- __set_bit(md->name_idx, name_use);
3677 +- } else
3678 +- md->name_idx = ((struct mmc_blk_data *)
3679 +- dev_to_disk(parent)->private_data)->name_idx;
3680 +-
3681 + md->area_type = area_type;
3682 +
3683 + /*
3684 +@@ -2264,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
3685 + */
3686 +
3687 + snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
3688 +- "mmcblk%u%s", md->name_idx, subname ? subname : "");
3689 ++ "mmcblk%u%s", card->host->index, subname ? subname : "");
3690 +
3691 + if (mmc_card_mmc(card))
3692 + blk_queue_logical_block_size(md->queue.queue,
3693 +@@ -2418,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
3694 + struct list_head *pos, *q;
3695 + struct mmc_blk_data *part_md;
3696 +
3697 +- __clear_bit(md->name_idx, name_use);
3698 + list_for_each_safe(pos, q, &md->part) {
3699 + part_md = list_entry(pos, struct mmc_blk_data, part);
3700 + list_del(pos);
3701 +diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
3702 +index 1526b8a10b09..3b944fc70eec 100644
3703 +--- a/drivers/mmc/host/Kconfig
3704 ++++ b/drivers/mmc/host/Kconfig
3705 +@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
3706 + config MMC_SDHCI_ACPI
3707 + tristate "SDHCI support for ACPI enumerated SDHCI controllers"
3708 + depends on MMC_SDHCI && ACPI
3709 ++ select IOSF_MBI if X86
3710 + help
3711 + This selects support for ACPI enumerated SDHCI controllers,
3712 + identified by ACPI Compatibility ID PNP0D40 or specific
3713 +diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
3714 +index a5cda926d38e..975139f97498 100644
3715 +--- a/drivers/mmc/host/sdhci-acpi.c
3716 ++++ b/drivers/mmc/host/sdhci-acpi.c
3717 +@@ -41,6 +41,11 @@
3718 + #include <linux/mmc/pm.h>
3719 + #include <linux/mmc/slot-gpio.h>
3720 +
3721 ++#ifdef CONFIG_X86
3722 ++#include <asm/cpu_device_id.h>
3723 ++#include <asm/iosf_mbi.h>
3724 ++#endif
3725 ++
3726 + #include "sdhci.h"
3727 +
3728 + enum {
3729 +@@ -146,6 +151,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
3730 + .ops = &sdhci_acpi_ops_int,
3731 + };
3732 +
3733 ++#ifdef CONFIG_X86
3734 ++
3735 ++static bool sdhci_acpi_byt(void)
3736 ++{
3737 ++ static const struct x86_cpu_id byt[] = {
3738 ++ { X86_VENDOR_INTEL, 6, 0x37 },
3739 ++ {}
3740 ++ };
3741 ++
3742 ++ return x86_match_cpu(byt);
3743 ++}
3744 ++
3745 ++#define BYT_IOSF_SCCEP 0x63
3746 ++#define BYT_IOSF_OCP_NETCTRL0 0x1078
3747 ++#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
3748 ++
3749 ++static void sdhci_acpi_byt_setting(struct device *dev)
3750 ++{
3751 ++ u32 val = 0;
3752 ++
3753 ++ if (!sdhci_acpi_byt())
3754 ++ return;
3755 ++
3756 ++ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
3757 ++ &val)) {
3758 ++ dev_err(dev, "%s read error\n", __func__);
3759 ++ return;
3760 ++ }
3761 ++
3762 ++ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
3763 ++ return;
3764 ++
3765 ++ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
3766 ++
3767 ++ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
3768 ++ val)) {
3769 ++ dev_err(dev, "%s write error\n", __func__);
3770 ++ return;
3771 ++ }
3772 ++
3773 ++ dev_dbg(dev, "%s completed\n", __func__);
3774 ++}
3775 ++
3776 ++static bool sdhci_acpi_byt_defer(struct device *dev)
3777 ++{
3778 ++ if (!sdhci_acpi_byt())
3779 ++ return false;
3780 ++
3781 ++ if (!iosf_mbi_available())
3782 ++ return true;
3783 ++
3784 ++ sdhci_acpi_byt_setting(dev);
3785 ++
3786 ++ return false;
3787 ++}
3788 ++
3789 ++#else
3790 ++
3791 ++static inline void sdhci_acpi_byt_setting(struct device *dev)
3792 ++{
3793 ++}
3794 ++
3795 ++static inline bool sdhci_acpi_byt_defer(struct device *dev)
3796 ++{
3797 ++ return false;
3798 ++}
3799 ++
3800 ++#endif
3801 ++
3802 + static int bxt_get_cd(struct mmc_host *mmc)
3803 + {
3804 + int gpio_cd = mmc_gpio_get_cd(mmc);
3805 +@@ -337,6 +411,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
3806 + if (acpi_bus_get_status(device) || !device->status.present)
3807 + return -ENODEV;
3808 +
3809 ++ if (sdhci_acpi_byt_defer(dev))
3810 ++ return -EPROBE_DEFER;
3811 ++
3812 + hid = acpi_device_hid(device);
3813 + uid = device->pnp.unique_id;
3814 +
3815 +@@ -460,6 +537,8 @@ static int sdhci_acpi_resume(struct device *dev)
3816 + {
3817 + struct sdhci_acpi_host *c = dev_get_drvdata(dev);
3818 +
3819 ++ sdhci_acpi_byt_setting(&c->pdev->dev);
3820 ++
3821 + return sdhci_resume_host(c->host);
3822 + }
3823 +
3824 +@@ -483,6 +562,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
3825 + {
3826 + struct sdhci_acpi_host *c = dev_get_drvdata(dev);
3827 +
3828 ++ sdhci_acpi_byt_setting(&c->pdev->dev);
3829 ++
3830 + return sdhci_runtime_resume_host(c->host);
3831 + }
3832 +
3833 +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
3834 +index 844fc07d22cd..f7009c1cb90c 100644
3835 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c
3836 ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
3837 +@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
3838 + [BRCMNAND_FC_BASE] = 0x400,
3839 + };
3840 +
3841 ++/* BRCMNAND v7.1 */
3842 ++static const u16 brcmnand_regs_v71[] = {
3843 ++ [BRCMNAND_CMD_START] = 0x04,
3844 ++ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
3845 ++ [BRCMNAND_CMD_ADDRESS] = 0x0c,
3846 ++ [BRCMNAND_INTFC_STATUS] = 0x14,
3847 ++ [BRCMNAND_CS_SELECT] = 0x18,
3848 ++ [BRCMNAND_CS_XOR] = 0x1c,
3849 ++ [BRCMNAND_LL_OP] = 0x20,
3850 ++ [BRCMNAND_CS0_BASE] = 0x50,
3851 ++ [BRCMNAND_CS1_BASE] = 0,
3852 ++ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
3853 ++ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
3854 ++ [BRCMNAND_UNCORR_COUNT] = 0xfc,
3855 ++ [BRCMNAND_CORR_COUNT] = 0x100,
3856 ++ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
3857 ++ [BRCMNAND_CORR_ADDR] = 0x110,
3858 ++ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
3859 ++ [BRCMNAND_UNCORR_ADDR] = 0x118,
3860 ++ [BRCMNAND_SEMAPHORE] = 0x150,
3861 ++ [BRCMNAND_ID] = 0x194,
3862 ++ [BRCMNAND_ID_EXT] = 0x198,
3863 ++ [BRCMNAND_LL_RDATA] = 0x19c,
3864 ++ [BRCMNAND_OOB_READ_BASE] = 0x200,
3865 ++ [BRCMNAND_OOB_READ_10_BASE] = 0,
3866 ++ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
3867 ++ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
3868 ++ [BRCMNAND_FC_BASE] = 0x400,
3869 ++};
3870 ++
3871 + enum brcmnand_cs_reg {
3872 + BRCMNAND_CS_CFG_EXT = 0,
3873 + BRCMNAND_CS_CFG,
3874 +@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
3875 + }
3876 +
3877 + /* Register offsets */
3878 +- if (ctrl->nand_version >= 0x0600)
3879 ++ if (ctrl->nand_version >= 0x0701)
3880 ++ ctrl->reg_offsets = brcmnand_regs_v71;
3881 ++ else if (ctrl->nand_version >= 0x0600)
3882 + ctrl->reg_offsets = brcmnand_regs_v60;
3883 + else if (ctrl->nand_version >= 0x0500)
3884 + ctrl->reg_offsets = brcmnand_regs_v50;
3885 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3886 +index f2c8ff398d6c..171d146645ba 100644
3887 +--- a/drivers/mtd/nand/nand_base.c
3888 ++++ b/drivers/mtd/nand/nand_base.c
3889 +@@ -4021,7 +4021,6 @@ static int nand_dt_init(struct nand_chip *chip)
3890 + * This is the first phase of the normal nand_scan() function. It reads the
3891 + * flash ID and sets up MTD fields accordingly.
3892 + *
3893 +- * The mtd->owner field must be set to the module of the caller.
3894 + */
3895 + int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3896 + struct nand_flash_dev *table)
3897 +@@ -4443,19 +4442,12 @@ EXPORT_SYMBOL(nand_scan_tail);
3898 + *
3899 + * This fills out all the uninitialized function pointers with the defaults.
3900 + * The flash ID is read and the mtd/chip structures are filled with the
3901 +- * appropriate values. The mtd->owner field must be set to the module of the
3902 +- * caller.
3903 ++ * appropriate values.
3904 + */
3905 + int nand_scan(struct mtd_info *mtd, int maxchips)
3906 + {
3907 + int ret;
3908 +
3909 +- /* Many callers got this wrong, so check for it for a while... */
3910 +- if (!mtd->owner && caller_is_module()) {
3911 +- pr_crit("%s called with NULL mtd->owner!\n", __func__);
3912 +- BUG();
3913 +- }
3914 +-
3915 + ret = nand_scan_ident(mtd, maxchips, NULL);
3916 + if (!ret)
3917 + ret = nand_scan_tail(mtd);
3918 +diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
3919 +index 86fc245dc71a..fd78644469fa 100644
3920 +--- a/drivers/mtd/nand/pxa3xx_nand.c
3921 ++++ b/drivers/mtd/nand/pxa3xx_nand.c
3922 +@@ -1738,7 +1738,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
3923 + if (ret < 0)
3924 + return ret;
3925 +
3926 +- if (use_dma) {
3927 ++ if (!np && use_dma) {
3928 + r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
3929 + if (r == NULL) {
3930 + dev_err(&pdev->dev,
3931 +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
3932 +index ed0c19c558b5..3028c06547c1 100644
3933 +--- a/drivers/mtd/spi-nor/spi-nor.c
3934 ++++ b/drivers/mtd/spi-nor/spi-nor.c
3935 +@@ -1100,45 +1100,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
3936 + return 0;
3937 + }
3938 +
3939 +-static int micron_quad_enable(struct spi_nor *nor)
3940 +-{
3941 +- int ret;
3942 +- u8 val;
3943 +-
3944 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3945 +- if (ret < 0) {
3946 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
3947 +- return ret;
3948 +- }
3949 +-
3950 +- write_enable(nor);
3951 +-
3952 +- /* set EVCR, enable quad I/O */
3953 +- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
3954 +- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
3955 +- if (ret < 0) {
3956 +- dev_err(nor->dev, "error while writing EVCR register\n");
3957 +- return ret;
3958 +- }
3959 +-
3960 +- ret = spi_nor_wait_till_ready(nor);
3961 +- if (ret)
3962 +- return ret;
3963 +-
3964 +- /* read EVCR and check it */
3965 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3966 +- if (ret < 0) {
3967 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
3968 +- return ret;
3969 +- }
3970 +- if (val & EVCR_QUAD_EN_MICRON) {
3971 +- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
3972 +- return -EINVAL;
3973 +- }
3974 +-
3975 +- return 0;
3976 +-}
3977 +-
3978 + static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3979 + {
3980 + int status;
3981 +@@ -1152,12 +1113,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3982 + }
3983 + return status;
3984 + case SNOR_MFR_MICRON:
3985 +- status = micron_quad_enable(nor);
3986 +- if (status) {
3987 +- dev_err(nor->dev, "Micron quad-read not enabled\n");
3988 +- return -EINVAL;
3989 +- }
3990 +- return status;
3991 ++ return 0;
3992 + default:
3993 + status = spansion_quad_enable(nor);
3994 + if (status) {
3995 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3996 +index d70a1716f3e0..1486f33a743e 100644
3997 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3998 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
3999 +@@ -1143,6 +1143,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
4000 + /* the fw is stopped, the aux sta is dead: clean up driver state */
4001 + iwl_mvm_del_aux_sta(mvm);
4002 +
4003 ++ iwl_free_fw_paging(mvm);
4004 ++
4005 + /*
4006 + * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
4007 + * won't be called in this case).
4008 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4009 +index e80be9a59520..89ea70deeb84 100644
4010 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4011 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
4012 +@@ -684,8 +684,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
4013 + for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
4014 + kfree(mvm->nvm_sections[i].data);
4015 +
4016 +- iwl_free_fw_paging(mvm);
4017 +-
4018 + iwl_mvm_tof_clean(mvm);
4019 +
4020 + ieee80211_free_hw(mvm->hw);
4021 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4022 +index 5a854c609477..1198caac35c8 100644
4023 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4024 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4025 +@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
4026 + */
4027 + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
4028 + if (val & (BIT(1) | BIT(17))) {
4029 +- IWL_INFO(trans,
4030 +- "can't access the RSA semaphore it is write protected\n");
4031 ++ IWL_DEBUG_INFO(trans,
4032 ++ "can't access the RSA semaphore it is write protected\n");
4033 + return 0;
4034 + }
4035 +
4036 +diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
4037 +index ff3ee9dfbbd5..23bae87d4d3d 100644
4038 +--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
4039 ++++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
4040 +@@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
4041 +
4042 + case EVENT_PS_AWAKE:
4043 + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
4044 +- if (!adapter->pps_uapsd_mode && priv->port_open &&
4045 ++ if (!adapter->pps_uapsd_mode &&
4046 ++ (priv->port_open ||
4047 ++ (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
4048 + priv->media_connected && adapter->sleep_period.period) {
4049 +- adapter->pps_uapsd_mode = true;
4050 +- mwifiex_dbg(adapter, EVENT,
4051 +- "event: PPS/UAPSD mode activated\n");
4052 ++ adapter->pps_uapsd_mode = true;
4053 ++ mwifiex_dbg(adapter, EVENT,
4054 ++ "event: PPS/UAPSD mode activated\n");
4055 + }
4056 + adapter->tx_lock_flag = false;
4057 + if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
4058 +diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
4059 +index acccd6734e3b..499e5a741c62 100644
4060 +--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
4061 ++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
4062 +@@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
4063 + priv = adapter->priv[i];
4064 + if (!priv)
4065 + continue;
4066 +- if (!priv->port_open)
4067 ++ if (!priv->port_open &&
4068 ++ (priv->bss_mode != NL80211_IFTYPE_ADHOC))
4069 + continue;
4070 + if (adapter->if_ops.is_port_ready &&
4071 + !adapter->if_ops.is_port_ready(priv))
4072 +@@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
4073 +
4074 + priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
4075 +
4076 +- if (!priv_tmp->port_open ||
4077 ++ if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
4078 ++ !priv_tmp->port_open) ||
4079 + (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
4080 + continue;
4081 +
4082 +diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
4083 +index 588803ad6847..6ccba0d862df 100644
4084 +--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
4085 ++++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
4086 +@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
4087 + return 0;
4088 + }
4089 +
4090 +-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
4091 +- phys_addr_t *db_addr,
4092 +- resource_size_t *db_size)
4093 +-{
4094 +- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4095 +-
4096 +- if (db_addr)
4097 +- *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
4098 +- if (db_size)
4099 +- *db_size = sizeof(u32);
4100 +-
4101 +- return 0;
4102 +-}
4103 +-
4104 + static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
4105 + {
4106 + struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4107 +@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
4108 + return 0;
4109 + }
4110 +
4111 +-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
4112 +- phys_addr_t *spad_addr)
4113 +-{
4114 +- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4115 +-
4116 +- if (idx < 0 || idx >= ndev->spad_count)
4117 +- return -EINVAL;
4118 +-
4119 +- if (spad_addr)
4120 +- *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
4121 +- ndev->peer_spad + (idx << 2));
4122 +- return 0;
4123 +-}
4124 +-
4125 + static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
4126 + {
4127 + struct amd_ntb_dev *ndev = ntb_ndev(ntb);
4128 +@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
4129 + .db_clear = amd_ntb_db_clear,
4130 + .db_set_mask = amd_ntb_db_set_mask,
4131 + .db_clear_mask = amd_ntb_db_clear_mask,
4132 +- .peer_db_addr = amd_ntb_peer_db_addr,
4133 + .peer_db_set = amd_ntb_peer_db_set,
4134 + .spad_count = amd_ntb_spad_count,
4135 + .spad_read = amd_ntb_spad_read,
4136 + .spad_write = amd_ntb_spad_write,
4137 +- .peer_spad_addr = amd_ntb_peer_spad_addr,
4138 + .peer_spad_read = amd_ntb_peer_spad_read,
4139 + .peer_spad_write = amd_ntb_peer_spad_write,
4140 + };
4141 +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
4142 +index c8a37ba4b4f9..6bdc1e7b7503 100644
4143 +--- a/drivers/ntb/test/ntb_perf.c
4144 ++++ b/drivers/ntb/test/ntb_perf.c
4145 +@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
4146 + atomic_dec(&pctx->dma_sync);
4147 + }
4148 +
4149 +-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4150 ++static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
4151 + char *src, size_t size)
4152 + {
4153 + struct perf_ctx *perf = pctx->perf;
4154 +@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4155 + dma_cookie_t cookie;
4156 + size_t src_off, dst_off;
4157 + struct perf_mw *mw = &perf->mw;
4158 +- u64 vbase, dst_vaddr;
4159 ++ void __iomem *vbase;
4160 ++ void __iomem *dst_vaddr;
4161 + dma_addr_t dst_phys;
4162 + int retries = 0;
4163 +
4164 +@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
4165 + }
4166 +
4167 + device = chan->device;
4168 +- src_off = (size_t)src & ~PAGE_MASK;
4169 +- dst_off = (size_t)dst & ~PAGE_MASK;
4170 ++ src_off = (uintptr_t)src & ~PAGE_MASK;
4171 ++ dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
4172 +
4173 + if (!is_dma_copy_aligned(device, src_off, dst_off, size))
4174 + return -ENODEV;
4175 +
4176 +- vbase = (u64)(u64 *)mw->vbase;
4177 +- dst_vaddr = (u64)(u64 *)dst;
4178 ++ vbase = mw->vbase;
4179 ++ dst_vaddr = dst;
4180 + dst_phys = mw->phys_addr + (dst_vaddr - vbase);
4181 +
4182 + unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
4183 +@@ -261,13 +262,13 @@ err_get_unmap:
4184 + return 0;
4185 + }
4186 +
4187 +-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
4188 ++static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
4189 + u64 buf_size, u64 win_size, u64 total)
4190 + {
4191 + int chunks, total_chunks, i;
4192 + int copied_chunks = 0;
4193 + u64 copied = 0, result;
4194 +- char *tmp = dst;
4195 ++ char __iomem *tmp = dst;
4196 + u64 perf, diff_us;
4197 + ktime_t kstart, kstop, kdiff;
4198 +
4199 +@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
4200 + struct perf_ctx *perf = pctx->perf;
4201 + struct pci_dev *pdev = perf->ntb->pdev;
4202 + struct perf_mw *mw = &perf->mw;
4203 +- char *dst;
4204 ++ char __iomem *dst;
4205 + u64 win_size, buf_size, total;
4206 + void *src;
4207 + int rc, node, i;
4208 +@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
4209 + if (buf_size > MAX_TEST_SIZE)
4210 + buf_size = MAX_TEST_SIZE;
4211 +
4212 +- dst = (char *)mw->vbase;
4213 ++ dst = (char __iomem *)mw->vbase;
4214 +
4215 + atomic_inc(&perf->tsync);
4216 + while (atomic_read(&perf->tsync) != perf->perf_threads)
4217 +diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
4218 +index fe600964fa50..88ccfeaa49c7 100644
4219 +--- a/drivers/pci/host/pci-imx6.c
4220 ++++ b/drivers/pci/host/pci-imx6.c
4221 +@@ -32,7 +32,7 @@
4222 + #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
4223 +
4224 + struct imx6_pcie {
4225 +- struct gpio_desc *reset_gpio;
4226 ++ int reset_gpio;
4227 + struct clk *pcie_bus;
4228 + struct clk *pcie_phy;
4229 + struct clk *pcie;
4230 +@@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
4231 + usleep_range(200, 500);
4232 +
4233 + /* Some boards don't have PCIe reset GPIO. */
4234 +- if (imx6_pcie->reset_gpio) {
4235 +- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
4236 ++ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
4237 ++ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
4238 + msleep(100);
4239 +- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
4240 ++ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
4241 + }
4242 + return 0;
4243 +
4244 +@@ -561,6 +561,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
4245 + {
4246 + struct imx6_pcie *imx6_pcie;
4247 + struct pcie_port *pp;
4248 ++ struct device_node *np = pdev->dev.of_node;
4249 + struct resource *dbi_base;
4250 + int ret;
4251 +
4252 +@@ -581,8 +582,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
4253 + return PTR_ERR(pp->dbi_base);
4254 +
4255 + /* Fetch GPIOs */
4256 +- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
4257 +- GPIOD_OUT_LOW);
4258 ++ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
4259 ++ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
4260 ++ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
4261 ++ GPIOF_OUT_INIT_LOW, "PCIe reset");
4262 ++ if (ret) {
4263 ++ dev_err(&pdev->dev, "unable to get reset gpio\n");
4264 ++ return ret;
4265 ++ }
4266 ++ }
4267 +
4268 + /* Fetch clocks */
4269 + imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
4270 +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4271 +index e96e86d2e745..3878d23ca7a8 100644
4272 +--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4273 ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
4274 +@@ -949,7 +949,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
4275 + struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
4276 + int eint_num, virq, eint_offset;
4277 + unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
4278 +- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
4279 ++ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
4280 ++ 128000, 256000};
4281 + const struct mtk_desc_pin *pin;
4282 + struct irq_data *d;
4283 +
4284 +@@ -967,9 +968,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
4285 + if (!mtk_eint_can_en_debounce(pctl, eint_num))
4286 + return -ENOSYS;
4287 +
4288 +- dbnc = ARRAY_SIZE(dbnc_arr);
4289 +- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
4290 +- if (debounce <= dbnc_arr[i]) {
4291 ++ dbnc = ARRAY_SIZE(debounce_time);
4292 ++ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
4293 ++ if (debounce <= debounce_time[i]) {
4294 + dbnc = i;
4295 + break;
4296 + }
4297 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
4298 +index d24e5f1d1525..bd2e657163b8 100644
4299 +--- a/drivers/pinctrl/pinctrl-single.c
4300 ++++ b/drivers/pinctrl/pinctrl-single.c
4301 +@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
4302 +
4303 + /* Parse pins in each row from LSB */
4304 + while (mask) {
4305 +- bit_pos = ffs(mask);
4306 ++ bit_pos = __ffs(mask);
4307 + pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
4308 +- mask_pos = ((pcs->fmask) << (bit_pos - 1));
4309 ++ mask_pos = ((pcs->fmask) << bit_pos);
4310 + val_pos = val & mask_pos;
4311 + submask = mask & mask_pos;
4312 +
4313 +@@ -1844,7 +1844,7 @@ static int pcs_probe(struct platform_device *pdev)
4314 + ret = of_property_read_u32(np, "pinctrl-single,function-mask",
4315 + &pcs->fmask);
4316 + if (!ret) {
4317 +- pcs->fshift = ffs(pcs->fmask) - 1;
4318 ++ pcs->fshift = __ffs(pcs->fmask);
4319 + pcs->fmax = pcs->fmask >> pcs->fshift;
4320 + } else {
4321 + /* If mask property doesn't exist, function mux is invalid. */
4322 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
4323 +index 73833079bac8..d6baea6a7544 100644
4324 +--- a/drivers/platform/x86/toshiba_acpi.c
4325 ++++ b/drivers/platform/x86/toshiba_acpi.c
4326 +@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
4327 + /* Field definitions */
4328 + #define HCI_ACCEL_MASK 0x7fff
4329 + #define HCI_HOTKEY_DISABLE 0x0b
4330 +-#define HCI_HOTKEY_ENABLE 0x01
4331 ++#define HCI_HOTKEY_ENABLE 0x09
4332 + #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
4333 + #define HCI_LCD_BRIGHTNESS_BITS 3
4334 + #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
4335 +diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
4336 +index 423ce087cd9c..5d5adee16886 100644
4337 +--- a/drivers/pwm/pwm-brcmstb.c
4338 ++++ b/drivers/pwm/pwm-brcmstb.c
4339 +@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
4340 +
4341 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4342 + p->base = devm_ioremap_resource(&pdev->dev, res);
4343 +- if (!p->base) {
4344 +- ret = -ENOMEM;
4345 ++ if (IS_ERR(p->base)) {
4346 ++ ret = PTR_ERR(p->base);
4347 + goto out_clk;
4348 + }
4349 +
4350 +diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
4351 +index 58f5d3b8e981..27343e1c43ef 100644
4352 +--- a/drivers/regulator/s5m8767.c
4353 ++++ b/drivers/regulator/s5m8767.c
4354 +@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
4355 + }
4356 + }
4357 +
4358 +- if (i < s5m8767->num_regulators)
4359 +- *enable_ctrl =
4360 +- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4361 ++ if (i >= s5m8767->num_regulators)
4362 ++ return -EINVAL;
4363 ++
4364 ++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
4365 +
4366 + return 0;
4367 + }
4368 +@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
4369 + else
4370 + regulators[id].vsel_mask = 0xff;
4371 +
4372 +- s5m8767_get_register(s5m8767, id, &enable_reg,
4373 ++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
4374 + &enable_val);
4375 ++ if (ret) {
4376 ++ dev_err(s5m8767->dev, "error reading registers\n");
4377 ++ return ret;
4378 ++ }
4379 + regulators[id].enable_reg = enable_reg;
4380 + regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
4381 + regulators[id].enable_val = enable_val;
4382 +diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
4383 +index 535050fc5e9f..08e0ff8c786a 100644
4384 +--- a/drivers/rtc/rtc-ds1685.c
4385 ++++ b/drivers/rtc/rtc-ds1685.c
4386 +@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
4387 + * Only use this where you are certain another lock will not be held.
4388 + */
4389 + static inline void
4390 +-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
4391 ++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
4392 + {
4393 +- spin_lock_irqsave(&rtc->lock, flags);
4394 ++ spin_lock_irqsave(&rtc->lock, *flags);
4395 + ds1685_rtc_switch_to_bank1(rtc);
4396 + }
4397 +
4398 +@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
4399 + {
4400 + struct ds1685_priv *rtc = dev_get_drvdata(dev);
4401 + u8 reg = 0, bit = 0, tmp;
4402 +- unsigned long flags = 0;
4403 ++ unsigned long flags;
4404 + long int val = 0;
4405 + const struct ds1685_rtc_ctrl_regs *reg_info =
4406 + ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
4407 +@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
4408 + bit = reg_info->bit;
4409 +
4410 + /* Safe to spinlock during a write. */
4411 +- ds1685_rtc_begin_ctrl_access(rtc, flags);
4412 ++ ds1685_rtc_begin_ctrl_access(rtc, &flags);
4413 + tmp = rtc->read(rtc, reg);
4414 + rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
4415 + ds1685_rtc_end_ctrl_access(rtc, flags);
4416 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
4417 +index 097325d96db5..b1b4746a0eab 100644
4418 +--- a/drivers/rtc/rtc-hym8563.c
4419 ++++ b/drivers/rtc/rtc-hym8563.c
4420 +@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
4421 + * it does not seem to carry it over a subsequent write/read.
4422 + * So we'll limit ourself to 100 years, starting at 2000 for now.
4423 + */
4424 +- buf[6] = tm->tm_year - 100;
4425 ++ buf[6] = bin2bcd(tm->tm_year - 100);
4426 +
4427 + /*
4428 + * CTL1 only contains TEST-mode bits apart from stop,
4429 +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
4430 +index 7184a0eda793..725dccae24e7 100644
4431 +--- a/drivers/rtc/rtc-max77686.c
4432 ++++ b/drivers/rtc/rtc-max77686.c
4433 +@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
4434 +
4435 + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
4436 + MAX77686_RTCIRQ_RTCA1);
4437 +- if (!info->virq) {
4438 ++ if (info->virq <= 0) {
4439 + ret = -ENXIO;
4440 + goto err_rtc;
4441 + }
4442 +diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
4443 +index bd911bafb809..17341feadad1 100644
4444 +--- a/drivers/rtc/rtc-rx8025.c
4445 ++++ b/drivers/rtc/rtc-rx8025.c
4446 +@@ -65,7 +65,6 @@
4447 +
4448 + static const struct i2c_device_id rx8025_id[] = {
4449 + { "rx8025", 0 },
4450 +- { "rv8803", 1 },
4451 + { }
4452 + };
4453 + MODULE_DEVICE_TABLE(i2c, rx8025_id);
4454 +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
4455 +index f64c282275b3..e1b86bb01062 100644
4456 +--- a/drivers/rtc/rtc-vr41xx.c
4457 ++++ b/drivers/rtc/rtc-vr41xx.c
4458 +@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
4459 + }
4460 +
4461 + static const struct rtc_class_ops vr41xx_rtc_ops = {
4462 +- .release = vr41xx_rtc_release,
4463 +- .ioctl = vr41xx_rtc_ioctl,
4464 +- .read_time = vr41xx_rtc_read_time,
4465 +- .set_time = vr41xx_rtc_set_time,
4466 +- .read_alarm = vr41xx_rtc_read_alarm,
4467 +- .set_alarm = vr41xx_rtc_set_alarm,
4468 ++ .release = vr41xx_rtc_release,
4469 ++ .ioctl = vr41xx_rtc_ioctl,
4470 ++ .read_time = vr41xx_rtc_read_time,
4471 ++ .set_time = vr41xx_rtc_set_time,
4472 ++ .read_alarm = vr41xx_rtc_read_alarm,
4473 ++ .set_alarm = vr41xx_rtc_set_alarm,
4474 ++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
4475 + };
4476 +
4477 + static int rtc_probe(struct platform_device *pdev)
4478 +diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
4479 +index e5647d59224f..0b331c9c0a8f 100644
4480 +--- a/drivers/scsi/device_handler/Kconfig
4481 ++++ b/drivers/scsi/device_handler/Kconfig
4482 +@@ -13,13 +13,13 @@ menuconfig SCSI_DH
4483 +
4484 + config SCSI_DH_RDAC
4485 + tristate "LSI RDAC Device Handler"
4486 +- depends on SCSI_DH
4487 ++ depends on SCSI_DH && SCSI
4488 + help
4489 + If you have a LSI RDAC select y. Otherwise, say N.
4490 +
4491 + config SCSI_DH_HP_SW
4492 + tristate "HP/COMPAQ MSA Device Handler"
4493 +- depends on SCSI_DH
4494 ++ depends on SCSI_DH && SCSI
4495 + help
4496 + If you have a HP/COMPAQ MSA device that requires START_STOP to
4497 + be sent to start it and cannot upgrade the firmware then select y.
4498 +@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
4499 +
4500 + config SCSI_DH_EMC
4501 + tristate "EMC CLARiiON Device Handler"
4502 +- depends on SCSI_DH
4503 ++ depends on SCSI_DH && SCSI
4504 + help
4505 + If you have a EMC CLARiiON select y. Otherwise, say N.
4506 +
4507 + config SCSI_DH_ALUA
4508 + tristate "SPC-3 ALUA Device Handler"
4509 +- depends on SCSI_DH
4510 ++ depends on SCSI_DH && SCSI
4511 + help
4512 + SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
4513 + Access (ALUA).
4514 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
4515 +index 97a1c1c33b05..00ce3e269a43 100644
4516 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
4517 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
4518 +@@ -6282,12 +6282,13 @@ out:
4519 + }
4520 +
4521 + for (i = 0; i < ioc->sge_count; i++) {
4522 +- if (kbuff_arr[i])
4523 ++ if (kbuff_arr[i]) {
4524 + dma_free_coherent(&instance->pdev->dev,
4525 + le32_to_cpu(kern_sge32[i].length),
4526 + kbuff_arr[i],
4527 + le32_to_cpu(kern_sge32[i].phys_addr));
4528 + kbuff_arr[i] = NULL;
4529 ++ }
4530 + }
4531 +
4532 + megasas_return_cmd(instance, cmd);
4533 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
4534 +index 7cb1b2d710c1..475fb44c1883 100644
4535 +--- a/drivers/spi/spi-rockchip.c
4536 ++++ b/drivers/spi/spi-rockchip.c
4537 +@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
4538 + static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4539 + {
4540 + u32 ser;
4541 +- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
4542 ++ struct spi_master *master = spi->master;
4543 ++ struct rockchip_spi *rs = spi_master_get_devdata(master);
4544 ++
4545 ++ pm_runtime_get_sync(rs->dev);
4546 +
4547 + ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
4548 +
4549 +@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
4550 + ser &= ~(1 << spi->chip_select);
4551 +
4552 + writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
4553 ++
4554 ++ pm_runtime_put_sync(rs->dev);
4555 + }
4556 +
4557 + static int rockchip_spi_prepare_message(struct spi_master *master,
4558 +diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
4559 +index 05de0dad8762..4c6f1d7d2eaf 100644
4560 +--- a/drivers/staging/rdma/hfi1/TODO
4561 ++++ b/drivers/staging/rdma/hfi1/TODO
4562 +@@ -3,4 +3,4 @@ July, 2015
4563 + - Remove unneeded file entries in sysfs
4564 + - Remove software processing of IB protocol and place in library for use
4565 + by qib, ipath (if still present), hfi1, and eventually soft-roce
4566 +-
4567 ++- Replace incorrect uAPI
4568 +diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
4569 +index d57d549052c8..29ae777556d2 100644
4570 +--- a/drivers/staging/rdma/hfi1/file_ops.c
4571 ++++ b/drivers/staging/rdma/hfi1/file_ops.c
4572 +@@ -52,6 +52,8 @@
4573 + #include <linux/vmalloc.h>
4574 + #include <linux/io.h>
4575 +
4576 ++#include <rdma/ib.h>
4577 ++
4578 + #include "hfi.h"
4579 + #include "pio.h"
4580 + #include "device.h"
4581 +@@ -194,6 +196,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
4582 + int uctxt_required = 1;
4583 + int must_be_root = 0;
4584 +
4585 ++ /* FIXME: This interface cannot continue out of staging */
4586 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
4587 ++ return -EACCES;
4588 ++
4589 + if (count < sizeof(cmd)) {
4590 + ret = -EINVAL;
4591 + goto bail;
4592 +diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
4593 +index b58e3fb9b311..433085a97626 100644
4594 +--- a/drivers/thermal/rockchip_thermal.c
4595 ++++ b/drivers/thermal/rockchip_thermal.c
4596 +@@ -693,15 +693,14 @@ static int rockchip_configure_from_dt(struct device *dev,
4597 + thermal->chip->tshut_temp);
4598 + thermal->tshut_temp = thermal->chip->tshut_temp;
4599 + } else {
4600 ++ if (shut_temp > INT_MAX) {
4601 ++ dev_err(dev, "Invalid tshut temperature specified: %d\n",
4602 ++ shut_temp);
4603 ++ return -ERANGE;
4604 ++ }
4605 + thermal->tshut_temp = shut_temp;
4606 + }
4607 +
4608 +- if (thermal->tshut_temp > INT_MAX) {
4609 +- dev_err(dev, "Invalid tshut temperature specified: %d\n",
4610 +- thermal->tshut_temp);
4611 +- return -ERANGE;
4612 +- }
4613 +-
4614 + if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
4615 + dev_warn(dev,
4616 + "Missing tshut mode property, using default (%s)\n",
4617 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
4618 +index 9eb1cff28bd4..b8b580e5ae6e 100644
4619 +--- a/drivers/usb/core/hcd-pci.c
4620 ++++ b/drivers/usb/core/hcd-pci.c
4621 +@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
4622 + if (companion->bus != pdev->bus ||
4623 + PCI_SLOT(companion->devfn) != slot)
4624 + continue;
4625 ++
4626 ++ /*
4627 ++ * Companion device should be a UHCI, OHCI or EHCI host
4628 ++ * controller; otherwise skip it.
4629 ++ */
4630 ++ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
4631 ++ companion->class != CL_EHCI)
4632 ++ continue;
4633 ++
4634 + companion_hcd = pci_get_drvdata(companion);
4635 + if (!companion_hcd || !companion_hcd->self.root_hub)
4636 + continue;
4637 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
4638 +index cf43e9e18368..79d895c2dd71 100644
4639 +--- a/drivers/usb/gadget/function/f_fs.c
4640 ++++ b/drivers/usb/gadget/function/f_fs.c
4641 +@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
4642 + work);
4643 + int ret = io_data->req->status ? io_data->req->status :
4644 + io_data->req->actual;
4645 ++ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
4646 +
4647 + if (io_data->read && ret > 0) {
4648 + use_mm(io_data->mm);
4649 +@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
4650 +
4651 + io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
4652 +
4653 +- if (io_data->ffs->ffs_eventfd &&
4654 +- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
4655 ++ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
4656 + eventfd_signal(io_data->ffs->ffs_eventfd, 1);
4657 +
4658 + usb_ep_free_request(io_data->ep, io_data->req);
4659 +
4660 +- io_data->kiocb->private = NULL;
4661 + if (io_data->read)
4662 + kfree(io_data->to_free);
4663 + kfree(io_data->buf);
4664 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4665 +index 5cd080e0a685..743d9a20e248 100644
4666 +--- a/drivers/usb/host/xhci-mem.c
4667 ++++ b/drivers/usb/host/xhci-mem.c
4668 +@@ -1873,6 +1873,12 @@ no_bw:
4669 + kfree(xhci->rh_bw);
4670 + kfree(xhci->ext_caps);
4671 +
4672 ++ xhci->usb2_ports = NULL;
4673 ++ xhci->usb3_ports = NULL;
4674 ++ xhci->port_array = NULL;
4675 ++ xhci->rh_bw = NULL;
4676 ++ xhci->ext_caps = NULL;
4677 ++
4678 + xhci->page_size = 0;
4679 + xhci->page_shift = 0;
4680 + xhci->bus_state[0].bus_suspended = 0;
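Note on the hunk above: this cleanup path can run more than once across a failed init and a later teardown, so the freed port and bandwidth arrays are reset to NULL to keep a second pass from freeing or dereferencing stale pointers. The free-then-NULL idiom in miniature, as a standalone userspace sketch (struct ctx and ctx_free() are invented for illustration):

#include <stdlib.h>

struct ctx { int *ports; };

static void ctx_free(struct ctx *c)
{
	free(c->ports);
	c->ports = NULL;	/* a second ctx_free() is now harmless */
}

int main(void)
{
	struct ctx c = { .ports = malloc(8 * sizeof(int)) };
	ctx_free(&c);
	ctx_free(&c);		/* safe: free(NULL) is a no-op */
	return 0;
}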
4681 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4682 +index f0640b7a1c42..48672fac7ff3 100644
4683 +--- a/drivers/usb/host/xhci-pci.c
4684 ++++ b/drivers/usb/host/xhci-pci.c
4685 +@@ -48,6 +48,7 @@
4686 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
4687 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
4688 + #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
4689 ++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
4690 +
4691 + static const char hcd_name[] = "xhci_hcd";
4692 +
4693 +@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4694 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
4695 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
4696 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
4697 +- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
4698 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
4699 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
4700 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
4701 + }
4702 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
4703 +@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
4704 + struct xhci_hcd *xhci;
4705 +
4706 + xhci = hcd_to_xhci(pci_get_drvdata(dev));
4707 ++ xhci->xhc_state |= XHCI_STATE_REMOVING;
4708 + if (xhci->shared_hcd) {
4709 + usb_remove_hcd(xhci->shared_hcd);
4710 + usb_put_hcd(xhci->shared_hcd);
4711 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
4712 +index d39d6bf1d090..d4962208be30 100644
4713 +--- a/drivers/usb/host/xhci-plat.c
4714 ++++ b/drivers/usb/host/xhci-plat.c
4715 +@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
4716 +
4717 + static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
4718 + {
4719 ++ struct usb_hcd *hcd = xhci_to_hcd(xhci);
4720 ++
4721 + /*
4722 + * As of now platform drivers don't provide MSI support so we ensure
4723 + * here that the generic code does not try to make a pci_dev from our
4724 + * dev struct in order to setup MSI
4725 + */
4726 + xhci->quirks |= XHCI_PLAT;
4727 ++
4728 ++ /*
4729 ++ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
4730 ++ * to 1. However, these SoCs don't support 64-bit memory address
4731 ++ * pointers. So this driver clears the AC64 bit of xhci->hcc_params
4732 ++ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
4733 ++ * xhci_gen_setup().
4734 ++ */
4735 ++ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
4736 ++ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
4737 ++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
4738 + }
4739 +
4740 + /* called during probe() after chip reset completes */
4741 +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
4742 +index 5a2e2e3936c4..529c3c40f901 100644
4743 +--- a/drivers/usb/host/xhci-plat.h
4744 ++++ b/drivers/usb/host/xhci-plat.h
4745 +@@ -14,7 +14,7 @@
4746 + #include "xhci.h" /* for hcd_to_xhci() */
4747 +
4748 + enum xhci_plat_type {
4749 +- XHCI_PLAT_TYPE_MARVELL_ARMADA,
4750 ++ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
4751 + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
4752 + XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
4753 + };
4754 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4755 +index 3915657e6078..a85a1c993d61 100644
4756 +--- a/drivers/usb/host/xhci-ring.c
4757 ++++ b/drivers/usb/host/xhci-ring.c
4758 +@@ -4014,7 +4014,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
4759 + int reserved_trbs = xhci->cmd_ring_reserved_trbs;
4760 + int ret;
4761 +
4762 +- if (xhci->xhc_state) {
4763 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4764 ++ (xhci->xhc_state & XHCI_STATE_HALTED)) {
4765 + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
4766 + return -ESHUTDOWN;
4767 + }
4768 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4769 +index 0c8087d3c313..8e713cca58ed 100644
4770 +--- a/drivers/usb/host/xhci.c
4771 ++++ b/drivers/usb/host/xhci.c
4772 +@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
4773 + "waited %u microseconds.\n",
4774 + XHCI_MAX_HALT_USEC);
4775 + if (!ret)
4776 +- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
4777 ++	/* Clear state flags, including dying, halted or removing */
4778 ++ xhci->xhc_state = 0;
4779 +
4780 + return ret;
4781 + }
4782 +@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
4783 + /* Resume root hubs only when have pending events. */
4784 + status = readl(&xhci->op_regs->status);
4785 + if (status & STS_EINT) {
4786 +- usb_hcd_resume_root_hub(hcd);
4787 + usb_hcd_resume_root_hub(xhci->shared_hcd);
4788 ++ usb_hcd_resume_root_hub(hcd);
4789 + }
4790 + }
4791 +
4792 +@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
4793 +
4794 + /* Re-enable port polling. */
4795 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
4796 +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4797 +- usb_hcd_poll_rh_status(hcd);
4798 + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
4799 + usb_hcd_poll_rh_status(xhci->shared_hcd);
4800 ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
4801 ++ usb_hcd_poll_rh_status(hcd);
4802 +
4803 + return retval;
4804 + }
4805 +@@ -2770,7 +2771,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
4806 + if (ret <= 0)
4807 + return ret;
4808 + xhci = hcd_to_xhci(hcd);
4809 +- if (xhci->xhc_state & XHCI_STATE_DYING)
4810 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
4811 ++ (xhci->xhc_state & XHCI_STATE_REMOVING))
4812 + return -ENODEV;
4813 +
4814 + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
4815 +@@ -3817,7 +3819,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4816 +
4817 + mutex_lock(&xhci->mutex);
4818 +
4819 +- if (xhci->xhc_state) /* dying or halted */
4820 ++ if (xhci->xhc_state) /* dying, removing or halted */
4821 + goto out;
4822 +
4823 + if (!udev->slot_id) {
4824 +@@ -4944,6 +4946,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4825 + return retval;
4826 + xhci_dbg(xhci, "Reset complete\n");
4827 +
4828 ++ /*
4829 ++ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
4830 ++ * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
4831 ++ * support 64-bit memory address pointers. So this driver clears the
4832 ++ * AC64 bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
4833 ++ * DMA_BIT_MASK(32)) in xhci_gen_setup().
4834 ++ */
4835 ++ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
4836 ++ xhci->hcc_params &= ~BIT(0);
4837 ++
4838 + /* Set dma_mask and coherent_dma_mask to 64-bits,
4839 + * if xHC supports 64-bit addressing */
4840 + if (HCC_64BIT_ADDR(xhci->hcc_params) &&
4841 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
4842 +index cc651383ce5a..1cdea4a8c895 100644
4843 +--- a/drivers/usb/host/xhci.h
4844 ++++ b/drivers/usb/host/xhci.h
4845 +@@ -1596,6 +1596,7 @@ struct xhci_hcd {
4846 + */
4847 + #define XHCI_STATE_DYING (1 << 0)
4848 + #define XHCI_STATE_HALTED (1 << 1)
4849 ++#define XHCI_STATE_REMOVING (1 << 2)
4850 + /* Statistics */
4851 + int error_bitmask;
4852 + unsigned int quirks;
4853 +@@ -1632,6 +1633,7 @@ struct xhci_hcd {
4854 + #define XHCI_PME_STUCK_QUIRK (1 << 20)
4855 + #define XHCI_MTK_HOST (1 << 21)
4856 + #define XHCI_SSIC_PORT_UNUSED (1 << 22)
4857 ++#define XHCI_NO_64BIT_SUPPORT (1 << 23)
4858 + unsigned int num_active_eps;
4859 + unsigned int limit_active_eps;
4860 + /* There are two roothubs to keep track of bus suspend info for */
4861 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
4862 +index facaaf003f19..e40da7759a0e 100644
4863 +--- a/drivers/usb/usbip/usbip_common.c
4864 ++++ b/drivers/usb/usbip/usbip_common.c
4865 +@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
4866 + if (!(size > 0))
4867 + return 0;
4868 +
4869 ++ if (size > urb->transfer_buffer_length) {
4870 ++ /* should not happen, probably malicious packet */
4871 ++ if (ud->side == USBIP_STUB) {
4872 ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
4873 ++ return 0;
4874 ++ } else {
4875 ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
4876 ++ return -EPIPE;
4877 ++ }
4878 ++ }
4879 ++
4880 + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
4881 + if (ret != size) {
4882 + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
4883 +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
4884 +index 8ea45a5cd806..d889ef2048df 100644
4885 +--- a/drivers/video/fbdev/Kconfig
4886 ++++ b/drivers/video/fbdev/Kconfig
4887 +@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
4888 + select FB_SYS_IMAGEBLIT
4889 + select FB_SYS_FOPS
4890 + select FB_DEFERRED_IO
4891 +- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
4892 + select XEN_XENBUS_FRONTEND
4893 + default y
4894 + help
4895 +diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
4896 +index 9362424c2340..f9ef06d0cd48 100644
4897 +--- a/drivers/video/fbdev/amba-clcd.c
4898 ++++ b/drivers/video/fbdev/amba-clcd.c
4899 +@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
4900 + fb->off_ienb = CLCD_PL111_IENB;
4901 + fb->off_cntl = CLCD_PL111_CNTL;
4902 + } else {
4903 +-#ifdef CONFIG_ARCH_VERSATILE
4904 +- fb->off_ienb = CLCD_PL111_IENB;
4905 +- fb->off_cntl = CLCD_PL111_CNTL;
4906 +-#else
4907 +- fb->off_ienb = CLCD_PL110_IENB;
4908 +- fb->off_cntl = CLCD_PL110_CNTL;
4909 +-#endif
4910 ++ if (of_machine_is_compatible("arm,versatile-ab") ||
4911 ++ of_machine_is_compatible("arm,versatile-pb")) {
4912 ++ fb->off_ienb = CLCD_PL111_IENB;
4913 ++ fb->off_cntl = CLCD_PL111_CNTL;
4914 ++ } else {
4915 ++ fb->off_ienb = CLCD_PL110_IENB;
4916 ++ fb->off_cntl = CLCD_PL110_CNTL;
4917 ++ }
4918 + }
4919 +
4920 + fb->clk = clk_get(&fb->dev->dev, NULL);
4921 +diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
4922 +index 6b2a06d09f2b..d8d583d32a37 100644
4923 +--- a/drivers/video/fbdev/da8xx-fb.c
4924 ++++ b/drivers/video/fbdev/da8xx-fb.c
4925 +@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
4926 + .lower_margin = 2,
4927 + .hsync_len = 0,
4928 + .vsync_len = 0,
4929 +- .sync = FB_SYNC_CLK_INVERT |
4930 +- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4931 ++ .sync = FB_SYNC_CLK_INVERT,
4932 + },
4933 + /* Sharp LK043T1DG01 */
4934 + [1] = {
4935 +@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
4936 + .lower_margin = 2,
4937 + .hsync_len = 41,
4938 + .vsync_len = 10,
4939 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4940 ++ .sync = 0,
4941 + .flag = 0,
4942 + },
4943 + [2] = {
4944 +@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
4945 + .lower_margin = 10,
4946 + .hsync_len = 10,
4947 + .vsync_len = 10,
4948 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
4949 ++ .sync = 0,
4950 + .flag = 0,
4951 + },
4952 + [3] = {
4953 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4954 +index 4545e2e2ad45..d8d68af5aef0 100644
4955 +--- a/fs/btrfs/disk-io.c
4956 ++++ b/fs/btrfs/disk-io.c
4957 +@@ -1830,7 +1830,7 @@ static int cleaner_kthread(void *arg)
4958 + */
4959 + btrfs_delete_unused_bgs(root->fs_info);
4960 + sleep:
4961 +- if (!try_to_freeze() && !again) {
4962 ++ if (!again) {
4963 + set_current_state(TASK_INTERRUPTIBLE);
4964 + if (!kthread_should_stop())
4965 + schedule();
4966 +diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
4967 +index 0e1e61a7ec23..d39f714dabeb 100644
4968 +--- a/fs/btrfs/tests/btrfs-tests.c
4969 ++++ b/fs/btrfs/tests/btrfs-tests.c
4970 +@@ -189,12 +189,6 @@ btrfs_alloc_dummy_block_group(unsigned long length)
4971 + kfree(cache);
4972 + return NULL;
4973 + }
4974 +- cache->fs_info = btrfs_alloc_dummy_fs_info();
4975 +- if (!cache->fs_info) {
4976 +- kfree(cache->free_space_ctl);
4977 +- kfree(cache);
4978 +- return NULL;
4979 +- }
4980 +
4981 + cache->key.objectid = 0;
4982 + cache->key.offset = length;
4983 +diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
4984 +index d05fe1ab4808..7cea4462acd5 100644
4985 +--- a/fs/btrfs/tests/free-space-tree-tests.c
4986 ++++ b/fs/btrfs/tests/free-space-tree-tests.c
4987 +@@ -485,6 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps)
4988 + cache->bitmap_low_thresh = 0;
4989 + cache->bitmap_high_thresh = (u32)-1;
4990 + cache->needs_free_space = 1;
4991 ++ cache->fs_info = root->fs_info;
4992 +
4993 + btrfs_init_dummy_trans(&trans);
4994 +
4995 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
4996 +index bece948b363d..8580831ed237 100644
4997 +--- a/fs/debugfs/inode.c
4998 ++++ b/fs/debugfs/inode.c
4999 +@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
5000 + if (unlikely(!inode))
5001 + return failed_creating(dentry);
5002 +
5003 +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
5004 ++ make_empty_dir_inode(inode);
5005 + inode->i_flags |= S_AUTOMOUNT;
5006 + inode->i_private = data;
5007 + dentry->d_fsdata = (void *)f;
5008 +diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
5009 +index ecb54394492a..25634c353191 100644
5010 +--- a/fs/ext4/crypto.c
5011 ++++ b/fs/ext4/crypto.c
5012 +@@ -34,6 +34,7 @@
5013 + #include <linux/random.h>
5014 + #include <linux/scatterlist.h>
5015 + #include <linux/spinlock_types.h>
5016 ++#include <linux/namei.h>
5017 +
5018 + #include "ext4_extents.h"
5019 + #include "xattr.h"
5020 +@@ -479,6 +480,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
5021 + struct ext4_crypt_info *ci;
5022 + int dir_has_key, cached_with_key;
5023 +
5024 ++ if (flags & LOOKUP_RCU)
5025 ++ return -ECHILD;
5026 ++
5027 + dir = dget_parent(dentry);
5028 + if (!ext4_encrypted_inode(d_inode(dir))) {
5029 + dput(dir);
5030 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5031 +index aee960b1af34..e6218cbc8332 100644
5032 +--- a/fs/ext4/inode.c
5033 ++++ b/fs/ext4/inode.c
5034 +@@ -5261,6 +5261,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5035 + might_sleep();
5036 + trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5037 + err = ext4_reserve_inode_write(handle, inode, &iloc);
5038 ++ if (err)
5039 ++ return err;
5040 + if (ext4_handle_valid(handle) &&
5041 + EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5042 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5043 +@@ -5291,9 +5293,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5044 + }
5045 + }
5046 + }
5047 +- if (!err)
5048 +- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5049 +- return err;
5050 ++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
5051 + }
5052 +
5053 + /*
5054 +diff --git a/fs/f2fs/crypto_policy.c b/fs/f2fs/crypto_policy.c
5055 +index d4a96af513c2..596f02490f27 100644
5056 +--- a/fs/f2fs/crypto_policy.c
5057 ++++ b/fs/f2fs/crypto_policy.c
5058 +@@ -192,7 +192,8 @@ int f2fs_inherit_context(struct inode *parent, struct inode *child,
5059 + return res;
5060 +
5061 + ci = F2FS_I(parent)->i_crypt_info;
5062 +- BUG_ON(ci == NULL);
5063 ++ if (ci == NULL)
5064 ++ return -ENOKEY;
5065 +
5066 + ctx.format = F2FS_ENCRYPTION_CONTEXT_FORMAT_V1;
5067 +
5068 +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
5069 +index 5c06db17e41f..44802599fa67 100644
5070 +--- a/fs/f2fs/data.c
5071 ++++ b/fs/f2fs/data.c
5072 +@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
5073 + f2fs_restore_and_release_control_page(&page);
5074 +
5075 + if (unlikely(bio->bi_error)) {
5076 +- set_page_dirty(page);
5077 + set_bit(AS_EIO, &page->mapping->flags);
5078 + f2fs_stop_checkpoint(sbi);
5079 + }
5080 +@@ -504,7 +503,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
5081 + struct dnode_of_data dn;
5082 + u64 start = F2FS_BYTES_TO_BLK(offset);
5083 + u64 len = F2FS_BYTES_TO_BLK(count);
5084 +- bool allocated;
5085 ++ bool allocated = false;
5086 + u64 end_offset;
5087 + int err = 0;
5088 +
5089 +@@ -546,7 +545,7 @@ static int __allocate_data_blocks(struct inode *inode, loff_t offset,
5090 + f2fs_put_dnode(&dn);
5091 + f2fs_unlock_op(sbi);
5092 +
5093 +- f2fs_balance_fs(sbi, dn.node_changed);
5094 ++ f2fs_balance_fs(sbi, allocated);
5095 + }
5096 + return err;
5097 +
5098 +@@ -556,7 +555,7 @@ sync_out:
5099 + f2fs_put_dnode(&dn);
5100 + out:
5101 + f2fs_unlock_op(sbi);
5102 +- f2fs_balance_fs(sbi, dn.node_changed);
5103 ++ f2fs_balance_fs(sbi, allocated);
5104 + return err;
5105 + }
5106 +
5107 +@@ -650,14 +649,14 @@ get_next:
5108 + if (dn.ofs_in_node >= end_offset) {
5109 + if (allocated)
5110 + sync_inode_page(&dn);
5111 +- allocated = false;
5112 + f2fs_put_dnode(&dn);
5113 +
5114 + if (create) {
5115 + f2fs_unlock_op(sbi);
5116 +- f2fs_balance_fs(sbi, dn.node_changed);
5117 ++ f2fs_balance_fs(sbi, allocated);
5118 + f2fs_lock_op(sbi);
5119 + }
5120 ++ allocated = false;
5121 +
5122 + set_new_dnode(&dn, inode, NULL, NULL, 0);
5123 + err = get_dnode_of_data(&dn, pgofs, mode);
5124 +@@ -715,7 +714,7 @@ put_out:
5125 + unlock_out:
5126 + if (create) {
5127 + f2fs_unlock_op(sbi);
5128 +- f2fs_balance_fs(sbi, dn.node_changed);
5129 ++ f2fs_balance_fs(sbi, allocated);
5130 + }
5131 + out:
5132 + trace_f2fs_map_blocks(inode, map, err);
5133 +diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
5134 +index faa7495e2d7e..30e6b6563494 100644
5135 +--- a/fs/f2fs/dir.c
5136 ++++ b/fs/f2fs/dir.c
5137 +@@ -892,11 +892,19 @@ out:
5138 + return err;
5139 + }
5140 +
5141 ++static int f2fs_dir_open(struct inode *inode, struct file *filp)
5142 ++{
5143 ++ if (f2fs_encrypted_inode(inode))
5144 ++ return f2fs_get_encryption_info(inode) ? -EACCES : 0;
5145 ++ return 0;
5146 ++}
5147 ++
5148 + const struct file_operations f2fs_dir_operations = {
5149 + .llseek = generic_file_llseek,
5150 + .read = generic_read_dir,
5151 + .iterate = f2fs_readdir,
5152 + .fsync = f2fs_sync_file,
5153 ++ .open = f2fs_dir_open,
5154 + .unlocked_ioctl = f2fs_ioctl,
5155 + #ifdef CONFIG_COMPAT
5156 + .compat_ioctl = f2fs_compat_ioctl,
5157 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
5158 +index ea272be62677..5a322bc00ac4 100644
5159 +--- a/fs/f2fs/file.c
5160 ++++ b/fs/f2fs/file.c
5161 +@@ -425,6 +425,8 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
5162 + err = f2fs_get_encryption_info(inode);
5163 + if (err)
5164 + return 0;
5165 ++ if (!f2fs_encrypted_inode(inode))
5166 ++ return -ENOKEY;
5167 + }
5168 +
5169 + /* we don't need to use inline_data strictly */
5170 +@@ -444,7 +446,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
5171 + if (!ret && f2fs_encrypted_inode(inode)) {
5172 + ret = f2fs_get_encryption_info(inode);
5173 + if (ret)
5174 +- ret = -EACCES;
5175 ++ return -EACCES;
5176 ++ if (!f2fs_encrypted_inode(inode))
5177 ++ return -ENOKEY;
5178 + }
5179 + return ret;
5180 + }
5181 +diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
5182 +index 6f944e5eb76e..7e9e38769660 100644
5183 +--- a/fs/f2fs/namei.c
5184 ++++ b/fs/f2fs/namei.c
5185 +@@ -980,12 +980,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
5186 + }
5187 + memcpy(cstr.name, sd->encrypted_path, cstr.len);
5188 +
5189 +- /* this is broken symlink case */
5190 +- if (unlikely(cstr.name[0] == 0)) {
5191 +- res = -ENOENT;
5192 +- goto errout;
5193 +- }
5194 +-
5195 + if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
5196 + max_size) {
5197 + /* Symlink data on the disk is corrupted */
5198 +@@ -1002,6 +996,12 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry,
5199 +
5200 + kfree(cstr.name);
5201 +
5202 ++ /* this is broken symlink case */
5203 ++ if (unlikely(pstr.name[0] == 0)) {
5204 ++ res = -ENOENT;
5205 ++ goto errout;
5206 ++ }
5207 ++
5208 + paddr = pstr.name;
5209 +
5210 + /* Null-terminate the name */
5211 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
5212 +index 6134832baaaf..013a62b2f8ca 100644
5213 +--- a/fs/f2fs/super.c
5214 ++++ b/fs/f2fs/super.c
5215 +@@ -926,9 +926,25 @@ static loff_t max_file_blocks(void)
5216 + return result;
5217 + }
5218 +
5219 ++static int __f2fs_commit_super(struct buffer_head *bh,
5220 ++ struct f2fs_super_block *super)
5221 ++{
5222 ++ lock_buffer(bh);
5223 ++ if (super)
5224 ++ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
5225 ++ set_buffer_uptodate(bh);
5226 ++ set_buffer_dirty(bh);
5227 ++ unlock_buffer(bh);
5228 ++
5229 ++	/* it's a rare case, so we can do FUA all the time */
5230 ++ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
5231 ++}
5232 ++
5233 + static inline bool sanity_check_area_boundary(struct super_block *sb,
5234 +- struct f2fs_super_block *raw_super)
5235 ++ struct buffer_head *bh)
5236 + {
5237 ++ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
5238 ++ (bh->b_data + F2FS_SUPER_OFFSET);
5239 + u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
5240 + u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
5241 + u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
5242 +@@ -942,6 +958,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
5243 + u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
5244 + u32 segment_count = le32_to_cpu(raw_super->segment_count);
5245 + u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
5246 ++ u64 main_end_blkaddr = main_blkaddr +
5247 ++ (segment_count_main << log_blocks_per_seg);
5248 ++ u64 seg_end_blkaddr = segment0_blkaddr +
5249 ++ (segment_count << log_blocks_per_seg);
5250 +
5251 + if (segment0_blkaddr != cp_blkaddr) {
5252 + f2fs_msg(sb, KERN_INFO,
5253 +@@ -986,22 +1006,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
5254 + return true;
5255 + }
5256 +
5257 +- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
5258 +- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
5259 ++ if (main_end_blkaddr > seg_end_blkaddr) {
5260 + f2fs_msg(sb, KERN_INFO,
5261 +- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
5262 ++ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
5263 + main_blkaddr,
5264 +- segment0_blkaddr + (segment_count << log_blocks_per_seg),
5265 ++ segment0_blkaddr +
5266 ++ (segment_count << log_blocks_per_seg),
5267 + segment_count_main << log_blocks_per_seg);
5268 + return true;
5269 ++ } else if (main_end_blkaddr < seg_end_blkaddr) {
5270 ++ int err = 0;
5271 ++ char *res;
5272 ++
5273 ++ /* fix in-memory information all the time */
5274 ++ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
5275 ++ segment0_blkaddr) >> log_blocks_per_seg);
5276 ++
5277 ++ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
5278 ++ res = "internally";
5279 ++ } else {
5280 ++ err = __f2fs_commit_super(bh, NULL);
5281 ++ res = err ? "failed" : "done";
5282 ++ }
5283 ++ f2fs_msg(sb, KERN_INFO,
5284 ++ "Fix alignment : %s, start(%u) end(%u) block(%u)",
5285 ++ res, main_blkaddr,
5286 ++ segment0_blkaddr +
5287 ++ (segment_count << log_blocks_per_seg),
5288 ++ segment_count_main << log_blocks_per_seg);
5289 ++ if (err)
5290 ++ return true;
5291 + }
5292 +-
5293 + return false;
5294 + }
5295 +
5296 + static int sanity_check_raw_super(struct super_block *sb,
5297 +- struct f2fs_super_block *raw_super)
5298 ++ struct buffer_head *bh)
5299 + {
5300 ++ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
5301 ++ (bh->b_data + F2FS_SUPER_OFFSET);
5302 + unsigned int blocksize;
5303 +
5304 + if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
5305 +@@ -1068,7 +1111,7 @@ static int sanity_check_raw_super(struct super_block *sb,
5306 + }
5307 +
5308 + /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
5309 +- if (sanity_check_area_boundary(sb, raw_super))
5310 ++ if (sanity_check_area_boundary(sb, bh))
5311 + return 1;
5312 +
5313 + return 0;
5314 +@@ -1134,103 +1177,87 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
5315 +
5316 + /*
5317 + * Read f2fs raw super block.
5318 +- * Because we have two copies of super block, so read the first one at first,
5319 +- * if the first one is invalid, move to read the second one.
5320 ++ * Because we have two copies of super block, so read both of them
5321 ++ * to get the first valid one. If any one of them is broken, we pass
5322 ++ * them recovery flag back to the caller.
5323 + */
5324 + static int read_raw_super_block(struct super_block *sb,
5325 + struct f2fs_super_block **raw_super,
5326 + int *valid_super_block, int *recovery)
5327 + {
5328 +- int block = 0;
5329 ++ int block;
5330 + struct buffer_head *bh;
5331 +- struct f2fs_super_block *super, *buf;
5332 ++ struct f2fs_super_block *super;
5333 + int err = 0;
5334 +
5335 + super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
5336 + if (!super)
5337 + return -ENOMEM;
5338 +-retry:
5339 +- bh = sb_bread(sb, block);
5340 +- if (!bh) {
5341 +- *recovery = 1;
5342 +- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
5343 ++
5344 ++ for (block = 0; block < 2; block++) {
5345 ++ bh = sb_bread(sb, block);
5346 ++ if (!bh) {
5347 ++ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
5348 + block + 1);
5349 +- err = -EIO;
5350 +- goto next;
5351 +- }
5352 ++ err = -EIO;
5353 ++ continue;
5354 ++ }
5355 +
5356 +- buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);
5357 ++ /* sanity checking of raw super */
5358 ++ if (sanity_check_raw_super(sb, bh)) {
5359 ++ f2fs_msg(sb, KERN_ERR,
5360 ++ "Can't find valid F2FS filesystem in %dth superblock",
5361 ++ block + 1);
5362 ++ err = -EINVAL;
5363 ++ brelse(bh);
5364 ++ continue;
5365 ++ }
5366 +
5367 +- /* sanity checking of raw super */
5368 +- if (sanity_check_raw_super(sb, buf)) {
5369 ++ if (!*raw_super) {
5370 ++ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
5371 ++ sizeof(*super));
5372 ++ *valid_super_block = block;
5373 ++ *raw_super = super;
5374 ++ }
5375 + brelse(bh);
5376 +- *recovery = 1;
5377 +- f2fs_msg(sb, KERN_ERR,
5378 +- "Can't find valid F2FS filesystem in %dth superblock",
5379 +- block + 1);
5380 +- err = -EINVAL;
5381 +- goto next;
5382 + }
5383 +
5384 +- if (!*raw_super) {
5385 +- memcpy(super, buf, sizeof(*super));
5386 +- *valid_super_block = block;
5387 +- *raw_super = super;
5388 +- }
5389 +- brelse(bh);
5390 +-
5391 +-next:
5392 +- /* check the validity of the second superblock */
5393 +- if (block == 0) {
5394 +- block++;
5395 +- goto retry;
5396 +- }
5397 ++	/* Failed to read any one of the superblocks */
5398 ++ if (err < 0)
5399 ++ *recovery = 1;
5400 +
5401 + /* No valid superblock */
5402 +- if (!*raw_super) {
5403 ++ if (!*raw_super)
5404 + kfree(super);
5405 +- return err;
5406 +- }
5407 ++ else
5408 ++ err = 0;
5409 +
5410 +- return 0;
5411 ++ return err;
5412 + }
5413 +
5414 +-static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
5415 ++int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
5416 + {
5417 +- struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
5418 + struct buffer_head *bh;
5419 + int err;
5420 +
5421 +- bh = sb_getblk(sbi->sb, block);
5422 ++ /* write back-up superblock first */
5423 ++ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
5424 + if (!bh)
5425 + return -EIO;
5426 +-
5427 +- lock_buffer(bh);
5428 +- memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
5429 +- set_buffer_uptodate(bh);
5430 +- set_buffer_dirty(bh);
5431 +- unlock_buffer(bh);
5432 +-
5433 +- /* it's rare case, we can do fua all the time */
5434 +- err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
5435 ++ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
5436 + brelse(bh);
5437 +
5438 +- return err;
5439 +-}
5440 +-
5441 +-int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
5442 +-{
5443 +- int err;
5444 +-
5445 +- /* write back-up superblock first */
5446 +- err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);
5447 +-
5448 + /* if we are in recovery path, skip writing valid superblock */
5449 + if (recover || err)
5450 + return err;
5451 +
5452 + /* write current valid superblock */
5453 +- return __f2fs_commit_super(sbi, sbi->valid_super_block);
5454 ++ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
5455 ++ if (!bh)
5456 ++ return -EIO;
5457 ++ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
5458 ++ brelse(bh);
5459 ++ return err;
5460 + }
5461 +
5462 + static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
5463 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
5464 +index fa95ab2d3674..9d2f3e0a6360 100644
5465 +--- a/fs/proc/task_mmu.c
5466 ++++ b/fs/proc/task_mmu.c
5467 +@@ -1504,6 +1504,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
5468 + return page;
5469 + }
5470 +
5471 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5472 ++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
5473 ++ struct vm_area_struct *vma,
5474 ++ unsigned long addr)
5475 ++{
5476 ++ struct page *page;
5477 ++ int nid;
5478 ++
5479 ++ if (!pmd_present(pmd))
5480 ++ return NULL;
5481 ++
5482 ++ page = vm_normal_page_pmd(vma, addr, pmd);
5483 ++ if (!page)
5484 ++ return NULL;
5485 ++
5486 ++ if (PageReserved(page))
5487 ++ return NULL;
5488 ++
5489 ++ nid = page_to_nid(page);
5490 ++ if (!node_isset(nid, node_states[N_MEMORY]))
5491 ++ return NULL;
5492 ++
5493 ++ return page;
5494 ++}
5495 ++#endif
5496 ++
5497 + static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5498 + unsigned long end, struct mm_walk *walk)
5499 + {
5500 +@@ -1513,14 +1539,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5501 + pte_t *orig_pte;
5502 + pte_t *pte;
5503 +
5504 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5505 + ptl = pmd_trans_huge_lock(pmd, vma);
5506 + if (ptl) {
5507 +- pte_t huge_pte = *(pte_t *)pmd;
5508 + struct page *page;
5509 +
5510 +- page = can_gather_numa_stats(huge_pte, vma, addr);
5511 ++ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
5512 + if (page)
5513 +- gather_stats(page, md, pte_dirty(huge_pte),
5514 ++ gather_stats(page, md, pmd_dirty(*pmd),
5515 + HPAGE_PMD_SIZE/PAGE_SIZE);
5516 + spin_unlock(ptl);
5517 + return 0;
5518 +@@ -1528,6 +1554,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
5519 +
5520 + if (pmd_trans_unstable(pmd))
5521 + return 0;
5522 ++#endif
5523 + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5524 + do {
5525 + struct page *page = can_gather_numa_stats(*pte, vma, addr);
5526 +diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
5527 +index e56272c919b5..bf2d34c9d804 100644
5528 +--- a/include/asm-generic/futex.h
5529 ++++ b/include/asm-generic/futex.h
5530 +@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5531 + u32 val;
5532 +
5533 + preempt_disable();
5534 +- if (unlikely(get_user(val, uaddr) != 0))
5535 ++ if (unlikely(get_user(val, uaddr) != 0)) {
5536 ++ preempt_enable();
5537 + return -EFAULT;
5538 ++ }
5539 +
5540 +- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
5541 ++ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
5542 ++ preempt_enable();
5543 + return -EFAULT;
5544 ++ }
5545 +
5546 + *uval = val;
5547 + preempt_enable();
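Note on the hunk above: both early -EFAULT returns in futex_atomic_cmpxchg_inatomic() now re-enable preemption, keeping the preempt_disable()/preempt_enable() pair balanced on every exit path. A minimal userspace sketch of that invariant, with mocked stand-ins for the kernel primitives (preempt_count, get_user_mock() and the hard-coded errno values are simulation only):

#include <stdio.h>

static int preempt_count;				/* stands in for the per-CPU count */
static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

static int get_user_mock(int *val) { (void)val; return -1; }	/* simulate a fault */

static int cmpxchg_inatomic(int *uval)
{
	int val;

	preempt_disable();
	if (get_user_mock(&val) != 0) {
		preempt_enable();	/* balance before the early return */
		return -14;		/* -EFAULT */
	}
	*uval = val;
	preempt_enable();
	return 0;
}

int main(void)
{
	int v;
	cmpxchg_inatomic(&v);
	printf("preempt_count after faulting path: %d\n", preempt_count);	/* prints 0 */
	return 0;
}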
5548 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
5549 +index 461a0558bca4..cebecff536a3 100644
5550 +--- a/include/drm/drm_cache.h
5551 ++++ b/include/drm/drm_cache.h
5552 +@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
5553 + {
5554 + #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
5555 + return false;
5556 ++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
5557 ++ return false;
5558 + #else
5559 + return true;
5560 + #endif
5561 +diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
5562 +index 42cf2d991bf4..4ea7e55f20b0 100644
5563 +--- a/include/keys/trusted-type.h
5564 ++++ b/include/keys/trusted-type.h
5565 +@@ -38,7 +38,7 @@ struct trusted_key_options {
5566 + unsigned char pcrinfo[MAX_PCRINFO_SIZE];
5567 + int pcrlock;
5568 + uint32_t hash;
5569 +- uint32_t digest_len;
5570 ++ uint32_t policydigest_len;
5571 + unsigned char policydigest[MAX_DIGEST_SIZE];
5572 + uint32_t policyhandle;
5573 + };
5574 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
5575 +index 89d944b25d87..7fc7cb7872e3 100644
5576 +--- a/include/linux/cgroup-defs.h
5577 ++++ b/include/linux/cgroup-defs.h
5578 +@@ -442,6 +442,7 @@ struct cgroup_subsys {
5579 + int (*can_attach)(struct cgroup_taskset *tset);
5580 + void (*cancel_attach)(struct cgroup_taskset *tset);
5581 + void (*attach)(struct cgroup_taskset *tset);
5582 ++ void (*post_attach)(void);
5583 + int (*can_fork)(struct task_struct *task);
5584 + void (*cancel_fork)(struct task_struct *task);
5585 + void (*fork)(struct task_struct *task);
5586 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
5587 +index fea160ee5803..85a868ccb493 100644
5588 +--- a/include/linux/cpuset.h
5589 ++++ b/include/linux/cpuset.h
5590 +@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
5591 + task_unlock(current);
5592 + }
5593 +
5594 +-extern void cpuset_post_attach_flush(void);
5595 +-
5596 + #else /* !CONFIG_CPUSETS */
5597 +
5598 + static inline bool cpusets_enabled(void) { return false; }
5599 +@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
5600 + return false;
5601 + }
5602 +
5603 +-static inline void cpuset_post_attach_flush(void)
5604 +-{
5605 +-}
5606 +-
5607 + #endif /* !CONFIG_CPUSETS */
5608 +
5609 + #endif /* _LINUX_CPUSET_H */
5610 +diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
5611 +index 987764afa65c..f8b83792939b 100644
5612 +--- a/include/linux/mlx5/device.h
5613 ++++ b/include/linux/mlx5/device.h
5614 +@@ -363,6 +363,17 @@ enum {
5615 + MLX5_CAP_OFF_CMDIF_CSUM = 46,
5616 + };
5617 +
5618 ++enum {
5619 ++ /*
5620 ++ * Max wqe size for rdma read is 512 bytes, so this
5621 ++ * limits our max_sge_rd as the wqe needs to fit:
5622 ++ * - ctrl segment (16 bytes)
5623 ++ * - rdma segment (16 bytes)
5624 ++ * - scatter elements (16 bytes each)
5625 ++ */
5626 ++ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
5627 ++};
5628 ++
5629 + struct mlx5_inbox_hdr {
5630 + __be16 opcode;
5631 + u8 rsvd[4];
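For reference, the MLX5_MAX_SGE_RD budget above works out to (512 - 16 - 16) / 16 = 30 scatter elements per RDMA-read WQE. A trivial standalone check of that arithmetic (the enum names here are illustrative, not the driver's):

#include <assert.h>

enum {
	WQE_MAX_BYTES = 512,	/* max wqe size for rdma read */
	CTRL_SEG      = 16,	/* ctrl segment */
	RDMA_SEG      = 16,	/* rdma segment */
	SGE_BYTES     = 16,	/* one scatter element */
};

int main(void)
{
	assert((WQE_MAX_BYTES - CTRL_SEG - RDMA_SEG) / SGE_BYTES == 30);
	return 0;
}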
5632 +diff --git a/include/linux/mm.h b/include/linux/mm.h
5633 +index 516e14944339..a6c240e885c0 100644
5634 +--- a/include/linux/mm.h
5635 ++++ b/include/linux/mm.h
5636 +@@ -1010,6 +1010,8 @@ static inline bool page_mapped(struct page *page)
5637 + page = compound_head(page);
5638 + if (atomic_read(compound_mapcount_ptr(page)) >= 0)
5639 + return true;
5640 ++ if (PageHuge(page))
5641 ++ return false;
5642 + for (i = 0; i < hpage_nr_pages(page); i++) {
5643 + if (atomic_read(&page[i]._mapcount) >= 0)
5644 + return true;
5645 +@@ -1117,6 +1119,8 @@ struct zap_details {
5646 +
5647 + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
5648 + pte_t pte);
5649 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
5650 ++ pmd_t pmd);
5651 +
5652 + int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
5653 + unsigned long size);
5654 +diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
5655 +index 2a330ec9e2af..d1397c8ed94e 100644
5656 +--- a/include/linux/platform_data/mmp_dma.h
5657 ++++ b/include/linux/platform_data/mmp_dma.h
5658 +@@ -14,6 +14,7 @@
5659 +
5660 + struct mmp_dma_platdata {
5661 + int dma_channels;
5662 ++ int nb_requestors;
5663 + };
5664 +
5665 + #endif /* MMP_DMA_H */
5666 +diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
5667 +index 8a0f55b6c2ba..88e3ab496e8f 100644
5668 +--- a/include/media/videobuf2-core.h
5669 ++++ b/include/media/videobuf2-core.h
5670 +@@ -375,6 +375,9 @@ struct vb2_ops {
5671 + /**
5672 + * struct vb2_ops - driver-specific callbacks
5673 + *
5674 ++ * @verify_planes_array: Verify that a given user space structure contains
5675 ++ * enough planes for the buffer. This is called
5676 ++ * for each dequeued buffer.
5677 + * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
5678 + * For V4L2 this is a struct v4l2_buffer.
5679 + * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
5680 +@@ -384,6 +387,7 @@ struct vb2_ops {
5681 + * the vb2_buffer struct.
5682 + */
5683 + struct vb2_buf_ops {
5684 ++ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
5685 + void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
5686 + int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
5687 + struct vb2_plane *planes);
5688 +@@ -400,6 +404,9 @@ struct vb2_buf_ops {
5689 + * @fileio_read_once: report EOF after reading the first buffer
5690 + * @fileio_write_immediately: queue buffer after each write() call
5691 + * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
5692 ++ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
5693 ++ *		has not been called. This is a vb1 idiom that vb2 has also
5694 ++ *		adopted.
5695 + * @lock: pointer to a mutex that protects the vb2_queue struct. The
5696 + * driver can set this to a mutex to let the v4l2 core serialize
5697 + * the queuing ioctls. If the driver wants to handle locking
5698 +@@ -463,6 +470,7 @@ struct vb2_queue {
5699 + unsigned fileio_read_once:1;
5700 + unsigned fileio_write_immediately:1;
5701 + unsigned allow_zero_bytesused:1;
5702 ++ unsigned quirk_poll_must_check_waiting_for_buffers:1;
5703 +
5704 + struct mutex *lock;
5705 + void *owner;
5706 +diff --git a/include/rdma/ib.h b/include/rdma/ib.h
5707 +index cf8f9e700e48..a6b93706b0fc 100644
5708 +--- a/include/rdma/ib.h
5709 ++++ b/include/rdma/ib.h
5710 +@@ -34,6 +34,7 @@
5711 + #define _RDMA_IB_H
5712 +
5713 + #include <linux/types.h>
5714 ++#include <linux/sched.h>
5715 +
5716 + struct ib_addr {
5717 + union {
5718 +@@ -86,4 +87,19 @@ struct sockaddr_ib {
5719 + __u64 sib_scope_id;
5720 + };
5721 +
5722 ++/*
5723 ++ * The IB interfaces that use write() as bi-directional ioctl() are
5724 ++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
5725 ++ * calls from various contexts with elevated privileges. That includes the
5726 ++ * traditional suid executable error message writes, but also various kernel
5727 ++ * interfaces that can write to file descriptors.
5728 ++ *
5729 ++ * This function provides protection for the legacy API by restricting the
5730 ++ * calling context.
5731 ++ */
5732 ++static inline bool ib_safe_file_access(struct file *filp)
5733 ++{
5734 ++ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
5735 ++}
5736 ++
5737 + #endif /* _RDMA_IB_H */
5738 +diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
5739 +index fa341fcb5829..f5842bcd9c94 100644
5740 +--- a/include/sound/hda_i915.h
5741 ++++ b/include/sound/hda_i915.h
5742 +@@ -9,7 +9,7 @@
5743 + #ifdef CONFIG_SND_HDA_I915
5744 + int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
5745 + int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
5746 +-int snd_hdac_get_display_clk(struct hdac_bus *bus);
5747 ++void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
5748 + int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
5749 + int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
5750 + bool *audio_enabled, char *buffer, int max_bytes);
5751 +@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
5752 + {
5753 + return 0;
5754 + }
5755 +-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
5756 ++static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
5757 + {
5758 +- return 0;
5759 + }
5760 + static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
5761 + int rate)
5762 +diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
5763 +index c039f1d68a09..086168e18ca8 100644
5764 +--- a/include/uapi/linux/v4l2-dv-timings.h
5765 ++++ b/include/uapi/linux/v4l2-dv-timings.h
5766 +@@ -183,7 +183,8 @@
5767 +
5768 + #define V4L2_DV_BT_CEA_3840X2160P24 { \
5769 + .type = V4L2_DV_BT_656_1120, \
5770 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5771 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5772 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5773 + 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
5774 + V4L2_DV_BT_STD_CEA861, \
5775 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5776 +@@ -191,14 +192,16 @@
5777 +
5778 + #define V4L2_DV_BT_CEA_3840X2160P25 { \
5779 + .type = V4L2_DV_BT_656_1120, \
5780 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5781 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5782 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5783 + 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
5784 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5785 + }
5786 +
5787 + #define V4L2_DV_BT_CEA_3840X2160P30 { \
5788 + .type = V4L2_DV_BT_656_1120, \
5789 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5790 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5791 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5792 + 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
5793 + V4L2_DV_BT_STD_CEA861, \
5794 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5795 +@@ -206,14 +209,16 @@
5796 +
5797 + #define V4L2_DV_BT_CEA_3840X2160P50 { \
5798 + .type = V4L2_DV_BT_656_1120, \
5799 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5800 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5801 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5802 + 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
5803 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5804 + }
5805 +
5806 + #define V4L2_DV_BT_CEA_3840X2160P60 { \
5807 + .type = V4L2_DV_BT_656_1120, \
5808 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5809 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
5810 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5811 + 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
5812 + V4L2_DV_BT_STD_CEA861, \
5813 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5814 +@@ -221,7 +226,8 @@
5815 +
5816 + #define V4L2_DV_BT_CEA_4096X2160P24 { \
5817 + .type = V4L2_DV_BT_656_1120, \
5818 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5819 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5820 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5821 + 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
5822 + V4L2_DV_BT_STD_CEA861, \
5823 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5824 +@@ -229,14 +235,16 @@
5825 +
5826 + #define V4L2_DV_BT_CEA_4096X2160P25 { \
5827 + .type = V4L2_DV_BT_656_1120, \
5828 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5829 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5830 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5831 + 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
5832 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5833 + }
5834 +
5835 + #define V4L2_DV_BT_CEA_4096X2160P30 { \
5836 + .type = V4L2_DV_BT_656_1120, \
5837 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5838 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5839 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5840 + 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
5841 + V4L2_DV_BT_STD_CEA861, \
5842 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5843 +@@ -244,14 +252,16 @@
5844 +
5845 + #define V4L2_DV_BT_CEA_4096X2160P50 { \
5846 + .type = V4L2_DV_BT_656_1120, \
5847 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5848 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5849 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5850 + 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
5851 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
5852 + }
5853 +
5854 + #define V4L2_DV_BT_CEA_4096X2160P60 { \
5855 + .type = V4L2_DV_BT_656_1120, \
5856 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
5857 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
5858 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
5859 + 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
5860 + V4L2_DV_BT_STD_CEA861, \
5861 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
5862 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
5863 +index 6a498daf2eec..355cd5f2b416 100644
5864 +--- a/kernel/cgroup.c
5865 ++++ b/kernel/cgroup.c
5866 +@@ -2697,9 +2697,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
5867 + size_t nbytes, loff_t off, bool threadgroup)
5868 + {
5869 + struct task_struct *tsk;
5870 ++ struct cgroup_subsys *ss;
5871 + struct cgroup *cgrp;
5872 + pid_t pid;
5873 +- int ret;
5874 ++ int ssid, ret;
5875 +
5876 + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
5877 + return -EINVAL;
5878 +@@ -2747,8 +2748,10 @@ out_unlock_rcu:
5879 + rcu_read_unlock();
5880 + out_unlock_threadgroup:
5881 + percpu_up_write(&cgroup_threadgroup_rwsem);
5882 ++ for_each_subsys(ss, ssid)
5883 ++ if (ss->post_attach)
5884 ++ ss->post_attach();
5885 + cgroup_kn_unlock(of->kn);
5886 +- cpuset_post_attach_flush();
5887 + return ret ?: nbytes;
5888 + }
5889 +
5890 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
5891 +index 41989ab4db57..df16d0c9349f 100644
5892 +--- a/kernel/cpuset.c
5893 ++++ b/kernel/cpuset.c
5894 +@@ -58,7 +58,6 @@
5895 + #include <asm/uaccess.h>
5896 + #include <linux/atomic.h>
5897 + #include <linux/mutex.h>
5898 +-#include <linux/workqueue.h>
5899 + #include <linux/cgroup.h>
5900 + #include <linux/wait.h>
5901 +
5902 +@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
5903 + }
5904 + }
5905 +
5906 +-void cpuset_post_attach_flush(void)
5907 ++static void cpuset_post_attach(void)
5908 + {
5909 + flush_workqueue(cpuset_migrate_mm_wq);
5910 + }
5911 +@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
5912 + .can_attach = cpuset_can_attach,
5913 + .cancel_attach = cpuset_cancel_attach,
5914 + .attach = cpuset_attach,
5915 ++ .post_attach = cpuset_post_attach,
5916 + .bind = cpuset_bind,
5917 + .legacy_cftypes = files,
5918 + .early_init = 1,
5919 +diff --git a/kernel/events/core.c b/kernel/events/core.c
5920 +index f0b4b328d8f5..a0ef98b258d7 100644
5921 +--- a/kernel/events/core.c
5922 ++++ b/kernel/events/core.c
5923 +@@ -2402,14 +2402,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
5924 + cpuctx->task_ctx = NULL;
5925 + }
5926 +
5927 +- is_active ^= ctx->is_active; /* changed bits */
5928 +-
5929 ++ /*
5930 ++	 * Always update the time if it was set, not only when it changes.
5931 ++ * Otherwise we can 'forget' to update time for any but the last
5932 ++ * context we sched out. For example:
5933 ++ *
5934 ++ * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
5935 ++ * ctx_sched_out(.event_type = EVENT_PINNED)
5936 ++ *
5937 ++ * would only update time for the pinned events.
5938 ++ */
5939 + if (is_active & EVENT_TIME) {
5940 + /* update (and stop) ctx time */
5941 + update_context_time(ctx);
5942 + update_cgrp_time_from_cpuctx(cpuctx);
5943 + }
5944 +
5945 ++ is_active ^= ctx->is_active; /* changed bits */
5946 ++
5947 + if (!ctx->nr_active || !(is_active & EVENT_ALL))
5948 + return;
5949 +
5950 +@@ -8479,6 +8489,7 @@ SYSCALL_DEFINE5(perf_event_open,
5951 + f_flags);
5952 + if (IS_ERR(event_file)) {
5953 + err = PTR_ERR(event_file);
5954 ++ event_file = NULL;
5955 + goto err_context;
5956 + }
5957 +
5958 +diff --git a/kernel/futex.c b/kernel/futex.c
5959 +index 5d6ce6413ef1..11b502159f3a 100644
5960 +--- a/kernel/futex.c
5961 ++++ b/kernel/futex.c
5962 +@@ -1212,10 +1212,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
5963 + if (unlikely(should_fail_futex(true)))
5964 + ret = -EFAULT;
5965 +
5966 +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
5967 ++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
5968 + ret = -EFAULT;
5969 +- else if (curval != uval)
5970 +- ret = -EINVAL;
5971 ++ } else if (curval != uval) {
5972 ++ /*
5973 ++		 * If an unconditional UNLOCK_PI operation (user space did not
5974 ++ * try the TID->0 transition) raced with a waiter setting the
5975 ++ * FUTEX_WAITERS flag between get_user() and locking the hash
5976 ++ * bucket lock, retry the operation.
5977 ++ */
5978 ++ if ((FUTEX_TID_MASK & curval) == uval)
5979 ++ ret = -EAGAIN;
5980 ++ else
5981 ++ ret = -EINVAL;
5982 ++ }
5983 + if (ret) {
5984 + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
5985 + return ret;
5986 +@@ -1442,8 +1452,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
5987 + if (likely(&hb1->chain != &hb2->chain)) {
5988 + plist_del(&q->list, &hb1->chain);
5989 + hb_waiters_dec(hb1);
5990 +- plist_add(&q->list, &hb2->chain);
5991 + hb_waiters_inc(hb2);
5992 ++ plist_add(&q->list, &hb2->chain);
5993 + q->lock_ptr = &hb2->lock;
5994 + }
5995 + get_futex_key_refs(key2);
5996 +@@ -2536,6 +2546,15 @@ retry:
5997 + if (ret == -EFAULT)
5998 + goto pi_faulted;
5999 + /*
6000 ++		 * An unconditional UNLOCK_PI op raced against a waiter
6001 ++ * setting the FUTEX_WAITERS bit. Try again.
6002 ++ */
6003 ++ if (ret == -EAGAIN) {
6004 ++ spin_unlock(&hb->lock);
6005 ++ put_futex_key(&key);
6006 ++ goto retry;
6007 ++ }
6008 ++ /*
6009 + * wake_futex_pi has detected invalid state. Tell user
6010 + * space.
6011 + */
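Note on the change above: after a failed cmpxchg, the code now distinguishes a benign race (only the FUTEX_WAITERS bit changed while the TID bits still match the caller, so retry with -EAGAIN) from real corruption (-EINVAL). A small userspace sketch of that classification (classify() is a made-up helper; the two mask constants match the futex uapi):

#include <stdio.h>
#include <stdint.h>

#define FUTEX_WAITERS  0x80000000u
#define FUTEX_TID_MASK 0x3fffffffu

/* Mirror of the patch's decision after a failed TID->0 transition. */
static int classify(uint32_t curval, uint32_t expected_tid)
{
	if ((curval & FUTEX_TID_MASK) == expected_tid)
		return -11;	/* -EAGAIN: a waiter set FUTEX_WAITERS, retry */
	return -22;		/* -EINVAL: genuinely inconsistent state */
}

int main(void)
{
	uint32_t tid = 1234;
	printf("%d\n", classify(tid | FUTEX_WAITERS, tid));	/* -11 */
	printf("%d\n", classify(4321, tid));			/* -22 */
	return 0;
}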
6012 +diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
6013 +index 5b9102a47ea5..c835270f0c2f 100644
6014 +--- a/kernel/locking/mcs_spinlock.h
6015 ++++ b/kernel/locking/mcs_spinlock.h
6016 +@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
6017 + node->locked = 0;
6018 + node->next = NULL;
6019 +
6020 +- prev = xchg_acquire(lock, node);
6021 ++ /*
6022 ++ * We rely on the full barrier with global transitivity implied by the
6023 ++ * below xchg() to order the initialization stores above against any
6024 ++ * observation of @node, and to provide the ACQUIRE ordering
6025 ++ * associated with a LOCK primitive.
6026 ++ */
6027 ++ prev = xchg(lock, node);
6028 + if (likely(prev == NULL)) {
6029 + /*
6030 + * Lock acquired, don't need to set node->locked to 1. Threads
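As a rough C11 analogue of what the stronger xchg() buys, here is a sketch of the MCS acquire path -- not the kernel's implementation, and note the kernel additionally wants the global transitivity of a full barrier, which plain acquire/release cannot express. The point illustrated is that the node's initialization stores must be ordered before the node becomes reachable through the lock tail:

#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_int locked;
};

static _Atomic(struct mcs_node *) lock_tail;

void mcs_lock(struct mcs_node *node)
{
	atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

	/* Release: publish the initialized node; acquire: LOCK semantics. */
	struct mcs_node *prev = atomic_exchange_explicit(&lock_tail, node,
							 memory_order_acq_rel);
	if (prev == NULL)
		return;			/* lock acquired uncontended */

	atomic_store_explicit(&prev->next, node, memory_order_release);
	while (!atomic_load_explicit(&node->locked, memory_order_acquire))
		;			/* spin until our predecessor unlocks us */
}
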
6031 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6032 +index a74073f8c08c..1c1d2a00ad95 100644
6033 +--- a/kernel/sched/core.c
6034 ++++ b/kernel/sched/core.c
6035 +@@ -7802,7 +7802,7 @@ void set_curr_task(int cpu, struct task_struct *p)
6036 + /* task_group_lock serializes the addition/removal of task groups */
6037 + static DEFINE_SPINLOCK(task_group_lock);
6038 +
6039 +-static void free_sched_group(struct task_group *tg)
6040 ++static void sched_free_group(struct task_group *tg)
6041 + {
6042 + free_fair_sched_group(tg);
6043 + free_rt_sched_group(tg);
6044 +@@ -7828,7 +7828,7 @@ struct task_group *sched_create_group(struct task_group *parent)
6045 + return tg;
6046 +
6047 + err:
6048 +- free_sched_group(tg);
6049 ++ sched_free_group(tg);
6050 + return ERR_PTR(-ENOMEM);
6051 + }
6052 +
6053 +@@ -7848,17 +7848,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
6054 + }
6055 +
6056 + /* rcu callback to free various structures associated with a task group */
6057 +-static void free_sched_group_rcu(struct rcu_head *rhp)
6058 ++static void sched_free_group_rcu(struct rcu_head *rhp)
6059 + {
6060 + /* now it should be safe to free those cfs_rqs */
6061 +- free_sched_group(container_of(rhp, struct task_group, rcu));
6062 ++ sched_free_group(container_of(rhp, struct task_group, rcu));
6063 + }
6064 +
6065 +-/* Destroy runqueue etc associated with a task group */
6066 + void sched_destroy_group(struct task_group *tg)
6067 + {
6068 + /* wait for possible concurrent references to cfs_rqs complete */
6069 +- call_rcu(&tg->rcu, free_sched_group_rcu);
6070 ++ call_rcu(&tg->rcu, sched_free_group_rcu);
6071 + }
6072 +
6073 + void sched_offline_group(struct task_group *tg)
6074 +@@ -8319,31 +8318,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
6075 + if (IS_ERR(tg))
6076 + return ERR_PTR(-ENOMEM);
6077 +
6078 ++ sched_online_group(tg, parent);
6079 ++
6080 + return &tg->css;
6081 + }
6082 +
6083 +-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
6084 ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
6085 + {
6086 + struct task_group *tg = css_tg(css);
6087 +- struct task_group *parent = css_tg(css->parent);
6088 +
6089 +- if (parent)
6090 +- sched_online_group(tg, parent);
6091 +- return 0;
6092 ++ sched_offline_group(tg);
6093 + }
6094 +
6095 + static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
6096 + {
6097 + struct task_group *tg = css_tg(css);
6098 +
6099 +- sched_destroy_group(tg);
6100 +-}
6101 +-
6102 +-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
6103 +-{
6104 +- struct task_group *tg = css_tg(css);
6105 +-
6106 +- sched_offline_group(tg);
6107 ++ /*
6108 ++ * Relies on the RCU grace period between css_released() and this.
6109 ++ */
6110 ++ sched_free_group(tg);
6111 + }
6112 +
6113 + static void cpu_cgroup_fork(struct task_struct *task)
6114 +@@ -8703,9 +8697,8 @@ static struct cftype cpu_files[] = {
6115 +
6116 + struct cgroup_subsys cpu_cgrp_subsys = {
6117 + .css_alloc = cpu_cgroup_css_alloc,
6118 ++ .css_released = cpu_cgroup_css_released,
6119 + .css_free = cpu_cgroup_css_free,
6120 +- .css_online = cpu_cgroup_css_online,
6121 +- .css_offline = cpu_cgroup_css_offline,
6122 + .fork = cpu_cgroup_fork,
6123 + .can_attach = cpu_cgroup_can_attach,
6124 + .attach = cpu_cgroup_attach,
6125 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
6126 +index 7ff5dc7d2ac5..9e82d0450fad 100644
6127 +--- a/kernel/workqueue.c
6128 ++++ b/kernel/workqueue.c
6129 +@@ -667,6 +667,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
6130 + */
6131 + smp_wmb();
6132 + set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
6133 ++ /*
6134 ++ * The following mb guarantees that previous clear of a PENDING bit
6135 ++ * will not be reordered with any speculative LOADS or STORES from
6136 ++ * work->current_func, which is executed afterwards. This possible
6137 ++ * reordering can lead to a missed execution on an attempt to queue
6138 ++ * the same @work. E.g. consider this case:
6139 ++ *
6140 ++ * CPU#0 CPU#1
6141 ++ * ---------------------------- --------------------------------
6142 ++ *
6143 ++ * 1 STORE event_indicated
6144 ++ * 2 queue_work_on() {
6145 ++ * 3 test_and_set_bit(PENDING)
6146 ++ * 4 } set_..._and_clear_pending() {
6147 ++ * 5 set_work_data() # clear bit
6148 ++ * 6 smp_mb()
6149 ++ * 7 work->current_func() {
6150 ++ * 8 LOAD event_indicated
6151 ++ * }
6152 ++ *
6153 ++ * Without an explicit full barrier, the speculative LOAD on line 8
6154 ++ * can be executed before CPU#0 does the STORE on line 1. If that
6155 ++ * happens, CPU#0 observes that the PENDING bit is still set and a
6156 ++ * new execution of @work is not queued, in the hope that CPU#1 will
6157 ++ * eventually finish the queued @work. Meanwhile CPU#1 does not see
6158 ++ * that event_indicated is set, because the speculative LOAD was
6159 ++ * executed before the actual STORE.
6160 ++ */
6161 ++ smp_mb();
6162 + }
6163 +
6164 + static void clear_work_data(struct work_struct *work)
6165 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
6166 +index 03dd576e6773..59fd7c0b119c 100644
6167 +--- a/lib/assoc_array.c
6168 ++++ b/lib/assoc_array.c
6169 +@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
6170 + free_slot = i;
6171 + continue;
6172 + }
6173 +- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
6174 ++ if (assoc_array_ptr_is_leaf(ptr) &&
6175 ++ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
6176 ++ index_key)) {
6177 + pr_devel("replace in slot %d\n", i);
6178 + edit->leaf_p = &node->slots[i];
6179 + edit->dead_leaf = node->slots[i];
6180 +diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
6181 +index abcecdc2d0f2..0710a62ad2f6 100644
6182 +--- a/lib/lz4/lz4defs.h
6183 ++++ b/lib/lz4/lz4defs.h
6184 +@@ -11,8 +11,7 @@
6185 + /*
6186 + * Detects 64 bits mode
6187 + */
6188 +-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
6189 +- || defined(__ppc64__) || defined(__LP64__))
6190 ++#if defined(CONFIG_64BIT)
6191 + #define LZ4_ARCH64 1
6192 + #else
6193 + #define LZ4_ARCH64 0
6194 +@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
6195 +
6196 + #define PUT4(s, d) (A32(d) = A32(s))
6197 + #define PUT8(s, d) (A64(d) = A64(s))
6198 ++
6199 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6200 ++ (d = s - A16(p))
6201 ++
6202 + #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6203 + do { \
6204 + A16(p) = v; \
6205 +@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
6206 + #define PUT8(s, d) \
6207 + put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
6208 +
6209 +-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6210 +- do { \
6211 +- put_unaligned(v, (u16 *)(p)); \
6212 +- p += 2; \
6213 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6214 ++ (d = s - get_unaligned_le16(p))
6215 ++
6216 ++#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
6217 ++ do { \
6218 ++ put_unaligned_le16(v, (u16 *)(p)); \
6219 ++ p += 2; \
6220 + } while (0)
6221 + #endif
6222 +
6223 +@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
6224 +
6225 + #endif
6226 +
6227 +-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
6228 +- (d = s - get_unaligned_le16(p))
6229 +-
6230 + #define LZ4_WILDCOPY(s, d, e) \
6231 + do { \
6232 + LZ4_COPYPACKET(s, d); \
6233 +diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
6234 +index ec533a6c77b5..eb15e7dc7b65 100644
6235 +--- a/lib/mpi/mpicoder.c
6236 ++++ b/lib/mpi/mpicoder.c
6237 +@@ -128,6 +128,23 @@ leave:
6238 + }
6239 + EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
6240 +
6241 ++static int count_lzeros(MPI a)
6242 ++{
6243 ++ mpi_limb_t alimb;
6244 ++ int i, lzeros = 0;
6245 ++
6246 ++ for (i = a->nlimbs - 1; i >= 0; i--) {
6247 ++ alimb = a->d[i];
6248 ++ if (alimb == 0) {
6249 ++ lzeros += sizeof(mpi_limb_t);
6250 ++ } else {
6251 ++ lzeros += count_leading_zeros(alimb) / 8;
6252 ++ break;
6253 ++ }
6254 ++ }
6255 ++ return lzeros;
6256 ++}
6257 ++
6258 + /**
6259 + * mpi_read_buffer() - read MPI to a buffer provided by the user (msb first)
6260 + *
6261 +@@ -148,7 +165,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
6262 + uint8_t *p;
6263 + mpi_limb_t alimb;
6264 + unsigned int n = mpi_get_size(a);
6265 +- int i, lzeros = 0;
6266 ++ int i, lzeros;
6267 +
6268 + if (!buf || !nbytes)
6269 + return -EINVAL;
6270 +@@ -156,14 +173,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
6271 + if (sign)
6272 + *sign = a->sign;
6273 +
6274 +- p = (void *)&a->d[a->nlimbs] - 1;
6275 +-
6276 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
6277 +- if (!*p)
6278 +- lzeros++;
6279 +- else
6280 +- break;
6281 +- }
6282 ++ lzeros = count_lzeros(a);
6283 +
6284 + if (buf_len < n - lzeros) {
6285 + *nbytes = n - lzeros;
6286 +@@ -351,7 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
6287 + u8 *p, *p2;
6288 + mpi_limb_t alimb, alimb2;
6289 + unsigned int n = mpi_get_size(a);
6290 +- int i, x, y = 0, lzeros = 0, buf_len;
6291 ++ int i, x, y = 0, lzeros, buf_len;
6292 +
6293 + if (!nbytes)
6294 + return -EINVAL;
6295 +@@ -359,14 +369,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
6296 + if (sign)
6297 + *sign = a->sign;
6298 +
6299 +- p = (void *)&a->d[a->nlimbs] - 1;
6300 +-
6301 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
6302 +- if (!*p)
6303 +- lzeros++;
6304 +- else
6305 +- break;
6306 +- }
6307 ++ lzeros = count_lzeros(a);
6308 +
6309 + if (*nbytes < n - lzeros) {
6310 + *nbytes = n - lzeros;
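The arithmetic in the new count_lzeros() helper is easy to check by hand: each all-zero limb contributes sizeof(mpi_limb_t) bytes, and the most significant non-zero limb contributes its leading-zero bits divided by 8. A userspace sketch, with GCC's __builtin_clzll standing in for the kernel's count_leading_zeros():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One non-zero 64-bit limb with only the lowest byte set. */
	uint64_t limb = 0x00000000000000ffULL;

	int lz_bits = __builtin_clzll(limb);		/* 56 bits */
	printf("leading zero bytes: %d\n", lz_bits / 8);	/* 7 */
	return 0;
}
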
6311 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
6312 +index e10a4fee88d2..a7db0a2db1ab 100644
6313 +--- a/mm/huge_memory.c
6314 ++++ b/mm/huge_memory.c
6315 +@@ -1919,10 +1919,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
6316 + * page fault if needed.
6317 + */
6318 + return 0;
6319 +- if (vma->vm_ops)
6320 ++ if (vma->vm_ops || (vm_flags & VM_NO_THP))
6321 + /* khugepaged not yet working on file or special mappings */
6322 + return 0;
6323 +- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
6324 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
6325 + hend = vma->vm_end & HPAGE_PMD_MASK;
6326 + if (hstart < hend)
6327 +@@ -2310,8 +2309,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
6328 + return false;
6329 + if (is_vma_temporary_stack(vma))
6330 + return false;
6331 +- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
6332 +- return true;
6333 ++ return !(vma->vm_flags & VM_NO_THP);
6334 + }
6335 +
6336 + static void collapse_huge_page(struct mm_struct *mm,
6337 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
6338 +index caf3bf73b533..a65ad1d59232 100644
6339 +--- a/mm/memcontrol.c
6340 ++++ b/mm/memcontrol.c
6341 +@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
6342 + /* "mc" and its members are protected by cgroup_mutex */
6343 + static struct move_charge_struct {
6344 + spinlock_t lock; /* for from, to */
6345 ++ struct mm_struct *mm;
6346 + struct mem_cgroup *from;
6347 + struct mem_cgroup *to;
6348 + unsigned long flags;
6349 +@@ -4730,6 +4731,8 @@ static void __mem_cgroup_clear_mc(void)
6350 +
6351 + static void mem_cgroup_clear_mc(void)
6352 + {
6353 ++ struct mm_struct *mm = mc.mm;
6354 ++
6355 + /*
6356 + * we must clear moving_task before waking up waiters at the end of
6357 + * task migration.
6358 +@@ -4739,7 +4742,10 @@ static void mem_cgroup_clear_mc(void)
6359 + spin_lock(&mc.lock);
6360 + mc.from = NULL;
6361 + mc.to = NULL;
6362 ++ mc.mm = NULL;
6363 + spin_unlock(&mc.lock);
6364 ++
6365 ++ mmput(mm);
6366 + }
6367 +
6368 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6369 +@@ -4796,6 +4802,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6370 + VM_BUG_ON(mc.moved_swap);
6371 +
6372 + spin_lock(&mc.lock);
6373 ++ mc.mm = mm;
6374 + mc.from = from;
6375 + mc.to = memcg;
6376 + mc.flags = move_flags;
6377 +@@ -4805,8 +4812,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6378 + ret = mem_cgroup_precharge_mc(mm);
6379 + if (ret)
6380 + mem_cgroup_clear_mc();
6381 ++ } else {
6382 ++ mmput(mm);
6383 + }
6384 +- mmput(mm);
6385 + return ret;
6386 + }
6387 +
6388 +@@ -4915,11 +4923,11 @@ put: /* get_mctgt_type() gets the page */
6389 + return ret;
6390 + }
6391 +
6392 +-static void mem_cgroup_move_charge(struct mm_struct *mm)
6393 ++static void mem_cgroup_move_charge(void)
6394 + {
6395 + struct mm_walk mem_cgroup_move_charge_walk = {
6396 + .pmd_entry = mem_cgroup_move_charge_pte_range,
6397 +- .mm = mm,
6398 ++ .mm = mc.mm,
6399 + };
6400 +
6401 + lru_add_drain_all();
6402 +@@ -4931,7 +4939,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
6403 + atomic_inc(&mc.from->moving_account);
6404 + synchronize_rcu();
6405 + retry:
6406 +- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6407 ++ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
6408 + /*
6409 + * Someone who is holding the mmap_sem might be waiting in the
6410 + * waitq. So we cancel all extra charges, wake up all waiters,
6411 +@@ -4948,23 +4956,16 @@ retry:
6412 + * additional charge, the page walk just aborts.
6413 + */
6414 + walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
6415 +- up_read(&mm->mmap_sem);
6416 ++ up_read(&mc.mm->mmap_sem);
6417 + atomic_dec(&mc.from->moving_account);
6418 + }
6419 +
6420 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
6421 ++static void mem_cgroup_move_task(void)
6422 + {
6423 +- struct cgroup_subsys_state *css;
6424 +- struct task_struct *p = cgroup_taskset_first(tset, &css);
6425 +- struct mm_struct *mm = get_task_mm(p);
6426 +-
6427 +- if (mm) {
6428 +- if (mc.to)
6429 +- mem_cgroup_move_charge(mm);
6430 +- mmput(mm);
6431 +- }
6432 +- if (mc.to)
6433 ++ if (mc.to) {
6434 ++ mem_cgroup_move_charge();
6435 + mem_cgroup_clear_mc();
6436 ++ }
6437 + }
6438 + #else /* !CONFIG_MMU */
6439 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6440 +@@ -4974,7 +4975,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6441 + static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6442 + {
6443 + }
6444 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
6445 ++static void mem_cgroup_move_task(void)
6446 + {
6447 + }
6448 + #endif
6449 +@@ -5246,7 +5247,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
6450 + .css_reset = mem_cgroup_css_reset,
6451 + .can_attach = mem_cgroup_can_attach,
6452 + .cancel_attach = mem_cgroup_cancel_attach,
6453 +- .attach = mem_cgroup_move_task,
6454 ++ .post_attach = mem_cgroup_move_task,
6455 + .bind = mem_cgroup_bind,
6456 + .dfl_cftypes = memory_files,
6457 + .legacy_cftypes = mem_cgroup_legacy_files,
6458 +diff --git a/mm/memory.c b/mm/memory.c
6459 +index 8132787ae4d5..3345dcf862cf 100644
6460 +--- a/mm/memory.c
6461 ++++ b/mm/memory.c
6462 +@@ -792,6 +792,46 @@ out:
6463 + return pfn_to_page(pfn);
6464 + }
6465 +
6466 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6467 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
6468 ++ pmd_t pmd)
6469 ++{
6470 ++ unsigned long pfn = pmd_pfn(pmd);
6471 ++
6472 ++ /*
6473 ++ * There is no pmd_special() but there may be special pmds, e.g.
6474 ++ * in a direct-access (dax) mapping, so let's just replicate the
6475 ++ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
6476 ++ */
6477 ++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
6478 ++ if (vma->vm_flags & VM_MIXEDMAP) {
6479 ++ if (!pfn_valid(pfn))
6480 ++ return NULL;
6481 ++ goto out;
6482 ++ } else {
6483 ++ unsigned long off;
6484 ++ off = (addr - vma->vm_start) >> PAGE_SHIFT;
6485 ++ if (pfn == vma->vm_pgoff + off)
6486 ++ return NULL;
6487 ++ if (!is_cow_mapping(vma->vm_flags))
6488 ++ return NULL;
6489 ++ }
6490 ++ }
6491 ++
6492 ++ if (is_zero_pfn(pfn))
6493 ++ return NULL;
6494 ++ if (unlikely(pfn > highest_memmap_pfn))
6495 ++ return NULL;
6496 ++
6497 ++ /*
6498 ++ * NOTE! We still have PageReserved() pages in the page tables.
6499 ++ * eg. VDSO mappings can cause them to exist.
6500 ++ */
6501 ++out:
6502 ++ return pfn_to_page(pfn);
6503 ++}
6504 ++#endif
6505 ++
6506 + /*
6507 + * copy one vm_area from one task to the other. Assumes the page tables
6508 + * already present in the new task to be cleared in the whole range
6509 +diff --git a/mm/migrate.c b/mm/migrate.c
6510 +index 3ad0fea5c438..625741faa068 100644
6511 +--- a/mm/migrate.c
6512 ++++ b/mm/migrate.c
6513 +@@ -967,7 +967,13 @@ out:
6514 + dec_zone_page_state(page, NR_ISOLATED_ANON +
6515 + page_is_file_cache(page));
6516 + /* Soft-offlined page shouldn't go through lru cache list */
6517 +- if (reason == MR_MEMORY_FAILURE) {
6518 ++ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
6519 ++ /*
6520 ++ * With this release, we free the successfully migrated
6521 ++ * page and intentionally set PG_HWPoison on the just-freed
6522 ++ * page. Although it's rather weird, that is how the
6523 ++ * HWPoison flag works at the moment.
6524 ++ */
6525 + put_page(page);
6526 + if (!test_set_page_hwpoison(page))
6527 + num_poisoned_pages_inc();
6528 +diff --git a/mm/slub.c b/mm/slub.c
6529 +index d8fbd4a6ed59..2a722e141958 100644
6530 +--- a/mm/slub.c
6531 ++++ b/mm/slub.c
6532 +@@ -2815,6 +2815,7 @@ struct detached_freelist {
6533 + void *tail;
6534 + void *freelist;
6535 + int cnt;
6536 ++ struct kmem_cache *s;
6537 + };
6538 +
6539 + /*
6540 +@@ -2829,8 +2830,9 @@ struct detached_freelist {
6541 + * synchronization primitive. Lookahead in the array is limited for
6542 + * performance reasons.
6543 + */
6544 +-static int build_detached_freelist(struct kmem_cache *s, size_t size,
6545 +- void **p, struct detached_freelist *df)
6546 ++static inline
6547 ++int build_detached_freelist(struct kmem_cache *s, size_t size,
6548 ++ void **p, struct detached_freelist *df)
6549 + {
6550 + size_t first_skipped_index = 0;
6551 + int lookahead = 3;
6552 +@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6553 + if (!object)
6554 + return 0;
6555 +
6556 ++ /* Support for memcg; the compiler can optimize this out */
6557 ++ df->s = cache_from_obj(s, object);
6558 ++
6559 + /* Start new detached freelist */
6560 +- set_freepointer(s, object, NULL);
6561 ++ set_freepointer(df->s, object, NULL);
6562 + df->page = virt_to_head_page(object);
6563 + df->tail = object;
6564 + df->freelist = object;
6565 +@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6566 + /* df->page is always set at this point */
6567 + if (df->page == virt_to_head_page(object)) {
6568 + /* Opportunity build freelist */
6569 +- set_freepointer(s, object, df->freelist);
6570 ++ set_freepointer(df->s, object, df->freelist);
6571 + df->freelist = object;
6572 + df->cnt++;
6573 + p[size] = NULL; /* mark object processed */
6574 +@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
6575 + return first_skipped_index;
6576 + }
6577 +
6578 +-
6579 + /* Note that interrupts must be enabled when calling this function. */
6580 +-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
6581 ++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
6582 + {
6583 + if (WARN_ON(!size))
6584 + return;
6585 +
6586 + do {
6587 + struct detached_freelist df;
6588 +- struct kmem_cache *s;
6589 +-
6590 +- /* Support for memcg */
6591 +- s = cache_from_obj(orig_s, p[size - 1]);
6592 +
6593 + size = build_detached_freelist(s, size, p, &df);
6594 + if (unlikely(!df.page))
6595 + continue;
6596 +
6597 +- slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
6598 ++ slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
6599 + } while (likely(size));
6600 + }
6601 + EXPORT_SYMBOL(kmem_cache_free_bulk);
6602 +diff --git a/mm/vmscan.c b/mm/vmscan.c
6603 +index 71b1c29948db..c712b016e0ab 100644
6604 +--- a/mm/vmscan.c
6605 ++++ b/mm/vmscan.c
6606 +@@ -2561,7 +2561,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
6607 + sc->gfp_mask |= __GFP_HIGHMEM;
6608 +
6609 + for_each_zone_zonelist_nodemask(zone, z, zonelist,
6610 +- requested_highidx, sc->nodemask) {
6611 ++ gfp_zone(sc->gfp_mask), sc->nodemask) {
6612 + enum zone_type classzone_idx;
6613 +
6614 + if (!populated_zone(zone))
6615 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
6616 +index f1ffb34e253f..d2bc03f0b4d7 100644
6617 +--- a/net/netlink/af_netlink.c
6618 ++++ b/net/netlink/af_netlink.c
6619 +@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
6620 +
6621 + skb_queue_purge(&sk->sk_write_queue);
6622 +
6623 +- if (nlk->portid) {
6624 ++ if (nlk->portid && nlk->bound) {
6625 + struct netlink_notify n = {
6626 + .net = sock_net(sk),
6627 + .protocol = sk->sk_protocol,
6628 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
6629 +index 273bc3a35425..008c25d1b9f9 100644
6630 +--- a/net/sunrpc/cache.c
6631 ++++ b/net/sunrpc/cache.c
6632 +@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
6633 + }
6634 +
6635 + crq->q.reader = 0;
6636 +- crq->item = cache_get(h);
6637 + crq->buf = buf;
6638 + crq->len = 0;
6639 + crq->readers = 0;
6640 + spin_lock(&queue_lock);
6641 +- if (test_bit(CACHE_PENDING, &h->flags))
6642 ++ if (test_bit(CACHE_PENDING, &h->flags)) {
6643 ++ crq->item = cache_get(h);
6644 + list_add_tail(&crq->q.list, &detail->queue);
6645 +- else
6646 ++ } else
6647 + /* Lost a race, no longer PENDING, so don't enqueue */
6648 + ret = -EAGAIN;
6649 + spin_unlock(&queue_lock);
6650 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
6651 +index 711cb7ad6ae0..ab62d305b48b 100644
6652 +--- a/net/wireless/nl80211.c
6653 ++++ b/net/wireless/nl80211.c
6654 +@@ -13201,7 +13201,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
6655 + struct wireless_dev *wdev;
6656 + struct cfg80211_beacon_registration *reg, *tmp;
6657 +
6658 +- if (state != NETLINK_URELEASE)
6659 ++ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
6660 + return NOTIFY_DONE;
6661 +
6662 + rcu_read_lock();
6663 +diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
6664 +index 0b7dc2fd7bac..dd243d2abd87 100644
6665 +--- a/scripts/kconfig/confdata.c
6666 ++++ b/scripts/kconfig/confdata.c
6667 +@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
6668 + if (in)
6669 + goto load;
6670 + sym_add_change_count(1);
6671 +- if (!sym_defconfig_list) {
6672 +- sym_calc_value(modules_sym);
6673 ++ if (!sym_defconfig_list)
6674 + return 1;
6675 +- }
6676 +
6677 + for_all_defaults(sym_defconfig_list, prop) {
6678 + if (expr_calc_value(prop->visible.expr) == no ||
6679 +@@ -403,7 +401,6 @@ setsym:
6680 + }
6681 + free(line);
6682 + fclose(in);
6683 +- sym_calc_value(modules_sym);
6684 + return 0;
6685 + }
6686 +
6687 +@@ -414,8 +411,12 @@ int conf_read(const char *name)
6688 +
6689 + sym_set_change_count(0);
6690 +
6691 +- if (conf_read_simple(name, S_DEF_USER))
6692 ++ if (conf_read_simple(name, S_DEF_USER)) {
6693 ++ sym_calc_value(modules_sym);
6694 + return 1;
6695 ++ }
6696 ++
6697 ++ sym_calc_value(modules_sym);
6698 +
6699 + for_all_symbols(i, sym) {
6700 + sym_calc_value(sym);
6701 +@@ -846,6 +847,7 @@ static int conf_split_config(void)
6702 +
6703 + name = conf_get_autoconfig_name();
6704 + conf_read_simple(name, S_DEF_AUTO);
6705 ++ sym_calc_value(modules_sym);
6706 +
6707 + if (chdir("include/config"))
6708 + return 1;
6709 +diff --git a/security/keys/trusted.c b/security/keys/trusted.c
6710 +index 0dcab20cdacd..90d61751ff12 100644
6711 +--- a/security/keys/trusted.c
6712 ++++ b/security/keys/trusted.c
6713 +@@ -744,6 +744,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6714 + unsigned long handle;
6715 + unsigned long lock;
6716 + unsigned long token_mask = 0;
6717 ++ unsigned int digest_len;
6718 + int i;
6719 + int tpm2;
6720 +
6721 +@@ -752,7 +753,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6722 + return tpm2;
6723 +
6724 + opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
6725 +- opt->digest_len = hash_digest_size[opt->hash];
6726 +
6727 + while ((p = strsep(&c, " \t"))) {
6728 + if (*p == '\0' || *p == ' ' || *p == '\t')
6729 +@@ -812,8 +812,6 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6730 + for (i = 0; i < HASH_ALGO__LAST; i++) {
6731 + if (!strcmp(args[0].from, hash_algo_name[i])) {
6732 + opt->hash = i;
6733 +- opt->digest_len =
6734 +- hash_digest_size[opt->hash];
6735 + break;
6736 + }
6737 + }
6738 +@@ -825,13 +823,14 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
6739 + }
6740 + break;
6741 + case Opt_policydigest:
6742 +- if (!tpm2 ||
6743 +- strlen(args[0].from) != (2 * opt->digest_len))
6744 ++ digest_len = hash_digest_size[opt->hash];
6745 ++ if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
6746 + return -EINVAL;
6747 + res = hex2bin(opt->policydigest, args[0].from,
6748 +- opt->digest_len);
6749 ++ digest_len);
6750 + if (res < 0)
6751 + return -EINVAL;
6752 ++ opt->policydigest_len = digest_len;
6753 + break;
6754 + case Opt_policyhandle:
6755 + if (!tpm2)
6756 +diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
6757 +index f6854dbd7d8d..69ead7150a5c 100644
6758 +--- a/sound/hda/hdac_i915.c
6759 ++++ b/sound/hda/hdac_i915.c
6760 +@@ -20,6 +20,7 @@
6761 + #include <sound/core.h>
6762 + #include <sound/hdaudio.h>
6763 + #include <sound/hda_i915.h>
6764 ++#include <sound/hda_register.h>
6765 +
6766 + static struct i915_audio_component *hdac_acomp;
6767 +
6768 +@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
6769 + }
6770 + EXPORT_SYMBOL_GPL(snd_hdac_display_power);
6771 +
6772 ++#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
6773 ++ ((pci)->device == 0x0c0c) || \
6774 ++ ((pci)->device == 0x0d0c) || \
6775 ++ ((pci)->device == 0x160c))
6776 ++
6777 + /**
6778 +- * snd_hdac_get_display_clk - Get CDCLK in kHz
6779 ++ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
6780 + * @bus: HDA core bus
6781 + *
6782 +- * This function is supposed to be used only by a HD-audio controller
6783 +- * driver that needs the interaction with i915 graphics.
6784 ++ * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
6785 ++ * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
6786 ++ * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
6787 ++ * BCLK = CDCLK * M / N
6788 ++ * The values will be lost when the display power well is disabled and need to
6789 ++ * be restored to avoid abnormal playback speed.
6790 + *
6791 +- * This function queries CDCLK value in kHz from the graphics driver and
6792 +- * returns the value. A negative code is returned in error.
6793 ++ * Call this function when initializing and when changing the power well, as
6794 ++ * well as from the ELD notifier on hotplug.
6795 + */
6796 +-int snd_hdac_get_display_clk(struct hdac_bus *bus)
6797 ++void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
6798 + {
6799 + struct i915_audio_component *acomp = bus->audio_component;
6800 ++ struct pci_dev *pci = to_pci_dev(bus->dev);
6801 ++ int cdclk_freq;
6802 ++ unsigned int bclk_m, bclk_n;
6803 ++
6804 ++ if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
6805 ++ return; /* only for i915 binding */
6806 ++ if (!CONTROLLER_IN_GPU(pci))
6807 ++ return; /* only HSW/BDW */
6808 ++
6809 ++ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
6810 ++ switch (cdclk_freq) {
6811 ++ case 337500:
6812 ++ bclk_m = 16;
6813 ++ bclk_n = 225;
6814 ++ break;
6815 ++
6816 ++ case 450000:
6817 ++ default: /* default CDCLK 450MHz */
6818 ++ bclk_m = 4;
6819 ++ bclk_n = 75;
6820 ++ break;
6821 ++
6822 ++ case 540000:
6823 ++ bclk_m = 4;
6824 ++ bclk_n = 90;
6825 ++ break;
6826 ++
6827 ++ case 675000:
6828 ++ bclk_m = 8;
6829 ++ bclk_n = 225;
6830 ++ break;
6831 ++ }
6832 +
6833 +- if (!acomp || !acomp->ops)
6834 +- return -ENODEV;
6835 +-
6836 +- return acomp->ops->get_cdclk_freq(acomp->dev);
6837 ++ snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
6838 ++ snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
6839 + }
6840 +-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
6841 ++EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
6842 +
6843 + /* There is a fixed mapping between audio pin node and display port
6844 + * on current Intel platforms:
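Every (M, N) pair in the switch above maps its CDCLK back to the 24 MHz the HDA link expects, which is easy to verify: e.g. 337500 kHz * 16 / 225 = 24000 kHz. A small standalone check over the whole table, with the values copied from the function:

#include <stdio.h>

int main(void)
{
	static const struct { int cdclk_khz, m, n; } tbl[] = {
		{ 337500, 16, 225 },
		{ 450000,  4,  75 },
		{ 540000,  4,  90 },
		{ 675000,  8, 225 },
	};

	for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("%d kHz -> %d kHz\n", tbl[i].cdclk_khz,
		       tbl[i].cdclk_khz * tbl[i].m / tbl[i].n);	/* 24000 each */
	return 0;
}
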
6845 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
6846 +index 7ca5b89f088a..dfaf1a93fb8a 100644
6847 +--- a/sound/pci/hda/hda_generic.c
6848 ++++ b/sound/pci/hda/hda_generic.c
6849 +@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
6850 + bool allow_powerdown)
6851 + {
6852 + hda_nid_t nid, changed = 0;
6853 +- int i, state;
6854 ++ int i, state, power;
6855 +
6856 + for (i = 0; i < path->depth; i++) {
6857 + nid = path->path[i];
6858 +@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
6859 + state = AC_PWRST_D0;
6860 + else
6861 + state = AC_PWRST_D3;
6862 +- if (!snd_hda_check_power_state(codec, nid, state)) {
6863 ++ power = snd_hda_codec_read(codec, nid, 0,
6864 ++ AC_VERB_GET_POWER_STATE, 0);
6865 ++ if (power != (state | (state << 4))) {
6866 + snd_hda_codec_write(codec, nid, 0,
6867 + AC_VERB_SET_POWER_STATE, state);
6868 + changed = nid;
6869 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6870 +index e5240cb3749f..c0b772bb49af 100644
6871 +--- a/sound/pci/hda/hda_intel.c
6872 ++++ b/sound/pci/hda/hda_intel.c
6873 +@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
6874 + #define azx_del_card_list(chip) /* NOP */
6875 + #endif /* CONFIG_PM */
6876 +
6877 +-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
6878 +- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
6879 +- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
6880 +- * BCLK = CDCLK * M / N
6881 +- * The values will be lost when the display power well is disabled and need to
6882 +- * be restored to avoid abnormal playback speed.
6883 +- */
6884 +-static void haswell_set_bclk(struct hda_intel *hda)
6885 +-{
6886 +- struct azx *chip = &hda->chip;
6887 +- int cdclk_freq;
6888 +- unsigned int bclk_m, bclk_n;
6889 +-
6890 +- if (!hda->need_i915_power)
6891 +- return;
6892 +-
6893 +- cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
6894 +- switch (cdclk_freq) {
6895 +- case 337500:
6896 +- bclk_m = 16;
6897 +- bclk_n = 225;
6898 +- break;
6899 +-
6900 +- case 450000:
6901 +- default: /* default CDCLK 450MHz */
6902 +- bclk_m = 4;
6903 +- bclk_n = 75;
6904 +- break;
6905 +-
6906 +- case 540000:
6907 +- bclk_m = 4;
6908 +- bclk_n = 90;
6909 +- break;
6910 +-
6911 +- case 675000:
6912 +- bclk_m = 8;
6913 +- bclk_n = 225;
6914 +- break;
6915 +- }
6916 +-
6917 +- azx_writew(chip, HSW_EM4, bclk_m);
6918 +- azx_writew(chip, HSW_EM5, bclk_n);
6919 +-}
6920 +-
6921 + #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
6922 + /*
6923 + * power management
6924 +@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
6925 + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
6926 + && hda->need_i915_power) {
6927 + snd_hdac_display_power(azx_bus(chip), true);
6928 +- haswell_set_bclk(hda);
6929 ++ snd_hdac_i915_set_bclk(azx_bus(chip));
6930 + }
6931 + if (chip->msi)
6932 + if (pci_enable_msi(pci) < 0)
6933 +@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
6934 + bus = azx_bus(chip);
6935 + if (hda->need_i915_power) {
6936 + snd_hdac_display_power(bus, true);
6937 +- haswell_set_bclk(hda);
6938 ++ snd_hdac_i915_set_bclk(bus);
6939 + } else {
6940 + /* toggle codec wakeup bit for STATESTS read */
6941 + snd_hdac_set_codec_wakeup(bus, true);
6942 +@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
6943 + /* initialize chip */
6944 + azx_init_pci(chip);
6945 +
6946 +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
6947 +- struct hda_intel *hda;
6948 +-
6949 +- hda = container_of(chip, struct hda_intel, chip);
6950 +- haswell_set_bclk(hda);
6951 +- }
6952 ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
6953 ++ snd_hdac_i915_set_bclk(bus);
6954 +
6955 + hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
6956 +
6957 +@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
6958 + /* Broxton-P(Apollolake) */
6959 + { PCI_DEVICE(0x8086, 0x5a98),
6960 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
6961 ++ /* Broxton-T */
6962 ++ { PCI_DEVICE(0x8086, 0x1a98),
6963 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
6964 + /* Haswell */
6965 + { PCI_DEVICE(0x8086, 0x0a0c),
6966 + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
6967 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
6968 +index a47e8ae0eb30..80bbadc83721 100644
6969 +--- a/sound/pci/hda/patch_cirrus.c
6970 ++++ b/sound/pci/hda/patch_cirrus.c
6971 +@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
6972 + {
6973 + struct cs_spec *spec = codec->spec;
6974 + int err;
6975 ++ int i;
6976 +
6977 + err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
6978 + if (err < 0)
6979 +@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
6980 + if (err < 0)
6981 + return err;
6982 +
6983 ++ /* keep the ADCs powered up when dynamic ADC switching is in use */
6984 ++ if (spec->gen.dyn_adc_switch) {
6985 ++ unsigned int done = 0;
6986 ++ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
6987 ++ int idx = spec->gen.dyn_adc_idx[i];
6988 ++ if (done & (1 << idx))
6989 ++ continue;
6990 ++ snd_hda_gen_fix_pin_power(codec,
6991 ++ spec->gen.adc_nids[idx]);
6992 ++ done |= 1 << idx;
6993 ++ }
6994 ++ }
6995 ++
6996 + return 0;
6997 + }
6998 +
6999 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
7000 +index 0c9585602bf3..c98e404afbe0 100644
7001 +--- a/sound/pci/hda/patch_hdmi.c
7002 ++++ b/sound/pci/hda/patch_hdmi.c
7003 +@@ -2452,6 +2452,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
7004 + if (atomic_read(&(codec)->core.in_pm))
7005 + return;
7006 +
7007 ++ snd_hdac_i915_set_bclk(&codec->bus->core);
7008 + check_presence_and_report(codec, pin_nid);
7009 + }
7010 +
7011 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7012 +index 1402ba954b3d..ac4490a96863 100644
7013 +--- a/sound/pci/hda/patch_realtek.c
7014 ++++ b/sound/pci/hda/patch_realtek.c
7015 +@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7016 + SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7017 + SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7018 + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
7019 ++ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
7020 + SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
7021 + SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
7022 + SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
7023 +@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7024 + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
7025 + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
7026 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
7027 ++ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
7028 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
7029 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7030 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
7031 +diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
7032 +index c5194f5b150a..d7e71f309299 100644
7033 +--- a/sound/pci/pcxhr/pcxhr_core.c
7034 ++++ b/sound/pci/pcxhr/pcxhr_core.c
7035 +@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
7036 + }
7037 +
7038 + pcxhr_msg_thread(mgr);
7039 ++ mutex_unlock(&mgr->lock);
7040 + return IRQ_HANDLED;
7041 + }
7042 +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
7043 +index 11d032cdc658..48dbb2fdeb09 100644
7044 +--- a/sound/soc/codecs/rt5640.c
7045 ++++ b/sound/soc/codecs/rt5640.c
7046 +@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
7047 +
7048 + /* Interface data select */
7049 + static const char * const rt5640_data_select[] = {
7050 +- "Normal", "left copy to right", "right copy to left", "Swap"};
7051 ++ "Normal", "Swap", "left copy to right", "right copy to left"};
7052 +
7053 + static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
7054 + RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
7055 +diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
7056 +index 83a7150ddc24..f84231e7d1dd 100644
7057 +--- a/sound/soc/codecs/rt5640.h
7058 ++++ b/sound/soc/codecs/rt5640.h
7059 +@@ -442,39 +442,39 @@
7060 + #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
7061 + #define RT5640_IF1_DAC_SEL_SFT 14
7062 + #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
7063 +-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
7064 +-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
7065 +-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
7066 ++#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
7067 ++#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
7068 ++#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
7069 + #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
7070 + #define RT5640_IF1_ADC_SEL_SFT 12
7071 + #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
7072 +-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
7073 +-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
7074 +-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
7075 ++#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
7076 ++#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
7077 ++#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
7078 + #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
7079 + #define RT5640_IF2_DAC_SEL_SFT 10
7080 + #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
7081 +-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
7082 +-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
7083 +-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
7084 ++#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
7085 ++#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
7086 ++#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
7087 + #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
7088 + #define RT5640_IF2_ADC_SEL_SFT 8
7089 + #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
7090 +-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
7091 +-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
7092 +-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
7093 ++#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
7094 ++#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
7095 ++#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
7096 + #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
7097 + #define RT5640_IF3_DAC_SEL_SFT 6
7098 + #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
7099 +-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
7100 +-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
7101 +-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
7102 ++#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
7103 ++#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
7104 ++#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
7105 + #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
7106 + #define RT5640_IF3_ADC_SEL_SFT 4
7107 + #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
7108 +-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
7109 +-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
7110 +-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
7111 ++#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
7112 ++#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
7113 ++#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
7114 +
7115 + /* REC Left Mixer Control 1 (0x3b) */
7116 + #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
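The reorder of both the strings and the #defines above matters because an enum kcontrol writes the index of the selected string into the register field, so the array must follow the hardware encoding (0 = normal, 1 = swap, 2 = left-to-right, 3 = right-to-left). A sketch of the correspondence, indices only, with the register plumbing omitted:

#include <stdio.h>

static const char * const rt5640_data_select[] = {
	"Normal", "Swap", "left copy to right", "right copy to left",
};

int main(void)
{
	/* Index 1 now encodes 0x1, matching RT5640_IF1_DAC_SEL_SWAP. */
	for (int val = 0; val < 4; val++)
		printf("field value 0x%x -> \"%s\"\n", val,
		       rt5640_data_select[val]);
	return 0;
}
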
7117 +diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
7118 +index e619d5651b09..080c78e88e10 100644
7119 +--- a/sound/soc/codecs/ssm4567.c
7120 ++++ b/sound/soc/codecs/ssm4567.c
7121 +@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
7122 + regcache_cache_only(ssm4567->regmap, !enable);
7123 +
7124 + if (enable) {
7125 ++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
7126 ++ 0x00);
7127 ++ if (ret)
7128 ++ return ret;
7129 ++
7130 + ret = regmap_update_bits(ssm4567->regmap,
7131 + SSM4567_REG_POWER_CTRL,
7132 + SSM4567_POWER_SPWDN, 0x00);
7133 +diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
7134 +index df65c5b494b1..b6ab3fc5789e 100644
7135 +--- a/sound/soc/samsung/s3c-i2s-v2.c
7136 ++++ b/sound/soc/samsung/s3c-i2s-v2.c
7137 +@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
7138 + #endif
7139 +
7140 + int s3c_i2sv2_register_component(struct device *dev, int id,
7141 +- struct snd_soc_component_driver *cmp_drv,
7142 ++ const struct snd_soc_component_driver *cmp_drv,
7143 + struct snd_soc_dai_driver *dai_drv)
7144 + {
7145 + struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
7146 +diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
7147 +index 90abab364b49..d0684145ed1f 100644
7148 +--- a/sound/soc/samsung/s3c-i2s-v2.h
7149 ++++ b/sound/soc/samsung/s3c-i2s-v2.h
7150 +@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
7151 + * soc core.
7152 + */
7153 + extern int s3c_i2sv2_register_component(struct device *dev, int id,
7154 +- struct snd_soc_component_driver *cmp_drv,
7155 ++ const struct snd_soc_component_driver *cmp_drv,
7156 + struct snd_soc_dai_driver *dai_drv);
7157 +
7158 + #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
7159 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
7160 +index 581175a51ecf..5e811dc02fb9 100644
7161 +--- a/sound/soc/soc-dapm.c
7162 ++++ b/sound/soc/soc-dapm.c
7163 +@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
7164 + int count = 0;
7165 + char *state = "not set";
7166 +
7167 ++ /* The card won't be set for the dummy component. As a spot fix
7168 ++ * we check for that case specifically here, but in future we
7169 ++ * will ensure that the dummy component looks like the others.
7170 ++ */
7171 ++ if (!cmpnt->card)
7172 ++ return 0;
7173 ++
7174 + list_for_each_entry(w, &cmpnt->card->widgets, list) {
7175 + if (w->dapm != dapm)
7176 + continue;
7177 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
7178 +index 52ef7a9d50aa..14d9e8ffaff7 100644
7179 +--- a/tools/perf/Documentation/perf-stat.txt
7180 ++++ b/tools/perf/Documentation/perf-stat.txt
7181 +@@ -69,6 +69,14 @@ report::
7182 + --scale::
7183 + scale/normalize counter values
7184 +
7185 ++-d::
7186 ++--detailed::
7187 ++ print more detailed statistics; can be specified up to 3 times
7188 ++
7189 ++ -d: detailed events, L1 and LLC data cache
7190 ++ -d -d: more detailed events, dTLB and iTLB events
7191 ++ -d -d -d: very detailed events, adding prefetch events
7192 ++
7193 + -r::
7194 + --repeat=<n>::
7195 + repeat command and print average + stddev (max: 100). 0 means forever.
7196 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
7197 +index 08c09ad755d2..7bb47424bc49 100644
7198 +--- a/tools/perf/ui/browsers/hists.c
7199 ++++ b/tools/perf/ui/browsers/hists.c
7200 +@@ -302,7 +302,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
7201 + chain = list_entry(node->val.next, struct callchain_list, list);
7202 + chain->has_children = has_sibling;
7203 +
7204 +- if (node->val.next != node->val.prev) {
7205 ++ if (!list_empty(&node->val)) {
7206 + chain = list_entry(node->val.prev, struct callchain_list, list);
7207 + chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
7208 + }
7209 +@@ -844,7 +844,7 @@ next:
7210 + return row - first_row;
7211 + }
7212 +
7213 +-static int hist_browser__show_callchain(struct hist_browser *browser,
7214 ++static int hist_browser__show_callchain_graph(struct hist_browser *browser,
7215 + struct rb_root *root, int level,
7216 + unsigned short row, u64 total,
7217 + print_callchain_entry_fn print,
7218 +@@ -898,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
7219 + else
7220 + new_total = total;
7221 +
7222 +- row += hist_browser__show_callchain(browser, &child->rb_root,
7223 ++ row += hist_browser__show_callchain_graph(browser, &child->rb_root,
7224 + new_level, row, new_total,
7225 + print, arg, is_output_full);
7226 + }
7227 +@@ -910,6 +910,43 @@ out:
7228 + return row - first_row;
7229 + }
7230 +
7231 ++static int hist_browser__show_callchain(struct hist_browser *browser,
7232 ++ struct hist_entry *entry, int level,
7233 ++ unsigned short row,
7234 ++ print_callchain_entry_fn print,
7235 ++ struct callchain_print_arg *arg,
7236 ++ check_output_full_fn is_output_full)
7237 ++{
7238 ++ u64 total = hists__total_period(entry->hists);
7239 ++ int printed;
7240 ++
7241 ++ if (callchain_param.mode == CHAIN_GRAPH_REL) {
7242 ++ if (symbol_conf.cumulate_callchain)
7243 ++ total = entry->stat_acc->period;
7244 ++ else
7245 ++ total = entry->stat.period;
7246 ++ }
7247 ++
7248 ++ if (callchain_param.mode == CHAIN_FLAT) {
7249 ++ printed = hist_browser__show_callchain_flat(browser,
7250 ++ &entry->sorted_chain, row, total,
7251 ++ print, arg, is_output_full);
7252 ++ } else if (callchain_param.mode == CHAIN_FOLDED) {
7253 ++ printed = hist_browser__show_callchain_folded(browser,
7254 ++ &entry->sorted_chain, row, total,
7255 ++ print, arg, is_output_full);
7256 ++ } else {
7257 ++ printed = hist_browser__show_callchain_graph(browser,
7258 ++ &entry->sorted_chain, level, row, total,
7259 ++ print, arg, is_output_full);
7260 ++ }
7261 ++
7262 ++ if (arg->is_current_entry)
7263 ++ browser->he_selection = entry;
7264 ++
7265 ++ return printed;
7266 ++}
7267 ++
7268 + struct hpp_arg {
7269 + struct ui_browser *b;
7270 + char folded_sign;
7271 +@@ -1084,38 +1121,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
7272 + --row_offset;
7273 +
7274 + if (folded_sign == '-' && row != browser->b.rows) {
7275 +- u64 total = hists__total_period(entry->hists);
7276 + struct callchain_print_arg arg = {
7277 + .row_offset = row_offset,
7278 + .is_current_entry = current_entry,
7279 + };
7280 +
7281 +- if (callchain_param.mode == CHAIN_GRAPH_REL) {
7282 +- if (symbol_conf.cumulate_callchain)
7283 +- total = entry->stat_acc->period;
7284 +- else
7285 +- total = entry->stat.period;
7286 +- }
7287 +-
7288 +- if (callchain_param.mode == CHAIN_FLAT) {
7289 +- printed += hist_browser__show_callchain_flat(browser,
7290 +- &entry->sorted_chain, row, total,
7291 ++ printed += hist_browser__show_callchain(browser, entry, 1, row,
7292 + hist_browser__show_callchain_entry, &arg,
7293 + hist_browser__check_output_full);
7294 +- } else if (callchain_param.mode == CHAIN_FOLDED) {
7295 +- printed += hist_browser__show_callchain_folded(browser,
7296 +- &entry->sorted_chain, row, total,
7297 +- hist_browser__show_callchain_entry, &arg,
7298 +- hist_browser__check_output_full);
7299 +- } else {
7300 +- printed += hist_browser__show_callchain(browser,
7301 +- &entry->sorted_chain, 1, row, total,
7302 +- hist_browser__show_callchain_entry, &arg,
7303 +- hist_browser__check_output_full);
7304 +- }
7305 +-
7306 +- if (arg.is_current_entry)
7307 +- browser->he_selection = entry;
7308 + }
7309 +
7310 + return printed;
7311 +@@ -1380,15 +1393,11 @@ do_offset:
7312 + static int hist_browser__fprintf_callchain(struct hist_browser *browser,
7313 + struct hist_entry *he, FILE *fp)
7314 + {
7315 +- u64 total = hists__total_period(he->hists);
7316 + struct callchain_print_arg arg = {
7317 + .fp = fp,
7318 + };
7319 +
7320 +- if (symbol_conf.cumulate_callchain)
7321 +- total = he->stat_acc->period;
7322 +-
7323 +- hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
7324 ++ hist_browser__show_callchain(browser, he, 1, 0,
7325 + hist_browser__fprintf_callchain_entry, &arg,
7326 + hist_browser__check_dump_full);
7327 + return arg.printed;
7328 +@@ -2320,10 +2329,12 @@ skip_annotation:
7329 + *
7330 + * See hist_browser__show_entry.
7331 + */
7332 +- nr_options += add_script_opt(browser,
7333 +- &actions[nr_options],
7334 +- &options[nr_options],
7335 +- NULL, browser->selection->sym);
7336 ++ if (sort__has_sym && browser->selection->sym) {
7337 ++ nr_options += add_script_opt(browser,
7338 ++ &actions[nr_options],
7339 ++ &options[nr_options],
7340 ++ NULL, browser->selection->sym);
7341 ++ }
7342 + }
7343 + nr_options += add_script_opt(browser, &actions[nr_options],
7344 + &options[nr_options], NULL, NULL);
7345 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
7346 +index 85155e91b61b..7bad5c3fa7b7 100644
7347 +--- a/tools/perf/util/event.c
7348 ++++ b/tools/perf/util/event.c
7349 +@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
7350 + strcpy(execname, "");
7351 +
7352 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
7353 +- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
7354 ++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
7355 + &event->mmap2.start, &event->mmap2.len, prot,
7356 + &event->mmap2.pgoff, &event->mmap2.maj,
7357 + &event->mmap2.min,
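The one-character format change fixes truncation of executable paths containing spaces: %s stops at the first whitespace, while %[^\n] consumes the rest of the line. A standalone demonstration on a made-up maps line, with the other fields simplified via assignment suppression:

#include <stdio.h>

int main(void)
{
	const char *bf =
		"00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/my cat";
	char execname[64];

	sscanf(bf, "%*x-%*x %*s %*x %*x:%*x %*u %s", execname);
	printf("%%s:     '%s'\n", execname);	/* '/bin/my' -- truncated */

	sscanf(bf, "%*x-%*x %*s %*x %*x:%*x %*u %[^\n]", execname);
	printf("%%[^\\n]: '%s'\n", execname);	/* '/bin/my cat' */
	return 0;
}
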
7358 +diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
7359 +index d81f13de2476..a7eb0eae9938 100644
7360 +--- a/tools/perf/util/evlist.c
7361 ++++ b/tools/perf/util/evlist.c
7362 +@@ -1181,12 +1181,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
7363 + */
7364 + if (cpus != evlist->cpus) {
7365 + cpu_map__put(evlist->cpus);
7366 +- evlist->cpus = cpus;
7367 ++ evlist->cpus = cpu_map__get(cpus);
7368 + }
7369 +
7370 + if (threads != evlist->threads) {
7371 + thread_map__put(evlist->threads);
7372 +- evlist->threads = threads;
7373 ++ evlist->threads = thread_map__get(threads);
7374 + }
7375 +
7376 + perf_evlist__propagate_maps(evlist);
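The evlist fix above switches from stealing the caller's reference to taking one of its own, restoring get/put symmetry. A minimal sketch of that idiom with a hypothetical refcounted map type -- not perf's actual cpu_map API:

#include <stdlib.h>

struct map { int refcnt; };

static struct map *map_get(struct map *m)
{
	if (m)
		m->refcnt++;
	return m;
}

static void map_put(struct map *m)
{
	if (m && --m->refcnt == 0)
		free(m);
}

struct evlist { struct map *cpus; };

static void evlist_set_cpus(struct evlist *evlist, struct map *cpus)
{
	if (cpus != evlist->cpus) {
		map_put(evlist->cpus);		/* drop the old reference */
		evlist->cpus = map_get(cpus);	/* take our own on the new one */
	}
}
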
7377 +diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
7378 +index 8e75434bd01c..4d8037a3d8a4 100644
7379 +--- a/tools/perf/util/evsel.h
7380 ++++ b/tools/perf/util/evsel.h
7381 +@@ -93,10 +93,8 @@ struct perf_evsel {
7382 + const char *unit;
7383 + struct event_format *tp_format;
7384 + off_t id_offset;
7385 +- union {
7386 +- void *priv;
7387 +- u64 db_id;
7388 +- };
7389 ++ void *priv;
7390 ++ u64 db_id;
7391 + struct cgroup_sel *cgrp;
7392 + void *handler;
7393 + struct cpu_map *cpus;
7394 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
7395 +index 05d815851be1..4e1590ba8902 100644
7396 +--- a/tools/perf/util/intel-pt.c
7397 ++++ b/tools/perf/util/intel-pt.c
7398 +@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
7399 + pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
7400 + ret);
7401 +
7402 +- if (pt->synth_opts.callchain)
7403 ++ if (pt->synth_opts.last_branch)
7404 + intel_pt_reset_last_branch_rb(ptq);
7405 +
7406 + return ret;
7407 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
7408 +index ea6064696fe4..a7b9022b5c8f 100644
7409 +--- a/virt/kvm/arm/arch_timer.c
7410 ++++ b/virt/kvm/arm/arch_timer.c
7411 +@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
7412 + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
7413 + vcpu->arch.timer_cpu.armed = false;
7414 +
7415 ++ WARN_ON(!kvm_timer_should_fire(vcpu));
7416 ++
7417 + /*
7418 + * If the vcpu is blocked we want to wake it up so that it will see
7419 + * the timer has expired when entering the guest.
7420 +@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
7421 + kvm_vcpu_kick(vcpu);
7422 + }
7423 +
7424 ++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
7425 ++{
7426 ++ cycle_t cval, now;
7427 ++
7428 ++ cval = vcpu->arch.timer_cpu.cntv_cval;
7429 ++ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
7430 ++
7431 ++ if (now < cval) {
7432 ++ u64 ns;
7433 ++
7434 ++ ns = cyclecounter_cyc2ns(timecounter->cc,
7435 ++ cval - now,
7436 ++ timecounter->mask,
7437 ++ &timecounter->frac);
7438 ++ return ns;
7439 ++ }
7440 ++
7441 ++ return 0;
7442 ++}
7443 ++
7444 + static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
7445 + {
7446 + struct arch_timer_cpu *timer;
7447 ++ struct kvm_vcpu *vcpu;
7448 ++ u64 ns;
7449 ++
7450 + timer = container_of(hrt, struct arch_timer_cpu, timer);
7451 ++ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
7452 ++
7453 ++ /*
7454 ++ * Check that the timer has really expired from the guest's
7455 ++ * PoV (NTP on the host may have forced it to expire
7456 ++ * early). If we should have slept longer, restart it.
7457 ++ */
7458 ++ ns = kvm_timer_compute_delta(vcpu);
7459 ++ if (unlikely(ns)) {
7460 ++ hrtimer_forward_now(hrt, ns_to_ktime(ns));
7461 ++ return HRTIMER_RESTART;
7462 ++ }
7463 ++
7464 + queue_work(wqueue, &timer->expired);
7465 + return HRTIMER_NORESTART;
7466 + }
7467 +@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
7468 + void kvm_timer_schedule(struct kvm_vcpu *vcpu)
7469 + {
7470 + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
7471 +- u64 ns;
7472 +- cycle_t cval, now;
7473 +
7474 + BUG_ON(timer_is_armed(timer));
7475 +
7476 +@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
7477 + return;
7478 +
7479 + /* The timer has not yet expired, schedule a background timer */
7480 +- cval = timer->cntv_cval;
7481 +- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
7482 +-
7483 +- ns = cyclecounter_cyc2ns(timecounter->cc,
7484 +- cval - now,
7485 +- timecounter->mask,
7486 +- &timecounter->frac);
7487 +- timer_arm(timer, ns);
7488 ++ timer_arm(timer, kvm_timer_compute_delta(vcpu));
7489 + }
7490 +
7491 + void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
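
The factored-out delta computation is a straightforward cycles-to-nanoseconds conversion; cyclecounter_cyc2ns() is essentially a fixed-point multiply, (cycles * mult) >> shift. A sketch with hypothetical timecounter parameters chosen so that one cycle equals one nanosecond:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for cyclecounter_cyc2ns(). */
static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	uint64_t cval = 1050000, now = 1000000;	/* guest compare vs. counter */
	uint32_t mult = 1u << 24, shift = 24;	/* hypothetical: 1 cycle == 1 ns */

	/* Timer still in the future: sleep for the remaining delta;
	 * otherwise it has already fired and the delta is 0. */
	uint64_t ns = now < cval ? cyc2ns(cval - now, mult, shift) : 0;
	printf("sleep %llu ns\n", (unsigned long long)ns);	/* 50000 */
	return 0;
}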