
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 04 May 2016 23:51:41
Message-Id: 1462405886.767fef5527df9042fe8c078084c89159c991d530.mpagano@gentoo
commit: 767fef5527df9042fe8c078084c89159c991d530
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 4 23:51:26 2016 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 4 23:51:26 2016 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=767fef55

Linux patch 4.4.9

 0000_README | 4 +
 1008_linux-4.4.9.patch | 6094 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6098 insertions(+)

diff --git a/0000_README b/0000_README
index 5596308..18110fa 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-4.4.8.patch
 From: http://www.kernel.org
 Desc: Linux 4.4.8
 
+Patch: 1008_linux-4.4.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.4.9
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1008_linux-4.4.9.patch b/1008_linux-4.4.9.patch
new file mode 100644
index 0000000..57b3baa
--- /dev/null
+++ b/1008_linux-4.4.9.patch
@@ -0,0 +1,6094 @@
+diff --git a/Makefile b/Makefile
+index 1928fcd539cc..0722cdf52152 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+
+diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
+index 47954ed990f8..00707aac72fc 100644
+--- a/arch/arm/boot/dts/am43x-epos-evm.dts
++++ b/arch/arm/boot/dts/am43x-epos-evm.dts
+@@ -792,3 +792,8 @@
+ tx-num-evt = <32>;
+ rx-num-evt = <32>;
+ };
++
++&synctimer_32kclk {
++ assigned-clocks = <&mux_synctimer32k_ck>;
++ assigned-clock-parents = <&clkdiv32k_ick>;
++};
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index 7ccce7529b0c..cc952cf8ec30 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -529,7 +529,7 @@
+ };
+
+ sata@a0000 {
+- compatible = "marvell,orion-sata";
++ compatible = "marvell,armada-370-sata";
+ reg = <0xa0000 0x5000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
+index 3710755c6d76..85d2c377c332 100644
+--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
+@@ -117,7 +117,7 @@
+ };
+
+ /* USB part of the eSATA/USB 2.0 port */
+- usb@50000 {
++ usb@58000 {
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
+index cf6998a0804d..564341af7e97 100644
+--- a/arch/arm/boot/dts/pxa3xx.dtsi
++++ b/arch/arm/boot/dts/pxa3xx.dtsi
+@@ -30,7 +30,7 @@
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ clocks = <&clks CLK_NAND>;
+- dmas = <&pdma 97>;
++ dmas = <&pdma 97 3>;
+ dma-names = "data";
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 3a10f1a8317a..bfd8bb371477 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -26,6 +26,7 @@ menuconfig ARCH_EXYNOS
+ select S5P_DEV_MFC
+ select SRAM
+ select THERMAL
++ select THERMAL_OF
+ select MFD_SYSCON
+ help
+ Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ .safe_state_index = 0,
+ };
+
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++ .name = "omap3430_idle",
++ .owner = THIS_MODULE,
++ .states = {
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 110 + 162,
++ .target_residency = 5,
++ .name = "C1",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 106 + 180,
++ .target_residency = 309,
++ .name = "C2",
++ .desc = "MPU ON + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 107 + 410,
++ .target_residency = 46057,
++ .name = "C3",
++ .desc = "MPU RET + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 121 + 3374,
++ .target_residency = 46057,
++ .name = "C4",
++ .desc = "MPU OFF + CORE ON",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 855 + 1146,
++ .target_residency = 46057,
++ .name = "C5",
++ .desc = "MPU RET + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7580 + 4134,
++ .target_residency = 484329,
++ .name = "C6",
++ .desc = "MPU OFF + CORE RET",
++ },
++ {
++ .enter = omap3_enter_idle_bm,
++ .exit_latency = 7505 + 15274,
++ .target_residency = 484329,
++ .name = "C7",
++ .desc = "MPU OFF + CORE OFF",
++ },
++ },
++ .state_count = ARRAY_SIZE(omap3_idle_data),
++ .safe_state_index = 0,
++};
++
+ /* Public functions */
+
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ return -ENODEV;
+
+- return cpuidle_register(&omap3_idle_driver, NULL);
++ if (cpu_is_omap3430())
++ return cpuidle_register(&omap3430_idle_driver, NULL);
++ else
++ return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
+index 3eaeaca5da05..3a911d8dea8b 100644
+--- a/arch/arm/mach-omap2/io.c
++++ b/arch/arm/mach-omap2/io.c
+@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
+ void __init dra7xx_map_io(void)
+ {
+ iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
++ omap_barriers_init();
+ }
+ #endif
+ /*
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 8e0bd5939e5a..147c90e70b2e 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ (sf & SYSC_HAS_CLOCKACTIVITY))
+ _set_clockactivity(oh, oh->class->sysc->clockact, &v);
+
+- /* If the cached value is the same as the new value, skip the write */
+- if (oh->_sysc_cache != v)
+- _write_sysconfig(v, oh);
++ _write_sysconfig(v, oh);
+
+ /*
+ * Set the autoidle bit only after setting the smartidle bit
+@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ _set_master_standbymode(oh, idlemode, &v);
+ }
+
+- _write_sysconfig(v, oh);
++ /* If the cached value is the same as the new value, skip the write */
++ if (oh->_sysc_cache != v)
++ _write_sysconfig(v, oh);
+ }
+
+ /**
+diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
+index 9ab8932403e5..56e55fd37d13 100644
+--- a/arch/arm/mach-prima2/Kconfig
++++ b/arch/arm/mach-prima2/Kconfig
+@@ -1,6 +1,7 @@
+ menuconfig ARCH_SIRF
+ bool "CSR SiRF" if ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
++ select RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_IRQ_CHIP
+ select NO_IOPORT_MAP
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index eaa9cabf4066..c63868ae9a4a 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -69,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+ #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+ #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
+-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
++#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
++#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
++#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
++#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
++#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
+ #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+ #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+@@ -83,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+
+ #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
++#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+ #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+
+@@ -155,6 +155,7 @@ extern struct page *empty_zero_page;
+ #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
+ #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
+ #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
++#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
+
+ #ifdef CONFIG_ARM64_HW_AFDBM
+ #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+@@ -165,8 +166,6 @@ extern struct page *empty_zero_page;
+ #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+ #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+-#define pte_valid_user(pte) \
+- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+ #define pte_valid_not_user(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+
+@@ -264,13 +263,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+ {
+- if (pte_valid_user(pte)) {
+- if (!pte_special(pte) && pte_exec(pte))
+- __sync_icache_dcache(pte, addr);
++ if (pte_present(pte)) {
+ if (pte_sw_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
++ if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
++ __sync_icache_dcache(pte, addr);
+ }
+
+ /*
+@@ -641,6 +640,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+ * bits 0-1: present (must be zero)
+ * bits 2-7: swap type
+ * bits 8-57: swap offset
++ * bit 58: PTE_PROT_NONE (must be zero)
+ */
+ #define __SWP_TYPE_SHIFT 2
+ #define __SWP_TYPE_BITS 6
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index 43686043e297..2734c005da21 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 0x00000040
+
++/* Reserved - do not use 0x00000004 */
+ #define PPC_FEATURE_TRUE_LE 0x00000002
+ #define PPC_FEATURE_PPC_LE 0x00000001
+
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 7030b035905d..a15fe1d4e84a 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
+ unsigned long cpu_features; /* CPU_FTR_xxx bit */
+ unsigned long mmu_features; /* MMU_FTR_xxx bit */
+ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
++ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
+ unsigned char pabyte; /* byte number in ibm,pa-features */
+ unsigned char pabit; /* bit number (big-endian) */
+ unsigned char invert; /* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
+- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
+- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
+- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
+- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
+- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
+- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
++ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
++ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
++ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
++ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
++ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
++ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ /*
+- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+- * which is 0 if the kernel doesn't support TM.
++ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++ * we don't want to turn on TM here, so we use the *_COMP versions
++ * which are 0 if the kernel doesn't support TM.
+ */
+- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
++ {CPU_FTR_TM_COMP, 0, 0,
++ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ if (bit ^ fp->invert) {
+ cur_cpu_spec->cpu_features |= fp->cpu_features;
+ cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features |= fp->mmu_features;
+ } else {
+ cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ }
+ }
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 2b2ced9dc00a..6dafabb6ae1a 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ u64 rpcit_ops;
+ u64 dma_rbytes;
+ u64 dma_wbytes;
+-} __packed __aligned(64);
++ u64 pad[2];
++} __packed __aligned(128);
+
+ enum zpci_state {
+ ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index a841e9765bd6..8381c09d2870 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+
+ req = cast_mcryptd_ctx_to_req(req_ctx);
+ if (irqs_disabled())
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ else {
+ local_bh_disable();
+- rctx->complete(&req->base, ret);
++ req_ctx->complete(&req->base, ret);
+ local_bh_enable();
+ }
+ }
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index f8a29d2c97b0..e6a8613fbfb0 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+
++#define hugepages_supported() cpu_has_pse
+
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 7af2505f20c2..df6b4eeac0bd 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -254,7 +254,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ struct irq_desc *desc;
+ int cpu, vector;
+
+- BUG_ON(!data->cfg.vector);
++ if (!data->cfg.vector)
++ return;
+
+ vector = data->cfg.vector;
+ for_each_cpu_and(cpu, data->domain, cpu_online_mask)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+index 0a850100c594..2658e2af74ec 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
+ void mce_gen_pool_process(void)
+ {
+ struct llist_node *head;
+- struct mce_evt_llist *node;
++ struct mce_evt_llist *node, *tmp;
+ struct mce *mce;
+
+ head = llist_del_all(&mce_event_llist);
+@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
+ return;
+
+ head = llist_reverse_order(head);
+- llist_for_each_entry(node, head, llnode) {
++ llist_for_each_entry_safe(node, tmp, head, llnode) {
+ mce = &node->mce;
+ atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+ gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7eb4ebd3ebea..605cea75eb0d 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
+ return 1;
+ }
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->arch.xcr0 = xcr0;
+
+ if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
+@@ -6495,8 +6494,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ kvm_x86_ops->prepare_guest_switch(vcpu);
+ if (vcpu->fpu_active)
+ kvm_load_guest_fpu(vcpu);
+- kvm_load_guest_xcr0(vcpu);
+-
+ vcpu->mode = IN_GUEST_MODE;
+
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6519,6 +6516,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ goto cancel_injection;
+ }
+
++ kvm_load_guest_xcr0(vcpu);
++
+ if (req_immediate_exit)
+ smp_send_reschedule(vcpu->cpu);
+
+@@ -6568,6 +6567,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ smp_wmb();
+
++ kvm_put_guest_xcr0(vcpu);
++
+ /* Interrupt is enabled by handle_external_intr() */
+ kvm_x86_ops->handle_external_intr(vcpu);
+
+@@ -7215,7 +7216,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ * and assume host would use all available bits.
+ * Guest xcr0 would be loaded later.
+ */
+- kvm_put_guest_xcr0(vcpu);
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+@@ -7224,8 +7224,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+- kvm_put_guest_xcr0(vcpu);
+-
+ if (!vcpu->guest_fpu_loaded) {
+ vcpu->fpu_counter = 0;
+ return;
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ struct list_head list;
+ struct kmmio_fault_page *release_next;
+- unsigned long page; /* location of the fault page */
++ unsigned long addr; /* the requested address */
+ pteval_t old_presence; /* page presence prior to arming */
+ bool armed;
+
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++
++ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ struct list_head *head;
+ struct kmmio_fault_page *f;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
+
+- page &= PAGE_MASK;
+- head = kmmio_page_list(page);
++ if (!pte)
++ return NULL;
++ addr &= page_level_mask(l);
++ head = kmmio_page_list(addr);
+ list_for_each_entry_rcu(f, head, list) {
+- if (f->page == page)
++ if (f->addr == addr)
+ return f;
+ }
+ return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ unsigned int level;
+- pte_t *pte = lookup_address(f->page, &level);
++ pte_t *pte = lookup_address(f->addr, &level);
+
+ if (!pte) {
+- pr_err("no pte for page 0x%08lx\n", f->page);
++ pr_err("no pte for addr 0x%08lx\n", f->addr);
+ return -1;
+ }
+
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ return -1;
+ }
+
+- __flush_tlb_one(f->page);
++ __flush_tlb_one(f->addr);
+ return 0;
+ }
+
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ int ret;
+ WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ if (f->armed) {
+- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+- f->page, f->count, !!f->old_presence);
++ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++ f->addr, f->count, !!f->old_presence);
+ }
+ ret = clear_page_presence(f, true);
+- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+- f->page);
++ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++ f->addr);
+ f->armed = true;
+ return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ int ret = clear_page_presence(f, false);
+ WARN_ONCE(ret < 0,
+- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ f->armed = false;
+ }
+
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ struct kmmio_context *ctx;
+ struct kmmio_fault_page *faultpage;
+ int ret = 0; /* default to fault not handled */
++ unsigned long page_base = addr;
++ unsigned int l;
++ pte_t *pte = lookup_address(addr, &l);
++ if (!pte)
++ return -EINVAL;
++ page_base &= page_level_mask(l);
+
+ /*
+ * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ preempt_disable();
+ rcu_read_lock();
+
+- faultpage = get_kmmio_fault_page(addr);
++ faultpage = get_kmmio_fault_page(page_base);
+ if (!faultpage) {
+ /*
+ * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+
+ ctx = &get_cpu_var(kmmio_ctx);
+ if (ctx->active) {
+- if (addr == ctx->addr) {
++ if (page_base == ctx->addr) {
+ /*
+ * A second fault on the same page means some other
+ * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ ctx->active++;
+
+ ctx->fpage = faultpage;
+- ctx->probe = get_kmmio_probe(addr);
++ ctx->probe = get_kmmio_probe(page_base);
+ ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+- ctx->addr = addr;
++ ctx->addr = page_base;
+
+ if (ctx->probe && ctx->probe->pre_handler)
+ ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (f) {
+ if (!f->count)
+ arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ return -1;
+
+ f->count = 1;
+- f->page = page;
++ f->addr = addr;
+
+ if (arm_kmmio_fault_page(f)) {
+ kfree(f);
+ return -1;
+ }
+
+- list_add_rcu(&f->list, kmmio_page_list(f->page));
++ list_add_rcu(&f->list, kmmio_page_list(f->addr));
+
+ return 0;
+ }
+
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ struct kmmio_fault_page **release_list)
+ {
+ struct kmmio_fault_page *f;
+
+- page &= PAGE_MASK;
+- f = get_kmmio_fault_page(page);
++ f = get_kmmio_fault_page(addr);
+ if (!f)
+ return;
+
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ int ret = 0;
+ unsigned long size = 0;
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++ unsigned int l;
++ pte_t *pte;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ if (get_kmmio_probe(p->addr)) {
+ ret = -EEXIST;
+ goto out;
+ }
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ kmmio_count++;
+ list_add_rcu(&p->list, &kmmio_probes);
+ while (size < size_lim) {
+ if (add_kmmio_fault_page(p->addr + size))
+ pr_err("Unable to set page fault.\n");
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ out:
+ spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ struct kmmio_fault_page *release_list = NULL;
+ struct kmmio_delayed_release *drelease;
++ unsigned int l;
++ pte_t *pte;
++
++ pte = lookup_address(p->addr, &l);
++ if (!pte)
++ return;
+
+ spin_lock_irqsave(&kmmio_lock, flags);
+ while (size < size_lim) {
+ release_kmmio_fault_page(p->addr + size, &release_list);
+- size += PAGE_SIZE;
++ size += page_level_size(l);
+ }
+ list_del_rcu(&p->list);
+ kmmio_count--;
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 746935a5973c..a241e3900bc9 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -349,15 +349,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ goto out_del;
+ }
+
++ err = hd_ref_init(p);
++ if (err) {
++ if (flags & ADDPART_FLAG_WHOLEDISK)
++ goto out_remove_file;
++ goto out_del;
++ }
++
+ /* everything is up and running, commence */
+ rcu_assign_pointer(ptbl->part[partno], p);
+
+ /* suppress uevent if the disk suppresses it */
+ if (!dev_get_uevent_suppress(ddev))
+ kobject_uevent(&pdev->kobj, KOBJ_ADD);
+-
+- if (!hd_ref_init(p))
+- return p;
++ return p;
+
+ out_free_info:
+ free_part_info(p);
+@@ -366,6 +371,8 @@ out_free_stats:
+ out_free:
+ kfree(p);
+ return ERR_PTR(err);
++out_remove_file:
++ device_remove_file(pdev, &dev_attr_whole_disk);
+ out_del:
+ kobject_put(p->holder_dir);
+ device_del(pdev);
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 65f50eccd49b..a48824deabc5 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+
+ mutex_lock(&genpd->lock);
+
+- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
++ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+ subdomain->name);
+ ret = -EBUSY;
+diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
+index b8e76f75073b..f8580900c273 100644
+--- a/drivers/base/power/opp/core.c
++++ b/drivers/base/power/opp/core.c
+@@ -809,8 +809,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
+ }
+
+ opp->u_volt = microvolt[0];
+- opp->u_volt_min = microvolt[1];
+- opp->u_volt_max = microvolt[2];
++
++ if (count == 1) {
++ opp->u_volt_min = opp->u_volt;
++ opp->u_volt_max = opp->u_volt;
++ } else {
++ opp->u_volt_min = microvolt[1];
++ opp->u_volt_max = microvolt[2];
++ }
+
+ if (!of_property_read_u32(opp->np, "opp-microamp", &val))
+ opp->u_amp = val;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 423f4ca7d712..80cf8add46ff 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
+ bio_segments(bio), blk_rq_bytes(cmd->rq));
++ /*
++ * This bio may be started from the middle of the 'bvec'
++ * because of bio splitting, so offset from the bvec must
++ * be passed to iov iterator
++ */
++ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+
+ cmd->iocb.ki_pos = pos;
+ cmd->iocb.ki_filp = file;
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index 562b5a4ca7b7..78a39f736c64 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 1740d75e8a32..216a94fed5b4 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+
+ */
+
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+
+ #include <asm/uaccess.h>
+
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ return ret;
+ }
+
+- for_each_child_of_node(pdev->dev.of_node, child) {
++ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ if (!child->name)
+ continue;
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 98fb8821382d..f53b02a6bc05 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -667,6 +667,11 @@ static int core_get_max_pstate(void)
+ if (err)
+ goto skip_tar;
+
++ /* For level 1 and 2, bits[23:16] contain the ratio */
++ if (tdp_ctrl)
++ tdp_ratio >>= 16;
++
++ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
+ if (tdp_ratio - 1 == tar) {
+ max_pstate = tar;
+ pr_debug("max_pstate=TAC %x\n", max_pstate);
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 3d9acc53d247..60fc0fa26fd3 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8ef06fad8b14..ab9945f2cb7a 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
++ /* Don't let anything leak to 'out' */
++ memset(&state, 0, sizeof(state));
++
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index b6f9f42e2985..a04fea4d0063 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
+ ptr->eptr = upper_32_bits(dma_addr);
+ }
+
++static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
++ struct talitos_ptr *src_ptr, bool is_sec1)
++{
++ dst_ptr->ptr = src_ptr->ptr;
++ if (!is_sec1)
++ dst_ptr->eptr = src_ptr->eptr;
++}
++
+ static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
+ bool is_sec1)
+ {
+@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+ (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+ : DMA_TO_DEVICE);
+-
+ /* hmac data */
+ desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+ if (sg_count > 1 &&
+ (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+ areq->assoclen,
+ &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
+-
+ to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
+ sizeof(struct talitos_ptr), 0);
+ desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
++
++ tbl_off += ret;
+ } else {
+ to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
+ desc->ptr[1].j_extent = 0;
+@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
+
+- if (sg_count > 1 &&
+- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+- sg_link_tbl_len,
+- &edesc->link_tbl[tbl_off])) > 1) {
+- tbl_off += ret;
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
++ areq->assoclen, 0);
++ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
++ areq->assoclen, sg_link_tbl_len,
++ &edesc->link_tbl[tbl_off])) >
++ 1) {
+ desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+ to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+ tbl_off *
+@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+- } else
+- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
++ tbl_off += ret;
++ } else {
++ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* cipher out */
+ desc->ptr[5].len = cpu_to_be16(cryptlen);
+@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ edesc->icv_ool = false;
+
+- if (sg_count > 1 &&
+- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
++ if (sg_count == 1) {
++ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
++ areq->assoclen, 0);
++ } else if ((sg_count =
++ sg_to_link_tbl_offset(areq->dst, sg_count,
+ areq->assoclen, cryptlen,
+- &edesc->link_tbl[tbl_off])) >
+- 1) {
++ &edesc->link_tbl[tbl_off])) > 1) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+
+ to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
+@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ edesc->icv_ool = true;
+- } else
+- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
++ } else {
++ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
++ }
+
+ /* iv out */
+ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+@@ -2519,21 +2533,11 @@ struct talitos_crypto_alg {
+ struct talitos_alg_template algt;
+ };
+
+-static int talitos_cra_init(struct crypto_tfm *tfm)
++static int talitos_init_common(struct talitos_ctx *ctx,
++ struct talitos_crypto_alg *talitos_alg)
+ {
+- struct crypto_alg *alg = tfm->__crt_alg;
+- struct talitos_crypto_alg *talitos_alg;
+- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_private *priv;
+
+- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+- talitos_alg = container_of(__crypto_ahash_alg(alg),
+- struct talitos_crypto_alg,
+- algt.alg.hash);
+- else
+- talitos_alg = container_of(alg, struct talitos_crypto_alg,
+- algt.alg.crypto);
+-
+ /* update context with ptr to dev */
+ ctx->dev = talitos_alg->dev;
+
+@@ -2551,10 +2555,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
+ return 0;
+ }
+
++static int talitos_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_alg *alg = tfm->__crt_alg;
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
++ talitos_alg = container_of(__crypto_ahash_alg(alg),
++ struct talitos_crypto_alg,
++ algt.alg.hash);
++ else
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.crypto);
++
++ return talitos_init_common(ctx, talitos_alg);
++}
++
+ static int talitos_cra_init_aead(struct crypto_aead *tfm)
+ {
+- talitos_cra_init(crypto_aead_tfm(tfm));
+- return 0;
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++ struct talitos_crypto_alg *talitos_alg;
++ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
++
++ talitos_alg = container_of(alg, struct talitos_crypto_alg,
++ algt.alg.aead);
++
++ return talitos_init_common(ctx, talitos_alg);
+ }
+
+ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 4f099ea29f83..c66133b5e852 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+- struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+- if (dws) {
+- /*
+- * We need controller-specific data to set up slave
+- * transfers.
+- */
+- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+- } else {
+- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+- }
++ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma_slave *dws = param;
+
+- if (!dws || dws->dma_dev != chan->device->dev)
++ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ /* We have to copy data since dws can be temporary storage */
+@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ * doesn't mean what you think it means), and status writeback.
+ */
+
++ /*
++ * We need controller-specific data to set up slave transfers.
++ */
++ if (chan->private && !dw_dma_filter(chan, chan->private)) {
++ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++ return -EINVAL;
++ }
++
+ /* Enable controller here if needed */
+ if (!dw->in_use)
+ dw_dma_on(dw);
+@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ spin_lock_irqsave(&dwc->lock, flags);
+ list_splice_init(&dwc->free_list, &list);
+ dwc->descs_allocated = 0;
++
++ /* Clear custom channel configuration */
++ dwc->src_id = 0;
++ dwc->dst_id = 0;
++
++ dwc->src_master = 0;
++ dwc->dst_master = 0;
++
+ dwc->initialized = false;
+
+ /* Disable interrupts */
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index 823ad728aecf..efc02b98e6ba 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+- return sr;
++ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index f06579c6d548..26da2865b025 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE BIT(15)
++#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA BIT(0)
+diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
+index a59061e4221a..55f5d33f6dc7 100644
+--- a/drivers/dma/pxa_dma.c
++++ b/drivers/dma/pxa_dma.c
+@@ -122,6 +122,7 @@ struct pxad_chan {
+ struct pxad_device {
+ struct dma_device slave;
+ int nr_chans;
++ int nr_requestors;
+ void __iomem *base;
+ struct pxad_phy *phys;
+ spinlock_t phy_lock; /* Phy association */
+@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
+ return;
+
+ /* clear the channel mapping in DRCMR */
+- if (chan->drcmr <= DRCMR_CHLNUM) {
++ if (chan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
+@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
+
+ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ {
++ struct pxad_device *pdev;
+ u32 reg, dalgn;
+
+ if (!phy->vchan)
+@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
+ "%s(); phy=%p(%d) misaligned=%d\n", __func__,
+ phy, phy->idx, misaligned);
+
+- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
++ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
++ if (phy->vchan->drcmr <= pdev->nr_requestors) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
+@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ {
+ u32 maxburst = 0, dev_addr = 0;
+ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
++ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
+
+ *dcmd = 0;
+ if (dir == DMA_DEV_TO_MEM) {
+@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ dev_addr = chan->cfg.src_addr;
+ *dev_src = dev_addr;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWSRC;
+ }
+ if (dir == DMA_MEM_TO_DEV) {
+@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
+ dev_addr = chan->cfg.dst_addr;
+ *dev_dst = dev_addr;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+- if (chan->drcmr <= DRCMR_CHLNUM)
++ if (chan->drcmr <= pdev->nr_requestors)
+ *dcmd |= PXA_DCMD_FLOWTRG;
+ }
+ if (dir == DMA_MEM_TO_MEM)
+@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
+
+ static int pxad_init_dmadev(struct platform_device *op,
+ struct pxad_device *pdev,
+- unsigned int nr_phy_chans)
++ unsigned int nr_phy_chans,
++ unsigned int nr_requestors)
+ {
+ int ret;
+ unsigned int i;
+ struct pxad_chan *c;
+
+ pdev->nr_chans = nr_phy_chans;
++ pdev->nr_requestors = nr_requestors;
+ INIT_LIST_HEAD(&pdev->slave.channels);
+ pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
+ pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
+@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
+ const struct of_device_id *of_id;
+ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct resource *iores;
+- int ret, dma_channels = 0;
++ int ret, dma_channels = 0, nb_requestors = 0;
+ const enum dma_slave_buswidth widths =
+ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+ DMA_SLAVE_BUSWIDTH_4_BYTES;
+@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
+ return PTR_ERR(pdev->base);
+
+ of_id = of_match_device(pxad_dt_ids, &op->dev);
+- if (of_id)
++ if (of_id) {
+ of_property_read_u32(op->dev.of_node, "#dma-channels",
+ &dma_channels);
+- else if (pdata && pdata->dma_channels)
++ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
++ &nb_requestors);
++ if (ret) {
++ dev_warn(pdev->slave.dev,
++ "#dma-requests set to default 32 as missing in OF: %d",
++ ret);
++ nb_requestors = 32;
++ };
++ } else if (pdata && pdata->dma_channels) {
+ dma_channels = pdata->dma_channels;
+- else
++ nb_requestors = pdata->nb_requestors;
++ } else {
+ dma_channels = 32; /* default 32 channel */
++ }
+
+ dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
+@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
+ pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ pdev->slave.dev = &op->dev;
+- ret = pxad_init_dmadev(op, pdev, dma_channels);
++ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
+ if (ret) {
+ dev_err(pdev->slave.dev, "unable to register\n");
+ return ret;
+@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)
+
+ platform_set_drvdata(op, pdev);
+ pxad_init_debugfs(pdev);
+- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
++ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
++ dma_channels, nb_requestors);
+ return 0;
+ }
+
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 01087a38da22..792bdae2b91d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+
+ i7_dev = get_i7core_dev(mce->socketid);
+ if (!i7_dev)
+- return NOTIFY_BAD;
++ return NOTIFY_DONE;
+
+ mci = i7_dev->mci;
+ pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index cbee3179ec08..37649221f81c 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ }
+
+ ch_way = TAD_CH(reg) + 1;
+- sck_way = 1 << TAD_SOCK(reg);
++ sck_way = TAD_SOCK(reg);
+
+ if (ch_way == 3)
+ idx = addr >> 6;
+@@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ switch(ch_way) {
+ case 2:
+ case 4:
+- sck_xch = 1 << sck_way * (ch_way >> 1);
++ sck_xch = (1 << sck_way) * (ch_way >> 1);
+ break;
+ default:
+ sprintf(msg, "Invalid mirror set. Can't decode addr");
+@@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+- ch_addr /= ch_way * sck_way;
++ ch_addr /= sck_xch;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+
+@@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+
+ mci = get_mci_for_node_id(mce->socketid);
+ if (!mci)
+- return NOTIFY_BAD;
++ return NOTIFY_DONE;
+ pvt = mci->pvt_info;
+
+ /*
+diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
+index 9f9ea334399c..b6cb30d207be 100644
+--- a/drivers/extcon/extcon-max77843.c
++++ b/drivers/extcon/extcon-max77843.c
+@@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
+ /* Clear IRQ bits before request IRQs */
+ ret = regmap_bulk_read(max77843->regmap_muic,
+ MAX77843_MUIC_REG_INT1, info->status,
+- MAX77843_MUIC_IRQ_NUM);
++ MAX77843_MUIC_STATUS_NUM);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+ goto err_muic_irq;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 027ca212179f..3b52677f459a 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -180,6 +180,7 @@ static int generic_ops_register(void)
+ {
+ generic_ops.get_variable = efi.get_variable;
+ generic_ops.set_variable = efi.set_variable;
++ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ generic_ops.get_next_variable = efi.get_next_variable;
+ generic_ops.query_variable_store = efi_query_variable_store;
+
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ { NULL_GUID, "", NULL },
+ };
+
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ * final "*" character matches any trailing characters @var_name,
++ * including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ * that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+ {
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+- char u = var_name[*match];
+
+- /* Wildcard in the matching name means we've matched */
+- if (c == '*')
++ switch (c) {
++ case '*':
++ /* Wildcard in @match_name means we've matched. */
+ return true;
+
+- /* Case sensitive match */
+- if (!c && *match == len)
+- return true;
++ case '\0':
++ /* @match_name has ended. Has @var_name too? */
++ return (*match == len);
+
+- if (c != u)
++ default:
++ /*
++ * We've reached a non-wildcard char in @match_name.
++ * Continue only if there's an identical character in
++ * @var_name.
++ */
++ if (*match < len && c == var_name[*match])
++ continue;
+ return false;
+-
+- if (!c)
+- return true;
++ }
+ }
+- return true;
+ }
+
+ bool
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index bb1099c549df..053fc2f465df 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1673,6 +1673,7 @@ struct amdgpu_uvd {
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
++ unsigned fw_version;
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ struct delayed_work idle_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 8ac49812a716..5a8fbadbd27b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
+ return amdgpu_atpx_priv.atpx_detected;
+ }
+
+-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+- return amdgpu_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+ * amdgpu_atpx_call - call an ATPX method
+ *
+@@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
+ */
+ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
+ {
++ /* make sure required functions are enabled */
++ /* dGPU power control is required */
++ atpx->functions.power_cntl = true;
++
+ if (atpx->functions.px_params) {
+ union acpi_object *info;
+ struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 9d88023df836..c961fe093e12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -61,12 +61,6 @@ static const char *amdgpu_asic_name[] = {
+ "LAST",
+ };
+
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool amdgpu_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ bool amdgpu_device_is_px(struct drm_device *dev)
+ {
+ struct amdgpu_device *adev = dev->dev_private;
+@@ -1475,7 +1469,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
+
+ if (amdgpu_runtime_pm == 1)
+ runtime = true;
+- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
++ if (amdgpu_device_is_px(ddev))
+ runtime = true;
+ vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
+ if (runtime)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index e23843f4d877..4488e82f87b0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ fw_info.feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+- fw_info.ver = 0;
++ fw_info.ver = adev->uvd.fw_version;
+ fw_info.feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+index 064ebb347074..89df7871653d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+@@ -52,7 +52,7 @@ struct amdgpu_hpd;
+
+ #define AMDGPU_MAX_HPD_PINS 6
+ #define AMDGPU_MAX_CRTCS 6
+-#define AMDGPU_MAX_AFMT_BLOCKS 7
++#define AMDGPU_MAX_AFMT_BLOCKS 9
+
+ enum amdgpu_rmx_type {
+ RMX_OFF,
+@@ -308,8 +308,8 @@ struct amdgpu_mode_info {
+ struct atom_context *atom_context;
+ struct card_info *atom_card_info;
+ bool mode_config_initialized;
+- struct amdgpu_crtc *crtcs[6];
+- struct amdgpu_afmt *afmt[7];
++ struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
++ struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
+ /* DVI-I properties */
+ struct drm_property *coherent_mode_property;
+ /* DAC enable load detect */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1598 +index 53f987aeeacf..3b35ad83867c 100644
1599 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1600 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1601 +@@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
1602 + DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
1603 + version_major, version_minor, family_id);
1604 +
1605 ++ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
1606 ++ (family_id << 8));
1607 ++
1608 + bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
1609 + + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
1610 + r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
1611 +@@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
1612 + memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
1613 + (adev->uvd.fw->size) - offset);
1614 +
1615 ++ cancel_delayed_work_sync(&adev->uvd.idle_work);
1616 ++
1617 + size = amdgpu_bo_size(adev->uvd.vcpu_bo);
1618 + size -= le32_to_cpu(hdr->ucode_size_bytes);
1619 + ptr = adev->uvd.cpu_addr;
1620 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1621 +index a745eeeb5d82..bb0da76051a1 100644
1622 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1623 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1624 +@@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
1625 + if (i == AMDGPU_MAX_VCE_HANDLES)
1626 + return 0;
1627 +
1628 ++ cancel_delayed_work_sync(&adev->vce.idle_work);
1629 + /* TODO: suspending running encoding sessions isn't supported */
1630 + return -EINVAL;
1631 + }
1632 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1633 +index aa491540ba85..946300764609 100644
1634 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1635 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1636 +@@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1637 + unsigned vm_id, uint64_t pd_addr)
1638 + {
1639 + int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
1640 +- uint32_t seq = ring->fence_drv.sync_seq;
1641 ++ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
1642 + uint64_t addr = ring->fence_drv.gpu_addr;
1643 +
1644 + amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1645 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1646 +index c34c393e9aea..d5e19b5fbbfb 100644
1647 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1648 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1649 +@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
1650 + union SQ_CMD_BITS *in_reg_sq_cmd,
1651 + union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
1652 + {
1653 +- int status;
1654 ++ int status = 0;
1655 + union SQ_CMD_BITS reg_sq_cmd;
1656 + union GRBM_GFX_INDEX_BITS reg_gfx_index;
1657 + struct HsaDbgWaveMsgAMDGen2 *pMsg;
1658 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1659 +index 39d7e2e15c11..d268bf18a662 100644
1660 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
1661 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1662 +@@ -1665,13 +1665,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1663 + struct drm_dp_mst_branch *mstb;
1664 + int len, ret, port_num;
1665 +
1666 ++ port = drm_dp_get_validated_port_ref(mgr, port);
1667 ++ if (!port)
1668 ++ return -EINVAL;
1669 ++
1670 + port_num = port->port_num;
1671 + mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1672 + if (!mstb) {
1673 + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1674 +
1675 +- if (!mstb)
1676 ++ if (!mstb) {
1677 ++ drm_dp_put_port(port);
1678 + return -EINVAL;
1679 ++ }
1680 + }
1681 +
1682 + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1683 +@@ -1697,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1684 + kfree(txmsg);
1685 + fail_put:
1686 + drm_dp_put_mst_branch_device(mstb);
1687 ++ drm_dp_put_port(port);
1688 + return ret;
1689 + }
1690 +
1691 +@@ -1779,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1692 + req_payload.start_slot = cur_slots;
1693 + if (mgr->proposed_vcpis[i]) {
1694 + port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1695 ++ port = drm_dp_get_validated_port_ref(mgr, port);
1696 ++ if (!port) {
1697 ++ mutex_unlock(&mgr->payload_lock);
1698 ++ return -EINVAL;
1699 ++ }
1700 + req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1701 + } else {
1702 + port = NULL;
1703 +@@ -1804,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1704 + mgr->payloads[i].payload_state = req_payload.payload_state;
1705 + }
1706 + cur_slots += req_payload.num_slots;
1707 ++
1708 ++ if (port)
1709 ++ drm_dp_put_port(port);
1710 + }
1711 +
1712 + for (i = 0; i < mgr->max_payloads; i++) {
1713 +@@ -2109,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1714 +
1715 + if (mgr->mst_primary) {
1716 + int sret;
1717 ++ u8 guid[16];
1718 ++
1719 + sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1720 + if (sret != DP_RECEIVER_CAP_SIZE) {
1721 + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1722 +@@ -2123,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1723 + ret = -1;
1724 + goto out_unlock;
1725 + }
1726 ++
1727 ++ /* Some hubs forget their guids after they resume */
1728 ++ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
1729 ++ if (sret != 16) {
1730 ++ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1731 ++ ret = -1;
1732 ++ goto out_unlock;
1733 ++ }
1734 ++ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
1735 ++
1736 + ret = 0;
1737 + } else
1738 + ret = -1;
1739 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1740 +index f859a5b87ed4..afa81691163d 100644
1741 +--- a/drivers/gpu/drm/i915/intel_display.c
1742 ++++ b/drivers/gpu/drm/i915/intel_display.c
1743 +@@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
1744 + intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
1745 +
1746 + return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
1747 +- &state->scaler_state.scaler_id, DRM_ROTATE_0,
1748 ++ &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
1749 + state->pipe_src_w, state->pipe_src_h,
1750 + adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
1751 + }
1752 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
1753 +index 0639275fc471..06bd9257acdc 100644
1754 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
1755 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
1756 +@@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1757 + struct intel_connector *intel_connector = to_intel_connector(connector);
1758 + struct drm_device *dev = connector->dev;
1759 +
1760 ++ intel_connector->unregister(intel_connector);
1761 ++
1762 + /* need to nuke the connector */
1763 + drm_modeset_lock_all(dev);
1764 + if (connector->state->crtc) {
1765 +@@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1766 +
1767 + WARN(ret, "Disabling mst crtc failed with %i\n", ret);
1768 + }
1769 +- drm_modeset_unlock_all(dev);
1770 +
1771 +- intel_connector->unregister(intel_connector);
1772 +-
1773 +- drm_modeset_lock_all(dev);
1774 + intel_connector_remove_from_fbdev(intel_connector);
1775 + drm_connector_cleanup(connector);
1776 + drm_modeset_unlock_all(dev);
1777 +diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
1778 +index d69547a65dbb..7058f75c7b42 100644
1779 +--- a/drivers/gpu/drm/i915/intel_lrc.c
1780 ++++ b/drivers/gpu/drm/i915/intel_lrc.c
1781 +@@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
1782 + if (unlikely(total_bytes > remain_usable)) {
1783 + /*
1784 + * The base request will fit but the reserved space
1785 +- * falls off the end. So only need to to wait for the
1786 +- * reserved size after flushing out the remainder.
1787 ++ * falls off the end. So no immediate wrap is needed;
1788 ++ * we effectively only need to wait for the reserved
1789 ++ * size, counted from the start of the ringbuffer.
1790 + */
1791 + wait_bytes = remain_actual + ringbuf->reserved_size;
1792 +- need_wrap = true;
1793 + } else if (total_bytes > ringbuf->space) {
1794 + /* No wrapping required, just waiting. */
1795 + wait_bytes = total_bytes;
1796 +diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1797 +index f6b2a814e629..9d48443bca2e 100644
1798 +--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1799 ++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1800 +@@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
1801 + return 0;
1802 + }
1803 +
1804 ++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
1805 ++{
1806 ++ struct drm_i915_private *dev_priv = to_i915(ring->dev);
1807 ++
1808 ++ if (!dev_priv->status_page_dmah)
1809 ++ return;
1810 ++
1811 ++ drm_pci_free(ring->dev, dev_priv->status_page_dmah);
1812 ++ ring->status_page.page_addr = NULL;
1813 ++}
1814 ++
1815 + static void cleanup_status_page(struct intel_engine_cs *ring)
1816 + {
1817 + struct drm_i915_gem_object *obj;
1818 +@@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
1819 +
1820 + static int init_status_page(struct intel_engine_cs *ring)
1821 + {
1822 +- struct drm_i915_gem_object *obj;
1823 ++ struct drm_i915_gem_object *obj = ring->status_page.obj;
1824 +
1825 +- if ((obj = ring->status_page.obj) == NULL) {
1826 ++ if (obj == NULL) {
1827 + unsigned flags;
1828 + int ret;
1829 +
1830 +@@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1831 + if (ret)
1832 + goto error;
1833 + } else {
1834 +- BUG_ON(ring->id != RCS);
1835 ++ WARN_ON(ring->id != RCS);
1836 + ret = init_phys_status_page(ring);
1837 + if (ret)
1838 + goto error;
1839 +@@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1840 + if (ring->cleanup)
1841 + ring->cleanup(ring);
1842 +
1843 +- cleanup_status_page(ring);
1844 ++ if (I915_NEED_GFX_HWS(ring->dev)) {
1845 ++ cleanup_status_page(ring);
1846 ++ } else {
1847 ++ WARN_ON(ring->id != RCS);
1848 ++ cleanup_phys_status_page(ring);
1849 ++ }
1850 +
1851 + i915_cmd_parser_fini_ring(ring);
1852 + i915_gem_batch_pool_fini(&ring->batch_pool);
1853 +@@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
1854 + if (unlikely(total_bytes > remain_usable)) {
1855 + /*
1856 + * The base request will fit but the reserved space
1857 +- * falls off the end. So only need to to wait for the
1858 +- * reserved size after flushing out the remainder.
1859 ++ * falls off the end. So no immediate wrap is needed;
1860 ++ * we effectively only need to wait for the reserved
1861 ++ * size, counted from the start of the ringbuffer.
1862 + */
1863 + wait_bytes = remain_actual + ringbuf->reserved_size;
1864 +- need_wrap = true;
1865 + } else if (total_bytes > ringbuf->space) {
1866 + /* No wrapping required, just waiting. */
1867 + wait_bytes = total_bytes;
1868 +diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
1869 +index 43cba129a0c0..cc91ae832ffb 100644
1870 +--- a/drivers/gpu/drm/i915/intel_uncore.c
1871 ++++ b/drivers/gpu/drm/i915/intel_uncore.c
1872 +@@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1873 + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1874 + dev_priv->uncore.funcs.force_wake_get =
1875 + fw_domains_get_with_thread_status;
1876 +- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1877 ++ if (IS_HASWELL(dev))
1878 ++ dev_priv->uncore.funcs.force_wake_put =
1879 ++ fw_domains_put_with_fifo;
1880 ++ else
1881 ++ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1882 + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1883 + FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1884 + } else if (IS_IVYBRIDGE(dev)) {
1885 +diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1886 +index 3216e157a8a0..89da47234016 100644
1887 +--- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1888 ++++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1889 +@@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
1890 + struct nvkm_ramht *ramht = *pramht;
1891 + if (ramht) {
1892 + nvkm_gpuobj_del(&ramht->gpuobj);
1893 +- kfree(*pramht);
1894 ++ vfree(*pramht);
1895 + *pramht = NULL;
1896 + }
1897 + }
1898 +@@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
1899 + struct nvkm_ramht *ramht;
1900 + int ret, i;
1901 +
1902 +- if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
1903 +- sizeof(*ramht->data), GFP_KERNEL)))
1904 ++ if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
1905 ++ (size >> 3) * sizeof(*ramht->data))))
1906 + return -ENOMEM;
1907 +
1908 + ramht->device = device;
1909 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1910 +index 9f5dfc85147a..36655a74c538 100644
1911 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1912 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1913 +@@ -1717,6 +1717,8 @@ gf100_gr_init(struct gf100_gr *gr)
1914 +
1915 + gf100_gr_mmio(gr, gr->func->mmio);
1916 +
1917 ++ nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1918 ++
1919 + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1920 + for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1921 + do {
1922 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
1923 +index 183aea1abebc..5edebf495c07 100644
1924 +--- a/drivers/gpu/drm/qxl/qxl_display.c
1925 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
1926 +@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
1927 +
1928 + qxl_bo_kunmap(user_bo);
1929 +
1930 ++ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
1931 ++ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
1932 ++ qcrtc->hot_spot_x = hot_x;
1933 ++ qcrtc->hot_spot_y = hot_y;
1934 ++
1935 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
1936 + cmd->type = QXL_CURSOR_SET;
1937 +- cmd->u.set.position.x = qcrtc->cur_x;
1938 +- cmd->u.set.position.y = qcrtc->cur_y;
1939 ++ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
1940 ++ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
1941 +
1942 + cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
1943 +
1944 +@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
1945 +
1946 + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
1947 + cmd->type = QXL_CURSOR_MOVE;
1948 +- cmd->u.position.x = qcrtc->cur_x;
1949 +- cmd->u.position.y = qcrtc->cur_y;
1950 ++ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
1951 ++ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
1952 + qxl_release_unmap(qdev, release, &cmd->release_info);
1953 +
1954 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
1955 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
1956 +index 01a86948eb8c..3ab90179e9ab 100644
1957 +--- a/drivers/gpu/drm/qxl/qxl_drv.h
1958 ++++ b/drivers/gpu/drm/qxl/qxl_drv.h
1959 +@@ -135,6 +135,8 @@ struct qxl_crtc {
1960 + int index;
1961 + int cur_x;
1962 + int cur_y;
1963 ++ int hot_spot_x;
1964 ++ int hot_spot_y;
1965 + };
1966 +
1967 + struct qxl_output {
1968 +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1969 +index 2ad462896896..32491355a1d4 100644
1970 +--- a/drivers/gpu/drm/radeon/evergreen.c
1971 ++++ b/drivers/gpu/drm/radeon/evergreen.c
1972 +@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
1973 + WREG32(VM_CONTEXT1_CNTL, 0);
1974 + }
1975 +
1976 ++static const unsigned ni_dig_offsets[] =
1977 ++{
1978 ++ NI_DIG0_REGISTER_OFFSET,
1979 ++ NI_DIG1_REGISTER_OFFSET,
1980 ++ NI_DIG2_REGISTER_OFFSET,
1981 ++ NI_DIG3_REGISTER_OFFSET,
1982 ++ NI_DIG4_REGISTER_OFFSET,
1983 ++ NI_DIG5_REGISTER_OFFSET
1984 ++};
1985 ++
1986 ++static const unsigned ni_tx_offsets[] =
1987 ++{
1988 ++ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
1989 ++ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
1990 ++ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
1991 ++ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
1992 ++ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
1993 ++ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
1994 ++};
1995 ++
1996 ++static const unsigned evergreen_dp_offsets[] =
1997 ++{
1998 ++ EVERGREEN_DP0_REGISTER_OFFSET,
1999 ++ EVERGREEN_DP1_REGISTER_OFFSET,
2000 ++ EVERGREEN_DP2_REGISTER_OFFSET,
2001 ++ EVERGREEN_DP3_REGISTER_OFFSET,
2002 ++ EVERGREEN_DP4_REGISTER_OFFSET,
2003 ++ EVERGREEN_DP5_REGISTER_OFFSET
2004 ++};
2005 ++
2006 ++
2007 ++/*
2008 ++ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
2009 ++ * We go from crtc to connector, which is not reliable since the lookup
2010 ++ * really should run in the opposite direction. If the crtc is enabled,
2011 ++ * find the dig_fe which selects this crtc and ensure that it is enabled.
2012 ++ * If such a dig_fe is found, find the dig_be which selects that dig_fe
2013 ++ * and ensure that it is enabled and in DP_SST mode.
2014 ++ * If UNIPHY_PLL_CONTROL1 is enabled, we should disconnect the timing
2015 ++ * from the dp symbol clocks.
2016 ++ */
2017 ++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2018 ++ unsigned crtc_id, unsigned *ret_dig_fe)
2019 ++{
2020 ++ unsigned i;
2021 ++ unsigned dig_fe;
2022 ++ unsigned dig_be;
2023 ++ unsigned dig_en_be;
2024 ++ unsigned uniphy_pll;
2025 ++ unsigned digs_fe_selected;
2026 ++ unsigned dig_be_mode;
2027 ++ unsigned dig_fe_mask;
2028 ++ bool is_enabled = false;
2029 ++ bool found_crtc = false;
2030 ++
2031 ++ /* loop through all running dig_fe to find selected crtc */
2032 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2033 ++ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2034 ++ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2035 ++ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2036 ++ /* found running pipe */
2037 ++ found_crtc = true;
2038 ++ dig_fe_mask = 1 << i;
2039 ++ dig_fe = i;
2040 ++ break;
2041 ++ }
2042 ++ }
2043 ++
2044 ++ if (found_crtc) {
2045 ++ /* loop through all running dig_be to find selected dig_fe */
2046 ++ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2047 ++ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2048 ++ /* is this dig_fe selected by the dig_be? */
2049 ++ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2050 ++ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2051 ++ if (dig_fe_mask & digs_fe_selected &&
2052 ++ /* is the dig_be in sst mode? */
2053 ++ dig_be_mode == NI_DIG_BE_DPSST) {
2054 ++ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2055 ++ ni_dig_offsets[i]);
2056 ++ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2057 ++ ni_tx_offsets[i]);
2058 ++ /* dig_be is enabled and the tx is running */
2059 ++ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2060 ++ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2061 ++ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2062 ++ is_enabled = true;
2063 ++ *ret_dig_fe = dig_fe;
2064 ++ break;
2065 ++ }
2066 ++ }
2067 ++ }
2068 ++ }
2069 ++
2070 ++ return is_enabled;
2071 ++}
2072 ++
2073 ++/*
2074 ++ * Blank the dig when in dp sst mode;
2075 ++ * the dig ignores the crtc timing.
2076 ++ */
2077 ++static void evergreen_blank_dp_output(struct radeon_device *rdev,
2078 ++ unsigned dig_fe)
2079 ++{
2080 ++ unsigned stream_ctrl;
2081 ++ unsigned fifo_ctrl;
2082 ++ unsigned counter = 0;
2083 ++
2084 ++ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2085 ++ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2086 ++ return;
2087 ++ }
2088 ++
2089 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2090 ++ evergreen_dp_offsets[dig_fe]);
2091 ++ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2092 ++ DRM_ERROR("dig %d , should be enable\n", dig_fe);
2093 ++ return;
2094 ++ }
2095 ++
2096 ++ stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2097 ++ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2098 ++ evergreen_dp_offsets[dig_fe], stream_ctrl);
2099 ++
2100 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2101 ++ evergreen_dp_offsets[dig_fe]);
2102 ++ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2103 ++ msleep(1);
2104 ++ counter++;
2105 ++ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2106 ++ evergreen_dp_offsets[dig_fe]);
2107 ++ }
2108 ++ if (counter >= 32)
2109 ++ DRM_ERROR("counter exceeds %d\n", counter);
2110 ++
2111 ++ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2112 ++ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2113 ++ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2114 ++
2115 ++}
2116 ++
2117 + void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2118 + {
2119 + u32 crtc_enabled, tmp, frame_count, blackout;
2120 + int i, j;
2121 ++ unsigned dig_fe;
2122 +
2123 + if (!ASIC_IS_NODCE(rdev)) {
2124 + save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2125 +@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2126 + break;
2127 + udelay(1);
2128 + }
2129 +-
2130 ++ /*
2131 ++ * We should disable the dig here if it drives dp sst, but we
2132 ++ * are in radeon_device_init and the topology is unknown; it
2133 ++ * only becomes available after radeon_modeset_init. The method
2134 ++ * radeon_atom_encoder_dpms_dig would do the job if we
2135 ++ * initialized it properly; for now we do it manually.
2136 ++ */
2137 ++ if (ASIC_IS_DCE5(rdev) &&
2138 ++ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2139 ++ evergreen_blank_dp_output(rdev, dig_fe);
2140 ++ /* we could remove the 6 lines below */
2141 + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2142 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2143 + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
2144 +diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
2145 +index aa939dfed3a3..b436badf9efa 100644
2146 +--- a/drivers/gpu/drm/radeon/evergreen_reg.h
2147 ++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
2148 +@@ -250,8 +250,43 @@
2149 +
2150 + /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
2151 + #define EVERGREEN_HDMI_BASE 0x7030
2152 ++/* DIG block */
2153 ++#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
2154 ++#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
2155 ++#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
2156 ++#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
2157 ++#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
2158 ++#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
2159 ++
2160 ++
2161 ++#define NI_DIG_FE_CNTL 0x7000
2162 ++# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
2163 ++# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
2164 ++
2165 ++
2166 ++#define NI_DIG_BE_CNTL 0x7140
2167 ++# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
2168 ++# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
2169 ++
2170 ++#define NI_DIG_BE_EN_CNTL 0x7144
2171 ++# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
2172 ++# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
2173 ++# define NI_DIG_BE_DPSST 0
2174 +
2175 + /* Display Port block */
2176 ++#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
2177 ++#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
2178 ++#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
2179 ++#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
2180 ++#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
2181 ++#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
2182 ++
2183 ++
2184 ++#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
2185 ++# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
2186 ++# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
2187 ++#define EVERGREEN_DP_STEER_FIFO 0x7310
2188 ++# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
2189 + #define EVERGREEN_DP_SEC_CNTL 0x7280
2190 + # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
2191 + # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
2192 +@@ -266,4 +301,15 @@
2193 + # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
2194 + # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
2195 +
2196 ++/* DCIO_UNIPHY block */
2197 ++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
2198 ++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
2199 ++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
2200 ++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
2201 ++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
2202 ++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
2203 ++
2204 ++#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
2205 ++# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
2206 ++
2207 + #endif
2208 +diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2209 +index 9bc408c9f9f6..c4b4f298a283 100644
2210 +--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2211 ++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2212 +@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
2213 + return radeon_atpx_priv.atpx_detected;
2214 + }
2215 +
2216 +-bool radeon_has_atpx_dgpu_power_cntl(void) {
2217 +- return radeon_atpx_priv.atpx.functions.power_cntl;
2218 +-}
2219 +-
2220 + /**
2221 + * radeon_atpx_call - call an ATPX method
2222 + *
2223 +@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
2224 + */
2225 + static int radeon_atpx_validate(struct radeon_atpx *atpx)
2226 + {
2227 ++ /* make sure required functions are enabled */
2228 ++ /* dGPU power control is required */
2229 ++ atpx->functions.power_cntl = true;
2230 ++
2231 + if (atpx->functions.px_params) {
2232 + union acpi_object *info;
2233 + struct atpx_px_params output;
2234 +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2235 +index 340f3f549f29..9cfc1c3e1965 100644
2236 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c
2237 ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2238 +@@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2239 + rdev->mode_info.dither_property,
2240 + RADEON_FMT_DITHER_DISABLE);
2241 +
2242 +- if (radeon_audio != 0)
2243 ++ if (radeon_audio != 0) {
2244 + drm_object_attach_property(&radeon_connector->base.base,
2245 + rdev->mode_info.audio_property,
2246 + RADEON_AUDIO_AUTO);
2247 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2248 ++ }
2249 + if (ASIC_IS_DCE5(rdev))
2250 + drm_object_attach_property(&radeon_connector->base.base,
2251 + rdev->mode_info.output_csc_property,
2252 +@@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2253 + drm_object_attach_property(&radeon_connector->base.base,
2254 + rdev->mode_info.audio_property,
2255 + RADEON_AUDIO_AUTO);
2256 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2257 + }
2258 + if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2259 + radeon_connector->dac_load_detect = true;
2260 +@@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2261 + drm_object_attach_property(&radeon_connector->base.base,
2262 + rdev->mode_info.audio_property,
2263 + RADEON_AUDIO_AUTO);
2264 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2265 + }
2266 + if (ASIC_IS_DCE5(rdev))
2267 + drm_object_attach_property(&radeon_connector->base.base,
2268 +@@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2269 + drm_object_attach_property(&radeon_connector->base.base,
2270 + rdev->mode_info.audio_property,
2271 + RADEON_AUDIO_AUTO);
2272 ++ radeon_connector->audio = RADEON_AUDIO_AUTO;
2273 + }
2274 + if (ASIC_IS_DCE5(rdev))
2275 + drm_object_attach_property(&radeon_connector->base.base,
2276 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2277 +index f78f111e68de..c566993a2ec3 100644
2278 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2279 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2280 +@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
2281 + "LAST",
2282 + };
2283 +
2284 +-#if defined(CONFIG_VGA_SWITCHEROO)
2285 +-bool radeon_has_atpx_dgpu_power_cntl(void);
2286 +-#else
2287 +-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
2288 +-#endif
2289 +-
2290 + #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
2291 + #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
2292 +
2293 +@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
2294 + * ignore it */
2295 + vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
2296 +
2297 +- if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
2298 ++ if (rdev->flags & RADEON_IS_PX)
2299 + runtime = true;
2300 + vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
2301 + if (runtime)
2302 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
2303 +index e06ac546a90f..f342aad79cc6 100644
2304 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
2305 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
2306 +@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
2307 + {
2308 + struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
2309 +
2310 ++ if (radeon_ttm_tt_has_userptr(bo->ttm))
2311 ++ return -EPERM;
2312 + return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
2313 + }
2314 +
2315 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2316 +index 7285adb27099..caa73de584a5 100644
2317 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2318 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2319 +@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2320 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2321 + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2322 + { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2323 ++ { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2324 + { 0, 0, 0, 0 },
2325 + };
2326 +
2327 +diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
2328 +index 83e9f591a54b..e7a348807f0c 100644
2329 +--- a/drivers/hwtracing/stm/Kconfig
2330 ++++ b/drivers/hwtracing/stm/Kconfig
2331 +@@ -1,6 +1,7 @@
2332 + config STM
2333 + tristate "System Trace Module devices"
2334 + select CONFIGFS_FS
2335 ++ select SRCU
2336 + help
2337 + A System Trace Module (STM) is a device exporting data in System
2338 + Trace Protocol (STP) format as defined by MIPI STP standards.
2339 +diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
2340 +index 714bdc837769..b167ab25310a 100644
2341 +--- a/drivers/i2c/busses/i2c-cpm.c
2342 ++++ b/drivers/i2c/busses/i2c-cpm.c
2343 +@@ -116,8 +116,8 @@ struct cpm_i2c {
2344 + cbd_t __iomem *rbase;
2345 + u_char *txbuf[CPM_MAXBD];
2346 + u_char *rxbuf[CPM_MAXBD];
2347 +- u32 txdma[CPM_MAXBD];
2348 +- u32 rxdma[CPM_MAXBD];
2349 ++ dma_addr_t txdma[CPM_MAXBD];
2350 ++ dma_addr_t rxdma[CPM_MAXBD];
2351 + };
2352 +
2353 + static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
2354 +diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
2355 +index b29c7500461a..f54ece8fce78 100644
2356 +--- a/drivers/i2c/busses/i2c-exynos5.c
2357 ++++ b/drivers/i2c/busses/i2c-exynos5.c
2358 +@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2359 + return -EIO;
2360 + }
2361 +
2362 +- clk_prepare_enable(i2c->clk);
2363 ++ ret = clk_enable(i2c->clk);
2364 ++ if (ret)
2365 ++ return ret;
2366 +
2367 + for (i = 0; i < num; i++, msgs++) {
2368 + stop = (i == num - 1);
2369 +@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2370 + }
2371 +
2372 + out:
2373 +- clk_disable_unprepare(i2c->clk);
2374 ++ clk_disable(i2c->clk);
2375 + return ret;
2376 + }
2377 +
2378 +@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2379 + return -ENOENT;
2380 + }
2381 +
2382 +- clk_prepare_enable(i2c->clk);
2383 ++ ret = clk_prepare_enable(i2c->clk);
2384 ++ if (ret)
2385 ++ return ret;
2386 +
2387 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2388 + i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
2389 +@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2390 +
2391 + platform_set_drvdata(pdev, i2c);
2392 +
2393 ++ clk_disable(i2c->clk);
2394 ++
2395 ++ return 0;
2396 ++
2397 + err_clk:
2398 + clk_disable_unprepare(i2c->clk);
2399 + return ret;
2400 +@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
2401 +
2402 + i2c_del_adapter(&i2c->adap);
2403 +
2404 ++ clk_unprepare(i2c->clk);
2405 ++
2406 + return 0;
2407 + }
2408 +
2409 +@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
2410 +
2411 + i2c->suspended = 1;
2412 +
2413 ++ clk_unprepare(i2c->clk);
2414 ++
2415 + return 0;
2416 + }
2417 +
2418 +@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2419 + struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
2420 + int ret = 0;
2421 +
2422 +- clk_prepare_enable(i2c->clk);
2423 ++ ret = clk_prepare_enable(i2c->clk);
2424 ++ if (ret)
2425 ++ return ret;
2426 +
2427 + ret = exynos5_hsi2c_clock_setup(i2c);
2428 + if (ret) {
2429 +@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2430 + }
2431 +
2432 + exynos5_i2c_init(i2c);
2433 +- clk_disable_unprepare(i2c->clk);
2434 ++ clk_disable(i2c->clk);
2435 + i2c->suspended = 0;
2436 +
2437 + return 0;
2438 +diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
2439 +index 6b4e8a008bc0..564adf3116e8 100644
2440 +--- a/drivers/infiniband/core/ucm.c
2441 ++++ b/drivers/infiniband/core/ucm.c
2442 +@@ -48,6 +48,7 @@
2443 +
2444 + #include <asm/uaccess.h>
2445 +
2446 ++#include <rdma/ib.h>
2447 + #include <rdma/ib_cm.h>
2448 + #include <rdma/ib_user_cm.h>
2449 + #include <rdma/ib_marshall.h>
2450 +@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
2451 + struct ib_ucm_cmd_hdr hdr;
2452 + ssize_t result;
2453 +
2454 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2455 ++ return -EACCES;
2456 ++
2457 + if (len < sizeof(hdr))
2458 + return -EINVAL;
2459 +
2460 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2461 +index 8b5a934e1133..886f61ea6cc7 100644
2462 +--- a/drivers/infiniband/core/ucma.c
2463 ++++ b/drivers/infiniband/core/ucma.c
2464 +@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
2465 + struct rdma_ucm_cmd_hdr hdr;
2466 + ssize_t ret;
2467 +
2468 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2469 ++ return -EACCES;
2470 ++
2471 + if (len < sizeof(hdr))
2472 + return -EINVAL;
2473 +
2474 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
2475 +index e3ef28861be6..24f3ca2c4ad7 100644
2476 +--- a/drivers/infiniband/core/uverbs_main.c
2477 ++++ b/drivers/infiniband/core/uverbs_main.c
2478 +@@ -48,6 +48,8 @@
2479 +
2480 + #include <asm/uaccess.h>
2481 +
2482 ++#include <rdma/ib.h>
2483 ++
2484 + #include "uverbs.h"
2485 +
2486 + MODULE_AUTHOR("Roland Dreier");
2487 +@@ -682,6 +684,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
2488 + int srcu_key;
2489 + ssize_t ret;
2490 +
2491 ++ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2492 ++ return -EACCES;
2493 ++
2494 + if (count < sizeof hdr)
2495 + return -EINVAL;
2496 +
2497 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2498 +index c4e091528390..721d63f5b461 100644
2499 +--- a/drivers/infiniband/hw/mlx5/main.c
2500 ++++ b/drivers/infiniband/hw/mlx5/main.c
2501 +@@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
2502 + sizeof(struct mlx5_wqe_ctrl_seg)) /
2503 + sizeof(struct mlx5_wqe_data_seg);
2504 + props->max_sge = min(max_rq_sg, max_sq_sg);
2505 +- props->max_sge_rd = props->max_sge;
2506 ++ props->max_sge_rd = MLX5_MAX_SGE_RD;
2507 + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
2508 + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
2509 + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
2510 +diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
2511 +index e449e394963f..24f4a782e0f4 100644
2512 +--- a/drivers/infiniband/hw/qib/qib_file_ops.c
2513 ++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
2514 +@@ -45,6 +45,8 @@
2515 + #include <linux/export.h>
2516 + #include <linux/uio.h>
2517 +
2518 ++#include <rdma/ib.h>
2519 ++
2520 + #include "qib.h"
2521 + #include "qib_common.h"
2522 + #include "qib_user_sdma.h"
2523 +@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2524 + ssize_t ret = 0;
2525 + void *dest;
2526 +
2527 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2528 ++ return -EACCES;
2529 ++
2530 + if (count < sizeof(cmd.type)) {
2531 + ret = -EINVAL;
2532 + goto bail;
2533 +diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
2534 +index 3f02e0e03d12..67aab86048ad 100644
2535 +--- a/drivers/input/misc/pmic8xxx-pwrkey.c
2536 ++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
2537 +@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2538 + if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
2539 + kpd_delay = 15625;
2540 +
2541 +- if (kpd_delay > 62500 || kpd_delay == 0) {
2542 ++ /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
2543 ++ if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
2544 + dev_err(&pdev->dev, "invalid power key trigger delay\n");
2545 + return -EINVAL;
2546 + }
2547 +@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2548 + pwr->name = "pmic8xxx_pwrkey";
2549 + pwr->phys = "pmic8xxx_pwrkey/input0";
2550 +
2551 +- delay = (kpd_delay << 10) / USEC_PER_SEC;
2552 +- delay = 1 + ilog2(delay);
2553 ++ delay = (kpd_delay << 6) / USEC_PER_SEC;
2554 ++ delay = ilog2(delay);
2555 +
2556 + err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
2557 + if (err < 0) {
2558 +diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
2559 +index 3a7f3a4a4396..7c18249d6c8e 100644
2560 +--- a/drivers/input/tablet/gtco.c
2561 ++++ b/drivers/input/tablet/gtco.c
2562 +@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
2563 + goto err_free_buf;
2564 + }
2565 +
2566 ++ /* Sanity check that a device has an endpoint */
2567 ++ if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
2568 ++ dev_err(&usbinterface->dev,
2569 ++ "Invalid number of endpoints\n");
2570 ++ error = -EINVAL;
2571 ++ goto err_free_urb;
2572 ++ }
2573 ++
2574 + /*
2575 + * The endpoint is always altsetting 0, we know this since we know
2576 + * this device only has one interrupt endpoint
2577 +@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
2578 + * HID report descriptor
2579 + */
2580 + if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
2581 +- HID_DEVICE_TYPE, &hid_desc) != 0){
2582 ++ HID_DEVICE_TYPE, &hid_desc) != 0) {
2583 + dev_err(&usbinterface->dev,
2584 + "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
2585 + error = -EIO;
2586 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2587 +index fc836f523afa..b9319b76a8a1 100644
2588 +--- a/drivers/iommu/amd_iommu.c
2589 ++++ b/drivers/iommu/amd_iommu.c
2590 +@@ -91,6 +91,7 @@ struct iommu_dev_data {
2591 + struct list_head dev_data_list; /* For global dev_data_list */
2592 + struct protection_domain *domain; /* Domain the device is bound to */
2593 + u16 devid; /* PCI Device ID */
2594 ++ u16 alias; /* Alias Device ID */
2595 + bool iommu_v2; /* Device can make use of IOMMUv2 */
2596 + bool passthrough; /* Device is identity mapped */
2597 + struct {
2598 +@@ -125,6 +126,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
2599 + return container_of(dom, struct protection_domain, domain);
2600 + }
2601 +
2602 ++static inline u16 get_device_id(struct device *dev)
2603 ++{
2604 ++ struct pci_dev *pdev = to_pci_dev(dev);
2605 ++
2606 ++ return PCI_DEVID(pdev->bus->number, pdev->devfn);
2607 ++}
2608 ++
2609 + static struct iommu_dev_data *alloc_dev_data(u16 devid)
2610 + {
2611 + struct iommu_dev_data *dev_data;
2612 +@@ -162,6 +170,68 @@ out_unlock:
2613 + return dev_data;
2614 + }
2615 +
2616 ++static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
2617 ++{
2618 ++ *(u16 *)data = alias;
2619 ++ return 0;
2620 ++}
2621 ++
2622 ++static u16 get_alias(struct device *dev)
2623 ++{
2624 ++ struct pci_dev *pdev = to_pci_dev(dev);
2625 ++ u16 devid, ivrs_alias, pci_alias;
2626 ++
2627 ++ devid = get_device_id(dev);
2628 ++ ivrs_alias = amd_iommu_alias_table[devid];
2629 ++ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
2630 ++
2631 ++ if (ivrs_alias == pci_alias)
2632 ++ return ivrs_alias;
2633 ++
2634 ++ /*
2635 ++ * DMA alias showdown
2636 ++ *
2637 ++ * The IVRS is fairly reliable in telling us about aliases, but it
2638 ++ * can't know about every screwy device. If we don't have an IVRS
2639 ++ * reported alias, use the PCI reported alias. In that case we may
2640 ++ * still need to initialize the rlookup and dev_table entries if the
2641 ++ * alias is to a non-existent device.
2642 ++ */
2643 ++ if (ivrs_alias == devid) {
2644 ++ if (!amd_iommu_rlookup_table[pci_alias]) {
2645 ++ amd_iommu_rlookup_table[pci_alias] =
2646 ++ amd_iommu_rlookup_table[devid];
2647 ++ memcpy(amd_iommu_dev_table[pci_alias].data,
2648 ++ amd_iommu_dev_table[devid].data,
2649 ++ sizeof(amd_iommu_dev_table[pci_alias].data));
2650 ++ }
2651 ++
2652 ++ return pci_alias;
2653 ++ }
2654 ++
2655 ++ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
2656 ++ "for device %s[%04x:%04x], kernel reported alias "
2657 ++ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
2658 ++ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
2659 ++ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
2660 ++ PCI_FUNC(pci_alias));
2661 ++
2662 ++ /*
2663 ++ * If we don't have a PCI DMA alias and the IVRS alias is on the same
2664 ++ * bus, then the IVRS table may know about a quirk that we don't.
2665 ++ */
2666 ++ if (pci_alias == devid &&
2667 ++ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
2668 ++ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
2669 ++ pdev->dma_alias_devfn = ivrs_alias & 0xff;
2670 ++ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
2671 ++ PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
2672 ++ dev_name(dev));
2673 ++ }
2674 ++
2675 ++ return ivrs_alias;
2676 ++}
2677 ++
2678 + static struct iommu_dev_data *find_dev_data(u16 devid)
2679 + {
2680 + struct iommu_dev_data *dev_data;
2681 +@@ -174,13 +244,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
2682 + return dev_data;
2683 + }
2684 +
2685 +-static inline u16 get_device_id(struct device *dev)
2686 +-{
2687 +- struct pci_dev *pdev = to_pci_dev(dev);
2688 +-
2689 +- return PCI_DEVID(pdev->bus->number, pdev->devfn);
2690 +-}
2691 +-
2692 + static struct iommu_dev_data *get_dev_data(struct device *dev)
2693 + {
2694 + return dev->archdata.iommu;
2695 +@@ -308,6 +371,8 @@ static int iommu_init_device(struct device *dev)
2696 + if (!dev_data)
2697 + return -ENOMEM;
2698 +
2699 ++ dev_data->alias = get_alias(dev);
2700 ++
2701 + if (pci_iommuv2_capable(pdev)) {
2702 + struct amd_iommu *iommu;
2703 +
2704 +@@ -328,7 +393,7 @@ static void iommu_ignore_device(struct device *dev)
2705 + u16 devid, alias;
2706 +
2707 + devid = get_device_id(dev);
2708 +- alias = amd_iommu_alias_table[devid];
2709 ++ alias = get_alias(dev);
2710 +
2711 + memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
2712 + memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
2713 +@@ -1017,7 +1082,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
2714 + int ret;
2715 +
2716 + iommu = amd_iommu_rlookup_table[dev_data->devid];
2717 +- alias = amd_iommu_alias_table[dev_data->devid];
2718 ++ alias = dev_data->alias;
2719 +
2720 + ret = iommu_flush_dte(iommu, dev_data->devid);
2721 + if (!ret && alias != dev_data->devid)
2722 +@@ -1891,7 +1956,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2723 + bool ats;
2724 +
2725 + iommu = amd_iommu_rlookup_table[dev_data->devid];
2726 +- alias = amd_iommu_alias_table[dev_data->devid];
2727 ++ alias = dev_data->alias;
2728 + ats = dev_data->ats.enabled;
2729 +
2730 + /* Update data structures */
2731 +@@ -1925,7 +1990,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
2732 + return;
2733 +
2734 + iommu = amd_iommu_rlookup_table[dev_data->devid];
2735 +- alias = amd_iommu_alias_table[dev_data->devid];
2736 ++ alias = dev_data->alias;
2737 +
2738 + /* decrease reference counters */
2739 + dev_data->domain->dev_iommu[iommu->index] -= 1;
2740 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
2741 +index 72d6182666cb..58f2fe687a24 100644
2742 +--- a/drivers/iommu/dma-iommu.c
2743 ++++ b/drivers/iommu/dma-iommu.c
2744 +@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
2745 + unsigned int s_length = sg_dma_len(s);
2746 + unsigned int s_dma_len = s->length;
2747 +
2748 +- s->offset = s_offset;
2749 ++ s->offset += s_offset;
2750 + s->length = s_length;
2751 + sg_dma_address(s) = dma_addr + s_offset;
2752 + dma_addr += s_dma_len;
2753 +@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
2754 +
2755 + for_each_sg(sg, s, nents, i) {
2756 + if (sg_dma_address(s) != DMA_ERROR_CODE)
2757 +- s->offset = sg_dma_address(s);
2758 ++ s->offset += sg_dma_address(s);
2759 + if (sg_dma_len(s))
2760 + s->length = sg_dma_len(s);
2761 + sg_dma_address(s) = DMA_ERROR_CODE;
2762 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
2763 +index efe50845939d..17304705f2cf 100644
2764 +--- a/drivers/irqchip/irq-mxs.c
2765 ++++ b/drivers/irqchip/irq-mxs.c
2766 +@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
2767 + void __iomem *icoll_base;
2768 +
2769 + icoll_base = of_io_request_and_map(np, 0, np->name);
2770 +- if (!icoll_base)
2771 ++ if (IS_ERR(icoll_base))
2772 + panic("%s: unable to map resource", np->full_name);
2773 + return icoll_base;
2774 + }
2775 +diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
2776 +index 4ef178078e5b..1254e98f6b57 100644
2777 +--- a/drivers/irqchip/irq-sunxi-nmi.c
2778 ++++ b/drivers/irqchip/irq-sunxi-nmi.c
2779 +@@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
2780 +
2781 + gc = irq_get_domain_generic_chip(domain, 0);
2782 + gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
2783 +- if (!gc->reg_base) {
2784 ++ if (IS_ERR(gc->reg_base)) {
2785 + pr_err("unable to map resource\n");
2786 +- ret = -ENOMEM;
2787 ++ ret = PTR_ERR(gc->reg_base);
2788 + goto fail_irqd_remove;
2789 + }
2790 +
2791 +diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2792 +index 27f2ef300f8b..3970cda10080 100644
2793 +--- a/drivers/md/dm-cache-metadata.c
2794 ++++ b/drivers/md/dm-cache-metadata.c
2795 +@@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
2796 + return 0;
2797 + }
2798 +
2799 +-#define WRITE_LOCK(cmd) \
2800 +- down_write(&cmd->root_lock); \
2801 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2802 +- up_write(&cmd->root_lock); \
2803 +- return -EINVAL; \
2804 ++static bool cmd_write_lock(struct dm_cache_metadata *cmd)
2805 ++{
2806 ++ down_write(&cmd->root_lock);
2807 ++ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
2808 ++ up_write(&cmd->root_lock);
2809 ++ return false;
2810 + }
2811 ++ return true;
2812 ++}
2813 +
2814 +-#define WRITE_LOCK_VOID(cmd) \
2815 +- down_write(&cmd->root_lock); \
2816 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2817 +- up_write(&cmd->root_lock); \
2818 +- return; \
2819 +- }
2820 ++#define WRITE_LOCK(cmd) \
2821 ++ do { \
2822 ++ if (!cmd_write_lock((cmd))) \
2823 ++ return -EINVAL; \
2824 ++ } while (0)
2825 ++
2826 ++#define WRITE_LOCK_VOID(cmd) \
2827 ++ do { \
2828 ++ if (!cmd_write_lock((cmd))) \
2829 ++ return; \
2830 ++ } while (0)
2831 +
2832 + #define WRITE_UNLOCK(cmd) \
2833 +- up_write(&cmd->root_lock)
2834 ++ up_write(&(cmd)->root_lock)
2835 +
2836 +-#define READ_LOCK(cmd) \
2837 +- down_read(&cmd->root_lock); \
2838 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2839 +- up_read(&cmd->root_lock); \
2840 +- return -EINVAL; \
2841 ++static bool cmd_read_lock(struct dm_cache_metadata *cmd)
2842 ++{
2843 ++ down_read(&cmd->root_lock);
2844 ++ if (cmd->fail_io) {
2845 ++ up_read(&cmd->root_lock);
2846 ++ return false;
2847 + }
2848 ++ return true;
2849 ++}
2850 +
2851 +-#define READ_LOCK_VOID(cmd) \
2852 +- down_read(&cmd->root_lock); \
2853 +- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2854 +- up_read(&cmd->root_lock); \
2855 +- return; \
2856 +- }
2857 ++#define READ_LOCK(cmd) \
2858 ++ do { \
2859 ++ if (!cmd_read_lock((cmd))) \
2860 ++ return -EINVAL; \
2861 ++ } while (0)
2862 ++
2863 ++#define READ_LOCK_VOID(cmd) \
2864 ++ do { \
2865 ++ if (!cmd_read_lock((cmd))) \
2866 ++ return; \
2867 ++ } while (0)
2868 +
2869 + #define READ_UNLOCK(cmd) \
2870 +- up_read(&cmd->root_lock)
2871 ++ up_read(&(cmd)->root_lock)
2872 +
2873 + int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
2874 + {
2875 +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
2876 +index 33bdd81065e8..11f39791ec33 100644
2877 +--- a/drivers/media/v4l2-core/videobuf2-core.c
2878 ++++ b/drivers/media/v4l2-core/videobuf2-core.c
2879 +@@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
2880 + * Will sleep if required for nonblocking == false.
2881 + */
2882 + static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
2883 +- int nonblocking)
2884 ++ void *pb, int nonblocking)
2885 + {
2886 + unsigned long flags;
2887 + int ret;
2888 +@@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
2889 + /*
2890 + * Only remove the buffer from done_list if v4l2_buffer can handle all
2891 + * the planes.
2892 +- * Verifying planes is NOT necessary since it already has been checked
2893 +- * before the buffer is queued/prepared. So it can never fail.
2894 + */
2895 +- list_del(&(*vb)->done_entry);
2896 ++ ret = call_bufop(q, verify_planes_array, *vb, pb);
2897 ++ if (!ret)
2898 ++ list_del(&(*vb)->done_entry);
2899 + spin_unlock_irqrestore(&q->done_lock, flags);
2900 +
2901 + return ret;
2902 +@@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
2903 + struct vb2_buffer *vb = NULL;
2904 + int ret;
2905 +
2906 +- ret = __vb2_get_done_vb(q, &vb, nonblocking);
2907 ++ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
2908 + if (ret < 0)
2909 + return ret;
2910 +
2911 +diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
2912 +index dbec5923fcf0..3c3b517f1d1c 100644
2913 +--- a/drivers/media/v4l2-core/videobuf2-memops.c
2914 ++++ b/drivers/media/v4l2-core/videobuf2-memops.c
2915 +@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
2916 + vec = frame_vector_create(nr);
2917 + if (!vec)
2918 + return ERR_PTR(-ENOMEM);
2919 +- ret = get_vaddr_frames(start, nr, write, 1, vec);
2920 ++ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
2921 + if (ret < 0)
2922 + goto out_destroy;
2923 + /* We accept only complete set of PFNs */
2924 +diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
2925 +index 502984c724ff..6c441be8f893 100644
2926 +--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
2927 ++++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
2928 +@@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
2929 + return 0;
2930 + }
2931 +
2932 ++static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
2933 ++{
2934 ++ return __verify_planes_array(vb, pb);
2935 ++}
2936 ++
2937 + /**
2938 + * __verify_length() - Verify that the bytesused value for each plane fits in
2939 + * the plane length and that the data offset doesn't exceed the bytesused value.
2940 +@@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
2941 + }
2942 +
2943 + static const struct vb2_buf_ops v4l2_buf_ops = {
2944 ++ .verify_planes_array = __verify_planes_array_core,
2945 + .fill_user_buffer = __fill_v4l2_buffer,
2946 + .fill_vb2_buffer = __fill_vb2_buffer,
2947 + .set_timestamp = __set_timestamp,
2948 +diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
2949 +index 22892c701c63..4bf7d50b1bc7 100644
2950 +--- a/drivers/misc/Kconfig
2951 ++++ b/drivers/misc/Kconfig
2952 +@@ -439,7 +439,7 @@ config ARM_CHARLCD
2953 + still useful.
2954 +
2955 + config BMP085
2956 +- bool
2957 ++ tristate
2958 + depends on SYSFS
2959 +
2960 + config BMP085_I2C
2961 +diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
2962 +index 15e88078ba1e..f1a0b99f5a9a 100644
2963 +--- a/drivers/misc/ad525x_dpot.c
2964 ++++ b/drivers/misc/ad525x_dpot.c
2965 +@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
2966 + */
2967 + value = swab16(value);
2968 +
2969 +- if (dpot->uid == DPOT_UID(AD5271_ID))
2970 ++ if (dpot->uid == DPOT_UID(AD5274_ID))
2971 + value = value >> 2;
2972 + return value;
2973 + default:
2974 +diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
2975 +index 09a406058c46..efbb6945eb18 100644
2976 +--- a/drivers/misc/cxl/irq.c
2977 ++++ b/drivers/misc/cxl/irq.c
2978 +@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
2979 + void cxl_unmap_irq(unsigned int virq, void *cookie)
2980 + {
2981 + free_irq(virq, cookie);
2982 +- irq_dispose_mapping(virq);
2983 + }
2984 +
2985 + static int cxl_register_one_irq(struct cxl *adapter,
2986 +diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
2987 +index 8310b4dbff06..6a451bd65bf3 100644
2988 +--- a/drivers/misc/mic/scif/scif_rma.c
2989 ++++ b/drivers/misc/mic/scif/scif_rma.c
2990 +@@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
2991 + if ((map_flags & SCIF_MAP_FIXED) &&
2992 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
2993 + (offset < 0) ||
2994 +- (offset + (off_t)len < offset)))
2995 ++ (len > LONG_MAX - offset)))
2996 + return -EINVAL;
2997 +
2998 + might_sleep();
2999 +@@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
3000 + if ((map_flags & SCIF_MAP_FIXED) &&
3001 + ((ALIGN(offset, PAGE_SIZE) != offset) ||
3002 + (offset < 0) ||
3003 +- (offset + (off_t)len < offset)))
3004 ++ (len > LONG_MAX - offset)))
3005 + return -EINVAL;
3006 +
3007 + /* Unsupported protection requested */
3008 +@@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
3009 +
3010 + /* Offset is not page aligned or offset+len wraps around */
3011 + if ((ALIGN(offset, PAGE_SIZE) != offset) ||
3012 +- (offset + (off_t)len < offset))
3013 ++ (offset < 0) ||
3014 ++ (len > LONG_MAX - offset))
3015 + return -EINVAL;
3016 +
3017 + err = scif_verify_epd(ep);
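All three scif_rma.c hunks replace the same pattern: the old test offset + (off_t)len < offset relies on signed overflow, which is undefined behaviour in C and may be optimized away, while len > LONG_MAX - offset never overflows because offset has already been checked to be non-negative. A stand-alone sketch of the safe form (assuming off_t is long, as on the platforms this driver targets):

	#include <limits.h>
	#include <stdbool.h>

	/* True if offset + len would exceed LONG_MAX.
	 * Precondition: offset >= 0, so LONG_MAX - offset cannot wrap. */
	static bool range_overflows(long offset, unsigned long len)
	{
		return len > (unsigned long)(LONG_MAX - offset);
	}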
3018 +diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
3019 +index 12c6190c6e33..4a07ba1195b5 100644
3020 +--- a/drivers/mtd/nand/brcmnand/brcmnand.c
3021 ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c
3022 +@@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
3023 + [BRCMNAND_FC_BASE] = 0x400,
3024 + };
3025 +
3026 ++/* BRCMNAND v7.1 */
3027 ++static const u16 brcmnand_regs_v71[] = {
3028 ++ [BRCMNAND_CMD_START] = 0x04,
3029 ++ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
3030 ++ [BRCMNAND_CMD_ADDRESS] = 0x0c,
3031 ++ [BRCMNAND_INTFC_STATUS] = 0x14,
3032 ++ [BRCMNAND_CS_SELECT] = 0x18,
3033 ++ [BRCMNAND_CS_XOR] = 0x1c,
3034 ++ [BRCMNAND_LL_OP] = 0x20,
3035 ++ [BRCMNAND_CS0_BASE] = 0x50,
3036 ++ [BRCMNAND_CS1_BASE] = 0,
3037 ++ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
3038 ++ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
3039 ++ [BRCMNAND_UNCORR_COUNT] = 0xfc,
3040 ++ [BRCMNAND_CORR_COUNT] = 0x100,
3041 ++ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
3042 ++ [BRCMNAND_CORR_ADDR] = 0x110,
3043 ++ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
3044 ++ [BRCMNAND_UNCORR_ADDR] = 0x118,
3045 ++ [BRCMNAND_SEMAPHORE] = 0x150,
3046 ++ [BRCMNAND_ID] = 0x194,
3047 ++ [BRCMNAND_ID_EXT] = 0x198,
3048 ++ [BRCMNAND_LL_RDATA] = 0x19c,
3049 ++ [BRCMNAND_OOB_READ_BASE] = 0x200,
3050 ++ [BRCMNAND_OOB_READ_10_BASE] = 0,
3051 ++ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
3052 ++ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
3053 ++ [BRCMNAND_FC_BASE] = 0x400,
3054 ++};
3055 ++
3056 + enum brcmnand_cs_reg {
3057 + BRCMNAND_CS_CFG_EXT = 0,
3058 + BRCMNAND_CS_CFG,
3059 +@@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
3060 + }
3061 +
3062 + /* Register offsets */
3063 +- if (ctrl->nand_version >= 0x0600)
3064 ++ if (ctrl->nand_version >= 0x0701)
3065 ++ ctrl->reg_offsets = brcmnand_regs_v71;
3066 ++ else if (ctrl->nand_version >= 0x0600)
3067 + ctrl->reg_offsets = brcmnand_regs_v60;
3068 + else if (ctrl->nand_version >= 0x0500)
3069 + ctrl->reg_offsets = brcmnand_regs_v50;
3070 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3071 +index 3ff583f165cd..ce7b2cab5762 100644
3072 +--- a/drivers/mtd/nand/nand_base.c
3073 ++++ b/drivers/mtd/nand/nand_base.c
3074 +@@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
3075 + * This is the first phase of the normal nand_scan() function. It reads the
3076 + * flash ID and sets up MTD fields accordingly.
3077 + *
3078 +- * The mtd->owner field must be set to the module of the caller.
3079 + */
3080 + int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3081 + struct nand_flash_dev *table)
3082 +@@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
3083 + *
3084 + * This fills out all the uninitialized function pointers with the defaults.
3085 + * The flash ID is read and the mtd/chip structures are filled with the
3086 +- * appropriate values. The mtd->owner field must be set to the module of the
3087 +- * caller.
3088 ++ * appropriate values.
3089 + */
3090 + int nand_scan(struct mtd_info *mtd, int maxchips)
3091 + {
3092 + int ret;
3093 +
3094 +- /* Many callers got this wrong, so check for it for a while... */
3095 +- if (!mtd->owner && caller_is_module()) {
3096 +- pr_crit("%s called with NULL mtd->owner!\n", __func__);
3097 +- BUG();
3098 +- }
3099 +-
3100 + ret = nand_scan_ident(mtd, maxchips, NULL);
3101 + if (!ret)
3102 + ret = nand_scan_tail(mtd);
3103 +diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
3104 +index 32477c4eb421..37e4135ab213 100644
3105 +--- a/drivers/mtd/spi-nor/spi-nor.c
3106 ++++ b/drivers/mtd/spi-nor/spi-nor.c
3107 +@@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
3108 + return 0;
3109 + }
3110 +
3111 +-static int micron_quad_enable(struct spi_nor *nor)
3112 +-{
3113 +- int ret;
3114 +- u8 val;
3115 +-
3116 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3117 +- if (ret < 0) {
3118 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
3119 +- return ret;
3120 +- }
3121 +-
3122 +- write_enable(nor);
3123 +-
3124 +- /* set EVCR, enable quad I/O */
3125 +- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
3126 +- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
3127 +- if (ret < 0) {
3128 +- dev_err(nor->dev, "error while writing EVCR register\n");
3129 +- return ret;
3130 +- }
3131 +-
3132 +- ret = spi_nor_wait_till_ready(nor);
3133 +- if (ret)
3134 +- return ret;
3135 +-
3136 +- /* read EVCR and check it */
3137 +- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3138 +- if (ret < 0) {
3139 +- dev_err(nor->dev, "error %d reading EVCR\n", ret);
3140 +- return ret;
3141 +- }
3142 +- if (val & EVCR_QUAD_EN_MICRON) {
3143 +- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
3144 +- return -EINVAL;
3145 +- }
3146 +-
3147 +- return 0;
3148 +-}
3149 +-
3150 + static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3151 + {
3152 + int status;
3153 +@@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3154 + }
3155 + return status;
3156 + case SNOR_MFR_MICRON:
3157 +- status = micron_quad_enable(nor);
3158 +- if (status) {
3159 +- dev_err(nor->dev, "Micron quad-read not enabled\n");
3160 +- return -EINVAL;
3161 +- }
3162 +- return status;
3163 ++ return 0;
3164 + default:
3165 + status = spansion_quad_enable(nor);
3166 + if (status) {
3167 +diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
3168 +index 973dade2d07f..1257b18e6b90 100644
3169 +--- a/drivers/net/ethernet/jme.c
3170 ++++ b/drivers/net/ethernet/jme.c
3171 +@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
3172 + }
3173 +
3174 + static inline void
3175 +-jme_clear_pm(struct jme_adapter *jme)
3176 ++jme_clear_pm_enable_wol(struct jme_adapter *jme)
3177 + {
3178 + jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
3179 + }
3180 +
3181 ++static inline void
3182 ++jme_clear_pm_disable_wol(struct jme_adapter *jme)
3183 ++{
3184 ++ jwrite32(jme, JME_PMCS, PMCS_STMASK);
3185 ++}
3186 ++
3187 + static int
3188 + jme_reload_eeprom(struct jme_adapter *jme)
3189 + {
3190 +@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
3191 + struct jme_adapter *jme = netdev_priv(netdev);
3192 + int rc;
3193 +
3194 +- jme_clear_pm(jme);
3195 ++ jme_clear_pm_disable_wol(jme);
3196 + JME_NAPI_ENABLE(jme);
3197 +
3198 + tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
3199 +@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
3200 + static void
3201 + jme_powersave_phy(struct jme_adapter *jme)
3202 + {
3203 +- if (jme->reg_pmcs) {
3204 ++ if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
3205 + jme_set_100m_half(jme);
3206 + if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
3207 + jme_wait_link(jme);
3208 +- jme_clear_pm(jme);
3209 ++ jme_clear_pm_enable_wol(jme);
3210 + } else {
3211 + jme_phy_off(jme);
3212 + }
3213 +@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
3214 + if (wol->wolopts & WAKE_MAGIC)
3215 + jme->reg_pmcs |= PMCS_MFEN;
3216 +
3217 +- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
3218 +- device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
3219 +-
3220 + return 0;
3221 + }
3222 +
3223 +@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
3224 + jme->mii_if.mdio_read = jme_mdio_read;
3225 + jme->mii_if.mdio_write = jme_mdio_write;
3226 +
3227 +- jme_clear_pm(jme);
3228 +- device_set_wakeup_enable(&pdev->dev, true);
3229 ++ jme_clear_pm_disable_wol(jme);
3230 ++ device_init_wakeup(&pdev->dev, true);
3231 +
3232 + jme_set_phyfifo_5level(jme);
3233 + jme->pcirev = pdev->revision;
3234 +@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
3235 + if (!netif_running(netdev))
3236 + return 0;
3237 +
3238 +- jme_clear_pm(jme);
3239 ++ jme_clear_pm_disable_wol(jme);
3240 + jme_phy_on(jme);
3241 + if (test_bit(JME_FLAG_SSET, &jme->flags))
3242 + jme_set_settings(netdev, &jme->old_ecmd);
3243 +diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3244 +index e88afac51c5d..f96ab2f4b90e 100644
3245 +--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3246 ++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3247 +@@ -1557,6 +1557,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
3248 + /* the fw is stopped, the aux sta is dead: clean up driver state */
3249 + iwl_mvm_del_aux_sta(mvm);
3250 +
3251 ++ iwl_free_fw_paging(mvm);
3252 ++
3253 + /*
3254 + * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
3255 + * won't be called in this case).
3256 +diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
3257 +index c3adf2bcdc85..13c97f665ba8 100644
3258 +--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
3259 ++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
3260 +@@ -645,8 +645,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
3261 + for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
3262 + kfree(mvm->nvm_sections[i].data);
3263 +
3264 +- iwl_free_fw_paging(mvm);
3265 +-
3266 + iwl_mvm_tof_clean(mvm);
3267 +
3268 + ieee80211_free_hw(mvm->hw);
3269 +diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
3270 +index 8c7204738aa3..00e0332e2544 100644
3271 +--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
3272 ++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
3273 +@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
3274 + */
3275 + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
3276 + if (val & (BIT(1) | BIT(17))) {
3277 +- IWL_INFO(trans,
3278 +- "can't access the RSA semaphore it is write protected\n");
3279 ++ IWL_DEBUG_INFO(trans,
3280 ++ "can't access the RSA semaphore it is write protected\n");
3281 + return 0;
3282 + }
3283 +
3284 +diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3285 +index 5c717275a7fa..3d8019eb3d84 100644
3286 +--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3287 ++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3288 +@@ -939,7 +939,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
3289 + struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
3290 + int eint_num, virq, eint_offset;
3291 + unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
3292 +- static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
3293 ++ static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
3294 ++ 128000, 256000};
3295 + const struct mtk_desc_pin *pin;
3296 + struct irq_data *d;
3297 +
3298 +@@ -957,9 +958,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
3299 + if (!mtk_eint_can_en_debounce(pctl, eint_num))
3300 + return -ENOSYS;
3301 +
3302 +- dbnc = ARRAY_SIZE(dbnc_arr);
3303 +- for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
3304 +- if (debounce <= dbnc_arr[i]) {
3305 ++ dbnc = ARRAY_SIZE(debounce_time);
3306 ++ for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
3307 ++ if (debounce <= debounce_time[i]) {
3308 + dbnc = i;
3309 + break;
3310 + }
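The rename above is not cosmetic: the debounce value handed to this callback is in microseconds, but the old table held the hardware's millisecond settings, so requests were compared against the wrong scale. With the corrected table the existing loop (pick the smallest setting that still covers the request) behaves as intended; sketched in isolation:

	/* Hardware debounce settings, now correctly in microseconds. */
	static const unsigned int debounce_time[] = {
		500, 1000, 16000, 32000, 64000, 128000, 256000,
	};
	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* Smallest setting covering the request; returns the register
	 * encoding, or ARRAY_SIZE(debounce_time) when out of range. */
	static unsigned int pick_debounce(unsigned int debounce_us)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(debounce_time); i++)
			if (debounce_us <= debounce_time[i])
				break;
		return i;
	}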
3311 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
3312 +index ef04b962c3d5..23b6b8c29a99 100644
3313 +--- a/drivers/pinctrl/pinctrl-single.c
3314 ++++ b/drivers/pinctrl/pinctrl-single.c
3315 +@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
3316 +
3317 + /* Parse pins in each row from LSB */
3318 + while (mask) {
3319 +- bit_pos = ffs(mask);
3320 ++ bit_pos = __ffs(mask);
3321 + pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
3322 +- mask_pos = ((pcs->fmask) << (bit_pos - 1));
3323 ++ mask_pos = ((pcs->fmask) << bit_pos);
3324 + val_pos = val & mask_pos;
3325 + submask = mask & mask_pos;
3326 +
3327 +@@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
3328 + ret = of_property_read_u32(np, "pinctrl-single,function-mask",
3329 + &pcs->fmask);
3330 + if (!ret) {
3331 +- pcs->fshift = ffs(pcs->fmask) - 1;
3332 ++ pcs->fshift = __ffs(pcs->fmask);
3333 + pcs->fmax = pcs->fmask >> pcs->fshift;
3334 + } else {
3335 + /* If mask property doesn't exist, function mux is invalid. */
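Both pinctrl-single hunks fix the same off-by-one: ffs() returns a 1-based bit index (0 meaning no bit set), while the kernel's __ffs() is 0-based and requires a non-zero argument, so ffs(x) - 1 == __ffs(x) for any non-zero x. The old code mixed the two conventions and shifted the function mask by one bit. A quick userspace illustration using POSIX ffs():

	#include <assert.h>
	#include <strings.h>	/* ffs() */

	/* Kernel-style __ffs(): 0-based index of the lowest set bit. */
	static unsigned int my__ffs(unsigned int word)	/* word != 0 */
	{
		return (unsigned int)(ffs((int)word) - 1);
	}

	int main(void)
	{
		assert(ffs(0x10) == 5);		/* 1-based */
		assert(my__ffs(0x10) == 4);	/* 0-based */
		return 0;
	}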
3336 +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
3337 +index b0f62141ea4d..f774cb576ffa 100644
3338 +--- a/drivers/platform/x86/toshiba_acpi.c
3339 ++++ b/drivers/platform/x86/toshiba_acpi.c
3340 +@@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
3341 + /* Field definitions */
3342 + #define HCI_ACCEL_MASK 0x7fff
3343 + #define HCI_HOTKEY_DISABLE 0x0b
3344 +-#define HCI_HOTKEY_ENABLE 0x01
3345 ++#define HCI_HOTKEY_ENABLE 0x09
3346 + #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
3347 + #define HCI_LCD_BRIGHTNESS_BITS 3
3348 + #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
3349 +diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
3350 +index 423ce087cd9c..5d5adee16886 100644
3351 +--- a/drivers/pwm/pwm-brcmstb.c
3352 ++++ b/drivers/pwm/pwm-brcmstb.c
3353 +@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
3354 +
3355 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3356 + p->base = devm_ioremap_resource(&pdev->dev, res);
3357 +- if (!p->base) {
3358 +- ret = -ENOMEM;
3359 ++ if (IS_ERR(p->base)) {
3360 ++ ret = PTR_ERR(p->base);
3361 + goto out_clk;
3362 + }
3363 +
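This is a classic probe() bug: devm_ioremap_resource() never returns NULL; on failure it returns an encoded error pointer, so the old NULL test could never fire and the error pointer would later be used as if it were a valid mapping. The idiomatic check is exactly what the hunk installs:

	p->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(p->base)) {
		ret = PTR_ERR(p->base);	/* preserves e.g. -EBUSY or -EINVAL */
		goto out_clk;
	}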
3364 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
3365 +index 7b94b8ee087c..732ac71b82cd 100644
3366 +--- a/drivers/regulator/core.c
3367 ++++ b/drivers/regulator/core.c
3368 +@@ -148,7 +148,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
3369 + {
3370 + int i;
3371 +
3372 +- for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
3373 ++ for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
3374 + mutex_lock_nested(&rdev->mutex, i);
3375 + }
3376 +
3377 +diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
3378 +index 58f5d3b8e981..27343e1c43ef 100644
3379 +--- a/drivers/regulator/s5m8767.c
3380 ++++ b/drivers/regulator/s5m8767.c
3381 +@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
3382 + }
3383 + }
3384 +
3385 +- if (i < s5m8767->num_regulators)
3386 +- *enable_ctrl =
3387 +- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3388 ++ if (i >= s5m8767->num_regulators)
3389 ++ return -EINVAL;
3390 ++
3391 ++ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3392 +
3393 + return 0;
3394 + }
3395 +@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
3396 + else
3397 + regulators[id].vsel_mask = 0xff;
3398 +
3399 +- s5m8767_get_register(s5m8767, id, &enable_reg,
3400 ++ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
3401 + &enable_val);
3402 ++ if (ret) {
3403 ++ dev_err(s5m8767->dev, "error reading registers\n");
3404 ++ return ret;
3405 ++ }
3406 + regulators[id].enable_reg = enable_reg;
3407 + regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
3408 + regulators[id].enable_val = enable_val;
3409 +diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
3410 +index 05a51ef52703..d5c1b057a739 100644
3411 +--- a/drivers/rtc/rtc-ds1685.c
3412 ++++ b/drivers/rtc/rtc-ds1685.c
3413 +@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
3414 + * Only use this where you are certain another lock will not be held.
3415 + */
3416 + static inline void
3417 +-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
3418 ++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
3419 + {
3420 +- spin_lock_irqsave(&rtc->lock, flags);
3421 ++ spin_lock_irqsave(&rtc->lock, *flags);
3422 + ds1685_rtc_switch_to_bank1(rtc);
3423 + }
3424 +
3425 +@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
3426 + {
3427 + struct ds1685_priv *rtc = dev_get_drvdata(dev);
3428 + u8 reg = 0, bit = 0, tmp;
3429 +- unsigned long flags = 0;
3430 ++ unsigned long flags;
3431 + long int val = 0;
3432 + const struct ds1685_rtc_ctrl_regs *reg_info =
3433 + ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
3434 +@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
3435 + bit = reg_info->bit;
3436 +
3437 + /* Safe to spinlock during a write. */
3438 +- ds1685_rtc_begin_ctrl_access(rtc, flags);
3439 ++ ds1685_rtc_begin_ctrl_access(rtc, &flags);
3440 + tmp = rtc->read(rtc, reg);
3441 + rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
3442 + ds1685_rtc_end_ctrl_access(rtc, flags);
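The ds1685 change fixes a by-value/by-reference bug: spin_lock_irqsave() is a macro that assigns the saved interrupt state into the variable it is given, so when flags is a plain function parameter only the callee's copy is written, and the caller's later spin_unlock_irqrestore() restores an uninitialized value. The mechanism, reduced to plain C (runnable demo, no kernel headers):

	#include <assert.h>

	/* Broken: the assignment lands in the callee's local copy. */
	static void save_bad(unsigned long flags) { flags = 0xABC; (void)flags; }

	/* Fixed: the caller's variable receives the value. */
	static void save_ok(unsigned long *flags) { *flags = 0xABC; }

	int main(void)
	{
		unsigned long f = 0;
		save_bad(f);
		assert(f == 0);		/* caller never sees the update */
		save_ok(&f);
		assert(f == 0xABC);	/* pointer makes it stick */
		return 0;
	}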
3443 +diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
3444 +index 097325d96db5..b1b4746a0eab 100644
3445 +--- a/drivers/rtc/rtc-hym8563.c
3446 ++++ b/drivers/rtc/rtc-hym8563.c
3447 +@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
3448 + * it does not seem to carry it over a subsequent write/read.
3449 + * So we'll limit ourself to 100 years, starting at 2000 for now.
3450 + */
3451 +- buf[6] = tm->tm_year - 100;
3452 ++ buf[6] = bin2bcd(tm->tm_year - 100);
3453 +
3454 + /*
3455 + * CTL1 only contains TEST-mode bits apart from stop,
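The hym8563 keeps its calendar registers in BCD, two decimal digits per byte, so the year offset must be encoded before being written: 16 (for 2016) becomes 0x16, while the raw binary 16 is 0x10 and would read back as 2010. The kernel's bin2bcd() helper is equivalent to this one-liner, shown for illustration:

	static inline unsigned char bin2bcd(unsigned char val)
	{
		/* e.g. bin2bcd(16) == 0x16, bin2bcd(59) == 0x59 */
		return (unsigned char)(((val / 10) << 4) | (val % 10));
	}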
3456 +diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
3457 +index 7184a0eda793..725dccae24e7 100644
3458 +--- a/drivers/rtc/rtc-max77686.c
3459 ++++ b/drivers/rtc/rtc-max77686.c
3460 +@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
3461 +
3462 + info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
3463 + MAX77686_RTCIRQ_RTCA1);
3464 +- if (!info->virq) {
3465 ++ if (info->virq <= 0) {
3466 + ret = -ENXIO;
3467 + goto err_rtc;
3468 + }
3469 +diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
3470 +index bd911bafb809..17341feadad1 100644
3471 +--- a/drivers/rtc/rtc-rx8025.c
3472 ++++ b/drivers/rtc/rtc-rx8025.c
3473 +@@ -65,7 +65,6 @@
3474 +
3475 + static const struct i2c_device_id rx8025_id[] = {
3476 + { "rx8025", 0 },
3477 +- { "rv8803", 1 },
3478 + { }
3479 + };
3480 + MODULE_DEVICE_TABLE(i2c, rx8025_id);
3481 +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
3482 +index f64c282275b3..e1b86bb01062 100644
3483 +--- a/drivers/rtc/rtc-vr41xx.c
3484 ++++ b/drivers/rtc/rtc-vr41xx.c
3485 +@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
3486 + }
3487 +
3488 + static const struct rtc_class_ops vr41xx_rtc_ops = {
3489 +- .release = vr41xx_rtc_release,
3490 +- .ioctl = vr41xx_rtc_ioctl,
3491 +- .read_time = vr41xx_rtc_read_time,
3492 +- .set_time = vr41xx_rtc_set_time,
3493 +- .read_alarm = vr41xx_rtc_read_alarm,
3494 +- .set_alarm = vr41xx_rtc_set_alarm,
3495 ++ .release = vr41xx_rtc_release,
3496 ++ .ioctl = vr41xx_rtc_ioctl,
3497 ++ .read_time = vr41xx_rtc_read_time,
3498 ++ .set_time = vr41xx_rtc_set_time,
3499 ++ .read_alarm = vr41xx_rtc_read_alarm,
3500 ++ .set_alarm = vr41xx_rtc_set_alarm,
3501 ++ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
3502 + };
3503 +
3504 + static int rtc_probe(struct platform_device *pdev)
3505 +diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
3506 +index e5647d59224f..0b331c9c0a8f 100644
3507 +--- a/drivers/scsi/device_handler/Kconfig
3508 ++++ b/drivers/scsi/device_handler/Kconfig
3509 +@@ -13,13 +13,13 @@ menuconfig SCSI_DH
3510 +
3511 + config SCSI_DH_RDAC
3512 + tristate "LSI RDAC Device Handler"
3513 +- depends on SCSI_DH
3514 ++ depends on SCSI_DH && SCSI
3515 + help
3516 + If you have a LSI RDAC select y. Otherwise, say N.
3517 +
3518 + config SCSI_DH_HP_SW
3519 + tristate "HP/COMPAQ MSA Device Handler"
3520 +- depends on SCSI_DH
3521 ++ depends on SCSI_DH && SCSI
3522 + help
3523 + If you have a HP/COMPAQ MSA device that requires START_STOP to
3524 + be sent to start it and cannot upgrade the firmware then select y.
3525 +@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
3526 +
3527 + config SCSI_DH_EMC
3528 + tristate "EMC CLARiiON Device Handler"
3529 +- depends on SCSI_DH
3530 ++ depends on SCSI_DH && SCSI
3531 + help
3532 + If you have a EMC CLARiiON select y. Otherwise, say N.
3533 +
3534 + config SCSI_DH_ALUA
3535 + tristate "SPC-3 ALUA Device Handler"
3536 +- depends on SCSI_DH
3537 ++ depends on SCSI_DH && SCSI
3538 + help
3539 + SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
3540 + Access (ALUA).
3541 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3542 +index 97a1c1c33b05..00ce3e269a43 100644
3543 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
3544 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3545 +@@ -6282,12 +6282,13 @@ out:
3546 + }
3547 +
3548 + for (i = 0; i < ioc->sge_count; i++) {
3549 +- if (kbuff_arr[i])
3550 ++ if (kbuff_arr[i]) {
3551 + dma_free_coherent(&instance->pdev->dev,
3552 + le32_to_cpu(kern_sge32[i].length),
3553 + kbuff_arr[i],
3554 + le32_to_cpu(kern_sge32[i].phys_addr));
3555 + kbuff_arr[i] = NULL;
3556 ++ }
3557 + }
3558 +
3559 + megasas_return_cmd(instance, cmd);
3560 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
3561 +index 79a8bc4f6cec..035767c02072 100644
3562 +--- a/drivers/spi/spi-rockchip.c
3563 ++++ b/drivers/spi/spi-rockchip.c
3564 +@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
3565 + static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
3566 + {
3567 + u32 ser;
3568 +- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
3569 ++ struct spi_master *master = spi->master;
3570 ++ struct rockchip_spi *rs = spi_master_get_devdata(master);
3571 ++
3572 ++ pm_runtime_get_sync(rs->dev);
3573 +
3574 + ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
3575 +
3576 +@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
3577 + ser &= ~(1 << spi->chip_select);
3578 +
3579 + writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
3580 ++
3581 ++ pm_runtime_put_sync(rs->dev);
3582 + }
3583 +
3584 + static int rockchip_spi_prepare_message(struct spi_master *master,
3585 +diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
3586 +index 05de0dad8762..4c6f1d7d2eaf 100644
3587 +--- a/drivers/staging/rdma/hfi1/TODO
3588 ++++ b/drivers/staging/rdma/hfi1/TODO
3589 +@@ -3,4 +3,4 @@ July, 2015
3590 + - Remove unneeded file entries in sysfs
3591 + - Remove software processing of IB protocol and place in library for use
3592 + by qib, ipath (if still present), hfi1, and eventually soft-roce
3593 +-
3594 ++- Replace incorrect uAPI
3595 +diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
3596 +index aae9826ec62b..c851e51b1dc3 100644
3597 +--- a/drivers/staging/rdma/hfi1/file_ops.c
3598 ++++ b/drivers/staging/rdma/hfi1/file_ops.c
3599 +@@ -62,6 +62,8 @@
3600 + #include <linux/cred.h>
3601 + #include <linux/uio.h>
3602 +
3603 ++#include <rdma/ib.h>
3604 ++
3605 + #include "hfi.h"
3606 + #include "pio.h"
3607 + #include "device.h"
3608 +@@ -214,6 +216,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
3609 + int uctxt_required = 1;
3610 + int must_be_root = 0;
3611 +
3612 ++ /* FIXME: This interface cannot continue out of staging */
3613 ++ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
3614 ++ return -EACCES;
3615 ++
3616 + if (count < sizeof(cmd)) {
3617 + ret = -EINVAL;
3618 + goto bail;
3619 +diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
3620 +index e845841ab036..7106288efae3 100644
3621 +--- a/drivers/thermal/rockchip_thermal.c
3622 ++++ b/drivers/thermal/rockchip_thermal.c
3623 +@@ -545,15 +545,14 @@ static int rockchip_configure_from_dt(struct device *dev,
3624 + thermal->chip->tshut_temp);
3625 + thermal->tshut_temp = thermal->chip->tshut_temp;
3626 + } else {
3627 ++ if (shut_temp > INT_MAX) {
3628 ++ dev_err(dev, "Invalid tshut temperature specified: %d\n",
3629 ++ shut_temp);
3630 ++ return -ERANGE;
3631 ++ }
3632 + thermal->tshut_temp = shut_temp;
3633 + }
3634 +
3635 +- if (thermal->tshut_temp > INT_MAX) {
3636 +- dev_err(dev, "Invalid tshut temperature specified: %d\n",
3637 +- thermal->tshut_temp);
3638 +- return -ERANGE;
3639 +- }
3640 +-
3641 + if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
3642 + dev_warn(dev,
3643 + "Missing tshut mode property, using default (%s)\n",
3644 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
3645 +index 51c7507b0444..63a06ab6ba03 100644
3646 +--- a/drivers/tty/serial/sh-sci.c
3647 ++++ b/drivers/tty/serial/sh-sci.c
3648 +@@ -38,7 +38,6 @@
3649 + #include <linux/major.h>
3650 + #include <linux/module.h>
3651 + #include <linux/mm.h>
3652 +-#include <linux/notifier.h>
3653 + #include <linux/of.h>
3654 + #include <linux/platform_device.h>
3655 + #include <linux/pm_runtime.h>
3656 +@@ -116,8 +115,6 @@ struct sci_port {
3657 + struct timer_list rx_timer;
3658 + unsigned int rx_timeout;
3659 + #endif
3660 +-
3661 +- struct notifier_block freq_transition;
3662 + };
3663 +
3664 + #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
3665 +@@ -1606,29 +1603,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
3666 + return ret;
3667 + }
3668 +
3669 +-/*
3670 +- * Here we define a transition notifier so that we can update all of our
3671 +- * ports' baud rate when the peripheral clock changes.
3672 +- */
3673 +-static int sci_notifier(struct notifier_block *self,
3674 +- unsigned long phase, void *p)
3675 +-{
3676 +- struct sci_port *sci_port;
3677 +- unsigned long flags;
3678 +-
3679 +- sci_port = container_of(self, struct sci_port, freq_transition);
3680 +-
3681 +- if (phase == CPUFREQ_POSTCHANGE) {
3682 +- struct uart_port *port = &sci_port->port;
3683 +-
3684 +- spin_lock_irqsave(&port->lock, flags);
3685 +- port->uartclk = clk_get_rate(sci_port->iclk);
3686 +- spin_unlock_irqrestore(&port->lock, flags);
3687 +- }
3688 +-
3689 +- return NOTIFY_OK;
3690 +-}
3691 +-
3692 + static const struct sci_irq_desc {
3693 + const char *desc;
3694 + irq_handler_t handler;
3695 +@@ -2559,9 +2533,6 @@ static int sci_remove(struct platform_device *dev)
3696 + {
3697 + struct sci_port *port = platform_get_drvdata(dev);
3698 +
3699 +- cpufreq_unregister_notifier(&port->freq_transition,
3700 +- CPUFREQ_TRANSITION_NOTIFIER);
3701 +-
3702 + uart_remove_one_port(&sci_uart_driver, &port->port);
3703 +
3704 + sci_cleanup_single(port);
3705 +@@ -2714,16 +2685,6 @@ static int sci_probe(struct platform_device *dev)
3706 + if (ret)
3707 + return ret;
3708 +
3709 +- sp->freq_transition.notifier_call = sci_notifier;
3710 +-
3711 +- ret = cpufreq_register_notifier(&sp->freq_transition,
3712 +- CPUFREQ_TRANSITION_NOTIFIER);
3713 +- if (unlikely(ret < 0)) {
3714 +- uart_remove_one_port(&sci_uart_driver, &sp->port);
3715 +- sci_cleanup_single(sp);
3716 +- return ret;
3717 +- }
3718 +-
3719 + #ifdef CONFIG_SH_STANDARD_BIOS
3720 + sh_bios_gdb_detach();
3721 + #endif
3722 +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
3723 +index 9eb1cff28bd4..b8b580e5ae6e 100644
3724 +--- a/drivers/usb/core/hcd-pci.c
3725 ++++ b/drivers/usb/core/hcd-pci.c
3726 +@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
3727 + if (companion->bus != pdev->bus ||
3728 + PCI_SLOT(companion->devfn) != slot)
3729 + continue;
3730 ++
3731 ++ /*
3732 ++ * Companion device should be either a UHCI, OHCI or EHCI host
3733 ++ * controller, otherwise skip.
3734 ++ */
3735 ++ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
3736 ++ companion->class != CL_EHCI)
3737 ++ continue;
3738 ++
3739 + companion_hcd = pci_get_drvdata(companion);
3740 + if (!companion_hcd || !companion_hcd->self.root_hub)
3741 + continue;
3742 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3743 +index cf43e9e18368..79d895c2dd71 100644
3744 +--- a/drivers/usb/gadget/function/f_fs.c
3745 ++++ b/drivers/usb/gadget/function/f_fs.c
3746 +@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
3747 + work);
3748 + int ret = io_data->req->status ? io_data->req->status :
3749 + io_data->req->actual;
3750 ++ bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
3751 +
3752 + if (io_data->read && ret > 0) {
3753 + use_mm(io_data->mm);
3754 +@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
3755 +
3756 + io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
3757 +
3758 +- if (io_data->ffs->ffs_eventfd &&
3759 +- !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
3760 ++ if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
3761 + eventfd_signal(io_data->ffs->ffs_eventfd, 1);
3762 +
3763 + usb_ep_free_request(io_data->ep, io_data->req);
3764 +
3765 +- io_data->kiocb->private = NULL;
3766 + if (io_data->read)
3767 + kfree(io_data->to_free);
3768 + kfree(io_data->buf);
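The f_fs hunk is a use-after-free fix: ki_complete() may free the kiocb (the aio path does), so any field needed afterwards must be copied out first, and the stale write to io_data->kiocb->private after completion is dropped for the same reason. The ordering the hunk establishes, sketched in kernel context (ffs_eventfd stands in for io_data->ffs->ffs_eventfd):

	bool wants_eventfd = iocb->ki_flags & IOCB_EVENTFD;	/* copy first */

	iocb->ki_complete(iocb, ret, ret);	/* iocb may be freed here */

	if (ffs_eventfd && !wants_eventfd)	/* no iocb dereference */
		eventfd_signal(ffs_eventfd, 1);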
3769 +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3770 +index c48cbe731356..d8dbd7e5194b 100644
3771 +--- a/drivers/usb/host/xhci-mem.c
3772 ++++ b/drivers/usb/host/xhci-mem.c
3773 +@@ -1875,6 +1875,12 @@ no_bw:
3774 + kfree(xhci->rh_bw);
3775 + kfree(xhci->ext_caps);
3776 +
3777 ++ xhci->usb2_ports = NULL;
3778 ++ xhci->usb3_ports = NULL;
3779 ++ xhci->port_array = NULL;
3780 ++ xhci->rh_bw = NULL;
3781 ++ xhci->ext_caps = NULL;
3782 ++
3783 + xhci->page_size = 0;
3784 + xhci->page_shift = 0;
3785 + xhci->bus_state[0].bus_suspended = 0;
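xhci_mem_cleanup() can run more than once, for example when initialization fails and the controller is torn down again, and kfree() leaves the pointers dangling, so a second pass would double-free them. Nulling the pointers makes the cleanup idempotent; the idiom in miniature:

	kfree(xhci->rh_bw);
	xhci->rh_bw = NULL;	/* kfree(NULL) is a harmless no-op on any later pass */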
3786 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3787 +index c2d65206ec6c..ea4fb4b0cd44 100644
3788 +--- a/drivers/usb/host/xhci-pci.c
3789 ++++ b/drivers/usb/host/xhci-pci.c
3790 +@@ -48,6 +48,7 @@
3791 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
3792 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
3793 + #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
3794 ++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
3795 +
3796 + static const char hcd_name[] = "xhci_hcd";
3797 +
3798 +@@ -156,7 +157,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3799 + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
3800 + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
3801 + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
3802 +- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
3803 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
3804 ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
3805 + xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3806 + }
3807 + if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3808 +@@ -299,6 +301,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
3809 + struct xhci_hcd *xhci;
3810 +
3811 + xhci = hcd_to_xhci(pci_get_drvdata(dev));
3812 ++ xhci->xhc_state |= XHCI_STATE_REMOVING;
3813 + if (xhci->shared_hcd) {
3814 + usb_remove_hcd(xhci->shared_hcd);
3815 + usb_put_hcd(xhci->shared_hcd);
3816 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3817 +index db0f0831b94f..2b63969c2bbf 100644
3818 +--- a/drivers/usb/host/xhci-ring.c
3819 ++++ b/drivers/usb/host/xhci-ring.c
3820 +@@ -4008,7 +4008,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3821 + int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3822 + int ret;
3823 +
3824 +- if (xhci->xhc_state) {
3825 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3826 ++ (xhci->xhc_state & XHCI_STATE_HALTED)) {
3827 + xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
3828 + return -ESHUTDOWN;
3829 + }
3830 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3831 +index 776d59c32bc5..ec9e758d5fcd 100644
3832 +--- a/drivers/usb/host/xhci.c
3833 ++++ b/drivers/usb/host/xhci.c
3834 +@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
3835 + "waited %u microseconds.\n",
3836 + XHCI_MAX_HALT_USEC);
3837 + if (!ret)
3838 +- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
3839 ++ /* Clear state flags, including dying, halted and removing. */
3840 ++ xhci->xhc_state = 0;
3841 +
3842 + return ret;
3843 + }
3844 +@@ -1103,8 +1104,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3845 + /* Resume root hubs only when have pending events. */
3846 + status = readl(&xhci->op_regs->status);
3847 + if (status & STS_EINT) {
3848 +- usb_hcd_resume_root_hub(hcd);
3849 + usb_hcd_resume_root_hub(xhci->shared_hcd);
3850 ++ usb_hcd_resume_root_hub(hcd);
3851 + }
3852 + }
3853 +
3854 +@@ -1119,10 +1120,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3855 +
3856 + /* Re-enable port polling. */
3857 + xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
3858 +- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
3859 +- usb_hcd_poll_rh_status(hcd);
3860 + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
3861 + usb_hcd_poll_rh_status(xhci->shared_hcd);
3862 ++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
3863 ++ usb_hcd_poll_rh_status(hcd);
3864 +
3865 + return retval;
3866 + }
3867 +@@ -2753,7 +2754,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3868 + if (ret <= 0)
3869 + return ret;
3870 + xhci = hcd_to_xhci(hcd);
3871 +- if (xhci->xhc_state & XHCI_STATE_DYING)
3872 ++ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3873 ++ (xhci->xhc_state & XHCI_STATE_REMOVING))
3874 + return -ENODEV;
3875 +
3876 + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3877 +@@ -3800,7 +3802,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3878 +
3879 + mutex_lock(&xhci->mutex);
3880 +
3881 +- if (xhci->xhc_state) /* dying or halted */
3882 ++ if (xhci->xhc_state) /* dying, removing or halted */
3883 + goto out;
3884 +
3885 + if (!udev->slot_id) {
3886 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3887 +index 0b9451250e33..99ac2289dbf3 100644
3888 +--- a/drivers/usb/host/xhci.h
3889 ++++ b/drivers/usb/host/xhci.h
3890 +@@ -1596,6 +1596,7 @@ struct xhci_hcd {
3891 + */
3892 + #define XHCI_STATE_DYING (1 << 0)
3893 + #define XHCI_STATE_HALTED (1 << 1)
3894 ++#define XHCI_STATE_REMOVING (1 << 2)
3895 + /* Statistics */
3896 + int error_bitmask;
3897 + unsigned int quirks;
3898 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
3899 +index facaaf003f19..e40da7759a0e 100644
3900 +--- a/drivers/usb/usbip/usbip_common.c
3901 ++++ b/drivers/usb/usbip/usbip_common.c
3902 +@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
3903 + if (!(size > 0))
3904 + return 0;
3905 +
3906 ++ if (size > urb->transfer_buffer_length) {
3907 ++ /* should not happen, probably malicious packet */
3908 ++ if (ud->side == USBIP_STUB) {
3909 ++ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
3910 ++ return 0;
3911 ++ } else {
3912 ++ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
3913 ++ return -EPIPE;
3914 ++ }
3915 ++ }
3916 ++
3917 + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
3918 + if (ret != size) {
3919 + dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
3920 +diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
3921 +index e6d16d65e4e6..f07a0974fda2 100644
3922 +--- a/drivers/video/fbdev/Kconfig
3923 ++++ b/drivers/video/fbdev/Kconfig
3924 +@@ -2249,7 +2249,6 @@ config XEN_FBDEV_FRONTEND
3925 + select FB_SYS_IMAGEBLIT
3926 + select FB_SYS_FOPS
3927 + select FB_DEFERRED_IO
3928 +- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
3929 + select XEN_XENBUS_FRONTEND
3930 + default y
3931 + help
3932 +diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
3933 +index 0081725c6b5b..d00510029c93 100644
3934 +--- a/drivers/video/fbdev/da8xx-fb.c
3935 ++++ b/drivers/video/fbdev/da8xx-fb.c
3936 +@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
3937 + .lower_margin = 2,
3938 + .hsync_len = 0,
3939 + .vsync_len = 0,
3940 +- .sync = FB_SYNC_CLK_INVERT |
3941 +- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3942 ++ .sync = FB_SYNC_CLK_INVERT,
3943 + },
3944 + /* Sharp LK043T1DG01 */
3945 + [1] = {
3946 +@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
3947 + .lower_margin = 2,
3948 + .hsync_len = 41,
3949 + .vsync_len = 10,
3950 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3951 ++ .sync = 0,
3952 + .flag = 0,
3953 + },
3954 + [2] = {
3955 +@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
3956 + .lower_margin = 10,
3957 + .hsync_len = 10,
3958 + .vsync_len = 10,
3959 +- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3960 ++ .sync = 0,
3961 + .flag = 0,
3962 + },
3963 + [3] = {
3964 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
3965 +index b7fcc0de0b2f..0f5d05bf2131 100644
3966 +--- a/fs/debugfs/inode.c
3967 ++++ b/fs/debugfs/inode.c
3968 +@@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
3969 + if (unlikely(!inode))
3970 + return failed_creating(dentry);
3971 +
3972 +- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
3973 ++ make_empty_dir_inode(inode);
3974 + inode->i_flags |= S_AUTOMOUNT;
3975 + inode->i_private = data;
3976 + dentry->d_fsdata = (void *)f;
3977 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3978 +index d4156e1c128d..b7e921d207fb 100644
3979 +--- a/fs/ext4/ext4.h
3980 ++++ b/fs/ext4/ext4.h
3981 +@@ -933,6 +933,15 @@ struct ext4_inode_info {
3982 + * by other means, so we have i_data_sem.
3983 + */
3984 + struct rw_semaphore i_data_sem;
3985 ++ /*
3986 ++ * i_mmap_sem is for serializing page faults with truncate / punch hole
3987 ++ * operations. We have to make sure that a new page cannot be faulted in
3988 ++ * a section of the inode that is being punched. We cannot easily use
3989 ++ * i_data_sem for this since we need protection for the whole punch
3990 ++ * operation and i_data_sem ranks below transaction start so we have
3991 ++ * to occasionally drop it.
3992 ++ */
3993 ++ struct rw_semaphore i_mmap_sem;
3994 + struct inode vfs_inode;
3995 + struct jbd2_inode *jinode;
3996 +
3997 +@@ -2507,6 +2516,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
3998 + extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3999 + loff_t lstart, loff_t lend);
4000 + extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
4001 ++extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
4002 + extern qsize_t *ext4_get_reserved_space(struct inode *inode);
4003 + extern void ext4_da_update_reserve_space(struct inode *inode,
4004 + int used, int quota_claim);
4005 +@@ -2871,6 +2881,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
4006 + return changed;
4007 + }
4008 +
4009 ++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4010 ++ loff_t len);
4011 ++
4012 + struct ext4_group_info {
4013 + unsigned long bb_state;
4014 + struct rb_root bb_free_root;
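The new i_mmap_sem threads through all of the ext4 hunks that follow; the resulting lock ordering, reconstructed informally from this series:

	/*
	 * truncate / punch hole / zero range / collapse / insert range:
	 *   i_mutex
	 *     -> i_mmap_sem (write)	blocks page faults on the range
	 *       -> journal handle	transaction start
	 *         -> i_data_sem	extent tree updates
	 *
	 * page fault paths (ext4_filemap_fault, ext4_page_mkwrite, DAX):
	 *   i_mmap_sem (read) -> journal handle -> i_data_sem
	 */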
4015 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
4016 +index 551353b1b17a..3578b25fccfd 100644
4017 +--- a/fs/ext4/extents.c
4018 ++++ b/fs/ext4/extents.c
4019 +@@ -4685,10 +4685,6 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4020 + if (len <= EXT_UNWRITTEN_MAX_LEN)
4021 + flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4022 +
4023 +- /* Wait all existing dio workers, newcomers will block on i_mutex */
4024 +- ext4_inode_block_unlocked_dio(inode);
4025 +- inode_dio_wait(inode);
4026 +-
4027 + /*
4028 + * credits to insert 1 extent into extent tree
4029 + */
4030 +@@ -4752,8 +4748,6 @@ retry:
4031 + goto retry;
4032 + }
4033 +
4034 +- ext4_inode_resume_unlocked_dio(inode);
4035 +-
4036 + return ret > 0 ? ret2 : ret;
4037 + }
4038 +
4039 +@@ -4770,7 +4764,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4040 + int partial_begin, partial_end;
4041 + loff_t start, end;
4042 + ext4_lblk_t lblk;
4043 +- struct address_space *mapping = inode->i_mapping;
4044 + unsigned int blkbits = inode->i_blkbits;
4045 +
4046 + trace_ext4_zero_range(inode, offset, len, mode);
4047 +@@ -4786,17 +4779,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4048 + }
4049 +
4050 + /*
4051 +- * Write out all dirty pages to avoid race conditions
4052 +- * Then release them.
4053 +- */
4054 +- if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4055 +- ret = filemap_write_and_wait_range(mapping, offset,
4056 +- offset + len - 1);
4057 +- if (ret)
4058 +- return ret;
4059 +- }
4060 +-
4061 +- /*
4062 + * Round up offset. This is not fallocate, we need to zero out
4063 + * blocks, so convert interior block aligned part of the range to
4064 + * unwritten and possibly manually zero out unaligned parts of the
4065 +@@ -4839,6 +4821,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4066 + if (mode & FALLOC_FL_KEEP_SIZE)
4067 + flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4068 +
4069 ++ /* Wait all existing dio workers, newcomers will block on i_mutex */
4070 ++ ext4_inode_block_unlocked_dio(inode);
4071 ++ inode_dio_wait(inode);
4072 ++
4073 + /* Preallocate the range including the unaligned edges */
4074 + if (partial_begin || partial_end) {
4075 + ret = ext4_alloc_file_blocks(file,
4076 +@@ -4847,7 +4833,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4077 + round_down(offset, 1 << blkbits)) >> blkbits,
4078 + new_size, flags, mode);
4079 + if (ret)
4080 +- goto out_mutex;
4081 ++ goto out_dio;
4082 +
4083 + }
4084 +
4085 +@@ -4856,16 +4842,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4086 + flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4087 + EXT4_EX_NOCACHE);
4088 +
4089 +- /* Now release the pages and zero block aligned part of pages*/
4090 ++ /*
4091 ++ * Prevent page faults from reinstantiating pages we have
4092 ++ * released from page cache.
4093 ++ */
4094 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4095 ++ ret = ext4_update_disksize_before_punch(inode, offset, len);
4096 ++ if (ret) {
4097 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4098 ++ goto out_dio;
4099 ++ }
4100 ++ /* Now release the pages and zero block aligned part of pages */
4101 + truncate_pagecache_range(inode, start, end - 1);
4102 + inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4103 +
4104 +- /* Wait all existing dio workers, newcomers will block on i_mutex */
4105 +- ext4_inode_block_unlocked_dio(inode);
4106 +- inode_dio_wait(inode);
4107 +-
4108 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4109 + flags, mode);
4110 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4111 + if (ret)
4112 + goto out_dio;
4113 + }
4114 +@@ -4998,8 +4991,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4115 + goto out;
4116 + }
4117 +
4118 ++ /* Wait all existing dio workers, newcomers will block on i_mutex */
4119 ++ ext4_inode_block_unlocked_dio(inode);
4120 ++ inode_dio_wait(inode);
4121 ++
4122 + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4123 + flags, mode);
4124 ++ ext4_inode_resume_unlocked_dio(inode);
4125 + if (ret)
4126 + goto out;
4127 +
4128 +@@ -5494,21 +5492,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4129 + return ret;
4130 + }
4131 +
4132 +- /*
4133 +- * Need to round down offset to be aligned with page size boundary
4134 +- * for page size > block size.
4135 +- */
4136 +- ioffset = round_down(offset, PAGE_SIZE);
4137 +-
4138 +- /* Write out all dirty pages */
4139 +- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4140 +- LLONG_MAX);
4141 +- if (ret)
4142 +- return ret;
4143 +-
4144 +- /* Take mutex lock */
4145 + mutex_lock(&inode->i_mutex);
4146 +-
4147 + /*
4148 + * There is no need to overlap collapse range with EOF, in which case
4149 + * it is effectively a truncate operation
4150 +@@ -5524,17 +5508,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4151 + goto out_mutex;
4152 + }
4153 +
4154 +- truncate_pagecache(inode, ioffset);
4155 +-
4156 + /* Wait for existing dio to complete */
4157 + ext4_inode_block_unlocked_dio(inode);
4158 + inode_dio_wait(inode);
4159 +
4160 ++ /*
4161 ++ * Prevent page faults from reinstantiating pages we have released from
4162 ++ * page cache.
4163 ++ */
4164 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4165 ++ /*
4166 ++ * Need to round down offset to be aligned with page size boundary
4167 ++ * for page size > block size.
4168 ++ */
4169 ++ ioffset = round_down(offset, PAGE_SIZE);
4170 ++ /*
4171 ++ * Write tail of the last page before removed range since it will get
4172 ++ * removed from the page cache below.
4173 ++ */
4174 ++ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
4175 ++ if (ret)
4176 ++ goto out_mmap;
4177 ++ /*
4178 ++ * Write out the data that will be shifted, to preserve it when discarding
4179 ++ * page cache below. We are also protected from pages becoming dirty
4180 ++ * by i_mmap_sem.
4181 ++ */
4182 ++ ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
4183 ++ LLONG_MAX);
4184 ++ if (ret)
4185 ++ goto out_mmap;
4186 ++ truncate_pagecache(inode, ioffset);
4187 ++
4188 + credits = ext4_writepage_trans_blocks(inode);
4189 + handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4190 + if (IS_ERR(handle)) {
4191 + ret = PTR_ERR(handle);
4192 +- goto out_dio;
4193 ++ goto out_mmap;
4194 + }
4195 +
4196 + down_write(&EXT4_I(inode)->i_data_sem);
4197 +@@ -5573,7 +5583,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4198 +
4199 + out_stop:
4200 + ext4_journal_stop(handle);
4201 +-out_dio:
4202 ++out_mmap:
4203 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4204 + ext4_inode_resume_unlocked_dio(inode);
4205 + out_mutex:
4206 + mutex_unlock(&inode->i_mutex);
4207 +@@ -5627,21 +5638,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4208 + return ret;
4209 + }
4210 +
4211 +- /*
4212 +- * Need to round down to align start offset to page size boundary
4213 +- * for page size > block size.
4214 +- */
4215 +- ioffset = round_down(offset, PAGE_SIZE);
4216 +-
4217 +- /* Write out all dirty pages */
4218 +- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4219 +- LLONG_MAX);
4220 +- if (ret)
4221 +- return ret;
4222 +-
4223 +- /* Take mutex lock */
4224 + mutex_lock(&inode->i_mutex);
4225 +-
4226 + /* Currently just for extent based files */
4227 + if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4228 + ret = -EOPNOTSUPP;
4229 +@@ -5660,17 +5657,32 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4230 + goto out_mutex;
4231 + }
4232 +
4233 +- truncate_pagecache(inode, ioffset);
4234 +-
4235 + /* Wait for existing dio to complete */
4236 + ext4_inode_block_unlocked_dio(inode);
4237 + inode_dio_wait(inode);
4238 +
4239 ++ /*
4240 ++ * Prevent page faults from reinstantiating pages we have released from
4241 ++ * page cache.
4242 ++ */
4243 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4244 ++ /*
4245 ++ * Need to round down to align start offset to page size boundary
4246 ++ * for page size > block size.
4247 ++ */
4248 ++ ioffset = round_down(offset, PAGE_SIZE);
4249 ++ /* Write out all dirty pages */
4250 ++ ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4251 ++ LLONG_MAX);
4252 ++ if (ret)
4253 ++ goto out_mmap;
4254 ++ truncate_pagecache(inode, ioffset);
4255 ++
4256 + credits = ext4_writepage_trans_blocks(inode);
4257 + handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4258 + if (IS_ERR(handle)) {
4259 + ret = PTR_ERR(handle);
4260 +- goto out_dio;
4261 ++ goto out_mmap;
4262 + }
4263 +
4264 + /* Expand file to avoid data loss if there is error while shifting */
4265 +@@ -5741,7 +5753,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4266 +
4267 + out_stop:
4268 + ext4_journal_stop(handle);
4269 +-out_dio:
4270 ++out_mmap:
4271 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4272 + ext4_inode_resume_unlocked_dio(inode);
4273 + out_mutex:
4274 + mutex_unlock(&inode->i_mutex);
4275 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
4276 +index 113837e7ba98..0d24ebcd7c9e 100644
4277 +--- a/fs/ext4/file.c
4278 ++++ b/fs/ext4/file.c
4279 +@@ -209,15 +209,18 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4280 + {
4281 + int result;
4282 + handle_t *handle = NULL;
4283 +- struct super_block *sb = file_inode(vma->vm_file)->i_sb;
4284 ++ struct inode *inode = file_inode(vma->vm_file);
4285 ++ struct super_block *sb = inode->i_sb;
4286 + bool write = vmf->flags & FAULT_FLAG_WRITE;
4287 +
4288 + if (write) {
4289 + sb_start_pagefault(sb);
4290 + file_update_time(vma->vm_file);
4291 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4292 + handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
4293 + EXT4_DATA_TRANS_BLOCKS(sb));
4294 +- }
4295 ++ } else
4296 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4297 +
4298 + if (IS_ERR(handle))
4299 + result = VM_FAULT_SIGBUS;
4300 +@@ -228,8 +231,10 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4301 + if (write) {
4302 + if (!IS_ERR(handle))
4303 + ext4_journal_stop(handle);
4304 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4305 + sb_end_pagefault(sb);
4306 +- }
4307 ++ } else
4308 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4309 +
4310 + return result;
4311 + }
4312 +@@ -246,10 +251,12 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
4313 + if (write) {
4314 + sb_start_pagefault(sb);
4315 + file_update_time(vma->vm_file);
4316 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4317 + handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
4318 + ext4_chunk_trans_blocks(inode,
4319 + PMD_SIZE / PAGE_SIZE));
4320 +- }
4321 ++ } else
4322 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4323 +
4324 + if (IS_ERR(handle))
4325 + result = VM_FAULT_SIGBUS;
4326 +@@ -260,30 +267,71 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
4327 + if (write) {
4328 + if (!IS_ERR(handle))
4329 + ext4_journal_stop(handle);
4330 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4331 + sb_end_pagefault(sb);
4332 +- }
4333 ++ } else
4334 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4335 +
4336 + return result;
4337 + }
4338 +
4339 + static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4340 + {
4341 +- return dax_mkwrite(vma, vmf, ext4_get_block_dax,
4342 +- ext4_end_io_unwritten);
4343 ++ int err;
4344 ++ struct inode *inode = file_inode(vma->vm_file);
4345 ++
4346 ++ sb_start_pagefault(inode->i_sb);
4347 ++ file_update_time(vma->vm_file);
4348 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4349 ++ err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
4350 ++ ext4_end_io_unwritten);
4351 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4352 ++ sb_end_pagefault(inode->i_sb);
4353 ++
4354 ++ return err;
4355 ++}
4356 ++
4357 ++/*
4358 ++ * Handle write fault for VM_MIXEDMAP mappings. Similarly to the
4359 ++ * ext4_dax_mkwrite() handler, we check for races against truncate. Note that
4360 ++ * since we cycle through i_mmap_sem, we are sure that any hole punching that
4361 ++ * began before we were called is finished by now, so if it included part of
4362 ++ * the file we are working on, our pte will get unmapped and the check for
4363 ++ * pte_same() in wp_pfn_shared() fails. Thus the fault gets retried and things
4364 ++ * work out as desired.
4365 ++ */
4366 ++static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
4367 ++ struct vm_fault *vmf)
4368 ++{
4369 ++ struct inode *inode = file_inode(vma->vm_file);
4370 ++ struct super_block *sb = inode->i_sb;
4371 ++ int ret = VM_FAULT_NOPAGE;
4372 ++ loff_t size;
4373 ++
4374 ++ sb_start_pagefault(sb);
4375 ++ file_update_time(vma->vm_file);
4376 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4377 ++ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
4378 ++ if (vmf->pgoff >= size)
4379 ++ ret = VM_FAULT_SIGBUS;
4380 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4381 ++ sb_end_pagefault(sb);
4382 ++
4383 ++ return ret;
4384 + }
4385 +
4386 + static const struct vm_operations_struct ext4_dax_vm_ops = {
4387 + .fault = ext4_dax_fault,
4388 + .pmd_fault = ext4_dax_pmd_fault,
4389 + .page_mkwrite = ext4_dax_mkwrite,
4390 +- .pfn_mkwrite = dax_pfn_mkwrite,
4391 ++ .pfn_mkwrite = ext4_dax_pfn_mkwrite,
4392 + };
4393 + #else
4394 + #define ext4_dax_vm_ops ext4_file_vm_ops
4395 + #endif
4396 +
4397 + static const struct vm_operations_struct ext4_file_vm_ops = {
4398 +- .fault = filemap_fault,
4399 ++ .fault = ext4_filemap_fault,
4400 + .map_pages = filemap_map_pages,
4401 + .page_mkwrite = ext4_page_mkwrite,
4402 + };
4403 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4404 +index 06bda0361e7c..e31d762eedce 100644
4405 +--- a/fs/ext4/inode.c
4406 ++++ b/fs/ext4/inode.c
4407 +@@ -3587,6 +3587,35 @@ int ext4_can_truncate(struct inode *inode)
4408 + }
4409 +
4410 + /*
4411 ++ * We have to make sure i_disksize gets properly updated before we truncate
4412 ++ * page cache due to hole punching or zero range. Otherwise i_disksize update
4413 ++ * can get lost as it may have been postponed to submission of writeback but
4414 ++ * that will never happen after we truncate page cache.
4415 ++ */
4416 ++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4417 ++ loff_t len)
4418 ++{
4419 ++ handle_t *handle;
4420 ++ loff_t size = i_size_read(inode);
4421 ++
4422 ++ WARN_ON(!mutex_is_locked(&inode->i_mutex));
4423 ++ if (offset > size || offset + len < size)
4424 ++ return 0;
4425 ++
4426 ++ if (EXT4_I(inode)->i_disksize >= size)
4427 ++ return 0;
4428 ++
4429 ++ handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4430 ++ if (IS_ERR(handle))
4431 ++ return PTR_ERR(handle);
4432 ++ ext4_update_i_disksize(inode, size);
4433 ++ ext4_mark_inode_dirty(handle, inode);
4434 ++ ext4_journal_stop(handle);
4435 ++
4436 ++ return 0;
4437 ++}
4438 ++
4439 ++/*
4440 + * ext4_punch_hole: punches a hole in a file by releasing the blocks
4441 + * associated with the given offset and length
4442 + *
4443 +@@ -3651,17 +3680,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4444 +
4445 + }
4446 +
4447 ++ /* Wait all existing dio workers, newcomers will block on i_mutex */
4448 ++ ext4_inode_block_unlocked_dio(inode);
4449 ++ inode_dio_wait(inode);
4450 ++
4451 ++ /*
4452 ++ * Prevent page faults from reinstantiating pages we have released from
4453 ++ * page cache.
4454 ++ */
4455 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4456 + first_block_offset = round_up(offset, sb->s_blocksize);
4457 + last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4458 +
4459 + /* Now release the pages and zero block aligned part of pages*/
4460 +- if (last_block_offset > first_block_offset)
4461 ++ if (last_block_offset > first_block_offset) {
4462 ++ ret = ext4_update_disksize_before_punch(inode, offset, length);
4463 ++ if (ret)
4464 ++ goto out_dio;
4465 + truncate_pagecache_range(inode, first_block_offset,
4466 + last_block_offset);
4467 +-
4468 +- /* Wait all existing dio workers, newcomers will block on i_mutex */
4469 +- ext4_inode_block_unlocked_dio(inode);
4470 +- inode_dio_wait(inode);
4471 ++ }
4472 +
4473 + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4474 + credits = ext4_writepage_trans_blocks(inode);
4475 +@@ -3708,16 +3746,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4476 + if (IS_SYNC(inode))
4477 + ext4_handle_sync(handle);
4478 +
4479 +- /* Now release the pages again to reduce race window */
4480 +- if (last_block_offset > first_block_offset)
4481 +- truncate_pagecache_range(inode, first_block_offset,
4482 +- last_block_offset);
4483 +-
4484 + inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4485 + ext4_mark_inode_dirty(handle, inode);
4486 + out_stop:
4487 + ext4_journal_stop(handle);
4488 + out_dio:
4489 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4490 + ext4_inode_resume_unlocked_dio(inode);
4491 + out_mutex:
4492 + mutex_unlock(&inode->i_mutex);
4493 +@@ -4851,6 +4885,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4494 + } else
4495 + ext4_wait_for_tail_page_commit(inode);
4496 + }
4497 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4498 + /*
4499 + * Truncate pagecache after we've waited for commit
4500 + * in data=journal mode to make pages freeable.
4501 +@@ -4858,6 +4893,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4502 + truncate_pagecache(inode, inode->i_size);
4503 + if (shrink)
4504 + ext4_truncate(inode);
4505 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4506 + }
4507 +
4508 + if (!rc) {
4509 +@@ -5109,6 +5145,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4510 + might_sleep();
4511 + trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4512 + err = ext4_reserve_inode_write(handle, inode, &iloc);
4513 ++ if (err)
4514 ++ return err;
4515 + if (ext4_handle_valid(handle) &&
4516 + EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4517 + !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4518 +@@ -5139,9 +5177,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4519 + }
4520 + }
4521 + }
4522 +- if (!err)
4523 +- err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4524 +- return err;
4525 ++ return ext4_mark_iloc_dirty(handle, inode, &iloc);
4526 + }
4527 +
4528 + /*
4529 +@@ -5306,6 +5342,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4530 +
4531 + sb_start_pagefault(inode->i_sb);
4532 + file_update_time(vma->vm_file);
4533 ++
4534 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4535 + /* Delalloc case is easy... */
4536 + if (test_opt(inode->i_sb, DELALLOC) &&
4537 + !ext4_should_journal_data(inode) &&
4538 +@@ -5375,6 +5413,19 @@ retry_alloc:
4539 + out_ret:
4540 + ret = block_page_mkwrite_return(ret);
4541 + out:
4542 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4543 + sb_end_pagefault(inode->i_sb);
4544 + return ret;
4545 + }
4546 ++
4547 ++int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4548 ++{
4549 ++ struct inode *inode = file_inode(vma->vm_file);
4550 ++ int err;
4551 ++
4552 ++ down_read(&EXT4_I(inode)->i_mmap_sem);
4553 ++ err = filemap_fault(vma, vmf);
4554 ++ up_read(&EXT4_I(inode)->i_mmap_sem);
4555 ++
4556 ++ return err;
4557 ++}
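
Putting the hole-punch pieces together, the resulting order of operations is worth spelling out; this is a simplified summary of ext4_punch_hole() after this patch, with error paths omitted:

	mutex_lock(&inode->i_mutex);             /* serialize against writers */
	ext4_inode_block_unlocked_dio(inode);    /* and against unlocked DIO  */
	inode_dio_wait(inode);
	down_write(&EXT4_I(inode)->i_mmap_sem);  /* block page faults         */
	ext4_update_disksize_before_punch(inode, offset, length);
	truncate_pagecache_range(inode, first_block_offset, last_block_offset);
	/* ... punch blocks under a journal handle, then unlock in reverse ... */
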
4558 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4559 +index ba1cf0bf2f81..852c26806af2 100644
4560 +--- a/fs/ext4/super.c
4561 ++++ b/fs/ext4/super.c
4562 +@@ -958,6 +958,7 @@ static void init_once(void *foo)
4563 + INIT_LIST_HEAD(&ei->i_orphan);
4564 + init_rwsem(&ei->xattr_sem);
4565 + init_rwsem(&ei->i_data_sem);
4566 ++ init_rwsem(&ei->i_mmap_sem);
4567 + inode_init_once(&ei->vfs_inode);
4568 + }
4569 +
4570 +diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
4571 +index 011ba6670d99..c70d06a383e2 100644
4572 +--- a/fs/ext4/truncate.h
4573 ++++ b/fs/ext4/truncate.h
4574 +@@ -10,8 +10,10 @@
4575 + */
4576 + static inline void ext4_truncate_failed_write(struct inode *inode)
4577 + {
4578 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
4579 + truncate_inode_pages(inode->i_mapping, inode->i_size);
4580 + ext4_truncate(inode);
4581 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
4582 + }
4583 +
4584 + /*
4585 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4586 +index 09cd3edde08a..f6478301db00 100644
4587 +--- a/fs/proc/task_mmu.c
4588 ++++ b/fs/proc/task_mmu.c
4589 +@@ -1435,6 +1435,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
4590 + return page;
4591 + }
4592 +
4593 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4594 ++static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
4595 ++ struct vm_area_struct *vma,
4596 ++ unsigned long addr)
4597 ++{
4598 ++ struct page *page;
4599 ++ int nid;
4600 ++
4601 ++ if (!pmd_present(pmd))
4602 ++ return NULL;
4603 ++
4604 ++ page = vm_normal_page_pmd(vma, addr, pmd);
4605 ++ if (!page)
4606 ++ return NULL;
4607 ++
4608 ++ if (PageReserved(page))
4609 ++ return NULL;
4610 ++
4611 ++ nid = page_to_nid(page);
4612 ++ if (!node_isset(nid, node_states[N_MEMORY]))
4613 ++ return NULL;
4614 ++
4615 ++ return page;
4616 ++}
4617 ++#endif
4618 ++
4619 + static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4620 + unsigned long end, struct mm_walk *walk)
4621 + {
4622 +@@ -1444,13 +1470,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4623 + pte_t *orig_pte;
4624 + pte_t *pte;
4625 +
4626 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4627 + if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4628 +- pte_t huge_pte = *(pte_t *)pmd;
4629 + struct page *page;
4630 +
4631 +- page = can_gather_numa_stats(huge_pte, vma, addr);
4632 ++ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
4633 + if (page)
4634 +- gather_stats(page, md, pte_dirty(huge_pte),
4635 ++ gather_stats(page, md, pmd_dirty(*pmd),
4636 + HPAGE_PMD_SIZE/PAGE_SIZE);
4637 + spin_unlock(ptl);
4638 + return 0;
4639 +@@ -1458,6 +1484,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4640 +
4641 + if (pmd_trans_unstable(pmd))
4642 + return 0;
4643 ++#endif
4644 + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
4645 + do {
4646 + struct page *page = can_gather_numa_stats(*pte, vma, addr);
4647 +diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
4648 +index e56272c919b5..bf2d34c9d804 100644
4649 +--- a/include/asm-generic/futex.h
4650 ++++ b/include/asm-generic/futex.h
4651 +@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
4652 + u32 val;
4653 +
4654 + preempt_disable();
4655 +- if (unlikely(get_user(val, uaddr) != 0))
4656 ++ if (unlikely(get_user(val, uaddr) != 0)) {
4657 ++ preempt_enable();
4658 + return -EFAULT;
4659 ++ }
4660 +
4661 +- if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
4662 ++ if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
4663 ++ preempt_enable();
4664 + return -EFAULT;
4665 ++ }
4666 +
4667 + *uval = val;
4668 + preempt_enable();
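
The bug here is the classic unbalanced critical section: the early returns skipped the preempt_enable() that pairs with the preempt_disable() above them. An equivalent structure that avoids duplicating the enable on every error path is a single exit label; a hypothetical sketch, not the code chosen upstream:

static int cmpxchg_user_sketch(u32 *uval, u32 __user *uaddr,
			       u32 oldval, u32 newval)
{
	u32 val;
	int ret = -EFAULT;

	preempt_disable();
	if (unlikely(get_user(val, uaddr) != 0))
		goto out;	/* single exit keeps the preempt count balanced */
	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
		goto out;
	*uval = val;
	ret = 0;
out:
	preempt_enable();
	return ret;
}
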
4669 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
4670 +index 461a0558bca4..cebecff536a3 100644
4671 +--- a/include/drm/drm_cache.h
4672 ++++ b/include/drm/drm_cache.h
4673 +@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
4674 + {
4675 + #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
4676 + return false;
4677 ++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
4678 ++ return false;
4679 + #else
4680 + return true;
4681 + #endif
4682 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
4683 +index a7c7f74808a4..8da263299754 100644
4684 +--- a/include/linux/cgroup-defs.h
4685 ++++ b/include/linux/cgroup-defs.h
4686 +@@ -434,6 +434,7 @@ struct cgroup_subsys {
4687 + int (*can_attach)(struct cgroup_taskset *tset);
4688 + void (*cancel_attach)(struct cgroup_taskset *tset);
4689 + void (*attach)(struct cgroup_taskset *tset);
4690 ++ void (*post_attach)(void);
4691 + int (*can_fork)(struct task_struct *task, void **priv_p);
4692 + void (*cancel_fork)(struct task_struct *task, void *priv);
4693 + void (*fork)(struct task_struct *task, void *priv);
4694 +diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
4695 +index fea160ee5803..85a868ccb493 100644
4696 +--- a/include/linux/cpuset.h
4697 ++++ b/include/linux/cpuset.h
4698 +@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
4699 + task_unlock(current);
4700 + }
4701 +
4702 +-extern void cpuset_post_attach_flush(void);
4703 +-
4704 + #else /* !CONFIG_CPUSETS */
4705 +
4706 + static inline bool cpusets_enabled(void) { return false; }
4707 +@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
4708 + return false;
4709 + }
4710 +
4711 +-static inline void cpuset_post_attach_flush(void)
4712 +-{
4713 +-}
4714 +-
4715 + #endif /* !CONFIG_CPUSETS */
4716 +
4717 + #endif /* _LINUX_CPUSET_H */
4718 +diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
4719 +index 0b473cbfa7ef..a91b67b18a73 100644
4720 +--- a/include/linux/mlx5/device.h
4721 ++++ b/include/linux/mlx5/device.h
4722 +@@ -334,6 +334,17 @@ enum {
4723 + MLX5_CAP_OFF_CMDIF_CSUM = 46,
4724 + };
4725 +
4726 ++enum {
4727 ++ /*
4728 ++ * Max wqe size for rdma read is 512 bytes, so this
4729 ++ * limits our max_sge_rd as the wqe needs to fit:
4730 ++ * - ctrl segment (16 bytes)
4731 ++ * - rdma segment (16 bytes)
4732 ++ * - scatter elements (16 bytes each)
4733 ++ */
4734 ++ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
4735 ++};
4736 ++
4737 + struct mlx5_inbox_hdr {
4738 + __be16 opcode;
4739 + u8 rsvd[4];
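
As a quick check of the arithmetic in the comment: a 512-byte WQE minus the 16-byte control segment and the 16-byte RDMA segment leaves 480 bytes, and 480 / 16 = 30, so MLX5_MAX_SGE_RD evaluates to 30 scatter entries per RDMA read.
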
4740 +diff --git a/include/linux/mm.h b/include/linux/mm.h
4741 +index 00bad7793788..fb8b20e5d021 100644
4742 +--- a/include/linux/mm.h
4743 ++++ b/include/linux/mm.h
4744 +@@ -1084,6 +1084,8 @@ struct zap_details {
4745 +
4746 + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
4747 + pte_t pte);
4748 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
4749 ++ pmd_t pmd);
4750 +
4751 + int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
4752 + unsigned long size);
4753 +diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
4754 +index 647ebfe5174f..d4227a8a2a23 100644
4755 +--- a/include/media/videobuf2-core.h
4756 ++++ b/include/media/videobuf2-core.h
4757 +@@ -363,6 +363,7 @@ struct vb2_ops {
4758 + };
4759 +
4760 + struct vb2_buf_ops {
4761 ++ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
4762 + int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
4763 + int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
4764 + struct vb2_plane *planes);
4765 +diff --git a/include/rdma/ib.h b/include/rdma/ib.h
4766 +index cf8f9e700e48..a6b93706b0fc 100644
4767 +--- a/include/rdma/ib.h
4768 ++++ b/include/rdma/ib.h
4769 +@@ -34,6 +34,7 @@
4770 + #define _RDMA_IB_H
4771 +
4772 + #include <linux/types.h>
4773 ++#include <linux/sched.h>
4774 +
4775 + struct ib_addr {
4776 + union {
4777 +@@ -86,4 +87,19 @@ struct sockaddr_ib {
4778 + __u64 sib_scope_id;
4779 + };
4780 +
4781 ++/*
4782 ++ * The IB interfaces that use write() as bi-directional ioctl() are
4783 ++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
4784 ++ * calls from various contexts with elevated privileges. That includes the
4785 ++ * traditional suid executable error message writes, but also various kernel
4786 ++ * interfaces that can write to file descriptors.
4787 ++ *
4788 ++ * This function provides protection for the legacy API by restricting the
4789 ++ * calling context.
4790 ++ */
4791 ++static inline bool ib_safe_file_access(struct file *filp)
4792 ++{
4793 ++ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
4794 ++}
4795 ++
4796 + #endif /* _RDMA_IB_H */
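
Call sites in the legacy IB drivers would gate their write() handlers on this check. A minimal hypothetical example of how such a handler might use it (the handler name and the -EACCES choice are illustrative, not taken from this patch):

static ssize_t legacy_dev_write(struct file *filp, const char __user *buf,
				size_t count, loff_t *pos)
{
	/*
	 * Refuse writes coming from a context whose credentials or
	 * address limit differ from the opener's, e.g. a splice from
	 * a more privileged context.
	 */
	if (!ib_safe_file_access(filp))
		return -EACCES;

	/* ... parse the command from buf as before ... */
	return count;
}
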
4797 +diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
4798 +index c039f1d68a09..086168e18ca8 100644
4799 +--- a/include/uapi/linux/v4l2-dv-timings.h
4800 ++++ b/include/uapi/linux/v4l2-dv-timings.h
4801 +@@ -183,7 +183,8 @@
4802 +
4803 + #define V4L2_DV_BT_CEA_3840X2160P24 { \
4804 + .type = V4L2_DV_BT_656_1120, \
4805 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4806 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4807 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4808 + 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
4809 + V4L2_DV_BT_STD_CEA861, \
4810 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4811 +@@ -191,14 +192,16 @@
4812 +
4813 + #define V4L2_DV_BT_CEA_3840X2160P25 { \
4814 + .type = V4L2_DV_BT_656_1120, \
4815 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4816 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4817 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4818 + 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
4819 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4820 + }
4821 +
4822 + #define V4L2_DV_BT_CEA_3840X2160P30 { \
4823 + .type = V4L2_DV_BT_656_1120, \
4824 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4825 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4826 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4827 + 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
4828 + V4L2_DV_BT_STD_CEA861, \
4829 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4830 +@@ -206,14 +209,16 @@
4831 +
4832 + #define V4L2_DV_BT_CEA_3840X2160P50 { \
4833 + .type = V4L2_DV_BT_656_1120, \
4834 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4835 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4836 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4837 + 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
4838 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4839 + }
4840 +
4841 + #define V4L2_DV_BT_CEA_3840X2160P60 { \
4842 + .type = V4L2_DV_BT_656_1120, \
4843 +- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4844 ++ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4845 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4846 + 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
4847 + V4L2_DV_BT_STD_CEA861, \
4848 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4849 +@@ -221,7 +226,8 @@
4850 +
4851 + #define V4L2_DV_BT_CEA_4096X2160P24 { \
4852 + .type = V4L2_DV_BT_656_1120, \
4853 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4854 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4855 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4856 + 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
4857 + V4L2_DV_BT_STD_CEA861, \
4858 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4859 +@@ -229,14 +235,16 @@
4860 +
4861 + #define V4L2_DV_BT_CEA_4096X2160P25 { \
4862 + .type = V4L2_DV_BT_656_1120, \
4863 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4864 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4865 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4866 + 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
4867 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4868 + }
4869 +
4870 + #define V4L2_DV_BT_CEA_4096X2160P30 { \
4871 + .type = V4L2_DV_BT_656_1120, \
4872 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4873 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4874 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4875 + 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
4876 + V4L2_DV_BT_STD_CEA861, \
4877 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4878 +@@ -244,14 +252,16 @@
4879 +
4880 + #define V4L2_DV_BT_CEA_4096X2160P50 { \
4881 + .type = V4L2_DV_BT_656_1120, \
4882 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4883 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4884 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4885 + 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
4886 + V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4887 + }
4888 +
4889 + #define V4L2_DV_BT_CEA_4096X2160P60 { \
4890 + .type = V4L2_DV_BT_656_1120, \
4891 +- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4892 ++ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4893 ++ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4894 + 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
4895 + V4L2_DV_BT_STD_CEA861, \
4896 + V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4897 +diff --git a/kernel/cgroup.c b/kernel/cgroup.c
4898 +index dc94f8beb097..1c9d701f7a72 100644
4899 +--- a/kernel/cgroup.c
4900 ++++ b/kernel/cgroup.c
4901 +@@ -2721,9 +2721,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
4902 + size_t nbytes, loff_t off, bool threadgroup)
4903 + {
4904 + struct task_struct *tsk;
4905 ++ struct cgroup_subsys *ss;
4906 + struct cgroup *cgrp;
4907 + pid_t pid;
4908 +- int ret;
4909 ++ int ssid, ret;
4910 +
4911 + if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
4912 + return -EINVAL;
4913 +@@ -2771,8 +2772,10 @@ out_unlock_rcu:
4914 + rcu_read_unlock();
4915 + out_unlock_threadgroup:
4916 + percpu_up_write(&cgroup_threadgroup_rwsem);
4917 ++ for_each_subsys(ss, ssid)
4918 ++ if (ss->post_attach)
4919 ++ ss->post_attach();
4920 + cgroup_kn_unlock(of->kn);
4921 +- cpuset_post_attach_flush();
4922 + return ret ?: nbytes;
4923 + }
4924 +
4925 +@@ -4689,14 +4692,15 @@ static void css_free_work_fn(struct work_struct *work)
4926 +
4927 + if (ss) {
4928 + /* css free path */
4929 ++ struct cgroup_subsys_state *parent = css->parent;
4930 + int id = css->id;
4931 +
4932 +- if (css->parent)
4933 +- css_put(css->parent);
4934 +-
4935 + ss->css_free(css);
4936 + cgroup_idr_remove(&ss->css_idr, id);
4937 + cgroup_put(cgrp);
4938 ++
4939 ++ if (parent)
4940 ++ css_put(parent);
4941 + } else {
4942 + /* cgroup free path */
4943 + atomic_dec(&cgrp->root->nr_cgrps);
4944 +diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4945 +index 2ade632197d5..11eaf14b52c2 100644
4946 +--- a/kernel/cpuset.c
4947 ++++ b/kernel/cpuset.c
4948 +@@ -57,7 +57,6 @@
4949 + #include <asm/uaccess.h>
4950 + #include <linux/atomic.h>
4951 + #include <linux/mutex.h>
4952 +-#include <linux/workqueue.h>
4953 + #include <linux/cgroup.h>
4954 + #include <linux/wait.h>
4955 +
4956 +@@ -1015,7 +1014,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
4957 + }
4958 + }
4959 +
4960 +-void cpuset_post_attach_flush(void)
4961 ++static void cpuset_post_attach(void)
4962 + {
4963 + flush_workqueue(cpuset_migrate_mm_wq);
4964 + }
4965 +@@ -2083,6 +2082,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
4966 + .can_attach = cpuset_can_attach,
4967 + .cancel_attach = cpuset_cancel_attach,
4968 + .attach = cpuset_attach,
4969 ++ .post_attach = cpuset_post_attach,
4970 + .bind = cpuset_bind,
4971 + .legacy_cftypes = files,
4972 + .early_init = 1,
4973 +diff --git a/kernel/futex.c b/kernel/futex.c
4974 +index 461c72b2dac2..9d8163afd87c 100644
4975 +--- a/kernel/futex.c
4976 ++++ b/kernel/futex.c
4977 +@@ -1244,10 +1244,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
4978 + if (unlikely(should_fail_futex(true)))
4979 + ret = -EFAULT;
4980 +
4981 +- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
4982 ++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
4983 + ret = -EFAULT;
4984 +- else if (curval != uval)
4985 +- ret = -EINVAL;
4986 ++ } else if (curval != uval) {
4987 ++ /*
4988 ++ * If an unconditional UNLOCK_PI operation (user space did not
4989 ++ * try the TID->0 transition) raced with a waiter setting the
4990 ++ * FUTEX_WAITERS flag between get_user() and taking the hash
4991 ++ * bucket lock, retry the operation.
4992 ++ */
4993 ++ if ((FUTEX_TID_MASK & curval) == uval)
4994 ++ ret = -EAGAIN;
4995 ++ else
4996 ++ ret = -EINVAL;
4997 ++ }
4998 + if (ret) {
4999 + raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
5000 + return ret;
5001 +@@ -1474,8 +1484,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
5002 + if (likely(&hb1->chain != &hb2->chain)) {
5003 + plist_del(&q->list, &hb1->chain);
5004 + hb_waiters_dec(hb1);
5005 +- plist_add(&q->list, &hb2->chain);
5006 + hb_waiters_inc(hb2);
5007 ++ plist_add(&q->list, &hb2->chain);
5008 + q->lock_ptr = &hb2->lock;
5009 + }
5010 + get_futex_key_refs(key2);
5011 +@@ -2538,6 +2548,15 @@ retry:
5012 + if (ret == -EFAULT)
5013 + goto pi_faulted;
5014 + /*
5015 ++ * An unconditional UNLOCK_PI op raced against a waiter
5016 ++ * setting the FUTEX_WAITERS bit. Try again.
5017 ++ */
5018 ++ if (ret == -EAGAIN) {
5019 ++ spin_unlock(&hb->lock);
5020 ++ put_futex_key(&key);
5021 ++ goto retry;
5022 ++ }
5023 ++ /*
5024 + * wake_futex_pi has detected invalid state. Tell user
5025 + * space.
5026 + */
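
The race being retried here is narrow; roughly, as an illustrative timeline rather than code from the patch:

   CPU#0 (unlocker)                        CPU#1 (new waiter)
   get_user(uval), uval == TID
                                           futex_lock_pi() sets FUTEX_WAITERS
   lock hash bucket
   cmpxchg: curval == (TID | FUTEX_WAITERS) != uval

Previously the mismatch was reported as -EINVAL even though the state is perfectly valid; now, when the TID bits of curval still match, only the WAITERS bit can have changed, so the unlock path drops the hash bucket lock and retries instead of failing.
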
5027 +diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
5028 +index 5b9102a47ea5..c835270f0c2f 100644
5029 +--- a/kernel/locking/mcs_spinlock.h
5030 ++++ b/kernel/locking/mcs_spinlock.h
5031 +@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
5032 + node->locked = 0;
5033 + node->next = NULL;
5034 +
5035 +- prev = xchg_acquire(lock, node);
5036 ++ /*
5037 ++ * We rely on the full barrier with global transitivity implied by the
5038 ++ * below xchg() to order the initialization stores above against any
5039 ++ * observation of @node, and to provide the ACQUIRE ordering associated
5040 ++ * with a LOCK primitive.
5041 ++ */
5042 ++ prev = xchg(lock, node);
5043 + if (likely(prev == NULL)) {
5044 + /*
5045 + * Lock acquired, don't need to set node->locked to 1. Threads
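
What distinguishes xchg() from xchg_acquire() here is the ordering of the preceding stores: ACQUIRE only constrains accesses that come after the operation, so it would let the publication of @node become visible before its initialization. Schematically, as a simplified illustration:

	node->locked = 0;
	node->next   = NULL;
	prev = xchg(lock, node);	/* full barrier: the two stores above
					 * are visible before @node can be
					 * observed through *lock */

With xchg_acquire(), a CPU that finds @node in *lock could still read stale ->next or ->locked values, since nothing would order the initializing stores before the pointer publication.
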
5046 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5047 +index 70e5e09341f1..55bebf924946 100644
5048 +--- a/kernel/sched/core.c
5049 ++++ b/kernel/sched/core.c
5050 +@@ -7693,7 +7693,7 @@ void set_curr_task(int cpu, struct task_struct *p)
5051 + /* task_group_lock serializes the addition/removal of task groups */
5052 + static DEFINE_SPINLOCK(task_group_lock);
5053 +
5054 +-static void free_sched_group(struct task_group *tg)
5055 ++static void sched_free_group(struct task_group *tg)
5056 + {
5057 + free_fair_sched_group(tg);
5058 + free_rt_sched_group(tg);
5059 +@@ -7719,7 +7719,7 @@ struct task_group *sched_create_group(struct task_group *parent)
5060 + return tg;
5061 +
5062 + err:
5063 +- free_sched_group(tg);
5064 ++ sched_free_group(tg);
5065 + return ERR_PTR(-ENOMEM);
5066 + }
5067 +
5068 +@@ -7739,17 +7739,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
5069 + }
5070 +
5071 + /* rcu callback to free various structures associated with a task group */
5072 +-static void free_sched_group_rcu(struct rcu_head *rhp)
5073 ++static void sched_free_group_rcu(struct rcu_head *rhp)
5074 + {
5075 + /* now it should be safe to free those cfs_rqs */
5076 +- free_sched_group(container_of(rhp, struct task_group, rcu));
5077 ++ sched_free_group(container_of(rhp, struct task_group, rcu));
5078 + }
5079 +
5080 +-/* Destroy runqueue etc associated with a task group */
5081 + void sched_destroy_group(struct task_group *tg)
5082 + {
5083 + /* wait for possible concurrent references to cfs_rqs complete */
5084 +- call_rcu(&tg->rcu, free_sched_group_rcu);
5085 ++ call_rcu(&tg->rcu, sched_free_group_rcu);
5086 + }
5087 +
5088 + void sched_offline_group(struct task_group *tg)
5089 +@@ -8210,31 +8209,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5090 + if (IS_ERR(tg))
5091 + return ERR_PTR(-ENOMEM);
5092 +
5093 ++ sched_online_group(tg, parent);
5094 ++
5095 + return &tg->css;
5096 + }
5097 +
5098 +-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
5099 ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
5100 + {
5101 + struct task_group *tg = css_tg(css);
5102 +- struct task_group *parent = css_tg(css->parent);
5103 +
5104 +- if (parent)
5105 +- sched_online_group(tg, parent);
5106 +- return 0;
5107 ++ sched_offline_group(tg);
5108 + }
5109 +
5110 + static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
5111 + {
5112 + struct task_group *tg = css_tg(css);
5113 +
5114 +- sched_destroy_group(tg);
5115 +-}
5116 +-
5117 +-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
5118 +-{
5119 +- struct task_group *tg = css_tg(css);
5120 +-
5121 +- sched_offline_group(tg);
5122 ++ /*
5123 ++ * Relies on the RCU grace period between css_released() and this.
5124 ++ */
5125 ++ sched_free_group(tg);
5126 + }
5127 +
5128 + static void cpu_cgroup_fork(struct task_struct *task, void *private)
5129 +@@ -8594,9 +8588,8 @@ static struct cftype cpu_files[] = {
5130 +
5131 + struct cgroup_subsys cpu_cgrp_subsys = {
5132 + .css_alloc = cpu_cgroup_css_alloc,
5133 ++ .css_released = cpu_cgroup_css_released,
5134 + .css_free = cpu_cgroup_css_free,
5135 +- .css_online = cpu_cgroup_css_online,
5136 +- .css_offline = cpu_cgroup_css_offline,
5137 + .fork = cpu_cgroup_fork,
5138 + .can_attach = cpu_cgroup_can_attach,
5139 + .attach = cpu_cgroup_attach,
5140 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5141 +index 450c21fd0e6e..0ec05948a97b 100644
5142 +--- a/kernel/workqueue.c
5143 ++++ b/kernel/workqueue.c
5144 +@@ -649,6 +649,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
5145 + */
5146 + smp_wmb();
5147 + set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
5148 ++ /*
5149 ++ * The following mb guarantees that the previous clear of a PENDING bit
5150 ++ * will not be reordered with any speculative LOADS or STORES from
5151 ++ * work->current_func, which is executed afterwards. This possible
5152 ++ * reordering can lead to a missed execution on an attempt to queue
5153 ++ * the same @work. E.g. consider this case:
5154 ++ *
5155 ++ * CPU#0 CPU#1
5156 ++ * ---------------------------- --------------------------------
5157 ++ *
5158 ++ * 1 STORE event_indicated
5159 ++ * 2 queue_work_on() {
5160 ++ * 3 test_and_set_bit(PENDING)
5161 ++ * 4 } set_..._and_clear_pending() {
5162 ++ * 5 set_work_data() # clear bit
5163 ++ * 6 smp_mb()
5164 ++ * 7 work->current_func() {
5165 ++ * 8 LOAD event_indicated
5166 ++ * }
5167 ++ *
5168 ++ * Without an explicit full barrier, the speculative LOAD on line 8 can
5169 ++ * be executed before CPU#0 does the STORE on line 1. If that happens,
5170 ++ * CPU#0 observes that the PENDING bit is still set, and a new execution
5171 ++ * of the @work is not queued, in the hope that CPU#1 will eventually
5172 ++ * finish the queued @work. Meanwhile CPU#1 does not see that
5173 ++ * event_indicated is set, because the speculative LOAD was executed
5174 ++ * before the actual STORE.
5175 ++ */
5176 ++ smp_mb();
5177 + }
5178 +
5179 + static void clear_work_data(struct work_struct *work)
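
The usage pattern the new barrier protects is the common "set a flag, then queue work; the work function checks the flag" idiom. A minimal sketch of that idiom, with illustrative names (event_indicated, handle_event are hypothetical):

static int event_indicated;		/* illustrative flag */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* Must observe the store made before queue_work() was attempted */
	if (READ_ONCE(event_indicated))
		handle_event();		/* hypothetical helper */
}

static void producer(void)
{
	WRITE_ONCE(event_indicated, 1);
	/*
	 * If PENDING was still set, queue_work() returns false and we
	 * rely on the already-running instance seeing event_indicated,
	 * which is exactly what the smp_mb() above makes safe.
	 */
	queue_work(system_wq, &my_work);
}
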
5180 +diff --git a/lib/assoc_array.c b/lib/assoc_array.c
5181 +index 03dd576e6773..59fd7c0b119c 100644
5182 +--- a/lib/assoc_array.c
5183 ++++ b/lib/assoc_array.c
5184 +@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
5185 + free_slot = i;
5186 + continue;
5187 + }
5188 +- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
5189 ++ if (assoc_array_ptr_is_leaf(ptr) &&
5190 ++ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
5191 ++ index_key)) {
5192 + pr_devel("replace in slot %d\n", i);
5193 + edit->leaf_p = &node->slots[i];
5194 + edit->dead_leaf = node->slots[i];
5195 +diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
5196 +index abcecdc2d0f2..0710a62ad2f6 100644
5197 +--- a/lib/lz4/lz4defs.h
5198 ++++ b/lib/lz4/lz4defs.h
5199 +@@ -11,8 +11,7 @@
5200 + /*
5201 + * Detects 64 bits mode
5202 + */
5203 +-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
5204 +- || defined(__ppc64__) || defined(__LP64__))
5205 ++#if defined(CONFIG_64BIT)
5206 + #define LZ4_ARCH64 1
5207 + #else
5208 + #define LZ4_ARCH64 0
5209 +@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
5210 +
5211 + #define PUT4(s, d) (A32(d) = A32(s))
5212 + #define PUT8(s, d) (A64(d) = A64(s))
5213 ++
5214 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5215 ++ (d = s - A16(p))
5216 ++
5217 + #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5218 + do { \
5219 + A16(p) = v; \
5220 +@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
5221 + #define PUT8(s, d) \
5222 + put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
5223 +
5224 +-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5225 +- do { \
5226 +- put_unaligned(v, (u16 *)(p)); \
5227 +- p += 2; \
5228 ++#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5229 ++ (d = s - get_unaligned_le16(p))
5230 ++
5231 ++#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5232 ++ do { \
5233 ++ put_unaligned_le16(v, (u16 *)(p)); \
5234 ++ p += 2; \
5235 + } while (0)
5236 + #endif
5237 +
5238 +@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
5239 +
5240 + #endif
5241 +
5242 +-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5243 +- (d = s - get_unaligned_le16(p))
5244 +-
5245 + #define LZ4_WILDCOPY(s, d, e) \
5246 + do { \
5247 + LZ4_COPYPACKET(s, d); \
5248 +diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
5249 +index 3db76b8c1115..e00ff00e861c 100644
5250 +--- a/lib/mpi/mpicoder.c
5251 ++++ b/lib/mpi/mpicoder.c
5252 +@@ -128,6 +128,23 @@ leave:
5253 + }
5254 + EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
5255 +
5256 ++static int count_lzeros(MPI a)
5257 ++{
5258 ++ mpi_limb_t alimb;
5259 ++ int i, lzeros = 0;
5260 ++
5261 ++ for (i = a->nlimbs - 1; i >= 0; i--) {
5262 ++ alimb = a->d[i];
5263 ++ if (alimb == 0) {
5264 ++ lzeros += sizeof(mpi_limb_t);
5265 ++ } else {
5266 ++ lzeros += count_leading_zeros(alimb) / 8;
5267 ++ break;
5268 ++ }
5269 ++ }
5270 ++ return lzeros;
5271 ++}
5272 ++
5273 + /**
5274 + * mpi_read_buffer() - read MPI to a buffer provided by the user (msb first)
5275 + *
5276 +@@ -146,7 +163,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
5277 + uint8_t *p;
5278 + mpi_limb_t alimb;
5279 + unsigned int n = mpi_get_size(a);
5280 +- int i, lzeros = 0;
5281 ++ int i, lzeros;
5282 +
5283 + if (buf_len < n || !buf || !nbytes)
5284 + return -EINVAL;
5285 +@@ -154,14 +171,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
5286 + if (sign)
5287 + *sign = a->sign;
5288 +
5289 +- p = (void *)&a->d[a->nlimbs] - 1;
5290 +-
5291 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
5292 +- if (!*p)
5293 +- lzeros++;
5294 +- else
5295 +- break;
5296 +- }
5297 ++ lzeros = count_lzeros(a);
5298 +
5299 + p = buf;
5300 + *nbytes = n - lzeros;
5301 +@@ -343,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
5302 + u8 *p, *p2;
5303 + mpi_limb_t alimb, alimb2;
5304 + unsigned int n = mpi_get_size(a);
5305 +- int i, x, y = 0, lzeros = 0, buf_len;
5306 ++ int i, x, y = 0, lzeros, buf_len;
5307 +
5308 + if (!nbytes || *nbytes < n)
5309 + return -EINVAL;
5310 +@@ -351,14 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
5311 + if (sign)
5312 + *sign = a->sign;
5313 +
5314 +- p = (void *)&a->d[a->nlimbs] - 1;
5315 +-
5316 +- for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
5317 +- if (!*p)
5318 +- lzeros++;
5319 +- else
5320 +- break;
5321 +- }
5322 ++ lzeros = count_lzeros(a);
5323 +
5324 + *nbytes = n - lzeros;
5325 + buf_len = sgl->length;
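
A quick worked example of the new helper, assuming 64-bit limbs: for an MPI whose limbs from most to least significant are 0x0 and 0x1234, the zero limb contributes sizeof(mpi_limb_t) = 8 bytes, and 0x1234 contributes count_leading_zeros(0x1234) / 8 = 51 / 8 = 6 bytes, so count_lzeros() returns 14. Walking whole limbs also sidesteps the raw in-memory byte-order assumptions of the open-coded byte scans it replaces.
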
5326 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5327 +index 62fe06bb7d04..530e6427f823 100644
5328 +--- a/mm/huge_memory.c
5329 ++++ b/mm/huge_memory.c
5330 +@@ -2134,10 +2134,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
5331 + * page fault if needed.
5332 + */
5333 + return 0;
5334 +- if (vma->vm_ops)
5335 ++ if (vma->vm_ops || (vm_flags & VM_NO_THP))
5336 + /* khugepaged not yet working on file or special mappings */
5337 + return 0;
5338 +- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
5339 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
5340 + hend = vma->vm_end & HPAGE_PMD_MASK;
5341 + if (hstart < hend)
5342 +@@ -2498,8 +2497,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
5343 + return false;
5344 + if (is_vma_temporary_stack(vma))
5345 + return false;
5346 +- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
5347 +- return true;
5348 ++ return !(vma->vm_flags & VM_NO_THP);
5349 + }
5350 +
5351 + static void collapse_huge_page(struct mm_struct *mm,
5352 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5353 +index fc0bcc41d57f..6ba4dd988e2e 100644
5354 +--- a/mm/memcontrol.c
5355 ++++ b/mm/memcontrol.c
5356 +@@ -196,6 +196,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
5357 + /* "mc" and its members are protected by cgroup_mutex */
5358 + static struct move_charge_struct {
5359 + spinlock_t lock; /* for from, to */
5360 ++ struct mm_struct *mm;
5361 + struct mem_cgroup *from;
5362 + struct mem_cgroup *to;
5363 + unsigned long flags;
5364 +@@ -4800,6 +4801,8 @@ static void __mem_cgroup_clear_mc(void)
5365 +
5366 + static void mem_cgroup_clear_mc(void)
5367 + {
5368 ++ struct mm_struct *mm = mc.mm;
5369 ++
5370 + /*
5371 + * we must clear moving_task before waking up waiters at the end of
5372 + * task migration.
5373 +@@ -4809,7 +4812,10 @@ static void mem_cgroup_clear_mc(void)
5374 + spin_lock(&mc.lock);
5375 + mc.from = NULL;
5376 + mc.to = NULL;
5377 ++ mc.mm = NULL;
5378 + spin_unlock(&mc.lock);
5379 ++
5380 ++ mmput(mm);
5381 + }
5382 +
5383 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5384 +@@ -4866,6 +4872,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5385 + VM_BUG_ON(mc.moved_swap);
5386 +
5387 + spin_lock(&mc.lock);
5388 ++ mc.mm = mm;
5389 + mc.from = from;
5390 + mc.to = memcg;
5391 + mc.flags = move_flags;
5392 +@@ -4875,8 +4882,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5393 + ret = mem_cgroup_precharge_mc(mm);
5394 + if (ret)
5395 + mem_cgroup_clear_mc();
5396 ++ } else {
5397 ++ mmput(mm);
5398 + }
5399 +- mmput(mm);
5400 + return ret;
5401 + }
5402 +
5403 +@@ -4985,11 +4993,11 @@ put: /* get_mctgt_type() gets the page */
5404 + return ret;
5405 + }
5406 +
5407 +-static void mem_cgroup_move_charge(struct mm_struct *mm)
5408 ++static void mem_cgroup_move_charge(void)
5409 + {
5410 + struct mm_walk mem_cgroup_move_charge_walk = {
5411 + .pmd_entry = mem_cgroup_move_charge_pte_range,
5412 +- .mm = mm,
5413 ++ .mm = mc.mm,
5414 + };
5415 +
5416 + lru_add_drain_all();
5417 +@@ -5001,7 +5009,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
5418 + atomic_inc(&mc.from->moving_account);
5419 + synchronize_rcu();
5420 + retry:
5421 +- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5422 ++ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5423 + /*
5424 + * Someone who is holding the mmap_sem might be waiting in
5425 + * waitq. So we cancel all extra charges, wake up all waiters,
5426 +@@ -5018,23 +5026,16 @@ retry:
5427 + * additional charge, the page walk just aborts.
5428 + */
5429 + walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5430 +- up_read(&mm->mmap_sem);
5431 ++ up_read(&mc.mm->mmap_sem);
5432 + atomic_dec(&mc.from->moving_account);
5433 + }
5434 +
5435 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5436 ++static void mem_cgroup_move_task(void)
5437 + {
5438 +- struct cgroup_subsys_state *css;
5439 +- struct task_struct *p = cgroup_taskset_first(tset, &css);
5440 +- struct mm_struct *mm = get_task_mm(p);
5441 +-
5442 +- if (mm) {
5443 +- if (mc.to)
5444 +- mem_cgroup_move_charge(mm);
5445 +- mmput(mm);
5446 +- }
5447 +- if (mc.to)
5448 ++ if (mc.to) {
5449 ++ mem_cgroup_move_charge();
5450 + mem_cgroup_clear_mc();
5451 ++ }
5452 + }
5453 + #else /* !CONFIG_MMU */
5454 + static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5455 +@@ -5044,7 +5045,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5456 + static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5457 + {
5458 + }
5459 +-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5460 ++static void mem_cgroup_move_task(void)
5461 + {
5462 + }
5463 + #endif
5464 +@@ -5258,7 +5259,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
5465 + .css_reset = mem_cgroup_css_reset,
5466 + .can_attach = mem_cgroup_can_attach,
5467 + .cancel_attach = mem_cgroup_cancel_attach,
5468 +- .attach = mem_cgroup_move_task,
5469 ++ .post_attach = mem_cgroup_move_task,
5470 + .bind = mem_cgroup_bind,
5471 + .dfl_cftypes = memory_files,
5472 + .legacy_cftypes = mem_cgroup_legacy_files,
5473 +diff --git a/mm/memory.c b/mm/memory.c
5474 +index b80bf4746b67..76dcee317714 100644
5475 +--- a/mm/memory.c
5476 ++++ b/mm/memory.c
5477 +@@ -797,6 +797,46 @@ out:
5478 + return pfn_to_page(pfn);
5479 + }
5480 +
5481 ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5482 ++struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
5483 ++ pmd_t pmd)
5484 ++{
5485 ++ unsigned long pfn = pmd_pfn(pmd);
5486 ++
5487 ++ /*
5488 ++ * There is no pmd_special() but there may be special pmds, e.g.
5489 ++ * in a direct-access (dax) mapping, so let's just replicate the
5490 ++ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
5491 ++ */
5492 ++ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
5493 ++ if (vma->vm_flags & VM_MIXEDMAP) {
5494 ++ if (!pfn_valid(pfn))
5495 ++ return NULL;
5496 ++ goto out;
5497 ++ } else {
5498 ++ unsigned long off;
5499 ++ off = (addr - vma->vm_start) >> PAGE_SHIFT;
5500 ++ if (pfn == vma->vm_pgoff + off)
5501 ++ return NULL;
5502 ++ if (!is_cow_mapping(vma->vm_flags))
5503 ++ return NULL;
5504 ++ }
5505 ++ }
5506 ++
5507 ++ if (is_zero_pfn(pfn))
5508 ++ return NULL;
5509 ++ if (unlikely(pfn > highest_memmap_pfn))
5510 ++ return NULL;
5511 ++
5512 ++ /*
5513 ++ * NOTE! We still have PageReserved() pages in the page tables.
5514 ++ * eg. VDSO mappings can cause them to exist.
5515 ++ */
5516 ++out:
5517 ++ return pfn_to_page(pfn);
5518 ++}
5519 ++#endif
5520 ++
5521 + /*
5522 + * copy one vm_area from one task to the other. Assumes the page tables
5523 + * already present in the new task to be cleared in the whole range
5524 +diff --git a/mm/migrate.c b/mm/migrate.c
5525 +index 6d17e0ab42d4..bbeb0b71fcf4 100644
5526 +--- a/mm/migrate.c
5527 ++++ b/mm/migrate.c
5528 +@@ -963,7 +963,13 @@ out:
5529 + dec_zone_page_state(page, NR_ISOLATED_ANON +
5530 + page_is_file_cache(page));
5531 + /* Soft-offlined page shouldn't go through lru cache list */
5532 +- if (reason == MR_MEMORY_FAILURE) {
5533 ++ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
5534 ++ /*
5535 ++ * With this release, we free the successfully migrated
5536 ++ * page and intentionally set PG_HWPoison on the just-freed
5537 ++ * page. Although it's rather weird, it's how the
5538 ++ * HWPoison flag works at the moment.
5539 ++ */
5540 + put_page(page);
5541 + if (!test_set_page_hwpoison(page))
5542 + num_poisoned_pages_inc();
5543 +diff --git a/mm/slub.c b/mm/slub.c
5544 +index 46997517406e..65d5f92d51d2 100644
5545 +--- a/mm/slub.c
5546 ++++ b/mm/slub.c
5547 +@@ -2819,6 +2819,7 @@ struct detached_freelist {
5548 + void *tail;
5549 + void *freelist;
5550 + int cnt;
5551 ++ struct kmem_cache *s;
5552 + };
5553 +
5554 + /*
5555 +@@ -2833,8 +2834,9 @@ struct detached_freelist {
5556 + * synchronization primitive. Lookahead in the array is limited for
5557 + * performance reasons.
5558 + */
5559 +-static int build_detached_freelist(struct kmem_cache *s, size_t size,
5560 +- void **p, struct detached_freelist *df)
5561 ++static inline
5562 ++int build_detached_freelist(struct kmem_cache *s, size_t size,
5563 ++ void **p, struct detached_freelist *df)
5564 + {
5565 + size_t first_skipped_index = 0;
5566 + int lookahead = 3;
5567 +@@ -2850,8 +2852,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5568 + if (!object)
5569 + return 0;
5570 +
5571 ++ /* Support for memcg, compiler can optimize this out */
5572 ++ df->s = cache_from_obj(s, object);
5573 ++
5574 + /* Start new detached freelist */
5575 +- set_freepointer(s, object, NULL);
5576 ++ set_freepointer(df->s, object, NULL);
5577 + df->page = virt_to_head_page(object);
5578 + df->tail = object;
5579 + df->freelist = object;
5580 +@@ -2866,7 +2871,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5581 + /* df->page is always set at this point */
5582 + if (df->page == virt_to_head_page(object)) {
5583 + /* Opportunity build freelist */
5584 +- set_freepointer(s, object, df->freelist);
5585 ++ set_freepointer(df->s, object, df->freelist);
5586 + df->freelist = object;
5587 + df->cnt++;
5588 + p[size] = NULL; /* mark object processed */
5589 +@@ -2885,25 +2890,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5590 + return first_skipped_index;
5591 + }
5592 +
5593 +-
5594 + /* Note that interrupts must be enabled when calling this function. */
5595 +-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
5596 ++void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
5597 + {
5598 + if (WARN_ON(!size))
5599 + return;
5600 +
5601 + do {
5602 + struct detached_freelist df;
5603 +- struct kmem_cache *s;
5604 +-
5605 +- /* Support for memcg */
5606 +- s = cache_from_obj(orig_s, p[size - 1]);
5607 +
5608 + size = build_detached_freelist(s, size, p, &df);
5609 + if (unlikely(!df.page))
5610 + continue;
5611 +
5612 +- slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
5613 ++ slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
5614 + } while (likely(size));
5615 + }
5616 + EXPORT_SYMBOL(kmem_cache_free_bulk);
5617 +diff --git a/mm/vmscan.c b/mm/vmscan.c
5618 +index 2aec4241b42a..0c114e2b01d3 100644
5619 +--- a/mm/vmscan.c
5620 ++++ b/mm/vmscan.c
5621 +@@ -2534,7 +2534,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
5622 + sc->gfp_mask |= __GFP_HIGHMEM;
5623 +
5624 + for_each_zone_zonelist_nodemask(zone, z, zonelist,
5625 +- requested_highidx, sc->nodemask) {
5626 ++ gfp_zone(sc->gfp_mask), sc->nodemask) {
5627 + enum zone_type classzone_idx;
5628 +
5629 + if (!populated_zone(zone))
5630 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5631 +index 59651af8cc27..992b35fb8615 100644
5632 +--- a/net/netlink/af_netlink.c
5633 ++++ b/net/netlink/af_netlink.c
5634 +@@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
5635 +
5636 + skb_queue_purge(&sk->sk_write_queue);
5637 +
5638 +- if (nlk->portid) {
5639 ++ if (nlk->portid && nlk->bound) {
5640 + struct netlink_notify n = {
5641 + .net = sock_net(sk),
5642 + .protocol = sk->sk_protocol,
5643 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
5644 +index 21e20353178e..63fb5ee212cf 100644
5645 +--- a/net/sunrpc/cache.c
5646 ++++ b/net/sunrpc/cache.c
5647 +@@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
5648 + }
5649 +
5650 + crq->q.reader = 0;
5651 +- crq->item = cache_get(h);
5652 + crq->buf = buf;
5653 + crq->len = 0;
5654 + crq->readers = 0;
5655 + spin_lock(&queue_lock);
5656 +- if (test_bit(CACHE_PENDING, &h->flags))
5657 ++ if (test_bit(CACHE_PENDING, &h->flags)) {
5658 ++ crq->item = cache_get(h);
5659 + list_add_tail(&crq->q.list, &detail->queue);
5660 +- else
5661 ++ } else
5662 + /* Lost a race, no longer PENDING, so don't enqueue */
5663 + ret = -EAGAIN;
5664 + spin_unlock(&queue_lock);
5665 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5666 +index 75b0d23ee882..5d89f13a98db 100644
5667 +--- a/net/wireless/nl80211.c
5668 ++++ b/net/wireless/nl80211.c
5669 +@@ -13161,7 +13161,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
5670 + struct wireless_dev *wdev;
5671 + struct cfg80211_beacon_registration *reg, *tmp;
5672 +
5673 +- if (state != NETLINK_URELEASE)
5674 ++ if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
5675 + return NOTIFY_DONE;
5676 +
5677 + rcu_read_lock();
5678 +diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
5679 +index 0b7dc2fd7bac..dd243d2abd87 100644
5680 +--- a/scripts/kconfig/confdata.c
5681 ++++ b/scripts/kconfig/confdata.c
5682 +@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
5683 + if (in)
5684 + goto load;
5685 + sym_add_change_count(1);
5686 +- if (!sym_defconfig_list) {
5687 +- sym_calc_value(modules_sym);
5688 ++ if (!sym_defconfig_list)
5689 + return 1;
5690 +- }
5691 +
5692 + for_all_defaults(sym_defconfig_list, prop) {
5693 + if (expr_calc_value(prop->visible.expr) == no ||
5694 +@@ -403,7 +401,6 @@ setsym:
5695 + }
5696 + free(line);
5697 + fclose(in);
5698 +- sym_calc_value(modules_sym);
5699 + return 0;
5700 + }
5701 +
5702 +@@ -414,8 +411,12 @@ int conf_read(const char *name)
5703 +
5704 + sym_set_change_count(0);
5705 +
5706 +- if (conf_read_simple(name, S_DEF_USER))
5707 ++ if (conf_read_simple(name, S_DEF_USER)) {
5708 ++ sym_calc_value(modules_sym);
5709 + return 1;
5710 ++ }
5711 ++
5712 ++ sym_calc_value(modules_sym);
5713 +
5714 + for_all_symbols(i, sym) {
5715 + sym_calc_value(sym);
5716 +@@ -846,6 +847,7 @@ static int conf_split_config(void)
5717 +
5718 + name = conf_get_autoconfig_name();
5719 + conf_read_simple(name, S_DEF_AUTO);
5720 ++ sym_calc_value(modules_sym);
5721 +
5722 + if (chdir("include/config"))
5723 + return 1;
5724 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5725 +index 5c4fa8eba1d0..367dbf0d285e 100644
5726 +--- a/sound/pci/hda/hda_generic.c
5727 ++++ b/sound/pci/hda/hda_generic.c
5728 +@@ -843,7 +843,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
5729 + bool allow_powerdown)
5730 + {
5731 + hda_nid_t nid, changed = 0;
5732 +- int i, state;
5733 ++ int i, state, power;
5734 +
5735 + for (i = 0; i < path->depth; i++) {
5736 + nid = path->path[i];
5737 +@@ -855,7 +855,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
5738 + state = AC_PWRST_D0;
5739 + else
5740 + state = AC_PWRST_D3;
5741 +- if (!snd_hda_check_power_state(codec, nid, state)) {
5742 ++ power = snd_hda_codec_read(codec, nid, 0,
5743 ++ AC_VERB_GET_POWER_STATE, 0);
5744 ++ if (power != (state | (state << 4))) {
5745 + snd_hda_codec_write(codec, nid, 0,
5746 + AC_VERB_SET_POWER_STATE, state);
5747 + changed = nid;
5748 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5749 +index 2ff692dd2c5f..411630e9c034 100644
5750 +--- a/sound/pci/hda/hda_intel.c
5751 ++++ b/sound/pci/hda/hda_intel.c
5752 +@@ -2207,6 +2207,9 @@ static const struct pci_device_id azx_ids[] = {
5753 + /* Broxton-P(Apollolake) */
5754 + { PCI_DEVICE(0x8086, 0x5a98),
5755 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5756 ++ /* Broxton-T */
5757 ++ { PCI_DEVICE(0x8086, 0x1a98),
5758 ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5759 + /* Haswell */
5760 + { PCI_DEVICE(0x8086, 0x0a0c),
5761 + .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
5762 +diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
5763 +index a47e8ae0eb30..80bbadc83721 100644
5764 +--- a/sound/pci/hda/patch_cirrus.c
5765 ++++ b/sound/pci/hda/patch_cirrus.c
5766 +@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
5767 + {
5768 + struct cs_spec *spec = codec->spec;
5769 + int err;
5770 ++ int i;
5771 +
5772 + err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
5773 + if (err < 0)
5774 +@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
5775 + if (err < 0)
5776 + return err;
5777 +
5778 ++ /* keep the ADCs powered up when dynamic ADC switching is in use */
5779 ++ if (spec->gen.dyn_adc_switch) {
5780 ++ unsigned int done = 0;
5781 ++ for (i = 0; i < spec->gen.input_mux.num_items; i++) {
5782 ++ int idx = spec->gen.dyn_adc_idx[i];
5783 ++ if (done & (1 << idx))
5784 ++ continue;
5785 ++ snd_hda_gen_fix_pin_power(codec,
5786 ++ spec->gen.adc_nids[idx]);
5787 ++ done |= 1 << idx;
5788 ++ }
5789 ++ }
5790 ++
5791 + return 0;
5792 + }
5793 +
5794 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5795 +index 1402ba954b3d..ac4490a96863 100644
5796 +--- a/sound/pci/hda/patch_realtek.c
5797 ++++ b/sound/pci/hda/patch_realtek.c
5798 +@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5799 + SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5800 + SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5801 + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5802 ++ SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5803 + SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5804 + SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5805 + SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5806 +@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5807 + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5808 + SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5809 + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5810 ++ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5811 + SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5812 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5813 + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5814 +diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
5815 +index c5194f5b150a..d7e71f309299 100644
5816 +--- a/sound/pci/pcxhr/pcxhr_core.c
5817 ++++ b/sound/pci/pcxhr/pcxhr_core.c
5818 +@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
5819 + }
5820 +
5821 + pcxhr_msg_thread(mgr);
5822 ++ mutex_unlock(&mgr->lock);
5823 + return IRQ_HANDLED;
5824 + }
5825 +diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
5826 +index f2beb1aa5763..b1c8bb39cdf1 100644
5827 +--- a/sound/soc/codecs/rt5640.c
5828 ++++ b/sound/soc/codecs/rt5640.c
5829 +@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
5830 +
5831 + /* Interface data select */
5832 + static const char * const rt5640_data_select[] = {
5833 +- "Normal", "left copy to right", "right copy to left", "Swap"};
5834 ++ "Normal", "Swap", "left copy to right", "right copy to left"};
5835 +
5836 + static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
5837 + RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
5838 +diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
5839 +index 3deb8babeabb..243f42633989 100644
5840 +--- a/sound/soc/codecs/rt5640.h
5841 ++++ b/sound/soc/codecs/rt5640.h
5842 +@@ -442,39 +442,39 @@
5843 + #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
5844 + #define RT5640_IF1_DAC_SEL_SFT 14
5845 + #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
5846 +-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
5847 +-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
5848 +-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
5849 ++#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
5850 ++#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
5851 ++#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
5852 + #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
5853 + #define RT5640_IF1_ADC_SEL_SFT 12
5854 + #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
5855 +-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
5856 +-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
5857 +-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
5858 ++#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
5859 ++#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
5860 ++#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
5861 + #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
5862 + #define RT5640_IF2_DAC_SEL_SFT 10
5863 + #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
5864 +-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
5865 +-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
5866 +-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
5867 ++#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
5868 ++#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
5869 ++#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
5870 + #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
5871 + #define RT5640_IF2_ADC_SEL_SFT 8
5872 + #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
5873 +-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
5874 +-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
5875 +-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
5876 ++#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
5877 ++#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
5878 ++#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
5879 + #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
5880 + #define RT5640_IF3_DAC_SEL_SFT 6
5881 + #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
5882 +-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
5883 +-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
5884 +-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
5885 ++#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
5886 ++#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
5887 ++#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
5888 + #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
5889 + #define RT5640_IF3_ADC_SEL_SFT 4
5890 + #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
5891 +-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
5892 +-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
5893 +-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
5894 ++#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
5895 ++#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
5896 ++#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
5897 +
5898 + /* REC Left Mixer Control 1 (0x3b) */
5899 + #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
5900 +diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
5901 +index e619d5651b09..080c78e88e10 100644
5902 +--- a/sound/soc/codecs/ssm4567.c
5903 ++++ b/sound/soc/codecs/ssm4567.c
5904 +@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
5905 + regcache_cache_only(ssm4567->regmap, !enable);
5906 +
5907 + if (enable) {
5908 ++ ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
5909 ++ 0x00);
5910 ++ if (ret)
5911 ++ return ret;
5912 ++
5913 + ret = regmap_update_bits(ssm4567->regmap,
5914 + SSM4567_REG_POWER_CTRL,
5915 + SSM4567_POWER_SPWDN, 0x00);
5916 +diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
5917 +index df65c5b494b1..b6ab3fc5789e 100644
5918 +--- a/sound/soc/samsung/s3c-i2s-v2.c
5919 ++++ b/sound/soc/samsung/s3c-i2s-v2.c
5920 +@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
5921 + #endif
5922 +
5923 + int s3c_i2sv2_register_component(struct device *dev, int id,
5924 +- struct snd_soc_component_driver *cmp_drv,
5925 ++ const struct snd_soc_component_driver *cmp_drv,
5926 + struct snd_soc_dai_driver *dai_drv)
5927 + {
5928 + struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
5929 +diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
5930 +index 90abab364b49..d0684145ed1f 100644
5931 +--- a/sound/soc/samsung/s3c-i2s-v2.h
5932 ++++ b/sound/soc/samsung/s3c-i2s-v2.h
5933 +@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
5934 + * soc core.
5935 + */
5936 + extern int s3c_i2sv2_register_component(struct device *dev, int id,
5937 +- struct snd_soc_component_driver *cmp_drv,
5938 ++ const struct snd_soc_component_driver *cmp_drv,
5939 + struct snd_soc_dai_driver *dai_drv);
5940 +
5941 + #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
5942 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5943 +index 416514fe9e63..afb70a5d4fd3 100644
5944 +--- a/sound/soc/soc-dapm.c
5945 ++++ b/sound/soc/soc-dapm.c
5946 +@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
5947 + int count = 0;
5948 + char *state = "not set";
5949 +
5950 ++ /* The card won't be set for the dummy component; as a spot fix
5951 ++ * we're checking for that case specifically here, but in future
5952 ++ * we will ensure that the dummy component looks like the others.
5953 ++ */
5954 ++ if (!cmpnt->card)
5955 ++ return 0;
5956 ++
5957 + list_for_each_entry(w, &cmpnt->card->widgets, list) {
5958 + if (w->dapm != dapm)
5959 + continue;
5960 +diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
5961 +index 4e074a660826..90c3558c2c12 100644
5962 +--- a/tools/perf/Documentation/perf-stat.txt
5963 ++++ b/tools/perf/Documentation/perf-stat.txt
5964 +@@ -62,6 +62,14 @@ OPTIONS
5965 + --scale::
5966 + scale/normalize counter values
5967 +
5968 ++-d::
5969 ++--detailed::
5970 ++ print more detailed statistics, can be specified up to 3 times
5971 ++
5972 ++ -d: detailed events, L1 and LLC data cache
5973 ++ -d -d: more detailed events, dTLB and iTLB events
5974 ++ -d -d -d: very detailed events, adding prefetch events
5975 ++
5976 + -r::
5977 + --repeat=<n>::
5978 + repeat command and print average + stddev (max: 100). 0 means forever.
5979 +diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
5980 +index 81def6c3f24b..3900386a3629 100644
5981 +--- a/tools/perf/ui/browsers/hists.c
5982 ++++ b/tools/perf/ui/browsers/hists.c
5983 +@@ -2059,10 +2059,12 @@ skip_annotation:
5984 + *
5985 + * See hist_browser__show_entry.
5986 + */
5987 +- nr_options += add_script_opt(browser,
5988 +- &actions[nr_options],
5989 +- &options[nr_options],
5990 +- NULL, browser->selection->sym);
5991 ++ if (sort__has_sym && browser->selection->sym) {
5992 ++ nr_options += add_script_opt(browser,
5993 ++ &actions[nr_options],
5994 ++ &options[nr_options],
5995 ++ NULL, browser->selection->sym);
5996 ++ }
5997 + }
5998 + nr_options += add_script_opt(browser, &actions[nr_options],
5999 + &options[nr_options], NULL, NULL);
6000 +diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
6001 +index 8b10621b415c..956187bf1a85 100644
6002 +--- a/tools/perf/util/event.c
6003 ++++ b/tools/perf/util/event.c
6004 +@@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
6005 + strcpy(execname, "");
6006 +
6007 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
6008 +- n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
6009 ++ n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
6010 + &event->mmap2.start, &event->mmap2.len, prot,
6011 + &event->mmap2.pgoff, &event->mmap2.maj,
6012 + &event->mmap2.min,
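The event.c change matters because %s stops at the first whitespace, so an mmap'd file whose path contains spaces was truncated, while %[^\n] consumes the rest of the line. A small user-space sketch of the difference, using a made-up maps line:

#include <stdio.h>

int main(void)
{
	const char *line = "00400000-0040c000 r-xp /opt/My App/bin/cat";
	char trunc[256], full[256];

	/* %s stops at whitespace: trunc ends up as "/opt/My" */
	sscanf(line, "%*[^ ] %*s %255s", trunc);
	/* %[^\n] matches to end of line: the full path survives */
	sscanf(line, "%*[^ ] %*s %255[^\n]", full);

	printf("%%s      -> %s\n", trunc);
	printf("%%[^\\n] -> %s\n", full);
	return 0;
}
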
6013 +diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
6014 +index d1392194a9a9..b4b96120fc3b 100644
6015 +--- a/tools/perf/util/evlist.c
6016 ++++ b/tools/perf/util/evlist.c
6017 +@@ -1211,12 +1211,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
6018 + */
6019 + if (cpus != evlist->cpus) {
6020 + cpu_map__put(evlist->cpus);
6021 +- evlist->cpus = cpus;
6022 ++ evlist->cpus = cpu_map__get(cpus);
6023 + }
6024 +
6025 + if (threads != evlist->threads) {
6026 + thread_map__put(evlist->threads);
6027 +- evlist->threads = threads;
6028 ++ evlist->threads = thread_map__get(threads);
6029 + }
6030 +
6031 + perf_evlist__propagate_maps(evlist);
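The evlist fix changes perf_evlist__set_maps() from stealing the caller's reference to taking its own: the evlist now holds a counted reference on the maps it stores, and callers keep theirs. The underlying get-on-store rule, sketched with a hypothetical single-threaded refcounted object:

#include <stdlib.h>

/* Hypothetical refcounted object, for illustration only. */
struct map {
	int refcnt;
};

static struct map *map__get(struct map *m)
{
	if (m)
		m->refcnt++;
	return m;
}

static void map__put(struct map *m)
{
	if (m && --m->refcnt == 0)
		free(m);
}

struct holder {
	struct map *map;
};

/* When storing a shared pointer, drop the old reference and take a new
 * one on what is stored, rather than assuming ownership of the argument. */
static void holder__set_map(struct holder *h, struct map *m)
{
	if (h->map != m) {
		map__put(h->map);
		h->map = map__get(m);
	}
}
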
6032 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
6033 +index 97f963a3dcb9..9227c2f076c3 100644
6034 +--- a/tools/perf/util/intel-pt.c
6035 ++++ b/tools/perf/util/intel-pt.c
6036 +@@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
6037 + pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
6038 + ret);
6039 +
6040 +- if (pt->synth_opts.callchain)
6041 ++ if (pt->synth_opts.last_branch)
6042 + intel_pt_reset_last_branch_rb(ptq);
6043 +
6044 + return ret;
6045 +diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
6046 +index ea6064696fe4..a7b9022b5c8f 100644
6047 +--- a/virt/kvm/arm/arch_timer.c
6048 ++++ b/virt/kvm/arm/arch_timer.c
6049 +@@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
6050 + vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
6051 + vcpu->arch.timer_cpu.armed = false;
6052 +
6053 ++ WARN_ON(!kvm_timer_should_fire(vcpu));
6054 ++
6055 + /*
6056 + * If the vcpu is blocked we want to wake it up so that it will see
6057 + * the timer has expired when entering the guest.
6058 +@@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
6059 + kvm_vcpu_kick(vcpu);
6060 + }
6061 +
6062 ++static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
6063 ++{
6064 ++ cycle_t cval, now;
6065 ++
6066 ++ cval = vcpu->arch.timer_cpu.cntv_cval;
6067 ++ now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
6068 ++
6069 ++ if (now < cval) {
6070 ++ u64 ns;
6071 ++
6072 ++ ns = cyclecounter_cyc2ns(timecounter->cc,
6073 ++ cval - now,
6074 ++ timecounter->mask,
6075 ++ &timecounter->frac);
6076 ++ return ns;
6077 ++ }
6078 ++
6079 ++ return 0;
6080 ++}
6081 ++
6082 + static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
6083 + {
6084 + struct arch_timer_cpu *timer;
6085 ++ struct kvm_vcpu *vcpu;
6086 ++ u64 ns;
6087 ++
6088 + timer = container_of(hrt, struct arch_timer_cpu, timer);
6089 ++ vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
6090 ++
6091 ++ /*
6092 ++ * Check that the timer has really expired from the guest's
6093 ++ * PoV (NTP on the host may have forced it to expire
6094 ++ * early). If we should have slept longer, restart it.
6095 ++ */
6096 ++ ns = kvm_timer_compute_delta(vcpu);
6097 ++ if (unlikely(ns)) {
6098 ++ hrtimer_forward_now(hrt, ns_to_ktime(ns));
6099 ++ return HRTIMER_RESTART;
6100 ++ }
6101 ++
6102 + queue_work(wqueue, &timer->expired);
6103 + return HRTIMER_NORESTART;
6104 + }
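The rewritten kvm_timer_expire() above guards against the host hrtimer firing before the guest's virtual timer is actually due (the host clock is NTP-adjusted, the guest's is not): if time remains, the hrtimer is pushed forward and restarted instead of queuing the expiry work. The idiom, reduced to a sketch with a hypothetical time_left_ns() helper:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *hrt)
{
	u64 ns = time_left_ns();	/* hypothetical: returns 0 when due */

	if (ns) {
		/* Fired early: re-arm for the remaining time. */
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	/* Really expired: hand the event off and stop the timer. */
	return HRTIMER_NORESTART;
}
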
6105 +@@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
6106 + void kvm_timer_schedule(struct kvm_vcpu *vcpu)
6107 + {
6108 + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
6109 +- u64 ns;
6110 +- cycle_t cval, now;
6111 +
6112 + BUG_ON(timer_is_armed(timer));
6113 +
6114 +@@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
6115 + return;
6116 +
6117 + /* The timer has not yet expired, schedule a background timer */
6118 +- cval = timer->cntv_cval;
6119 +- now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
6120 +-
6121 +- ns = cyclecounter_cyc2ns(timecounter->cc,
6122 +- cval - now,
6123 +- timecounter->mask,
6124 +- &timecounter->frac);
6125 +- timer_arm(timer, ns);
6126 ++ timer_arm(timer, kvm_timer_compute_delta(vcpu));
6127 + }
6128 +
6129 + void kvm_timer_unschedule(struct kvm_vcpu *vcpu)