From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.3 commit in: /
Date: Tue, 29 Oct 2019 12:06:14 +0000
Message-Id: 1572350750.b5c397963982ec8b83950f7e9a2ed6c989fa8678.mpagano@gentoo
commit: b5c397963982ec8b83950f7e9a2ed6c989fa8678
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 29 12:05:50 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 29 12:05:50 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b5c39796

Linux patch 5.3.8

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1007_linux-5.3.8.patch | 7745 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7749 insertions(+)

diff --git a/0000_README b/0000_README
index e15ba25..bc9694a 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.3.7.patch
From: http://www.kernel.org
Desc: Linux 5.3.7

+Patch: 1007_linux-5.3.8.patch
+From: http://www.kernel.org
+Desc: Linux 5.3.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-5.3.8.patch b/1007_linux-5.3.8.patch
new file mode 100644
index 0000000..8323ef7
--- /dev/null
+++ b/1007_linux-5.3.8.patch
@@ -0,0 +1,7745 @@
+diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
+index 3e57d09246e6..6e52d334bc55 100644
+--- a/Documentation/arm64/silicon-errata.rst
++++ b/Documentation/arm64/silicon-errata.rst
+@@ -107,6 +107,8 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Cavium | ThunderX2 SMMUv3| #126 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
++| Cavium | ThunderX2 Core | #219 | CAVIUM_TX2_ERRATUM_219 |
+++----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
+ +----------------+-----------------+-----------------+-----------------------------+
+diff --git a/Makefile b/Makefile
+index 7a3e659c79ae..445f9488d8ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 3
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
+index 18f70b35da4c..204bccfcc110 100644
+--- a/arch/arm/boot/dts/am335x-icev2.dts
++++ b/arch/arm/boot/dts/am335x-icev2.dts
+@@ -432,7 +432,7 @@
+ pinctrl-0 = <&mmc0_pins_default>;
+ };
+
+-&gpio0 {
++&gpio0_target {
+ /* Do not idle the GPIO used for holding the VTT regulator */
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
+index 46849d6ecb3e..3287cf695b5a 100644
+--- a/arch/arm/boot/dts/am33xx-l4.dtsi
++++ b/arch/arm/boot/dts/am33xx-l4.dtsi
+@@ -127,7 +127,7 @@
+ ranges = <0x0 0x5000 0x1000>;
+ };
+
+- target-module@7000 { /* 0x44e07000, ap 14 20.0 */
++ gpio0_target: target-module@7000 { /* 0x44e07000, ap 14 20.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "gpio1";
+ reg = <0x7000 0x4>,
+@@ -2038,7 +2038,9 @@
+ reg = <0xe000 0x4>,
+ <0xe054 0x4>;
+ reg-names = "rev", "sysc";
+- ti,sysc-midle ;
++ ti,sysc-midle = <SYSC_IDLE_FORCE>,
++ <SYSC_IDLE_NO>,
++ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 848e2a8884e2..14bbc438055f 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -337,6 +337,8 @@
+ ti,hwmods = "dss_dispc";
+ clocks = <&disp_clk>;
+ clock-names = "fck";
++
++ max-memory-bandwidth = <230000000>;
+ };
+
+ rfbi: rfbi@4832a800 {
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index 21e5914fdd62..099d6fe2a57a 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -2762,7 +2762,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>;
+ dma-names = "tx", "rx";
+- clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 22>,
++ clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 0>,
+ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
+ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 28>;
+ clock-names = "fck", "ahclkx", "ahclkr";
+@@ -2799,8 +2799,8 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 22>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 24>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 0>,
++ <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 28>;
+ clock-names = "fck", "ahclkx", "ahclkr";
+ status = "disabled";
+@@ -2818,9 +2818,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x68000 0x2000>,
+@@ -2836,7 +2835,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2854,9 +2853,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x6c000 0x2000>,
+@@ -2872,7 +2870,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2890,9 +2888,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x70000 0x2000>,
+@@ -2908,7 +2905,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2926,9 +2923,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x74000 0x2000>,
+@@ -2944,7 +2940,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2962,9 +2958,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x78000 0x2000>,
+@@ -2980,7 +2975,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+@@ -2998,9 +2993,8 @@
+ <SYSC_IDLE_SMART>;
+ /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>,
+- <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 28>;
+- clock-names = "fck", "ahclkx", "ahclkr";
++ <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
++ clock-names = "fck", "ahclkx";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7c000 0x2000>,
+@@ -3016,7 +3010,7 @@
+ interrupt-names = "tx", "rx";
+ dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>;
+ dma-names = "tx", "rx";
+- clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 22>,
++ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
+ <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
+ clock-names = "fck", "ahclkx";
+ status = "disabled";
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+index adb6271f819b..7773876d165f 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+@@ -811,7 +811,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_RESET_STATUS,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+ SIDLE_SMART_WKUP),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+index c965af275e34..81d9912f17c8 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+@@ -231,8 +231,9 @@ static struct omap_hwmod am33xx_control_hwmod = {
+ static struct omap_hwmod_class_sysconfig lcdc_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x54,
+- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
+- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE,
++ .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
++ MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART,
+ .sysc_fields = &omap_hwmod_sysc_type2,
+ };
+
+diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
+index 1fde1bf53fb6..7ac9af56762d 100644
+--- a/arch/arm/mach-omap2/pm.c
++++ b/arch/arm/mach-omap2/pm.c
+@@ -74,83 +74,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
+ return 0;
+ }
+
+-/*
+- * This API is to be called during init to set the various voltage
+- * domains to the voltage as per the opp table. Typically we boot up
+- * at the nominal voltage. So this function finds out the rate of
+- * the clock associated with the voltage domain, finds out the correct
+- * opp entry and sets the voltage domain to the voltage specified
+- * in the opp entry
+- */
+-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
+- const char *oh_name)
+-{
+- struct voltagedomain *voltdm;
+- struct clk *clk;
+- struct dev_pm_opp *opp;
+- unsigned long freq, bootup_volt;
+- struct device *dev;
+-
+- if (!vdd_name || !clk_name || !oh_name) {
+- pr_err("%s: invalid parameters\n", __func__);
+- goto exit;
+- }
+-
+- if (!strncmp(oh_name, "mpu", 3))
+- /*
+- * All current OMAPs share voltage rail and clock
+- * source, so CPU0 is used to represent the MPU-SS.
+- */
+- dev = get_cpu_device(0);
+- else
+- dev = omap_device_get_by_hwmod_name(oh_name);
+-
+- if (IS_ERR(dev)) {
+- pr_err("%s: Unable to get dev pointer for hwmod %s\n",
+- __func__, oh_name);
+- goto exit;
+- }
+-
+- voltdm = voltdm_lookup(vdd_name);
+- if (!voltdm) {
+- pr_err("%s: unable to get vdd pointer for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- clk = clk_get(NULL, clk_name);
+- if (IS_ERR(clk)) {
+- pr_err("%s: unable to get clk %s\n", __func__, clk_name);
+- goto exit;
+- }
+-
+- freq = clk_get_rate(clk);
+- clk_put(clk);
+-
+- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+- if (IS_ERR(opp)) {
+- pr_err("%s: unable to find boot up OPP for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- bootup_volt = dev_pm_opp_get_voltage(opp);
+- dev_pm_opp_put(opp);
+-
+- if (!bootup_volt) {
+- pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
+- __func__, vdd_name);
+- goto exit;
+- }
+-
+- voltdm_scale(voltdm, bootup_volt);
+- return 0;
+-
+-exit:
+- pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
+- return -EINVAL;
+-}
+-
+ #ifdef CONFIG_SUSPEND
+ static int omap_pm_enter(suspend_state_t suspend_state)
+ {
+@@ -208,25 +131,6 @@ void omap_common_suspend_init(void *pm_suspend)
+ }
+ #endif /* CONFIG_SUSPEND */
+
+-static void __init omap3_init_voltages(void)
+-{
+- if (!soc_is_omap34xx())
+- return;
+-
+- omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
+- omap2_set_init_voltage("core", "l3_ick", "l3_main");
+-}
+-
+-static void __init omap4_init_voltages(void)
+-{
+- if (!soc_is_omap44xx())
+- return;
+-
+- omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
+- omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
+- omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
+-}
+-
+ int __maybe_unused omap_pm_nop_init(void)
+ {
+ return 0;
+@@ -246,10 +150,6 @@ int __init omap2_common_pm_late_init(void)
+ omap4_twl_init();
+ omap_voltage_late_init();
+
+- /* Initialize the voltages */
+- omap3_init_voltages();
+- omap4_init_voltages();
+-
+ /* Smartreflex device init */
+ omap_devinit_smartreflex();
+
+diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
+index d687a73044bf..cb2aaf98e243 100644
+--- a/arch/arm/xen/efi.c
++++ b/arch/arm/xen/efi.c
+@@ -19,7 +19,9 @@ void __init xen_efi_runtime_setup(void)
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
++ efi.set_variable_nonblocking = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 3adcec05b1f6..e8cf56283871 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -601,6 +601,23 @@ config CAVIUM_ERRATUM_30115
+
+ If unsure, say Y.
+
++config CAVIUM_TX2_ERRATUM_219
++ bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
++ default y
++ help
++ On Cavium ThunderX2, a load, store or prefetch instruction between a
++ TTBR update and the corresponding context synchronizing operation can
++ cause a spurious Data Abort to be delivered to any hardware thread in
++ the CPU core.
++
++ Work around the issue by avoiding the problematic code sequence and
++ trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
++ trap handler performs the corresponding register access, skips the
++ instruction and ensures context synchronization by virtue of the
++ exception return.
++
++ If unsure, say Y.
++
+ config QCOM_FALKOR_ERRATUM_1003
+ bool "Falkor E1003: Incorrect translation due to ASID change"
+ default y
+diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
+index f19fe4b9acc4..ac1dbca3d0cd 100644
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -52,7 +52,9 @@
+ #define ARM64_HAS_IRQ_PRIO_MASKING 42
+ #define ARM64_HAS_DCPODP 43
+ #define ARM64_WORKAROUND_1463225 44
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
++#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
+
+-#define ARM64_NCAPS 45
++#define ARM64_NCAPS 47
+
+ #endif /* __ASM_CPUCAPS_H */
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 1e43ba5c79b7..27b4a973f16d 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -12,6 +12,7 @@
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
+ #include <asm/cpufeature.h>
++#include <asm/smp_plat.h>
+
+ static bool __maybe_unused
+ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+ return (need_wa > 0);
+ }
+
++static const __maybe_unused struct midr_range tx2_family_cpus[] = {
++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
++ {},
++};
++
++static bool __maybe_unused
++needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ int i;
++
++ if (!is_affected_midr_range_list(entry, scope) ||
++ !is_hyp_mode_available())
++ return false;
++
++ for_each_possible_cpu(i) {
++ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
++ return true;
++ }
++
++ return false;
++}
++
+ #ifdef CONFIG_HARDEN_EL2_VECTORS
+
+ static const struct midr_range arm64_harden_el2_vectors[] = {
+@@ -851,6 +876,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ },
++#endif
++#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
++ {
++ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
++ .matches = needs_tx2_tvm_workaround,
++ },
+ #endif
+ {
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 84a822748c84..109894bd3194 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -1070,7 +1070,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ #else
+ ldr x30, =vectors
+ #endif
++alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
++alternative_else_nop_endif
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index adaf266d8de8..7fdc821ebb78 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+ {
+ u64 hcr = vcpu->arch.hcr_el2;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
++ hcr |= HCR_TVM;
++
+ write_sysreg(hcr, hcr_el2);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
+@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+ * the crucial bit is "On taking a vSError interrupt,
+ * HCR_EL2.VSE is cleared to 0."
+ */
+- if (vcpu->arch.hcr_el2 & HCR_VSE)
+- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
++ if (vcpu->arch.hcr_el2 & HCR_VSE) {
++ vcpu->arch.hcr_el2 &= ~HCR_VSE;
++ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
++ }
+
+ if (has_vhe())
+ deactivate_traps_vhe();
+@@ -393,6 +398,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+ return true;
+ }
+
++static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
++{
++ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
++ int rt = kvm_vcpu_sys_get_rt(vcpu);
++ u64 val = vcpu_get_reg(vcpu, rt);
++
++ /*
++ * The normal sysreg handling code expects to see the traps,
++ * let's not do anything here.
++ */
++ if (vcpu->arch.hcr_el2 & HCR_TVM)
++ return false;
++
++ switch (sysreg) {
++ case SYS_SCTLR_EL1:
++ write_sysreg_el1(val, SYS_SCTLR);
++ break;
++ case SYS_TTBR0_EL1:
++ write_sysreg_el1(val, SYS_TTBR0);
++ break;
++ case SYS_TTBR1_EL1:
++ write_sysreg_el1(val, SYS_TTBR1);
++ break;
++ case SYS_TCR_EL1:
++ write_sysreg_el1(val, SYS_TCR);
++ break;
++ case SYS_ESR_EL1:
++ write_sysreg_el1(val, SYS_ESR);
++ break;
++ case SYS_FAR_EL1:
++ write_sysreg_el1(val, SYS_FAR);
++ break;
++ case SYS_AFSR0_EL1:
++ write_sysreg_el1(val, SYS_AFSR0);
++ break;
++ case SYS_AFSR1_EL1:
++ write_sysreg_el1(val, SYS_AFSR1);
++ break;
++ case SYS_MAIR_EL1:
++ write_sysreg_el1(val, SYS_MAIR);
++ break;
++ case SYS_AMAIR_EL1:
++ write_sysreg_el1(val, SYS_AMAIR);
++ break;
++ case SYS_CONTEXTIDR_EL1:
++ write_sysreg_el1(val, SYS_CONTEXTIDR);
++ break;
++ default:
++ return false;
++ }
++
++ __kvm_skip_instr(vcpu);
++ return true;
++}
++
+ /*
+ * Return true when we were able to fixup the guest exit and should return to
+ * the guest, false when we should restore the host state and return to the
+@@ -412,6 +472,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ if (*exit_code != ARM_EXCEPTION_TRAP)
+ goto exit;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
++ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
++ handle_tx2_tvm(vcpu))
++ return true;
++
+ /*
+ * We trap the first access to the FP/SIMD to save the host context
+ * and restore the guest context lazily.
+diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
+index 63a9f33aa43e..5cfc9d347826 100644
+--- a/arch/mips/boot/dts/qca/ar9331.dtsi
++++ b/arch/mips/boot/dts/qca/ar9331.dtsi
+@@ -99,7 +99,7 @@
+
+ miscintc: interrupt-controller@18060010 {
+ compatible = "qca,ar7240-misc-intc";
+- reg = <0x18060010 0x4>;
++ reg = <0x18060010 0x8>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
+index ffefc1cb2612..98c3a7feb10f 100644
+--- a/arch/mips/loongson64/common/serial.c
++++ b/arch/mips/loongson64/common/serial.c
+@@ -110,7 +110,7 @@ static int __init serial_init(void)
+ }
+ module_init(serial_init);
+
+-static void __init serial_exit(void)
++static void __exit serial_exit(void)
+ {
+ platform_device_unregister(&uart8250_device);
+ }
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index bece1264d1c5..b0f70006bd85 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -655,6 +655,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ int restore_scratch)
+ {
+ if (restore_scratch) {
++ /*
++ * Ensure the MFC0 below observes the value written to the
++ * KScratch register by the prior MTC0.
++ */
++ if (scratch_reg >= 0)
++ uasm_i_ehb(p);
++
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+@@ -669,12 +676,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
+ uasm_i_mtc0(p, 0, C0_PAGEMASK);
+ uasm_il_b(p, r, lid);
+ }
+- if (scratch_reg >= 0) {
+- uasm_i_ehb(p);
++ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+- } else {
++ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+- }
+ } else {
+ /* Reset default page size */
+ if (PM_DEFAULT_MASK >> 16) {
+@@ -923,6 +928,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ }
+ if (mode != not_refill && check_for_high_segbits) {
+ uasm_l_large_segbits_fault(l, *p);
++
++ if (mode == refill_scratch && scratch_reg >= 0)
++ uasm_i_ehb(p);
++
+ /*
+ * We get here if we are an xsseg address, or if we are
+ * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
+@@ -941,12 +950,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+ uasm_i_jr(p, ptr);
+
+ if (mode == refill_scratch) {
+- if (scratch_reg >= 0) {
+- uasm_i_ehb(p);
++ if (scratch_reg >= 0)
+ UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
+- } else {
++ else
+ UASM_i_LW(p, 1, scratchpad_offset(0), 0);
+- }
+ } else {
+ uasm_i_nop(p);
+ }
+diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
+index 92a9b5f12f98..f29f682352f0 100644
+--- a/arch/parisc/mm/ioremap.c
++++ b/arch/parisc/mm/ioremap.c
+@@ -3,7 +3,7 @@
+ * arch/parisc/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+- * (C) Copyright 2001-2006 Helge Deller <deller@×××.de>
++ * (C) Copyright 2001-2019 Helge Deller <deller@×××.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@××××××××××××.org>
+ */
+
+@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
+- vfree(addr);
++ vunmap(addr);
+ return NULL;
+ }
+
+@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
+ }
+ EXPORT_SYMBOL(__ioremap);
+
+-void iounmap(const volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *io_addr)
+ {
+- if (addr > high_memory)
+- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
++ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
++
++ if (is_vmalloc_addr((void *)addr))
++ vunmap((void *)addr);
+ }
+ EXPORT_SYMBOL(iounmap);
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 591bfb4bfd0f..a3f9c665bb5b 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc;
+ int i, r = -EBUSY;
++ u32 vp_id;
+
+ pr_devel("connect_vcpu(cpu=%d)\n", cpu);
+
+@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ return -EPERM;
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
+ return -EBUSY;
+- if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
+- pr_devel("Duplicate !\n");
+- return -EEXIST;
+- }
+ if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+- xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+- if (!xc)
+- return -ENOMEM;
+
+ /* We need to synchronize with queue provisioning */
+ mutex_lock(&xive->lock);
++
++ vp_id = kvmppc_xive_vp(xive, cpu);
++ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
++ pr_devel("Duplicate !\n");
++ r = -EEXIST;
++ goto bail;
++ }
++
++ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
++ if (!xc) {
++ r = -ENOMEM;
++ goto bail;
++ }
++
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = cpu;
+- xc->vp_id = kvmppc_xive_vp(xive, cpu);
++ xc->vp_id = vp_id;
+ xc->mfrr = 0xff;
+ xc->valid = true;
+
+diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
+index 955b820ffd6d..fe3ed50e0818 100644
+--- a/arch/powerpc/kvm/book3s_xive.h
++++ b/arch/powerpc/kvm/book3s_xive.h
+@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+ }
+
++static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
++{
++ struct kvm_vcpu *vcpu = NULL;
++ int i;
++
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Mapping between guest priorities and host priorities
+ * is as follow.
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 248c1ea9e788..78b906ffa0d2 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc = NULL;
+ int rc;
++ u32 vp_id;
+
+ pr_devel("native_connect_vcpu(server=%d)\n", server_num);
+
+@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+
+ mutex_lock(&xive->lock);
+
+- if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
++ vp_id = kvmppc_xive_vp(xive, server_num);
++ if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+ pr_devel("Duplicate !\n");
+ rc = -EEXIST;
+ goto bail;
+@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ xc->vcpu = vcpu;
+ xc->server_num = server_num;
+
+- xc->vp_id = kvmppc_xive_vp(xive, server_num);
++ xc->vp_id = vp_id;
+ xc->valid = true;
+ vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
+
+diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
+index 5a02b7d50940..9c992a88d858 100644
+--- a/arch/riscv/include/asm/asm.h
++++ b/arch/riscv/include/asm/asm.h
+@@ -22,6 +22,7 @@
+
+ #define REG_L __REG_SEL(ld, lw)
+ #define REG_S __REG_SEL(sd, sw)
++#define REG_SC __REG_SEL(sc.d, sc.w)
+ #define SZREG __REG_SEL(8, 4)
+ #define LGREG __REG_SEL(3, 2)
+
+diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
+index 9b60878a4469..2a82e0a5af46 100644
+--- a/arch/riscv/kernel/entry.S
++++ b/arch/riscv/kernel/entry.S
+@@ -98,7 +98,26 @@ _save_context:
+ */
+ .macro RESTORE_ALL
+ REG_L a0, PT_SSTATUS(sp)
+- REG_L a2, PT_SEPC(sp)
++ /*
++ * The current load reservation is effectively part of the processor's
++ * state, in the sense that load reservations cannot be shared between
++ * different hart contexts. We can't actually save and restore a load
++ * reservation, so instead here we clear any existing reservation --
++ * it's always legal for implementations to clear load reservations at
++ * any point (as long as the forward progress guarantee is kept, but
++ * we'll ignore that here).
++ *
++ * Dangling load reservations can be the result of taking a trap in the
++ * middle of an LR/SC sequence, but can also be the result of a taken
++ * forward branch around an SC -- which is how we implement CAS. As a
++ * result we need to clear reservations between the last CAS and the
++ * jump back to the new context. While it is unlikely the store
++ * completes, implementations are allowed to expand reservations to be
++ * arbitrarily large.
++ */
++ REG_L a2, PT_SEPC(sp)
++ REG_SC x0, a2, PT_SEPC(sp)
++
+ csrw CSR_SSTATUS, a0
+ csrw CSR_SEPC, a2
+
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 42bf939693d3..ed9cd9944d4f 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -11,6 +11,7 @@
+ #include <linux/swap.h>
+ #include <linux/sizes.h>
+ #include <linux/of_fdt.h>
++#include <linux/libfdt.h>
+
+ #include <asm/fixmap.h>
+ #include <asm/tlbflush.h>
+@@ -82,6 +83,8 @@ disable:
+ }
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
++static phys_addr_t dtb_early_pa __initdata;
++
+ void __init setup_bootmem(void)
+ {
+ struct memblock_region *reg;
+@@ -117,7 +120,12 @@ void __init setup_bootmem(void)
+ setup_initrd();
+ #endif /* CONFIG_BLK_DEV_INITRD */
+
+- early_init_fdt_reserve_self();
++ /*
++ * Avoid using early_init_fdt_reserve_self() since __pa() does
++ * not work for DTB pointers that are fixmap addresses
++ */
++ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
++
+ early_init_fdt_scan_reserved_mem();
+ memblock_allow_resize();
+ memblock_dump_all();
+@@ -393,6 +401,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+
+ /* Save pointer to DTB for early FDT parsing */
+ dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
++ /* Save physical address for memblock reservation */
++ dtb_early_pa = dtb_pa;
+ }
+
+ static void __init setup_vm_final(void)
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 7b0d05414618..ceeacbeff600 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset)
+ dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
+ for (rela = rela_start; rela < rela_end; rela++) {
+ loc = rela->r_offset + offset;
+- val = rela->r_addend + offset;
++ val = rela->r_addend;
+ r_sym = ELF64_R_SYM(rela->r_info);
+- if (r_sym)
+- val += dynsym[r_sym].st_value;
++ if (r_sym) {
++ if (dynsym[r_sym].st_shndx != SHN_UNDEF)
++ val += dynsym[r_sym].st_value + offset;
++ } else {
++ /*
++ * 0 == undefined symbol table index (STN_UNDEF),
++ * used for R_390_RELATIVE, only add KASLR offset
++ */
++ val += offset;
++ }
+ r_type = ELF64_R_TYPE(rela->r_info);
+ rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
+ if (rc)
+diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
+index bb59dd964590..de8f0bf5f238 100644
+--- a/arch/s390/include/asm/hugetlb.h
++++ b/arch/s390/include/asm/hugetlb.h
+@@ -12,8 +12,6 @@
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+
+-
+-#define is_hugepage_only_range(mm, addr, len) 0
+ #define hugetlb_free_pgd_range free_pgd_range
+ #define hugepages_supported() (MACHINE_HAS_EDAT1)
+
+@@ -23,6 +21,13 @@ pte_t huge_ptep_get(pte_t *ptep);
+ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
++static inline bool is_hugepage_only_range(struct mm_struct *mm,
++ unsigned long addr,
++ unsigned long len)
++{
++ return false;
++}
++
+ /*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 9b274fcaacb6..70ac23e50cae 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1268,7 +1268,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+
+ #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
+ #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+-#define pte_unmap(pte) do { } while (0)
++
++static inline void pte_unmap(pte_t *pte) { }
+
+ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
+ {
+diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
+index 3b664cb3ec4d..d5035de9020e 100644
+--- a/arch/s390/kernel/machine_kexec_reloc.c
++++ b/arch/s390/kernel/machine_kexec_reloc.c
+@@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ *(u32 *)loc = val;
+ break;
+ case R_390_64: /* Direct 64 bit. */
++ case R_390_GLOB_DAT:
+ *(u64 *)loc = val;
+ break;
+ case R_390_PC16: /* PC relative 16 bit. */
+diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
+index 5c056b8aebef..e01078e93dd3 100644
+--- a/arch/x86/hyperv/hv_apic.c
++++ b/arch/x86/hyperv/hv_apic.c
+@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
+ }
+
+ if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
+- pr_info("Hyper-V: Using MSR based APIC access\n");
++ pr_info("Hyper-V: Using enlightened APIC (%s mode)",
++ x2apic_enabled() ? "x2apic" : "xapic");
++ /*
++ * With x2apic, architectural x2apic MSRs are equivalent to the
++ * respective synthetic MSRs, so there's no need to override
++ * the apic accessors. The only exception is
++ * hv_apic_eoi_write, because it benefits from lazy EOI when
++ * available, but it works for both xapic and x2apic modes.
++ */
+ apic_set_eoi_write(hv_apic_eoi_write);
+- apic->read = hv_apic_read;
+- apic->write = hv_apic_write;
+- apic->icr_write = hv_apic_icr_write;
+- apic->icr_read = hv_apic_icr_read;
++ if (!x2apic_enabled()) {
++ apic->read = hv_apic_read;
++ apic->write = hv_apic_write;
++ apic->icr_write = hv_apic_icr_write;
++ apic->icr_read = hv_apic_icr_read;
++ }
+ }
+ }
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 35c225ede0e4..61d93f062a36 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -734,5 +734,28 @@ do { \
+ if (unlikely(__gu_err)) goto err_label; \
+ } while (0)
+
++/*
++ * We want the unsafe accessors to always be inlined and use
++ * the error labels - thus the macro games.
++ */
++#define unsafe_copy_loop(dst, src, len, type, label) \
++ while (len >= sizeof(type)) { \
++ unsafe_put_user(*(type *)src,(type __user *)dst,label); \
++ dst += sizeof(type); \
++ src += sizeof(type); \
++ len -= sizeof(type); \
++ }
++
++#define unsafe_copy_to_user(_dst,_src,_len,label) \
++do { \
++ char __user *__ucu_dst = (_dst); \
++ const char *__ucu_src = (_src); \
++ size_t __ucu_len = (_len); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
++ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
++} while (0)
++
+ #endif /* _ASM_X86_UACCESS_H */
+
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 609e499387a1..0cad36d1457a 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
+ {
+ struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
+
+- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
++ if (cmsk)
++ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+ free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
+ return 0;
+ }
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 29ffa495bd1c..206a4b6144c2 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
+ * we might write invalid pmds, when the kernel is relocated
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
++ *
++ * Only the region occupied by the kernel image has so far
++ * been checked against the table of usable memory regions
++ * provided by the firmware, so invalidate pages outside that
++ * region. A page table entry that maps to a reserved area of
++ * memory would allow processor speculation into that area,
++ * and on some hardware (particularly the UV platform) even
++ * speculative access to some reserved areas is caught as an
++ * error, causing the BIOS to halt the system.
+ */
+
+ pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+- for (i = 0; i < PTRS_PER_PMD; i++) {
++
++ /* invalidate pages before the kernel image */
++ for (i = 0; i < pmd_index((unsigned long)_text); i++)
++ pmd[i] &= ~_PAGE_PRESENT;
++
++ /* fixup pages that are part of the kernel image */
++ for (; i <= pmd_index((unsigned long)_end); i++)
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+- }
++
++ /* invalidate pages after the kernel image */
++ for (; i < PTRS_PER_PMD; i++)
++ pmd[i] &= ~_PAGE_PRESENT;
+
+ /*
+ * Fixup phys_base - remove the memory encryption mask to obtain
+diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
+index 0d3365cb64de..7e3eb70f411a 100644
+--- a/arch/x86/xen/efi.c
++++ b/arch/x86/xen/efi.c
+@@ -65,7 +65,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
++ efi.set_variable_nonblocking = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
+index aeb15f4c755b..be8b2be5a98b 100644
+--- a/arch/xtensa/include/asm/bitops.h
++++ b/arch/xtensa/include/asm/bitops.h
+@@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp)
+- : "a" (~mask), "a" (p)
++ : "a" (mask), "a" (p)
+ : "memory");
+ }
+
+diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
+index 04f19de46700..4092555828b1 100644
+--- a/arch/xtensa/kernel/xtensa_ksyms.c
++++ b/arch/xtensa/kernel/xtensa_ksyms.c
+@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
+ // FIXME EXPORT_SYMBOL(screen_info);
+ #endif
+
+-EXPORT_SYMBOL(outsb);
+-EXPORT_SYMBOL(outsw);
+-EXPORT_SYMBOL(outsl);
+-EXPORT_SYMBOL(insb);
+-EXPORT_SYMBOL(insw);
+-EXPORT_SYMBOL(insl);
+-
+ extern long common_exception_return;
+ EXPORT_SYMBOL(common_exception_return);
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index a79b9ad1aba1..ed41cde93641 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1998,6 +1998,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+ }
+
+ blk_add_rq_to_plug(plug, rq);
++ } else if (q->elevator) {
++ blk_mq_sched_insert_request(rq, false, true, true);
+ } else if (plug && !blk_queue_nomerges(q)) {
+ /*
+ * We do limited plugging. If the bio can be merged, do that.
+@@ -2021,8 +2023,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
+ blk_mq_try_issue_directly(data.hctx, same_queue_rq,
+ &cookie);
+ }
+- } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
+- !data.hctx->dispatch_busy)) {
++ } else if ((q->nr_hw_queues > 1 && is_sync) ||
++ !data.hctx->dispatch_busy) {
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ } else {
+ blk_mq_sched_insert_request(rq, false, true, true);
+diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
+index c0f0778d5396..8378f68a21ac 100644
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -103,16 +103,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
+
+ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
+ {
+- struct rq_qos *cur, *prev = NULL;
+- for (cur = q->rq_qos; cur; cur = cur->next) {
+- if (cur == rqos) {
+- if (prev)
+- prev->next = rqos->next;
+- else
+- q->rq_qos = cur;
++ struct rq_qos **cur;
++
++ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
++ if (*cur == rqos) {
++ *cur = rqos->next;
+ break;
+ }
+- prev = cur;
+ }
+
+ blk_mq_debugfs_unregister_rqos(rqos);
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 3b2525908dd8..a1a858ad4d18 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
+ pcc_data[pcc_ss_id]->refcount--;
+ if (!pcc_data[pcc_ss_id]->refcount) {
+ pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+ kfree(pcc_data[pcc_ss_id]);
++ pcc_data[pcc_ss_id] = NULL;
+ }
+ }
+ }
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 1413324982f0..14e68f202f81 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
+ nfit_device_lock(dev);
+ nd_desc = dev_get_drvdata(dev);
+ if (!nd_desc) {
+- device_unlock(dev);
++ nfit_device_unlock(dev);
+ return rc;
+ }
+ acpi_desc = to_acpi_desc(nd_desc);
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index dc1c83eafc22..1c5278207153 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -95,10 +95,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
+ #define SZ_1K 0x400
+ #endif
+
+-#ifndef SZ_4M
+-#define SZ_4M 0x400000
+-#endif
+-
+ #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+ enum {
+@@ -5195,9 +5191,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ if (proc->tsk != current->group_leader)
+ return -EINVAL;
+
+- if ((vma->vm_end - vma->vm_start) > SZ_4M)
+- vma->vm_end = vma->vm_start + SZ_4M;
+-
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 6d79a1b0d446..8fe99b20ca02 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -22,6 +22,7 @@
+ #include <asm/cacheflush.h>
+ #include <linux/uaccess.h>
+ #include <linux/highmem.h>
++#include <linux/sizes.h>
+ #include "binder_alloc.h"
+ #include "binder_trace.h"
+
+@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ alloc->buffer = (void __user *)vma->vm_start;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+- alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
++ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
++ SZ_4M);
++ alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+- alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 3e63294304c7..691852b8bb41 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -1617,7 +1617,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
+ */
+ if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
++
++ /* Skip applying the quirk on Denverton and beyond */
++ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+ return;
+
+ /*
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 1669d41fcddc..810329523c28 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/acpi.h>
++#include <linux/cpufreq.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/fwnode.h>
+@@ -3150,6 +3151,8 @@ void device_shutdown(void)
+ wait_for_device_probe();
+ device_block_probing();
+
++ cpufreq_suspend();
++
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 20c39d1bcef8..9b9abc4fcfb7 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -554,6 +554,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
+ pfn >>= PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
++ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
++ if (!pfn_to_online_page(pfn))
++ return -EIO;
+ ret = soft_offline_page(pfn_to_page(pfn), 0);
+ return ret == 0 ? count : ret;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1410fa893653..f6f77eaa7217 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
+ blk_queue_write_cache(lo->lo_queue, true, false);
1401 +
1402 ++ if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
1403 ++ /* In case of direct I/O, match underlying block size */
1404 ++ unsigned short bsize = bdev_logical_block_size(
1405 ++ inode->i_sb->s_bdev);
1406 ++
1407 ++ blk_queue_logical_block_size(lo->lo_queue, bsize);
1408 ++ blk_queue_physical_block_size(lo->lo_queue, bsize);
1409 ++ blk_queue_io_min(lo->lo_queue, bsize);
1410 ++ }
1411 ++
1412 + loop_update_rotational(lo);
1413 + loop_update_dio(lo);
1414 + set_capacity(lo->lo_disk, size);
1415 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
1416 +index d58a359a6622..4285e75e52c3 100644
1417 +--- a/drivers/block/zram/zram_drv.c
1418 ++++ b/drivers/block/zram/zram_drv.c
1419 +@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
1420 + static ssize_t backing_dev_show(struct device *dev,
1421 + struct device_attribute *attr, char *buf)
1422 + {
1423 ++ struct file *file;
1424 + struct zram *zram = dev_to_zram(dev);
1425 +- struct file *file = zram->backing_dev;
1426 + char *p;
1427 + ssize_t ret;
1428 +
1429 + down_read(&zram->init_lock);
1430 +- if (!zram->backing_dev) {
1431 ++ file = zram->backing_dev;
1432 ++ if (!file) {
1433 + memcpy(buf, "none\n", 5);
1434 + up_read(&zram->init_lock);
1435 + return 5;
1436 +diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
1437 +index b57fe09b428b..9dd6185a4b4e 100644
1438 +--- a/drivers/clk/ti/clk-7xx.c
1439 ++++ b/drivers/clk/ti/clk-7xx.c
1440 +@@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst
1441 + { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
1442 + { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
1443 + { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
1444 +- { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
1445 ++ { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
1446 + { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
1447 + { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
1448 + { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
1449 +@@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = {
1450 + DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
1451 + DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
1452 + DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
1453 +- DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
1454 +- DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
1455 ++ DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
1456 ++ DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
1457 + DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
1458 + DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
1459 + DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
1460 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1461 +index c28ebf2810f1..f970f87ce86e 100644
1462 +--- a/drivers/cpufreq/cpufreq.c
1463 ++++ b/drivers/cpufreq/cpufreq.c
1464 +@@ -2746,14 +2746,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1465 + }
1466 + EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
1467 +
1468 +-/*
1469 +- * Stop cpufreq at shutdown to make sure it isn't holding any locks
1470 +- * or mutexes when secondary CPUs are halted.
1471 +- */
1472 +-static struct syscore_ops cpufreq_syscore_ops = {
1473 +- .shutdown = cpufreq_suspend,
1474 +-};
1475 +-
1476 + struct kobject *cpufreq_global_kobject;
1477 + EXPORT_SYMBOL(cpufreq_global_kobject);
1478 +
1479 +@@ -2765,8 +2757,6 @@ static int __init cpufreq_core_init(void)
1480 + cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
1481 + BUG_ON(!cpufreq_global_kobject);
1482 +
1483 +- register_syscore_ops(&cpufreq_syscore_ops);
1484 +-
1485 + return 0;
1486 + }
1487 + module_param(off, int, 0444);
1488 +diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
1489 +index 7f19f1c672c3..2059e43ccc01 100644
1490 +--- a/drivers/edac/ghes_edac.c
1491 ++++ b/drivers/edac/ghes_edac.c
1492 +@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
1493 + if (!ghes_pvt)
1494 + return;
1495 +
1496 ++ if (atomic_dec_return(&ghes_init))
1497 ++ return;
1498 ++
1499 + mci = ghes_pvt->mci;
1500 ++ ghes_pvt = NULL;
1501 + edac_mc_del_mc(mci->pdev);
1502 + edac_mc_free(mci);
1503 + }
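[Annotation: ghes_edac_unregister() now tears the MC instance down only when the last user goes away, via atomic_dec_return() on the ghes_init counter (presumably incremented on registration). A sketch of that last-one-out pattern with C11 atomics; only the counter name comes from the hunk, the rest is hypothetical.]

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int ghes_init; /* one reference per registered instance */

	static void register_instance(void)
	{
		atomic_fetch_add(&ghes_init, 1);
	}

	static void unregister_instance(void)
	{
		/* Only the caller that drops the count to zero may free the
		 * shared state; everyone else returns early. */
		if (atomic_fetch_sub(&ghes_init, 1) - 1 != 0)
			return;
		puts("last user gone, releasing shared state");
	}

	int main(void)
	{
		register_instance();
		register_instance();
		unregister_instance(); /* 2 -> 1: no teardown */
		unregister_instance(); /* 1 -> 0: teardown runs */
		return 0;
	}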
1504 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1505 +index eba42c752bca..82155ac3288a 100644
1506 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1507 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
1508 +@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
1509 + u32 val = 0;
1510 + u32 count = 0;
1511 + struct device *dev;
1512 +- struct i2s_platform_data *i2s_pdata;
1513 ++ struct i2s_platform_data *i2s_pdata = NULL;
1514 +
1515 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1516 +
1517 +@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
1518 + adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
1519 + GFP_KERNEL);
1520 +
1521 +- if (adev->acp.acp_cell == NULL)
1522 +- return -ENOMEM;
1523 ++ if (adev->acp.acp_cell == NULL) {
1524 ++ r = -ENOMEM;
1525 ++ goto failure;
1526 ++ }
1527 +
1528 + adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
1529 + if (adev->acp.acp_res == NULL) {
1530 +- kfree(adev->acp.acp_cell);
1531 +- return -ENOMEM;
1532 ++ r = -ENOMEM;
1533 ++ goto failure;
1534 + }
1535 +
1536 + i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
1537 + if (i2s_pdata == NULL) {
1538 +- kfree(adev->acp.acp_res);
1539 +- kfree(adev->acp.acp_cell);
1540 +- return -ENOMEM;
1541 ++ r = -ENOMEM;
1542 ++ goto failure;
1543 + }
1544 +
1545 + switch (adev->asic_type) {
1546 +@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
1547 + r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
1548 + ACP_DEVS);
1549 + if (r)
1550 +- return r;
1551 ++ goto failure;
1552 +
1553 + for (i = 0; i < ACP_DEVS ; i++) {
1554 + dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
1555 + r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
1556 + if (r) {
1557 + dev_err(dev, "Failed to add dev to genpd\n");
1558 +- return r;
1559 ++ goto failure;
1560 + }
1561 + }
1562 +
1563 +@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
1564 + break;
1565 + if (--count == 0) {
1566 + dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
1567 +- return -ETIMEDOUT;
1568 ++ r = -ETIMEDOUT;
1569 ++ goto failure;
1570 + }
1571 + udelay(100);
1572 + }
1573 +@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
1574 + break;
1575 + if (--count == 0) {
1576 + dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
1577 +- return -ETIMEDOUT;
1578 ++ r = -ETIMEDOUT;
1579 ++ goto failure;
1580 + }
1581 + udelay(100);
1582 + }
1583 +@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
1584 + val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
1585 + cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
1586 + return 0;
1587 ++
1588 ++failure:
1589 ++ kfree(i2s_pdata);
1590 ++ kfree(adev->acp.acp_res);
1591 ++ kfree(adev->acp.acp_cell);
1592 ++ kfree(adev->acp.acp_genpd);
1593 ++ return r;
1594 + }
1595 +
1596 + /**
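[Annotation: the acp_hw_init() rework replaces per-branch kfree() chains with a single failure: label, the usual kernel idiom for unwinding several allocations. A compact userspace sketch of the same structure; sizes and names are made up, and kfree(NULL), like free(NULL), is a no-op, so the label can free everything unconditionally.]

	#include <stdlib.h>

	static int init_resources(void)
	{
		char *cells = NULL, *res = NULL, *pdata = NULL;
		int r = 0;

		cells = calloc(1, 64);
		if (!cells) { r = -1; goto failure; }

		res = calloc(5, 32);
		if (!res) { r = -1; goto failure; }

		pdata = calloc(3, 16);
		if (!pdata) { r = -1; goto failure; }

		/* ... later error paths also jump to failure; a real driver
		 * would stash these pointers in its device struct ... */
		return 0;

	failure:
		free(pdata); /* safe even for the pointers still NULL */
		free(res);
		free(cells);
		return r;
	}

	int main(void)
	{
		return init_resources() ? 1 : 0;
	}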
1597 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1598 +index 8b26c970a3cb..90df22081a25 100644
1599 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1600 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1601 +@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
1602 +
1603 + list_for_each_entry(lobj, validated, tv.head) {
1604 + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
1605 +- bool binding_userptr = false;
1606 + struct mm_struct *usermm;
1607 +
1608 + usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
1609 +@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
1610 +
1611 + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1612 + lobj->user_pages);
1613 +- binding_userptr = true;
1614 + }
1615 +
1616 + if (p->evictable == lobj)
1617 +@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
1618 + if (r)
1619 + return r;
1620 +
1621 +- if (binding_userptr) {
1622 +- kvfree(lobj->user_pages);
1623 +- lobj->user_pages = NULL;
1624 +- }
1625 ++ kvfree(lobj->user_pages);
1626 ++ lobj->user_pages = NULL;
1627 + }
1628 + return 0;
1629 + }
1630 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1631 +index 5376328d3fd0..a7cd4a03bf38 100644
1632 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1633 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1634 +@@ -1030,6 +1030,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
1635 + return -ENODEV;
1636 + }
1637 +
1638 ++#ifdef CONFIG_DRM_AMDGPU_SI
1639 ++ if (!amdgpu_si_support) {
1640 ++ switch (flags & AMD_ASIC_MASK) {
1641 ++ case CHIP_TAHITI:
1642 ++ case CHIP_PITCAIRN:
1643 ++ case CHIP_VERDE:
1644 ++ case CHIP_OLAND:
1645 ++ case CHIP_HAINAN:
1646 ++ dev_info(&pdev->dev,
1647 ++ "SI support provided by radeon.\n");
1648 ++ dev_info(&pdev->dev,
1649 ++ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
1650 ++ );
1651 ++ return -ENODEV;
1652 ++ }
1653 ++ }
1654 ++#endif
1655 ++#ifdef CONFIG_DRM_AMDGPU_CIK
1656 ++ if (!amdgpu_cik_support) {
1657 ++ switch (flags & AMD_ASIC_MASK) {
1658 ++ case CHIP_KAVERI:
1659 ++ case CHIP_BONAIRE:
1660 ++ case CHIP_HAWAII:
1661 ++ case CHIP_KABINI:
1662 ++ case CHIP_MULLINS:
1663 ++ dev_info(&pdev->dev,
1664 ++ "CIK support provided by radeon.\n");
1665 ++ dev_info(&pdev->dev,
1666 ++ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
1667 ++ );
1668 ++ return -ENODEV;
1669 ++ }
1670 ++ }
1671 ++#endif
1672 ++
1673 + /* Get rid of things like offb */
1674 + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
1675 + if (ret)
1676 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1677 +index 00beba533582..56b4c241a14b 100644
1678 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1679 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1680 +@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
1681 + struct amdgpu_device *adev;
1682 + int r, acpi_status;
1683 +
1684 +-#ifdef CONFIG_DRM_AMDGPU_SI
1685 +- if (!amdgpu_si_support) {
1686 +- switch (flags & AMD_ASIC_MASK) {
1687 +- case CHIP_TAHITI:
1688 +- case CHIP_PITCAIRN:
1689 +- case CHIP_VERDE:
1690 +- case CHIP_OLAND:
1691 +- case CHIP_HAINAN:
1692 +- dev_info(dev->dev,
1693 +- "SI support provided by radeon.\n");
1694 +- dev_info(dev->dev,
1695 +- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
1696 +- );
1697 +- return -ENODEV;
1698 +- }
1699 +- }
1700 +-#endif
1701 +-#ifdef CONFIG_DRM_AMDGPU_CIK
1702 +- if (!amdgpu_cik_support) {
1703 +- switch (flags & AMD_ASIC_MASK) {
1704 +- case CHIP_KAVERI:
1705 +- case CHIP_BONAIRE:
1706 +- case CHIP_HAWAII:
1707 +- case CHIP_KABINI:
1708 +- case CHIP_MULLINS:
1709 +- dev_info(dev->dev,
1710 +- "CIK support provided by radeon.\n");
1711 +- dev_info(dev->dev,
1712 +- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
1713 +- );
1714 +- return -ENODEV;
1715 +- }
1716 +- }
1717 +-#endif
1718 +-
1719 + adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
1720 + if (adev == NULL) {
1721 + return -ENOMEM;
1722 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1723 +index b70b3c45bb29..65044b1b3d4c 100644
1724 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1725 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1726 +@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
1727 + * Open up a stream for HW test
1728 + */
1729 + int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1730 ++ struct amdgpu_bo *bo,
1731 + struct dma_fence **fence)
1732 + {
1733 + const unsigned ib_size_dw = 1024;
1734 + struct amdgpu_job *job;
1735 + struct amdgpu_ib *ib;
1736 + struct dma_fence *f = NULL;
1737 +- uint64_t dummy;
1738 ++ uint64_t addr;
1739 + int i, r;
1740 +
1741 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
1742 +@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1743 +
1744 + ib = &job->ibs[0];
1745 +
1746 +- dummy = ib->gpu_addr + 1024;
1747 ++ addr = amdgpu_bo_gpu_offset(bo);
1748 +
1749 + /* stitch together a VCE create msg */
1750 + ib->length_dw = 0;
1751 +@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1752 +
1753 + ib->ptr[ib->length_dw++] = 0x00000014; /* len */
1754 + ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
1755 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
1756 +- ib->ptr[ib->length_dw++] = dummy;
1757 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1758 ++ ib->ptr[ib->length_dw++] = addr;
1759 + ib->ptr[ib->length_dw++] = 0x00000001;
1760 +
1761 + for (i = ib->length_dw; i < ib_size_dw; ++i)
1762 +@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
1763 + int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1764 + {
1765 + struct dma_fence *fence = NULL;
1766 ++ struct amdgpu_bo *bo = NULL;
1767 + long r;
1768 +
1769 + /* skip vce ring1/2 ib test for now, since it's not reliable */
1770 + if (ring != &ring->adev->vce.ring[0])
1771 + return 0;
1772 +
1773 +- r = amdgpu_vce_get_create_msg(ring, 1, NULL);
1774 ++ r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
1775 ++ AMDGPU_GEM_DOMAIN_VRAM,
1776 ++ &bo, NULL, NULL);
1777 ++ if (r)
1778 ++ return r;
1779 ++
1780 ++ r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
1781 + if (r)
1782 + goto error;
1783 +
1784 +@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1785 +
1786 + error:
1787 + dma_fence_put(fence);
1788 ++ amdgpu_bo_unreserve(bo);
1789 ++ amdgpu_bo_unref(&bo);
1790 + return r;
1791 + }
1792 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
1793 +index 30ea54dd9117..e802f7d9db0a 100644
1794 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
1795 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
1796 +@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
1797 + int amdgpu_vce_suspend(struct amdgpu_device *adev);
1798 + int amdgpu_vce_resume(struct amdgpu_device *adev);
1799 + int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1800 ++ struct amdgpu_bo *bo,
1801 + struct dma_fence **fence);
1802 + int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1803 + bool direct, struct dma_fence **fence);
1804 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1805 +index 2e12eeb314a7..a3fe8b01d234 100644
1806 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1807 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
1808 +@@ -517,13 +517,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
1809 + }
1810 +
1811 + static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1812 +- struct dma_fence **fence)
1813 ++ struct amdgpu_bo *bo,
1814 ++ struct dma_fence **fence)
1815 + {
1816 + const unsigned ib_size_dw = 16;
1817 + struct amdgpu_job *job;
1818 + struct amdgpu_ib *ib;
1819 + struct dma_fence *f = NULL;
1820 +- uint64_t dummy;
1821 ++ uint64_t addr;
1822 + int i, r;
1823 +
1824 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
1825 +@@ -531,14 +532,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
1826 + return r;
1827 +
1828 + ib = &job->ibs[0];
1829 +- dummy = ib->gpu_addr + 1024;
1830 ++ addr = amdgpu_bo_gpu_offset(bo);
1831 +
1832 + ib->length_dw = 0;
1833 + ib->ptr[ib->length_dw++] = 0x00000018;
1834 + ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
1835 + ib->ptr[ib->length_dw++] = handle;
1836 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
1837 +- ib->ptr[ib->length_dw++] = dummy;
1838 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1839 ++ ib->ptr[ib->length_dw++] = addr;
1840 + ib->ptr[ib->length_dw++] = 0x0000000b;
1841 +
1842 + ib->ptr[ib->length_dw++] = 0x00000014;
1843 +@@ -569,13 +570,14 @@ err:
1844 + }
1845 +
1846 + static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1847 +- struct dma_fence **fence)
1848 ++ struct amdgpu_bo *bo,
1849 ++ struct dma_fence **fence)
1850 + {
1851 + const unsigned ib_size_dw = 16;
1852 + struct amdgpu_job *job;
1853 + struct amdgpu_ib *ib;
1854 + struct dma_fence *f = NULL;
1855 +- uint64_t dummy;
1856 ++ uint64_t addr;
1857 + int i, r;
1858 +
1859 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
1860 +@@ -583,14 +585,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
1861 + return r;
1862 +
1863 + ib = &job->ibs[0];
1864 +- dummy = ib->gpu_addr + 1024;
1865 ++ addr = amdgpu_bo_gpu_offset(bo);
1866 +
1867 + ib->length_dw = 0;
1868 + ib->ptr[ib->length_dw++] = 0x00000018;
1869 + ib->ptr[ib->length_dw++] = 0x00000001;
1870 + ib->ptr[ib->length_dw++] = handle;
1871 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
1872 +- ib->ptr[ib->length_dw++] = dummy;
1873 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1874 ++ ib->ptr[ib->length_dw++] = addr;
1875 + ib->ptr[ib->length_dw++] = 0x0000000b;
1876 +
1877 + ib->ptr[ib->length_dw++] = 0x00000014;
1878 +@@ -623,13 +625,20 @@ err:
1879 + int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1880 + {
1881 + struct dma_fence *fence = NULL;
1882 ++ struct amdgpu_bo *bo = NULL;
1883 + long r;
1884 +
1885 +- r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
1886 ++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
1887 ++ AMDGPU_GEM_DOMAIN_VRAM,
1888 ++ &bo, NULL, NULL);
1889 ++ if (r)
1890 ++ return r;
1891 ++
1892 ++ r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
1893 + if (r)
1894 + goto error;
1895 +
1896 +- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
1897 ++ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
1898 + if (r)
1899 + goto error;
1900 +
1901 +@@ -641,6 +650,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1902 +
1903 + error:
1904 + dma_fence_put(fence);
1905 ++ amdgpu_bo_unreserve(bo);
1906 ++ amdgpu_bo_unref(&bo);
1907 + return r;
1908 + }
1909 +
1910 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1911 +index 15c371fac469..0d131e1d6efc 100644
1912 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1913 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
1914 +@@ -1086,7 +1086,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1915 + amdgpu_ring_write(ring, addr & 0xfffffffc);
1916 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1917 + amdgpu_ring_write(ring, seq); /* reference */
1918 +- amdgpu_ring_write(ring, 0xfffffff); /* mask */
1919 ++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
1920 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1921 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1922 + }
1923 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1924 +index 670784a78512..217084d56ab8 100644
1925 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1926 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
1927 +@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
1928 + * Open up a stream for HW test
1929 + */
1930 + static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
1931 ++ struct amdgpu_bo *bo,
1932 + struct dma_fence **fence)
1933 + {
1934 + const unsigned ib_size_dw = 16;
1935 + struct amdgpu_job *job;
1936 + struct amdgpu_ib *ib;
1937 + struct dma_fence *f = NULL;
1938 +- uint64_t dummy;
1939 ++ uint64_t addr;
1940 + int i, r;
1941 +
1942 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
1943 +@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
1944 + return r;
1945 +
1946 + ib = &job->ibs[0];
1947 +- dummy = ib->gpu_addr + 1024;
1948 ++ addr = amdgpu_bo_gpu_offset(bo);
1949 +
1950 + ib->length_dw = 0;
1951 + ib->ptr[ib->length_dw++] = 0x00000018;
1952 + ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
1953 + ib->ptr[ib->length_dw++] = handle;
1954 + ib->ptr[ib->length_dw++] = 0x00010000;
1955 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
1956 +- ib->ptr[ib->length_dw++] = dummy;
1957 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1958 ++ ib->ptr[ib->length_dw++] = addr;
1959 +
1960 + ib->ptr[ib->length_dw++] = 0x00000014;
1961 + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
1962 +@@ -268,13 +269,14 @@ err:
1963 + */
1964 + static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
1965 + uint32_t handle,
1966 ++ struct amdgpu_bo *bo,
1967 + struct dma_fence **fence)
1968 + {
1969 + const unsigned ib_size_dw = 16;
1970 + struct amdgpu_job *job;
1971 + struct amdgpu_ib *ib;
1972 + struct dma_fence *f = NULL;
1973 +- uint64_t dummy;
1974 ++ uint64_t addr;
1975 + int i, r;
1976 +
1977 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
1978 +@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
1979 + return r;
1980 +
1981 + ib = &job->ibs[0];
1982 +- dummy = ib->gpu_addr + 1024;
1983 ++ addr = amdgpu_bo_gpu_offset(bo);
1984 +
1985 + ib->length_dw = 0;
1986 + ib->ptr[ib->length_dw++] = 0x00000018;
1987 + ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
1988 + ib->ptr[ib->length_dw++] = handle;
1989 + ib->ptr[ib->length_dw++] = 0x00010000;
1990 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
1991 +- ib->ptr[ib->length_dw++] = dummy;
1992 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1993 ++ ib->ptr[ib->length_dw++] = addr;
1994 +
1995 + ib->ptr[ib->length_dw++] = 0x00000014;
1996 + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
1997 +@@ -327,13 +329,20 @@ err:
1998 + static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1999 + {
2000 + struct dma_fence *fence = NULL;
2001 ++ struct amdgpu_bo *bo = NULL;
2002 + long r;
2003 +
2004 +- r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
2005 ++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
2006 ++ AMDGPU_GEM_DOMAIN_VRAM,
2007 ++ &bo, NULL, NULL);
2008 ++ if (r)
2009 ++ return r;
2010 ++
2011 ++ r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
2012 + if (r)
2013 + goto error;
2014 +
2015 +- r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
2016 ++ r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
2017 + if (r)
2018 + goto error;
2019 +
2020 +@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2021 +
2022 + error:
2023 + dma_fence_put(fence);
2024 ++ amdgpu_bo_unreserve(bo);
2025 ++ amdgpu_bo_unref(&bo);
2026 + return r;
2027 + }
2028 +
2029 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2030 +index a6bfe7651d07..c5e2f8c1741b 100644
2031 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2032 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2033 +@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2034 + * Open up a stream for HW test
2035 + */
2036 + static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
2037 ++ struct amdgpu_bo *bo,
2038 + struct dma_fence **fence)
2039 + {
2040 + const unsigned ib_size_dw = 16;
2041 + struct amdgpu_job *job;
2042 + struct amdgpu_ib *ib;
2043 + struct dma_fence *f = NULL;
2044 +- uint64_t dummy;
2045 ++ uint64_t addr;
2046 + int i, r;
2047 +
2048 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
2049 +@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
2050 + return r;
2051 +
2052 + ib = &job->ibs[0];
2053 +- dummy = ib->gpu_addr + 1024;
2054 ++ addr = amdgpu_bo_gpu_offset(bo);
2055 +
2056 + ib->length_dw = 0;
2057 + ib->ptr[ib->length_dw++] = 0x00000018;
2058 + ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
2059 + ib->ptr[ib->length_dw++] = handle;
2060 + ib->ptr[ib->length_dw++] = 0x00000000;
2061 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
2062 +- ib->ptr[ib->length_dw++] = dummy;
2063 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
2064 ++ ib->ptr[ib->length_dw++] = addr;
2065 +
2066 + ib->ptr[ib->length_dw++] = 0x00000014;
2067 + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
2068 +@@ -275,13 +276,14 @@ err:
2069 + * Close up a stream for HW test or if userspace failed to do so
2070 + */
2071 + static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
2072 +- struct dma_fence **fence)
2073 ++ struct amdgpu_bo *bo,
2074 ++ struct dma_fence **fence)
2075 + {
2076 + const unsigned ib_size_dw = 16;
2077 + struct amdgpu_job *job;
2078 + struct amdgpu_ib *ib;
2079 + struct dma_fence *f = NULL;
2080 +- uint64_t dummy;
2081 ++ uint64_t addr;
2082 + int i, r;
2083 +
2084 + r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
2085 +@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
2086 + return r;
2087 +
2088 + ib = &job->ibs[0];
2089 +- dummy = ib->gpu_addr + 1024;
2090 ++ addr = amdgpu_bo_gpu_offset(bo);
2091 +
2092 + ib->length_dw = 0;
2093 + ib->ptr[ib->length_dw++] = 0x00000018;
2094 + ib->ptr[ib->length_dw++] = 0x00000001;
2095 + ib->ptr[ib->length_dw++] = handle;
2096 + ib->ptr[ib->length_dw++] = 0x00000000;
2097 +- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
2098 +- ib->ptr[ib->length_dw++] = dummy;
2099 ++ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
2100 ++ ib->ptr[ib->length_dw++] = addr;
2101 +
2102 + ib->ptr[ib->length_dw++] = 0x00000014;
2103 + ib->ptr[ib->length_dw++] = 0x00000002;
2104 +@@ -334,13 +336,20 @@ err:
2105 + static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2106 + {
2107 + struct dma_fence *fence = NULL;
2108 ++ struct amdgpu_bo *bo = NULL;
2109 + long r;
2110 +
2111 +- r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
2112 ++ r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
2113 ++ AMDGPU_GEM_DOMAIN_VRAM,
2114 ++ &bo, NULL, NULL);
2115 ++ if (r)
2116 ++ return r;
2117 ++
2118 ++ r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
2119 + if (r)
2120 + goto error;
2121 +
2122 +- r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
2123 ++ r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
2124 + if (r)
2125 + goto error;
2126 +
2127 +@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2128 +
2129 + error:
2130 + dma_fence_put(fence);
2131 ++ amdgpu_bo_unreserve(bo);
2132 ++ amdgpu_bo_unref(&bo);
2133 + return r;
2134 + }
2135 +
2136 +diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
2137 +index 6248c8455314..45f74219e79e 100644
2138 +--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
2139 ++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
2140 +@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
2141 + return &clk_src->base;
2142 + }
2143 +
2144 ++ kfree(clk_src);
2145 + BREAK_TO_DEBUGGER();
2146 + return NULL;
2147 + }
2148 +diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
2149 +index 764329264c3b..0cb83b0e0e1e 100644
2150 +--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
2151 ++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
2152 +@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
2153 + return &clk_src->base;
2154 + }
2155 +
2156 ++ kfree(clk_src);
2157 + BREAK_TO_DEBUGGER();
2158 + return NULL;
2159 + }
2160 +diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
2161 +index 7a04be74c9cf..918455caa9a6 100644
2162 +--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
2163 ++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
2164 +@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
2165 + return &clk_src->base;
2166 + }
2167 +
2168 ++ kfree(clk_src);
2169 + BREAK_TO_DEBUGGER();
2170 + return NULL;
2171 + }
2172 +diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
2173 +index ae38c9c7277c..49f3f0fad763 100644
2174 +--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
2175 ++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
2176 +@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
2177 + return &clk_src->base;
2178 + }
2179 +
2180 ++ kfree(clk_src);
2181 + BREAK_TO_DEBUGGER();
2182 + return NULL;
2183 + }
2184 +diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
2185 +index 860a524ebcfa..952440893fbb 100644
2186 +--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
2187 ++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
2188 +@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
2189 + return &clk_src->base;
2190 + }
2191 +
2192 ++ kfree(clk_src);
2193 + BREAK_TO_DEBUGGER();
2194 + return NULL;
2195 + }
2196 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2197 +index a12530a3ab9c..3f25e8da5396 100644
2198 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2199 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
2200 +@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
2201 + return &clk_src->base;
2202 + }
2203 +
2204 ++ kfree(clk_src);
2205 + BREAK_TO_DEBUGGER();
2206 + return NULL;
2207 + }
2208 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2209 +index b949e202d6cb..5b7ff6c549f1 100644
2210 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2211 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
2212 +@@ -955,6 +955,7 @@ struct clock_source *dcn20_clock_source_create(
2213 + return &clk_src->base;
2214 + }
2215 +
2216 ++ kfree(clk_src);
2217 + BREAK_TO_DEBUGGER();
2218 + return NULL;
2219 + }
2220 +diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
2221 +index 2851cac94d86..b72840c06ab7 100644
2222 +--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
2223 ++++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
2224 +@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
2225 + struct komeda_data_flow_cfg dflow;
2226 + int err;
2227 +
2228 +- if (!writeback_job || !writeback_job->fb) {
2229 ++ if (!writeback_job)
2230 + return 0;
2231 +- }
2232 +
2233 + if (!crtc_st->active) {
2234 + DRM_DEBUG_ATOMIC("Cannot write the composition result out on an inactive CRTC.\n");
2235 +@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
2236 + &komeda_wb_encoder_helper_funcs,
2237 + formats, n_formats);
2238 + komeda_put_fourcc_list(formats);
2239 +- if (err)
2240 ++ if (err) {
2241 ++ kfree(kwb_conn);
2242 + return err;
2243 ++ }
2244 +
2245 + drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
2246 +
2247 +diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
2248 +index 2e812525025d..a59227b2cdb5 100644
2249 +--- a/drivers/gpu/drm/arm/malidp_mw.c
2250 ++++ b/drivers/gpu/drm/arm/malidp_mw.c
2251 +@@ -130,7 +130,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
2252 + struct drm_framebuffer *fb;
2253 + int i, n_planes;
2254 +
2255 +- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
2256 ++ if (!conn_state->writeback_job)
2257 + return 0;
2258 +
2259 + fb = conn_state->writeback_job->fb;
2260 +@@ -247,7 +247,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
2261 +
2262 + mw_state = to_mw_state(conn_state);
2263 +
2264 +- if (conn_state->writeback_job && conn_state->writeback_job->fb) {
2265 ++ if (conn_state->writeback_job) {
2266 + struct drm_framebuffer *fb = conn_state->writeback_job->fb;
2267 +
2268 + DRM_DEV_DEBUG_DRIVER(drm->dev,
2269 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
2270 +index 419381abbdd1..14aeaf736321 100644
2271 +--- a/drivers/gpu/drm/drm_atomic.c
2272 ++++ b/drivers/gpu/drm/drm_atomic.c
2273 +@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
2274 + return -EINVAL;
2275 + }
2276 +
2277 +- if (writeback_job->out_fence && !writeback_job->fb) {
2278 +- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
2279 +- connector->base.id, connector->name);
2280 +- return -EINVAL;
2281 ++ if (!writeback_job->fb) {
2282 ++ if (writeback_job->out_fence) {
2283 ++ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
2284 ++ connector->base.id, connector->name);
2285 ++ return -EINVAL;
2286 ++ }
2287 ++
2288 ++ drm_writeback_cleanup_job(writeback_job);
2289 ++ state->writeback_job = NULL;
2290 + }
2291 +
2292 + return 0;
2293 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2294 +index 82a4ceed3fcf..6b0177112e18 100644
2295 +--- a/drivers/gpu/drm/drm_edid.c
2296 ++++ b/drivers/gpu/drm/drm_edid.c
2297 +@@ -159,6 +159,9 @@ static const struct edid_quirk {
2298 + /* Medion MD 30217 PG */
2299 + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
2300 +
2301 ++ /* Lenovo G50 */
2302 ++ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
2303 ++
2304 + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
2305 + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
2306 +
2307 +diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
2308 +index ff138b6ec48b..43d9e3bb3a94 100644
2309 +--- a/drivers/gpu/drm/drm_writeback.c
2310 ++++ b/drivers/gpu/drm/drm_writeback.c
2311 +@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
2312 + if (job->fb)
2313 + drm_framebuffer_put(job->fb);
2314 +
2315 ++ if (job->out_fence)
2316 ++ dma_fence_put(job->out_fence);
2317 ++
2318 + kfree(job);
2319 + }
2320 + EXPORT_SYMBOL(drm_writeback_cleanup_job);
2321 +@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
2322 + {
2323 + unsigned long flags;
2324 + struct drm_writeback_job *job;
2325 ++ struct dma_fence *out_fence;
2326 +
2327 + spin_lock_irqsave(&wb_connector->job_lock, flags);
2328 + job = list_first_entry_or_null(&wb_connector->job_queue,
2329 + struct drm_writeback_job,
2330 + list_entry);
2331 +- if (job) {
2332 ++ if (job)
2333 + list_del(&job->list_entry);
2334 +- if (job->out_fence) {
2335 +- if (status)
2336 +- dma_fence_set_error(job->out_fence, status);
2337 +- dma_fence_signal(job->out_fence);
2338 +- dma_fence_put(job->out_fence);
2339 +- }
2340 +- }
2341 ++
2342 + spin_unlock_irqrestore(&wb_connector->job_lock, flags);
2343 +
2344 + if (WARN_ON(!job))
2345 + return;
2346 +
2347 ++ out_fence = job->out_fence;
2348 ++ if (out_fence) {
2349 ++ if (status)
2350 ++ dma_fence_set_error(out_fence, status);
2351 ++ dma_fence_signal(out_fence);
2352 ++ dma_fence_put(out_fence);
2353 ++ job->out_fence = NULL;
2354 ++ }
2355 ++
2356 + INIT_WORK(&job->cleanup_work, cleanup_work);
2357 + queue_work(system_long_wq, &job->cleanup_work);
2358 + }
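[Annotation: drm_writeback_signal_completion() now only dequeues the job under job_lock and signals the out-fence after dropping it, since fence callbacks must not run under the spinlock. The detach-then-notify shape, sketched with pthreads; the list and "fence" types are stand-ins.]

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct job {
		struct job *next;
		void (*complete)(int status); /* stand-in for dma_fence_signal() */
	};

	static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct job *job_queue;

	static void signal_completion(int status)
	{
		struct job *job;

		pthread_mutex_lock(&job_lock);
		job = job_queue;          /* detach under the lock ... */
		if (job)
			job_queue = job->next;
		pthread_mutex_unlock(&job_lock);

		if (!job)
			return;
		job->complete(status);    /* ... notify outside of it */
		free(job);
	}

	static void done(int status) { printf("job done, status %d\n", status); }

	int main(void)
	{
		struct job *j = malloc(sizeof(*j));

		j->next = NULL;
		j->complete = done;
		job_queue = j;
		signal_completion(0);
		return 0;
	}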
2359 +diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
2360 +index 3ef4e9f573cf..b1025c248bb9 100644
2361 +--- a/drivers/gpu/drm/i915/display/intel_bios.c
2362 ++++ b/drivers/gpu/drm/i915/display/intel_bios.c
2363 +@@ -1269,7 +1269,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
2364 + DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
2365 + "disabling port %c DVI/HDMI support\n",
2366 + port_name(port), info->alternate_ddc_pin,
2367 +- port_name(p), port_name(port));
2368 ++ port_name(p), port_name(p));
2369 +
2370 + /*
2371 + * If we have multiple ports supposedly sharing the
2372 +@@ -1277,9 +1277,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
2373 + * port. Otherwise they share the same ddc pin and
2374 + * system couldn't communicate with them separately.
2375 + *
2376 +- * Give child device order the priority, first come first
2377 +- * served.
2378 ++ * Give inverse child device order the priority,
2379 ++ * last one wins. Yes, there are real machines
2380 ++ * (eg. Asrock B250M-HDV) where VBT has both
2381 ++ * port A and port E with the same AUX ch and
2382 ++ * we must pick port E :(
2383 + */
2384 ++ info = &dev_priv->vbt.ddi_port_info[p];
2385 ++
2386 + info->supports_dvi = false;
2387 + info->supports_hdmi = false;
2388 + info->alternate_ddc_pin = 0;
2389 +@@ -1315,7 +1320,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
2390 + DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
2391 + "disabling port %c DP support\n",
2392 + port_name(port), info->alternate_aux_channel,
2393 +- port_name(p), port_name(port));
2394 ++ port_name(p), port_name(p));
2395 +
2396 + /*
2397 + * If we have multiple ports supposedly sharing the
2398 +@@ -1323,9 +1328,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
2399 + * port. Otherwise they share the same aux channel
2400 + * and system couldn't communicate with them separately.
2401 + *
2402 +- * Give child device order the priority, first come first
2403 +- * served.
2404 ++ * Give inverse child device order the priority,
2405 ++ * last one wins. Yes, there are real machines
2406 ++ * (eg. Asrock B250M-HDV) where VBT has both
2407 ++ * port A and port E with the same AUX ch and
2408 ++ * we must pick port E :(
2409 + */
2410 ++ info = &dev_priv->vbt.ddi_port_info[p];
2411 ++
2412 + info->supports_dp = false;
2413 + info->alternate_aux_channel = 0;
2414 + }
2415 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2416 +index c201289039fe..5bd27941811f 100644
2417 +--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2418 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
2419 +@@ -365,6 +365,7 @@ err:
2420 + return VM_FAULT_OOM;
2421 + case -ENOSPC:
2422 + case -EFAULT:
2423 ++ case -ENODEV: /* bad object, how did you get here! */
2424 + return VM_FAULT_SIGBUS;
2425 + default:
2426 + WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
2427 +@@ -475,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
2428 + if (!obj)
2429 + return -ENOENT;
2430 +
2431 ++ if (i915_gem_object_never_bind_ggtt(obj)) {
2432 ++ ret = -ENODEV;
2433 ++ goto out;
2434 ++ }
2435 ++
2436 + ret = create_mmap_offset(obj);
2437 + if (ret == 0)
2438 + *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2439 +
2440 ++out:
2441 + i915_gem_object_put(obj);
2442 + return ret;
2443 + }
2444 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
2445 +index dfebd5706f16..e44d3f49c1d6 100644
2446 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
2447 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
2448 +@@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
2449 + return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
2450 + }
2451 +
2452 ++static inline bool
2453 ++i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
2454 ++{
2455 ++ return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
2456 ++}
2457 ++
2458 + static inline bool
2459 + i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
2460 + {
2461 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2462 +index 18bf4f8d6d80..d5453e85df5e 100644
2463 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2464 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
2465 +@@ -31,7 +31,8 @@ struct drm_i915_gem_object_ops {
2466 + #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
2467 + #define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
2468 + #define I915_GEM_OBJECT_IS_PROXY BIT(2)
2469 +-#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
2470 ++#define I915_GEM_OBJECT_NO_GGTT BIT(3)
2471 ++#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4)
2472 +
2473 + /* Interface between the GEM object and its backing storage.
2474 + * get_pages() is called once prior to the use of the associated set
2475 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
2476 +index 528b61678334..cd30e83c3205 100644
2477 +--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
2478 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
2479 +@@ -694,6 +694,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
2480 + static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
2481 + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
2482 + I915_GEM_OBJECT_IS_SHRINKABLE |
2483 ++ I915_GEM_OBJECT_NO_GGTT |
2484 + I915_GEM_OBJECT_ASYNC_CANCEL,
2485 + .get_pages = i915_gem_userptr_get_pages,
2486 + .put_pages = i915_gem_userptr_put_pages,
2487 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2488 +index 8a659d3d7435..7f6af4ca0968 100644
2489 +--- a/drivers/gpu/drm/i915/i915_gem.c
2490 ++++ b/drivers/gpu/drm/i915/i915_gem.c
2491 +@@ -1030,6 +1030,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
2492 +
2493 + lockdep_assert_held(&obj->base.dev->struct_mutex);
2494 +
2495 ++ if (i915_gem_object_never_bind_ggtt(obj))
2496 ++ return ERR_PTR(-ENODEV);
2497 ++
2498 + if (flags & PIN_MAPPABLE &&
2499 + (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
2500 + /* If the required space is larger than the available
2501 +diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
2502 +index 9bb9260d9181..b05c7c513436 100644
2503 +--- a/drivers/gpu/drm/panfrost/panfrost_job.c
2504 ++++ b/drivers/gpu/drm/panfrost/panfrost_job.c
2505 +@@ -384,13 +384,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
2506 + job_read(pfdev, JS_TAIL_LO(js)),
2507 + sched_job);
2508 +
2509 +- mutex_lock(&pfdev->reset_lock);
2510 ++ if (!mutex_trylock(&pfdev->reset_lock))
2511 ++ return;
2512 +
2513 +- for (i = 0; i < NUM_JOB_SLOTS; i++)
2514 +- drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
2515 ++ for (i = 0; i < NUM_JOB_SLOTS; i++) {
2516 ++ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
2517 ++
2518 ++ drm_sched_stop(sched, sched_job);
2519 ++ if (js != i)
2520 ++ /* Ensure any timeouts on other slots have finished */
2521 ++ cancel_delayed_work_sync(&sched->work_tdr);
2522 ++ }
2523 +
2524 +- if (sched_job)
2525 +- drm_sched_increase_karma(sched_job);
2526 ++ drm_sched_increase_karma(sched_job);
2527 +
2528 + /* panfrost_core_dump(pfdev); */
2529 +
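[Annotation: panfrost_job_timedout() switches to mutex_trylock() so that concurrent timeouts on different job slots don't pile up behind one GPU reset; whoever loses the race returns and lets the winner reset everything. A pthreads sketch of that single-resetter pattern, with hypothetical names.]

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t reset_lock = PTHREAD_MUTEX_INITIALIZER;

	static void handle_timeout(int slot)
	{
		/* Only one timeout handler performs the reset; the others
		 * bail out instead of queueing up behind the mutex. */
		if (pthread_mutex_trylock(&reset_lock) != 0) {
			printf("slot %d: reset already running, skipping\n", slot);
			return;
		}
		printf("slot %d: performing device reset\n", slot);
		pthread_mutex_unlock(&reset_lock);
	}

	int main(void)
	{
		pthread_mutex_lock(&reset_lock); /* pretend a reset is running */
		handle_timeout(1);               /* -> skipped */
		pthread_mutex_unlock(&reset_lock);
		handle_timeout(0);               /* -> performs the reset */
		return 0;
	}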
2530 +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
2531 +index 5cc0fbb04ab1..7033f3a38c87 100644
2532 +--- a/drivers/gpu/drm/radeon/radeon_drv.c
2533 ++++ b/drivers/gpu/drm/radeon/radeon_drv.c
2534 +@@ -380,19 +380,11 @@ radeon_pci_remove(struct pci_dev *pdev)
2535 + static void
2536 + radeon_pci_shutdown(struct pci_dev *pdev)
2537 + {
2538 +- struct drm_device *ddev = pci_get_drvdata(pdev);
2539 +-
2540 + /* if we are running in a VM, make sure the device
2541 + * is torn down properly on reboot/shutdown
2542 + */
2543 + if (radeon_device_is_virtual())
2544 + radeon_pci_remove(pdev);
2545 +-
2546 +- /* Some adapters need to be suspended before a
2547 +- * shutdown occurs in order to prevent an error
2548 +- * during kexec.
2549 +- */
2550 +- radeon_suspend_kms(ddev, true, true, false);
2551 + }
2552 +
2553 + static int radeon_pmops_suspend(struct device *dev)
2554 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
2555 +index ae07290bba6a..04efa78d70b6 100644
2556 +--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
2557 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
2558 +@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
2559 + struct drm_device *dev = encoder->dev;
2560 + struct drm_framebuffer *fb;
2561 +
2562 +- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
2563 ++ if (!conn_state->writeback_job)
2564 + return 0;
2565 +
2566 + fb = conn_state->writeback_job->fb;
2567 +@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
2568 + unsigned int i;
2569 +
2570 + state = rcrtc->writeback.base.state;
2571 +- if (!state || !state->writeback_job || !state->writeback_job->fb)
2572 ++ if (!state || !state->writeback_job)
2573 + return;
2574 +
2575 + fb = state->writeback_job->fb;
2576 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
2577 +index 6dacff49c1cc..a77cd0344d22 100644
2578 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
2579 ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
2580 +@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
2581 + else
2582 + ret = vmf_insert_pfn(&cvma, address, pfn);
2583 +
2584 +- /*
2585 +- * Somebody beat us to this PTE or prefaulting to
2586 +- * an already populated PTE, or prefaulting error.
2587 +- */
2588 +-
2589 +- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
2590 +- break;
2591 +- else if (unlikely(ret & VM_FAULT_ERROR))
2592 +- goto out_io_unlock;
2593 ++ /* Never error on prefaulted PTEs */
2594 ++ if (unlikely((ret & VM_FAULT_ERROR))) {
2595 ++ if (i == 0)
2596 ++ goto out_io_unlock;
2597 ++ else
2598 ++ break;
2599 ++ }
2600 +
2601 + address += PAGE_SIZE;
2602 + if (unlikely(++page_offset >= page_last))
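[Annotation: the ttm_bo_vm_fault() change inverts the old logic — an insertion error is fatal only for the page that actually faulted (i == 0); errors while speculatively prefaulting its neighbours just end the loop early. A sketch of that policy; the insert function below is a stand-in that pretends to fail from the fourth page on.]

	#include <stdio.h>

	#define NPREFAULT 16

	static int insert_pfn(int i) { return i < 4 ? 0 : -1; }

	static int fault_handler(void)
	{
		for (int i = 0; i < NPREFAULT; i++) {
			if (insert_pfn(i) != 0) {
				if (i == 0)
					return -1; /* faulting page itself: hard error */
				break;         /* prefault failed: quietly stop */
			}
		}
		return 0;
	}

	int main(void)
	{
		printf("fault_handler() = %d\n", fault_handler());
		return 0;
	}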
2603 +diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
2604 +index 96f91c1b4b6e..e92fa1275034 100644
2605 +--- a/drivers/gpu/drm/vc4/vc4_txp.c
2606 ++++ b/drivers/gpu/drm/vc4/vc4_txp.c
2607 +@@ -229,7 +229,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
2608 + int i;
2609 +
2610 + conn_state = drm_atomic_get_new_connector_state(state, conn);
2611 +- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
2612 ++ if (!conn_state->writeback_job)
2613 + return 0;
2614 +
2615 + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2616 +@@ -269,8 +269,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
2617 + u32 ctrl;
2618 + int i;
2619 +
2620 +- if (WARN_ON(!conn_state->writeback_job ||
2621 +- !conn_state->writeback_job->fb))
2622 ++ if (WARN_ON(!conn_state->writeback_job))
2623 + return;
2624 +
2625 + mode = &conn_state->crtc->state->adjusted_mode;
2626 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
2627 +index aa772ee0706f..35c284af574d 100644
2628 +--- a/drivers/infiniband/hw/cxgb4/mem.c
2629 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
2630 +@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
2631 + struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
2632 + {
2633 + int err;
2634 +- struct fw_ri_tpte tpt;
2635 ++ struct fw_ri_tpte *tpt;
2636 + u32 stag_idx;
2637 + static atomic_t key;
2638 +
2639 + if (c4iw_fatal_error(rdev))
2640 + return -EIO;
2641 +
2642 ++ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
2643 ++ if (!tpt)
2644 ++ return -ENOMEM;
2645 ++
2646 + stag_state = stag_state > 0;
2647 + stag_idx = (*stag) >> 8;
2648 +
2649 +@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
2650 + mutex_lock(&rdev->stats.lock);
2651 + rdev->stats.stag.fail++;
2652 + mutex_unlock(&rdev->stats.lock);
2653 ++ kfree(tpt);
2654 + return -ENOMEM;
2655 + }
2656 + mutex_lock(&rdev->stats.lock);
2657 +@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
2658 +
2659 + /* write TPT entry */
2660 + if (reset_tpt_entry)
2661 +- memset(&tpt, 0, sizeof(tpt));
2662 ++ memset(tpt, 0, sizeof(*tpt));
2663 + else {
2664 +- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
2665 ++ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
2666 + FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
2667 + FW_RI_TPTE_STAGSTATE_V(stag_state) |
2668 + FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
2669 +- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
2670 ++ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
2671 + (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
2672 + FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
2673 + FW_RI_VA_BASED_TO))|
2674 + FW_RI_TPTE_PS_V(page_size));
2675 +- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
2676 ++ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
2677 + FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
2678 +- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
2679 +- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
2680 +- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
2681 +- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
2682 +- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
2683 ++ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
2684 ++ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
2685 ++ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
2686 ++ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
2687 ++ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
2688 + }
2689 + err = write_adapter_mem(rdev, stag_idx +
2690 + (rdev->lldi.vr->stag.start >> 5),
2691 +- sizeof(tpt), &tpt, skb, wr_waitp);
2692 ++ sizeof(*tpt), tpt, skb, wr_waitp);
2693 +
2694 + if (reset_tpt_entry) {
2695 + c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
2696 +@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
2697 + rdev->stats.stag.cur -= 32;
2698 + mutex_unlock(&rdev->stats.lock);
2699 + }
2700 ++ kfree(tpt);
2701 + return err;
2702 + }
2703 +
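[Annotation: write_tpt_entry() stops building the fw_ri_tpte on the stack and kmallocs it, presumably to keep a firmware-bound structure out of the kernel stack and in memory the lower write path can safely reference. The shape of the conversion in plain C; the struct fields and write_mem() helper are illustrative.]

	#include <stdlib.h>
	#include <string.h>

	struct tpt_entry {
		unsigned int valid_to_pdid;
		unsigned int locread_to_qpid;
		/* ... more firmware-defined fields ... */
	};

	/* Stand-in for write_adapter_mem(): consumes the buffer contents. */
	static int write_mem(const void *buf, size_t len)
	{
		(void)buf; (void)len;
		return 0;
	}

	static int write_tpt_entry(int reset)
	{
		struct tpt_entry *tpt;
		int err;

		tpt = malloc(sizeof(*tpt)); /* heap, not a stack local */
		if (!tpt)
			return -1;

		if (reset)
			memset(tpt, 0, sizeof(*tpt));
		else
			tpt->valid_to_pdid = 1; /* fill in the real fields here */

		err = write_mem(tpt, sizeof(*tpt));
		free(tpt);                  /* freed on every exit path */
		return err;
	}

	int main(void)
	{
		return write_tpt_entry(0);
	}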
2704 +diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
2705 +index fd355cf59397..3daf11a7df25 100644
2706 +--- a/drivers/input/misc/da9063_onkey.c
2707 ++++ b/drivers/input/misc/da9063_onkey.c
2708 +@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
2709 + onkey->input->phys = onkey->phys;
2710 + onkey->input->dev.parent = &pdev->dev;
2711 +
2712 +- if (onkey->key_power)
2713 +- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
2714 +-
2715 +- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
2716 ++ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
2717 +
2718 + INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
2719 +
2720 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
2721 +index 04fe43440a3c..2d8434b7b623 100644
2722 +--- a/drivers/input/mouse/elantech.c
2723 ++++ b/drivers/input/mouse/elantech.c
2724 +@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
2725 + leave_breadcrumbs);
2726 + }
2727 +
2728 +-static bool elantech_use_host_notify(struct psmouse *psmouse,
2729 +- struct elantech_device_info *info)
2730 +-{
2731 +- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
2732 +- return true;
2733 +-
2734 +- switch (info->bus) {
2735 +- case ETP_BUS_PS2_ONLY:
2736 +- /* expected case */
2737 +- break;
2738 +- case ETP_BUS_SMB_HST_NTFY_ONLY:
2739 +- case ETP_BUS_PS2_SMB_HST_NTFY:
2740 +- /* SMbus implementation is stable since 2018 */
2741 +- if (dmi_get_bios_year() >= 2018)
2742 +- return true;
2743 +- /* fall through */
2744 +- default:
2745 +- psmouse_dbg(psmouse,
2746 +- "Ignoring SMBus bus provider %d\n", info->bus);
2747 +- break;
2748 +- }
2749 +-
2750 +- return false;
2751 +-}
2752 +-
2753 + /**
2754 + * elantech_setup_smbus - called once the PS/2 devices are enumerated
2755 + * and decides to instantiate a SMBus InterTouch device.
2756 +@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
2757 + * i2c_blacklist_pnp_ids.
2758 + * Old ICs are up to the user to decide.
2759 + */
2760 +- if (!elantech_use_host_notify(psmouse, info) ||
2761 ++ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
2762 + psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
2763 + return -ENXIO;
2764 + }
2765 +@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
2766 + return 0;
2767 + }
2768 +
2769 ++static bool elantech_use_host_notify(struct psmouse *psmouse,
2770 ++ struct elantech_device_info *info)
2771 ++{
2772 ++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
2773 ++ return true;
2774 ++
2775 ++ switch (info->bus) {
2776 ++ case ETP_BUS_PS2_ONLY:
2777 ++ /* expected case */
2778 ++ break;
2779 ++ case ETP_BUS_SMB_ALERT_ONLY:
2780 ++ /* fall-through */
2781 ++ case ETP_BUS_PS2_SMB_ALERT:
2782 ++ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
2783 ++ break;
2784 ++ case ETP_BUS_SMB_HST_NTFY_ONLY:
2785 ++ /* fall-through */
2786 ++ case ETP_BUS_PS2_SMB_HST_NTFY:
2787 ++ return true;
2788 ++ default:
2789 ++ psmouse_dbg(psmouse,
2790 ++ "Ignoring SMBus bus provider %d.\n",
2791 ++ info->bus);
2792 ++ }
2793 ++
2794 ++ return false;
2795 ++}
2796 ++
2797 + int elantech_init_smbus(struct psmouse *psmouse)
2798 + {
2799 + struct elantech_device_info info;
2800 +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
2801 +index 772493b1f665..190b9974526b 100644
2802 +--- a/drivers/input/rmi4/rmi_driver.c
2803 ++++ b/drivers/input/rmi4/rmi_driver.c
2804 +@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
2805 + }
2806 +
2807 + mutex_lock(&data->irq_mutex);
2808 +- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
2809 ++ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
2810 + data->irq_count);
2811 + /*
2812 + * At this point, irq_status has all bits that are set in the
2813 +@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
2814 + bitmap_copy(data->current_irq_mask, data->new_irq_mask,
2815 + data->num_of_irq_regs);
2816 +
2817 ++ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
2818 ++
2819 + error_unlock:
2820 + mutex_unlock(&data->irq_mutex);
2821 + return error;
2822 +@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
2823 + struct device *dev = &rmi_dev->dev;
2824 +
2825 + mutex_lock(&data->irq_mutex);
2826 ++ bitmap_andnot(data->fn_irq_bits,
2827 ++ data->fn_irq_bits, mask, data->irq_count);
2828 + bitmap_andnot(data->new_irq_mask,
2829 + data->current_irq_mask, mask, data->irq_count);
2830 +
2831 +diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
2832 +index 34923399ece4..1139714e72e2 100644
2833 +--- a/drivers/input/touchscreen/st1232.c
2834 ++++ b/drivers/input/touchscreen/st1232.c
2835 +@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
2836 + for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
2837 + finger[i].is_valid = buf[i + y] >> 7;
2838 + if (finger[i].is_valid) {
2839 +- finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
2840 +- finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
2841 ++ finger[i].x = ((buf[i + y] & 0x0070) << 4) |
2842 ++ buf[i + y + 1];
2843 ++ finger[i].y = ((buf[i + y] & 0x0007) << 8) |
2844 ++ buf[i + y + 2];
2845 +
2846 + /* st1232 includes a z-axis / touch strength */
2847 + if (ts->chip_info->have_z)
2848 +diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
2849 +index c72c036aea76..daefc52b0ec5 100644
2850 +--- a/drivers/irqchip/irq-sifive-plic.c
2851 ++++ b/drivers/irqchip/irq-sifive-plic.c
2852 +@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
2853 + }
2854 + }
2855 +
2856 +-static void plic_irq_enable(struct irq_data *d)
2857 ++static void plic_irq_unmask(struct irq_data *d)
2858 + {
2859 + unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
2860 + cpu_online_mask);
2861 +@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
2862 + plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
2863 + }
2864 +
2865 +-static void plic_irq_disable(struct irq_data *d)
2866 ++static void plic_irq_mask(struct irq_data *d)
2867 + {
2868 + plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
2869 + }
2870 +@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
2871 + if (cpu >= nr_cpu_ids)
2872 + return -EINVAL;
2873 +
2874 +- if (!irqd_irq_disabled(d)) {
2875 +- plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
2876 +- plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
2877 +- }
2878 ++ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
2879 ++ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
2880 +
2881 + irq_data_update_effective_affinity(d, cpumask_of(cpu));
2882 +
2883 +@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
2884 + }
2885 + #endif
2886 +
2887 ++static void plic_irq_eoi(struct irq_data *d)
2888 ++{
2889 ++ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
2890 ++
2891 ++ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
2892 ++}
2893 ++
2894 + static struct irq_chip plic_chip = {
2895 + .name = "SiFive PLIC",
2896 +- /*
2897 +- * There is no need to mask/unmask PLIC interrupts. They are "masked"
2898 +- * by reading claim and "unmasked" when writing it back.
2899 +- */
2900 +- .irq_enable = plic_irq_enable,
2901 +- .irq_disable = plic_irq_disable,
2902 ++ .irq_mask = plic_irq_mask,
2903 ++ .irq_unmask = plic_irq_unmask,
2904 ++ .irq_eoi = plic_irq_eoi,
2905 + #ifdef CONFIG_SMP
2906 + .irq_set_affinity = plic_set_affinity,
2907 + #endif
2908 +@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
2909 + static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
2910 + irq_hw_number_t hwirq)
2911 + {
2912 +- irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
2913 ++ irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
2914 + irq_set_chip_data(irq, NULL);
2915 + irq_set_noprobe(irq);
2916 + return 0;
2917 +@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
2918 + hwirq);
2919 + else
2920 + generic_handle_irq(irq);
2921 +- writel(hwirq, claim);
2922 + }
2923 + csr_set(sie, SIE_SEIE);
2924 + }
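[Annotation: the PLIC driver moves from enable/disable to the mask/unmask plus irq_eoi triple that handle_fasteoi_irq() expects — reading the per-hart claim register hands out the highest-priority pending interrupt, and writing the same number back completes it, which is why the writel() leaves the handler loop. A tiny single-threaded simulation of that claim/complete handshake, with the registers modeled as plain variables.]

	#include <stdio.h>

	/* Reading "claims" the pending interrupt; writing the hwirq back
	 * is the EOI that lets the PLIC deliver it again. */
	static unsigned int pending_hwirq = 7;

	static unsigned int claim_read(void)
	{
		unsigned int hwirq = pending_hwirq;
		pending_hwirq = 0; /* claimed: no longer pending */
		return hwirq;
	}

	static void claim_write(unsigned int hwirq)
	{
		printf("EOI for hwirq %u\n", hwirq);
	}

	int main(void)
	{
		unsigned int hwirq;

		while ((hwirq = claim_read()) != 0) {
			printf("handling hwirq %u\n", hwirq);
			claim_write(hwirq); /* done by plic_irq_eoi() in the hunk */
		}
		return 0;
	}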
2925 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2926 +index d249cf8ac277..8346e6d1816c 100644
2927 +--- a/drivers/md/dm-cache-target.c
2928 ++++ b/drivers/md/dm-cache-target.c
2929 +@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
2930 +
2931 + static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
2932 + {
2933 +- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
2934 ++ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
2935 + }
2936 +
2937 + static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
2938 +@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
2939 + {
2940 + struct dm_cache_migration *mg;
2941 +
2942 +- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
2943 +- if (!mg)
2944 +- return NULL;
2945 ++ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
2946 +
2947 + memset(mg, 0, sizeof(*mg));
2948 +
2949 +@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
2950 + struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
2951 +
2952 + cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
2953 +- if (!cell_prealloc) {
2954 +- defer_bio(cache, bio);
2955 +- return false;
2956 +- }
2957 +
2958 + build_key(oblock, end, &key);
2959 + r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
2960 +@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
2961 + struct dm_bio_prison_cell_v2 *prealloc;
2962 +
2963 + prealloc = alloc_prison_cell(cache);
2964 +- if (!prealloc) {
2965 +- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
2966 +- mg_complete(mg, false);
2967 +- return -ENOMEM;
2968 +- }
2969 +
2970 + /*
2971 + * Prevent writes to the block, but allow reads to continue.
2972 +@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
2973 + }
2974 +
2975 + mg = alloc_migration(cache);
2976 +- if (!mg) {
2977 +- policy_complete_background_work(cache->policy, op, false);
2978 +- background_work_end(cache);
2979 +- return -ENOMEM;
2980 +- }
2981 +
2982 + mg->op = op;
2983 + mg->overwrite_bio = bio;
2984 +@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
2985 + struct dm_bio_prison_cell_v2 *prealloc;
2986 +
2987 + prealloc = alloc_prison_cell(cache);
2988 +- if (!prealloc) {
2989 +- invalidate_complete(mg, false);
2990 +- return -ENOMEM;
2991 +- }
2992 +
2993 + build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
2994 + r = dm_cell_lock_v2(cache->prison, &key,
2995 +@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
2996 + return -EPERM;
2997 +
2998 + mg = alloc_migration(cache);
2999 +- if (!mg) {
3000 +- background_work_end(cache);
3001 +- return -ENOMEM;
3002 +- }
3003 +
3004 + mg->overwrite_bio = bio;
3005 + mg->invalidate_cblock = cblock;
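All of the deleted NULL checks above follow from the GFP change: with GFP_NOIO, mempool_alloc sleeps until an element is available and is documented never to return NULL, and dm_bio_prison_alloc_cell_v2 inherits the same contract, so the error paths become dead code. A hedged userspace analogue of a waiting allocator; the retry loop and names are illustrative, not the mempool implementation.

    /* Analogue: a waiting allocator can only return a valid pointer,
     * so callers can dereference the result without an error path. */
    #include <stdlib.h>
    #include <sched.h>

    static void *alloc_waiting(size_t n)
    {
        void *p;

        while (!(p = malloc(n)))
            sched_yield();  /* mempool_alloc(GFP_NOIO) similarly sleeps */
        return p;
    }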
3006 +diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
3007 +index 297bbc0f41f0..c3445d2cedb9 100644
3008 +--- a/drivers/md/raid0.c
3009 ++++ b/drivers/md/raid0.c
3010 +@@ -151,7 +151,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3011 + } else {
3012 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
3013 + mdname(mddev));
3014 +- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
3015 ++ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
3016 + err = -ENOTSUPP;
3017 + goto abort;
3018 + }
3019 +diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
3020 +index 32747425297d..64fff6abe60e 100644
3021 +--- a/drivers/memstick/host/jmb38x_ms.c
3022 ++++ b/drivers/memstick/host/jmb38x_ms.c
3023 +@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
3024 + if (!cnt) {
3025 + rc = -ENODEV;
3026 + pci_dev_busy = 1;
3027 +- goto err_out;
3028 ++ goto err_out_int;
3029 + }
3030 +
3031 + jm = kzalloc(sizeof(struct jmb38x_ms)
3032 +diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
3033 +index f7bdae5354c3..5047f7343ffc 100644
3034 +--- a/drivers/mmc/host/cqhci.c
3035 ++++ b/drivers/mmc/host/cqhci.c
3036 +@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
3037 + cq_host->slot[tag].flags = 0;
3038 +
3039 + cq_host->qcnt += 1;
3040 +-
3041 ++ /* Make sure descriptors are ready before ringing the doorbell */
3042 ++ wmb();
3043 + cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
3044 + if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
3045 + pr_debug("%s: cqhci: doorbell not set for tag %d\n",
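The wmb() added above enforces the standard descriptor-then-doorbell ordering: every descriptor store must be visible to the controller before the CQHCI_TDBR write that tells it to fetch. A compact C11 analogue using a release fence; desc, doorbell and post_descriptor are stand-ins for device memory and the driver path, not its actual names.

    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t desc[16];
    static _Atomic uint32_t doorbell;

    static void post_descriptor(unsigned tag, uint32_t val)
    {
        desc[tag] = val;                            /* fill the descriptor */
        atomic_thread_fence(memory_order_release);  /* analogue of wmb()   */
        atomic_store_explicit(&doorbell, 1u << tag, /* ring doorbell last  */
                              memory_order_relaxed);
    }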
3046 +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
3047 +index b334e81c5cab..9a0bc0c5fa4b 100644
3048 +--- a/drivers/mmc/host/mxs-mmc.c
3049 ++++ b/drivers/mmc/host/mxs-mmc.c
3050 +@@ -17,6 +17,7 @@
3051 + #include <linux/interrupt.h>
3052 + #include <linux/dma-mapping.h>
3053 + #include <linux/dmaengine.h>
3054 ++#include <linux/dma/mxs-dma.h>
3055 + #include <linux/highmem.h>
3056 + #include <linux/clk.h>
3057 + #include <linux/err.h>
3058 +@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
3059 + ssp->ssp_pio_words[2] = cmd1;
3060 + ssp->dma_dir = DMA_NONE;
3061 + ssp->slave_dirn = DMA_TRANS_NONE;
3062 +- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
3063 ++ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
3064 + if (!desc)
3065 + goto out;
3066 +
3067 +@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
3068 + ssp->ssp_pio_words[2] = cmd1;
3069 + ssp->dma_dir = DMA_NONE;
3070 + ssp->slave_dirn = DMA_TRANS_NONE;
3071 +- desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
3072 ++ desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
3073 + if (!desc)
3074 + goto out;
3075 +
3076 +@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
3077 + host->data = data;
3078 + ssp->dma_dir = dma_data_dir;
3079 + ssp->slave_dirn = slave_dirn;
3080 +- desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
3081 ++ desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
3082 + if (!desc)
3083 + goto out;
3084 +
3085 +diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
3086 +index 41c2677c587f..083e7e053c95 100644
3087 +--- a/drivers/mmc/host/sdhci-omap.c
3088 ++++ b/drivers/mmc/host/sdhci-omap.c
3089 +@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
3090 + * on temperature
3091 + */
3092 + if (temperature < -20000)
3093 +- phase_delay = min(max_window + 4 * max_len - 24,
3094 ++ phase_delay = min(max_window + 4 * (max_len - 1) - 24,
3095 + max_window +
3096 + DIV_ROUND_UP(13 * max_len, 16) * 4);
3097 + else if (temperature < 20000)
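The tuning fix above is pure index arithmetic: with max_len taps spaced 4 apart starting at max_window, the last tap inside the window is max_window + 4 * (max_len - 1), so backing off 24 from 4 * max_len started one step past the window. A quick check with illustrative numbers (40 and 10 are made up, not hardware values):

    #include <stdio.h>

    int main(void)
    {
        int max_window = 40, max_len = 10;            /* illustrative only */
        int before = max_window + 4 * max_len - 24;        /* 56 */
        int after  = max_window + 4 * (max_len - 1) - 24;  /* 52 */

        printf("before=%d after=%d last_tap=%d\n",
               before, after, max_window + 4 * (max_len - 1));
        return 0;
    }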
3098 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
3099 +index 16f15c93a102..bbeeb8618c80 100644
3100 +--- a/drivers/net/dsa/qca8k.c
3101 ++++ b/drivers/net/dsa/qca8k.c
3102 +@@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds)
3103 + BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
3104 +
3105 + /* Setup connection between CPU port & user ports */
3106 +- for (i = 0; i < DSA_MAX_PORTS; i++) {
3107 ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
3108 + /* CPU port gets connected to all user ports of the switch */
3109 + if (dsa_is_cpu_port(ds, i)) {
3110 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
3111 +@@ -1074,7 +1074,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
3112 + if (id != QCA8K_ID_QCA8337)
3113 + return -ENODEV;
3114 +
3115 +- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
3116 ++ priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
3117 + if (!priv->ds)
3118 + return -ENOMEM;
3119 +
3120 +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
3121 +index a268085ffad2..f5cc8b0a7c74 100644
3122 +--- a/drivers/net/dsa/rtl8366rb.c
3123 ++++ b/drivers/net/dsa/rtl8366rb.c
3124 +@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
3125 + irq = of_irq_get(intc, 0);
3126 + if (irq <= 0) {
3127 + dev_err(smi->dev, "failed to get parent IRQ\n");
3128 +- return irq ? irq : -EINVAL;
3129 ++ ret = irq ? irq : -EINVAL;
3130 ++ goto out_put_node;
3131 + }
3132 +
3133 + /* This clears the IRQ status register */
3134 +@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
3135 + &val);
3136 + if (ret) {
3137 + dev_err(smi->dev, "can't read interrupt status\n");
3138 +- return ret;
3139 ++ goto out_put_node;
3140 + }
3141 +
3142 + /* Fetch IRQ edge information from the descriptor */
3143 +@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
3144 + val);
3145 + if (ret) {
3146 + dev_err(smi->dev, "could not configure IRQ polarity\n");
3147 +- return ret;
3148 ++ goto out_put_node;
3149 + }
3150 +
3151 + ret = devm_request_threaded_irq(smi->dev, irq, NULL,
3152 +@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
3153 + "RTL8366RB", smi);
3154 + if (ret) {
3155 + dev_err(smi->dev, "unable to request irq: %d\n", ret);
3156 +- return ret;
3157 ++ goto out_put_node;
3158 + }
3159 + smi->irqdomain = irq_domain_add_linear(intc,
3160 + RTL8366RB_NUM_INTERRUPT,
3161 +@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
3162 + smi);
3163 + if (!smi->irqdomain) {
3164 + dev_err(smi->dev, "failed to create IRQ domain\n");
3165 +- return -EINVAL;
3166 ++ ret = -EINVAL;
3167 ++ goto out_put_node;
3168 + }
3169 + for (i = 0; i < smi->num_ports; i++)
3170 + irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
3171 +
3172 +- return 0;
3173 ++out_put_node:
3174 ++ of_node_put(intc);
3175 ++ return ret;
3176 + }
3177 +
3178 + static int rtl8366rb_set_addr(struct realtek_smi *smi)
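The error-path rework above funnels every exit through a single out_put_node label so the of_node reference held on intc is dropped on success and on each failure alike. A minimal sketch of that single-exit cleanup pattern; node_get/node_put and the step functions are hypothetical stand-ins for the OF refcount helpers.

    /* Single-exit cleanup: one label drops the reference on every path. */
    struct node;
    extern struct node *node_get(void);
    extern void node_put(struct node *);
    extern int step1(struct node *), step2(struct node *);

    int setup(void)
    {
        struct node *n = node_get();
        int ret;

        ret = step1(n);
        if (ret)
            goto out_put;
        ret = step2(n);
    out_put:
        node_put(n);    /* runs on success and on every failure path */
        return ret;
    }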
3179 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3180 +index b4a0fb281e69..bb65dd39f847 100644
3181 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3182 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3183 +@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
3184 + {
3185 + struct aq_nic_s *aq_nic = netdev_priv(ndev);
3186 +
3187 +- aq_nic_set_packet_filter(aq_nic, ndev->flags);
3188 +-
3189 +- aq_nic_set_multicast_list(aq_nic, ndev);
3190 ++ (void)aq_nic_set_multicast_list(aq_nic, ndev);
3191 + }
3192 +
3193 + static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
3194 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3195 +index 8f66e7817811..2a18439b36fb 100644
3196 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3197 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
3198 +@@ -631,9 +631,12 @@ err_exit:
3199 +
3200 + int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
3201 + {
3202 +- unsigned int packet_filter = self->packet_filter;
3203 ++ const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
3204 ++ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
3205 ++ unsigned int packet_filter = ndev->flags;
3206 + struct netdev_hw_addr *ha = NULL;
3207 + unsigned int i = 0U;
3208 ++ int err = 0;
3209 +
3210 + self->mc_list.count = 0;
3211 + if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
3212 +@@ -641,29 +644,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
3213 + } else {
3214 + netdev_for_each_uc_addr(ha, ndev) {
3215 + ether_addr_copy(self->mc_list.ar[i++], ha->addr);
3216 +-
3217 +- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
3218 +- break;
3219 + }
3220 + }
3221 +
3222 +- if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
3223 +- packet_filter |= IFF_ALLMULTI;
3224 +- } else {
3225 +- netdev_for_each_mc_addr(ha, ndev) {
3226 +- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
3227 +-
3228 +- if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
3229 +- break;
3230 ++ cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
3231 ++ if (cfg->is_mc_list_enabled) {
3232 ++ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
3233 ++ packet_filter |= IFF_ALLMULTI;
3234 ++ } else {
3235 ++ netdev_for_each_mc_addr(ha, ndev) {
3236 ++ ether_addr_copy(self->mc_list.ar[i++],
3237 ++ ha->addr);
3238 ++ }
3239 + }
3240 + }
3241 +
3242 + if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
3243 +- packet_filter |= IFF_MULTICAST;
3244 + self->mc_list.count = i;
3245 +- self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
3246 +- self->mc_list.ar,
3247 +- self->mc_list.count);
3248 ++ err = hw_ops->hw_multicast_list_set(self->aq_hw,
3249 ++ self->mc_list.ar,
3250 ++ self->mc_list.count);
3251 + }
3252 + return aq_nic_set_packet_filter(self, packet_filter);
3253 + }
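The rework above derives the filter from ndev->flags, walks the multicast list only when IFF_MULTICAST is set, falls back to IFF_ALLMULTI when the combined unicast-plus-multicast count would overflow the hardware table, and propagates the hw_multicast_list_set error instead of discarding it. A rough sketch of that decision order; TABLE_MAX, the flag bits and pick_filter are illustrative, not the driver's constants.

    #include <stdbool.h>

    #define TABLE_MAX 32      /* stands in for AQ_HW_MULTICAST_ADDRESS_MAX */

    struct counts { int uc, mc; };

    static unsigned pick_filter(unsigned flags, struct counts c, bool *use_list)
    {
        int entries = 0;

        if (c.uc > TABLE_MAX)
            flags |= 1u << 0;   /* promisc: the UC list alone overflows */
        else
            entries = c.uc;

        if (flags & (1u << 1)) {            /* multicast requested */
            if (entries + c.mc > TABLE_MAX)
                flags |= 1u << 2;           /* fall back to allmulti */
            else
                entries += c.mc;
        }
        *use_list = entries > 0;  /* program the table only when it fits */
        return flags;
    }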
3254 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3255 +index 3901d7994ca1..76bdbe1596d6 100644
3256 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3257 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
3258 +@@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
3259 + break;
3260 +
3261 + buff->is_error |= buff_->is_error;
3262 ++ buff->is_cso_err |= buff_->is_cso_err;
3263 +
3264 + } while (!buff_->is_eop);
3265 +
3266 +@@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
3267 + err = 0;
3268 + goto err_exit;
3269 + }
3270 +- if (buff->is_error) {
3271 ++ if (buff->is_error || buff->is_cso_err) {
3272 + buff_ = buff;
3273 + do {
3274 + next_ = buff_->next,
3275 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3276 +index 30f7fc4c97ff..2ad3fa6316ce 100644
3277 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3278 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
3279 +@@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
3280 + cfg->is_vlan_force_promisc);
3281 +
3282 + hw_atl_rpfl2multicast_flr_en_set(self,
3283 +- IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
3284 ++ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
3285 ++ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
3286 +
3287 + hw_atl_rpfl2_accept_all_mc_packets_set(self,
3288 +- IS_FILTER_ENABLED(IFF_ALLMULTI));
3289 ++ IS_FILTER_ENABLED(IFF_ALLMULTI) &&
3290 ++ IS_FILTER_ENABLED(IFF_MULTICAST));
3291 +
3292 + hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
3293 +
3294 +- cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
3295 +
3296 + for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
3297 + hw_atl_rpfl2_uc_flr_en_set(self,
3298 +@@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
3299 +
3300 + static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
3301 + {
3302 ++ int err;
3303 ++ u32 val;
3304 ++
3305 + hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
3306 +
3307 + /* Invalidate Descriptor Cache to prevent writing to the cached
3308 + * descriptors and to the data pointer of those descriptors
3309 + */
3310 +- hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
3311 ++ hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
3312 +
3313 +- return aq_hw_err_from_flags(self);
3314 ++ err = aq_hw_err_from_flags(self);
3315 ++
3316 ++ if (err)
3317 ++ goto err_exit;
3318 ++
3319 ++ readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
3320 ++ self, val, val == 1, 1000U, 10000U);
3321 ++
3322 ++err_exit:
3323 ++ return err;
3324 + }
3325 +
3326 + static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
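hw_stop now toggles the descriptor-cache init bit and then polls the new *_init_done_get helper with readx_poll_timeout_atomic rather than assuming the invalidation finished instantly. A self-contained sketch of that bounded-poll pattern; read_done_bit and udelay are stubs, and the step/total arguments mirror the 1000/10000 microsecond figures in the call above.

    #include <errno.h>
    #include <stdint.h>

    extern uint32_t read_done_bit(void);  /* stand-in for *_done_get helper */
    extern void udelay(unsigned us);

    static int poll_done(unsigned us_step, unsigned us_total)
    {
        unsigned waited;

        for (waited = 0; waited < us_total; waited += us_step) {
            if (read_done_bit() == 1)
                return 0;       /* hardware finished */
            udelay(us_step);
        }
        return -ETIMEDOUT;      /* mirrors readx_poll_timeout_atomic */
    }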
3327 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
3328 +index 1149812ae463..6f340695e6bd 100644
3329 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
3330 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
3331 +@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
3332 + HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
3333 + }
3334 +
3335 +-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
3336 ++void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
3337 + {
3338 ++ u32 val;
3339 ++
3340 ++ val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
3341 ++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
3342 ++ HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
3343 ++
3344 + aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
3345 + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
3346 + HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
3347 +- init);
3348 ++ val ^ 1);
3349 ++}
3350 ++
3351 ++u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
3352 ++{
3353 ++ return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
3354 ++ RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
3355 ++ RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
3356 + }
3357 +
3358 + void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
3359 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
3360 +index 0c37abbabca5..c3ee278c3747 100644
3361 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
3362 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
3363 +@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
3364 + u32 rx_pkt_buff_size_per_tc,
3365 + u32 buffer);
3366 +
3367 +-/* set rdm rx dma descriptor cache init */
3368 +-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
3369 ++/* toggle rdm rx dma descriptor cache init */
3370 ++void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
3371 ++
3372 ++/* get rdm rx dma descriptor cache init done */
3373 ++u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
3374 +
3375 + /* set rx xoff enable (per tc) */
3376 + void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
3377 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
3378 +index c3febcdfa92e..35887ad89025 100644
3379 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
3380 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
3381 +@@ -318,6 +318,25 @@
3382 + /* default value of bitfield rdm_desc_init_i */
3383 + #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
3384 +
3385 ++/* rdm_desc_init_done_i bitfield definitions
3386 ++ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
3387 ++ * port="pif_rdm_desc_init_done_i"
3388 ++ */
3389 ++
3390 ++/* register address for bitfield rdm_desc_init_done_i */
3391 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
3392 ++/* bitmask for bitfield rdm_desc_init_done_i */
3393 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
3394 ++/* inverted bitmask for bitfield rdm_desc_init_done_i */
3395 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
3396 ++/* lower bit position of bitfield rdm_desc_init_done_i */
3397 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
3398 ++/* width of bitfield rdm_desc_init_done_i */
3399 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
3400 ++/* default value of bitfield rdm_desc_init_done_i */
3401 ++#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
3402 ++
3403 ++
3404 + /* rx int_desc_wrb_en bitfield definitions
3405 + * preprocessor definitions for the bitfield "int_desc_wrb_en".
3406 + * port="pif_rdm_int_desc_wrb_en_i"
3407 +diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3408 +index da726489e3c8..7bc51f8d6f2f 100644
3409 +--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3410 ++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
3411 +@@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
3412 + /* Convert PHY temperature from 1/256 degree Celsius
3413 + * to 1/1000 degree Celsius.
3414 + */
3415 +- *temp = temp_res * 1000 / 256;
3416 ++ *temp = (temp_res & 0xFFFF) * 1000 / 256;
3417 +
3418 + return 0;
3419 + }
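The masking fix above keeps only the low 16 bits of the firmware response before scaling; if unrelated data occupies the upper half of temp_res, the unmasked multiply reports a wildly wrong temperature. A worked example with an illustrative raw word:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t temp_res = 0x00010E00;  /* illustrative: junk in bit 16 */
        int bad  = temp_res * 1000 / 256;             /* 270000: off     */
        int good = (temp_res & 0xFFFF) * 1000 / 256;  /* 14000 milli-degC */

        printf("bad=%d good=%d\n", bad, good);
        return 0;
    }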
3420 +diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
3421 +index 6703960c7cf5..d1101eea15c2 100644
3422 +--- a/drivers/net/ethernet/atheros/ag71xx.c
3423 ++++ b/drivers/net/ethernet/atheros/ag71xx.c
3424 +@@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
3425 + struct device *dev = &ag->pdev->dev;
3426 + struct net_device *ndev = ag->ndev;
3427 + static struct mii_bus *mii_bus;
3428 +- struct device_node *np;
3429 ++ struct device_node *np, *mnp;
3430 + int err;
3431 +
3432 + np = dev->of_node;
3433 +@@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
3434 + msleep(200);
3435 + }
3436 +
3437 +- err = of_mdiobus_register(mii_bus, np);
3438 ++ mnp = of_get_child_by_name(np, "mdio");
3439 ++ err = of_mdiobus_register(mii_bus, mnp);
3440 ++ of_node_put(mnp);
3441 + if (err)
3442 + goto mdio_err_put_clk;
3443 +
3444 +diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
3445 +index e24f5d2b6afe..53055ce5dfd6 100644
3446 +--- a/drivers/net/ethernet/broadcom/Kconfig
3447 ++++ b/drivers/net/ethernet/broadcom/Kconfig
3448 +@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
3449 + default y
3450 + depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
3451 + SIBYTE_SB1xxx_SOC
3452 +- select DIMLIB
3453 + ---help---
3454 + If you have a network (Ethernet) chipset belonging to this class,
3455 + say Y.
3456 +@@ -69,6 +68,7 @@ config BCMGENET
3457 + select FIXED_PHY
3458 + select BCM7XXX_PHY
3459 + select MDIO_BCM_UNIMAC
3460 ++ select DIMLIB
3461 + help
3462 + This driver supports the built-in Ethernet MACs found in the
3463 + Broadcom BCM7xxx Set Top Box family chipset.
3464 +@@ -188,6 +188,7 @@ config SYSTEMPORT
3465 + select MII
3466 + select PHYLIB
3467 + select FIXED_PHY
3468 ++ select DIMLIB
3469 + help
3470 + This driver supports the built-in Ethernet MACs found in the
3471 + Broadcom BCM7xxx Set Top Box family chipset using an internal
3472 +@@ -200,6 +201,7 @@ config BNXT
3473 + select LIBCRC32C
3474 + select NET_DEVLINK
3475 + select PAGE_POOL
3476 ++ select DIMLIB
3477 + ---help---
3478 + This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
3479 + Ethernet cards. To compile this driver as a module, choose M here:
3480 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3481 +index 4a8fc03d82fd..dbc69d8fa05f 100644
3482 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3483 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
3484 +@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
3485 + #define EXT_PWR_DOWN_PHY_EN (1 << 20)
3486 +
3487 + #define EXT_RGMII_OOB_CTRL 0x0C
3488 ++#define RGMII_MODE_EN_V123 (1 << 0)
3489 + #define RGMII_LINK (1 << 4)
3490 + #define OOB_DISABLE (1 << 5)
3491 + #define RGMII_MODE_EN (1 << 6)
3492 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
3493 +index 970e478a9017..e7c291bf4ed1 100644
3494 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
3495 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
3496 +@@ -258,7 +258,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
3497 + */
3498 + if (priv->ext_phy) {
3499 + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
3500 +- reg |= RGMII_MODE_EN | id_mode_dis;
3501 ++ reg |= id_mode_dis;
3502 ++ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
3503 ++ reg |= RGMII_MODE_EN_V123;
3504 ++ else
3505 ++ reg |= RGMII_MODE_EN;
3506 + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
3507 + }
3508 +
3509 +@@ -273,11 +277,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
3510 + struct bcmgenet_priv *priv = netdev_priv(dev);
3511 + struct device_node *dn = priv->pdev->dev.of_node;
3512 + struct phy_device *phydev;
3513 +- u32 phy_flags;
3514 ++ u32 phy_flags = 0;
3515 + int ret;
3516 +
3517 + /* Communicate the integrated PHY revision */
3518 +- phy_flags = priv->gphy_rev;
3519 ++ if (priv->internal_phy)
3520 ++ phy_flags = priv->gphy_rev;
3521 +
3522 + /* Initialize link state variables that bcmgenet_mii_setup() uses */
3523 + priv->old_link = -1;
3524 +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
3525 +index 3e863a71c513..7df5d7d211d4 100644
3526 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
3527 ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
3528 +@@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
3529 + {
3530 + u32 time_cnt;
3531 + u32 reg_value;
3532 ++ int ret;
3533 +
3534 + regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
3535 +
3536 + for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
3537 +- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
3538 ++ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
3539 ++ if (ret)
3540 ++ return ret;
3541 ++
3542 + reg_value &= st_msk;
3543 + if ((!!check_st) == (!!reg_value))
3544 + break;
3545 +diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
3546 +index 211c5f74b4c8..aec7e98bcc85 100644
3547 +--- a/drivers/net/ethernet/i825xx/lasi_82596.c
3548 ++++ b/drivers/net/ethernet/i825xx/lasi_82596.c
3549 +@@ -96,6 +96,8 @@
3550 +
3551 + #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
3552 +
3553 ++#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
3554 ++
3555 + #define DMA_WBACK(ndev, addr, len) \
3556 + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
3557 +
3558 +@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
3559 +
3560 + unregister_netdev (dev);
3561 + dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
3562 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
3563 ++ lp->dma_addr, LIB82596_DMA_ATTR);
3564 + free_netdev (dev);
3565 + return 0;
3566 + }
3567 +diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
3568 +index 1274ad24d6af..f9742af7f142 100644
3569 +--- a/drivers/net/ethernet/i825xx/lib82596.c
3570 ++++ b/drivers/net/ethernet/i825xx/lib82596.c
3571 +@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
3572 +
3573 + dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
3574 + &lp->dma_addr, GFP_KERNEL,
3575 +- DMA_ATTR_NON_CONSISTENT);
3576 ++ LIB82596_DMA_ATTR);
3577 + if (!dma) {
3578 + printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
3579 + return -ENOMEM;
3580 +@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
3581 + i = register_netdev(dev);
3582 + if (i) {
3583 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
3584 +- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
3585 ++ dma, lp->dma_addr, LIB82596_DMA_ATTR);
3586 + return i;
3587 + }
3588 +
3589 +diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
3590 +index 6eb6c2ff7f09..6436a98c5953 100644
3591 +--- a/drivers/net/ethernet/i825xx/sni_82596.c
3592 ++++ b/drivers/net/ethernet/i825xx/sni_82596.c
3593 +@@ -24,6 +24,8 @@
3594 +
3595 + static const char sni_82596_string[] = "snirm_82596";
3596 +
3597 ++#define LIB82596_DMA_ATTR 0
3598 ++
3599 + #define DMA_WBACK(priv, addr, len) do { } while (0)
3600 + #define DMA_INV(priv, addr, len) do { } while (0)
3601 + #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
3602 +@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
3603 +
3604 + unregister_netdev(dev);
3605 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
3606 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
3607 ++ lp->dma_addr, LIB82596_DMA_ATTR);
3608 + iounmap(lp->ca);
3609 + iounmap(lp->mpu_port);
3610 + free_netdev (dev);
3611 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
3612 +index 5cb55ea671e3..964e7d62f4b1 100644
3613 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
3614 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
3615 +@@ -2772,12 +2772,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3616 +
3617 + if (adapter->resetting &&
3618 + adapter->reset_reason == VNIC_RESET_MOBILITY) {
3619 +- u64 val = (0xff000000) | scrq->hw_irq;
3620 ++ struct irq_desc *desc = irq_to_desc(scrq->irq);
3621 ++ struct irq_chip *chip = irq_desc_get_chip(desc);
3622 +
3623 +- rc = plpar_hcall_norets(H_EOI, val);
3624 +- if (rc)
3625 +- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3626 +- val, rc);
3627 ++ chip->irq_eoi(&desc->irq_data);
3628 + }
3629 +
3630 + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
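Rather than issuing the H_EOI hypercall directly, the hunk above looks up the interrupt's irq_chip and calls its irq_eoi method, so the EOI goes through whatever controller actually owns the IRQ. A minimal sketch of dispatching through a chip ops table; the types and names below are illustrative, not the genirq structures.

    struct chip_ops {
        void (*eoi)(unsigned irq);
    };

    static void direct_eoi(unsigned irq) { (void)irq; /* one controller's EOI */ }

    static const struct chip_ops this_chip = { .eoi = direct_eoi };

    static void finish_irq(const struct chip_ops *chip, unsigned irq)
    {
        chip->eoi(irq);  /* the right EOI for whichever chip owns irq */
    }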
3631 +diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
3632 +index 2451d4a96490..041fb9f38eca 100644
3633 +--- a/drivers/net/ethernet/mscc/ocelot_board.c
3634 ++++ b/drivers/net/ethernet/mscc/ocelot_board.c
3635 +@@ -287,13 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
3636 + continue;
3637 +
3638 + phy = of_phy_find_device(phy_node);
3639 ++ of_node_put(phy_node);
3640 + if (!phy)
3641 + continue;
3642 +
3643 + err = ocelot_probe_port(ocelot, port, regs, phy);
3644 + if (err) {
3645 + of_node_put(portnp);
3646 +- return err;
3647 ++ goto out_put_ports;
3648 + }
3649 +
3650 + phy_mode = of_get_phy_mode(portnp);
3651 +@@ -321,7 +322,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
3652 + "invalid phy mode for port%d, (Q)SGMII only\n",
3653 + port);
3654 + of_node_put(portnp);
3655 +- return -EINVAL;
3656 ++ err = -EINVAL;
3657 ++ goto out_put_ports;
3658 + }
3659 +
3660 + serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
3661 +@@ -334,7 +336,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
3662 + "missing SerDes phys for port%d\n",
3663 + port);
3664 +
3665 +- goto err_probe_ports;
3666 ++ of_node_put(portnp);
3667 ++ goto out_put_ports;
3668 + }
3669 +
3670 + ocelot->ports[port]->serdes = serdes;
3671 +@@ -346,9 +349,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
3672 +
3673 + dev_info(&pdev->dev, "Ocelot switch probed\n");
3674 +
3675 +- return 0;
3676 +-
3677 +-err_probe_ports:
3678 ++out_put_ports:
3679 ++ of_node_put(ports);
3680 + return err;
3681 + }
3682 +
3683 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
3684 +index fc9954e4a772..9c73fb759b57 100644
3685 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
3686 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
3687 +@@ -407,8 +407,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
3688 + int numhashregs = (hw->multicast_filter_bins >> 5);
3689 + int mcbitslog2 = hw->mcast_bits_log2;
3690 + unsigned int value;
3691 ++ u32 mc_filter[8];
3692 + int i;
3693 +
3694 ++ memset(mc_filter, 0, sizeof(mc_filter));
3695 ++
3696 + value = readl(ioaddr + GMAC_PACKET_FILTER);
3697 + value &= ~GMAC_PACKET_FILTER_HMC;
3698 + value &= ~GMAC_PACKET_FILTER_HPF;
3699 +@@ -422,16 +425,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
3700 + /* Pass all multi */
3701 + value |= GMAC_PACKET_FILTER_PM;
3702 + /* Set all the bits of the HASH tab */
3703 +- for (i = 0; i < numhashregs; i++)
3704 +- writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
3705 ++ memset(mc_filter, 0xff, sizeof(mc_filter));
3706 + } else if (!netdev_mc_empty(dev)) {
3707 + struct netdev_hw_addr *ha;
3708 +- u32 mc_filter[8];
3709 +
3710 + /* Hash filter for multicast */
3711 + value |= GMAC_PACKET_FILTER_HMC;
3712 +
3713 +- memset(mc_filter, 0, sizeof(mc_filter));
3714 + netdev_for_each_mc_addr(ha, dev) {
3715 + /* The upper n bits of the calculated CRC are used to
3716 + * index the contents of the hash table. The number of
3717 +@@ -446,10 +446,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
3718 + */
3719 + mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
3720 + }
3721 +- for (i = 0; i < numhashregs; i++)
3722 +- writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
3723 + }
3724 +
3725 ++ for (i = 0; i < numhashregs; i++)
3726 ++ writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
3727 ++
3728 + value |= GMAC_PACKET_FILTER_HPF;
3729 +
3730 + /* Handle multiple unicast addresses */
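Hoisting the GMAC_HASH_TAB loop out of the branches means all hash registers are rewritten from mc_filter on every call, including the pass-all and empty-list cases, so bits programmed by a previous configuration can no longer linger. A sketch of the write-the-whole-array shape; reg_write is a stand-in for writel, and the fixed size 8 matches the mc_filter declaration above.

    #include <string.h>
    #include <stdint.h>

    extern void reg_write(unsigned idx, uint32_t val);  /* stand-in for writel */

    static void program_hash(const uint32_t *wanted, int nregs)
    {
        uint32_t filter[8];     /* assumes nregs <= 8, as in the driver */
        int i;

        memset(filter, 0, sizeof(filter));   /* start from a clean slate */
        if (wanted)
            memcpy(filter, wanted, nregs * sizeof(*filter));

        for (i = 0; i < nregs; i++)          /* always write every register */
            reg_write(i, filter[i]);
    }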
3731 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
3732 +index 85c68b7ee8c6..46d74f407aab 100644
3733 +--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
3734 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
3735 +@@ -370,7 +370,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
3736 + dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
3737 +
3738 + /* Handle multiple unicast addresses */
3739 +- if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
3740 ++ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
3741 + value |= XGMAC_FILTER_PR;
3742 + } else {
3743 + struct netdev_hw_addr *ha;
3744 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3745 +index 5c4408bdc843..fe2d3029de5e 100644
3746 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3747 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3748 +@@ -626,6 +626,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
3749 + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3750 + ptp_v2 = PTP_TCR_TSVER2ENA;
3751 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
3752 ++ ts_event_en = PTP_TCR_TSEVNTENA;
3753 + ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
3754 + ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
3755 + ptp_over_ethernet = PTP_TCR_TSIPENA;
3756 +@@ -4453,11 +4454,9 @@ int stmmac_suspend(struct device *dev)
3757 + if (!ndev || !netif_running(ndev))
3758 + return 0;
3759 +
3760 +- mutex_lock(&priv->lock);
3761 ++ phylink_mac_change(priv->phylink, false);
3762 +
3763 +- rtnl_lock();
3764 +- phylink_stop(priv->phylink);
3765 +- rtnl_unlock();
3766 ++ mutex_lock(&priv->lock);
3767 +
3768 + netif_device_detach(ndev);
3769 + stmmac_stop_all_queues(priv);
3770 +@@ -4472,11 +4471,19 @@ int stmmac_suspend(struct device *dev)
3771 + stmmac_pmt(priv, priv->hw, priv->wolopts);
3772 + priv->irq_wake = 1;
3773 + } else {
3774 ++ mutex_unlock(&priv->lock);
3775 ++ rtnl_lock();
3776 ++ phylink_stop(priv->phylink);
3777 ++ rtnl_unlock();
3778 ++ mutex_lock(&priv->lock);
3779 ++
3780 + stmmac_mac_set(priv, priv->ioaddr, false);
3781 + pinctrl_pm_select_sleep_state(priv->device);
3782 + /* Disable clock in case of PWM is off */
3783 +- clk_disable(priv->plat->pclk);
3784 +- clk_disable(priv->plat->stmmac_clk);
3785 ++ if (priv->plat->clk_ptp_ref)
3786 ++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
3787 ++ clk_disable_unprepare(priv->plat->pclk);
3788 ++ clk_disable_unprepare(priv->plat->stmmac_clk);
3789 + }
3790 + mutex_unlock(&priv->lock);
3791 +
3792 +@@ -4539,8 +4546,10 @@ int stmmac_resume(struct device *dev)
3793 + } else {
3794 + pinctrl_pm_select_default_state(priv->device);
3795 + /* enable the clk previously disabled */
3796 +- clk_enable(priv->plat->stmmac_clk);
3797 +- clk_enable(priv->plat->pclk);
3798 ++ clk_prepare_enable(priv->plat->stmmac_clk);
3799 ++ clk_prepare_enable(priv->plat->pclk);
3800 ++ if (priv->plat->clk_ptp_ref)
3801 ++ clk_prepare_enable(priv->plat->clk_ptp_ref);
3802 + /* reset the phy so that it's ready */
3803 + if (priv->mii)
3804 + stmmac_mdio_reset(priv->mii);
3805 +@@ -4562,12 +4571,16 @@ int stmmac_resume(struct device *dev)
3806 +
3807 + stmmac_start_all_queues(priv);
3808 +
3809 +- rtnl_lock();
3810 +- phylink_start(priv->phylink);
3811 +- rtnl_unlock();
3812 +-
3813 + mutex_unlock(&priv->lock);
3814 +
3815 ++ if (!device_may_wakeup(priv->device)) {
3816 ++ rtnl_lock();
3817 ++ phylink_start(priv->phylink);
3818 ++ rtnl_unlock();
3819 ++ }
3820 ++
3821 ++ phylink_mac_change(priv->phylink, true);
3822 ++
3823 + return 0;
3824 + }
3825 + EXPORT_SYMBOL_GPL(stmmac_resume);
3826 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
3827 +index b188fce3f641..658b399ac9ea 100644
3828 +--- a/drivers/net/ieee802154/ca8210.c
3829 ++++ b/drivers/net/ieee802154/ca8210.c
3830 +@@ -3152,12 +3152,12 @@ static int ca8210_probe(struct spi_device *spi_device)
3831 + goto error;
3832 + }
3833 +
3834 ++ priv->spi->dev.platform_data = pdata;
3835 + ret = ca8210_get_platform_data(priv->spi, pdata);
3836 + if (ret) {
3837 + dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
3838 + goto error;
3839 + }
3840 +- priv->spi->dev.platform_data = pdata;
3841 +
3842 + ret = ca8210_dev_com_init(priv);
3843 + if (ret) {
3844 +diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
3845 +index f61d094746c0..1a251f76d09b 100644
3846 +--- a/drivers/net/netdevsim/fib.c
3847 ++++ b/drivers/net/netdevsim/fib.c
3848 +@@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
3849 +
3850 + void nsim_fib_exit(void)
3851 + {
3852 +- unregister_pernet_subsys(&nsim_fib_net_ops);
3853 + unregister_fib_notifier(&nsim_fib_nb);
3854 ++ unregister_pernet_subsys(&nsim_fib_net_ops);
3855 + }
3856 +
3857 + int nsim_fib_init(void)
3858 +@@ -258,6 +258,7 @@ int nsim_fib_init(void)
3859 + err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
3860 + if (err < 0) {
3861 + pr_err("Failed to register fib notifier\n");
3862 ++ unregister_pernet_subsys(&nsim_fib_net_ops);
3863 + goto err_out;
3864 + }
3865 +
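The exit path above now tears down in the reverse order of registration (notifier first, then pernet ops), and the init path gains the matching unwind when notifier registration fails. A small generic sketch of the mirror-ordering rule; reg_a/reg_b and friends are placeholders, not the netdevsim symbols.

    extern int reg_a(void), reg_b(void);
    extern void unreg_a(void), unreg_b(void);

    int init(void)
    {
        int err = reg_a();

        if (err)
            return err;
        err = reg_b();
        if (err)
            unreg_a();  /* unwind what already succeeded */
        return err;
    }

    void fini(void)
    {
        unreg_b();      /* reverse of registration order */
        unreg_a();
    }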
3866 +diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
3867 +index e282600bd83e..c1d345c3cab3 100644
3868 +--- a/drivers/net/phy/mdio_device.c
3869 ++++ b/drivers/net/phy/mdio_device.c
3870 +@@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
3871 + return;
3872 +
3873 + if (mdiodev->reset_gpio)
3874 +- gpiod_set_value(mdiodev->reset_gpio, value);
3875 ++ gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
3876 +
3877 + if (mdiodev->reset_ctrl) {
3878 + if (value)
3879 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
3880 +index 2fea5541c35a..63dedec0433d 100644
3881 +--- a/drivers/net/phy/micrel.c
3882 ++++ b/drivers/net/phy/micrel.c
3883 +@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
3884 + return genphy_config_aneg(phydev);
3885 + }
3886 +
3887 ++static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
3888 ++ const u32 ksz_phy_id)
3889 ++{
3890 ++ int ret;
3891 ++
3892 ++ if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
3893 ++ return 0;
3894 ++
3895 ++ ret = phy_read(phydev, MII_BMSR);
3896 ++ if (ret < 0)
3897 ++ return ret;
3898 ++
3899 ++ /* KSZ8051 PHY and KSZ8794/KSZ8795/KSZ8765 switch share the same
3900 ++ * exact PHY ID. However, they can be told apart by the extended
3901 ++ * capability registers presence. The KSZ8051 PHY has them while
3902 ++ * the switch does not.
3903 ++ */
3904 ++ ret &= BMSR_ERCAP;
3905 ++ if (ksz_phy_id == PHY_ID_KSZ8051)
3906 ++ return ret;
3907 ++ else
3908 ++ return !ret;
3909 ++}
3910 ++
3911 ++static int ksz8051_match_phy_device(struct phy_device *phydev)
3912 ++{
3913 ++ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
3914 ++}
3915 ++
3916 + static int ksz8081_config_init(struct phy_device *phydev)
3917 + {
3918 + /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
3919 +@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
3920 + return kszphy_config_init(phydev);
3921 + }
3922 +
3923 ++static int ksz8795_match_phy_device(struct phy_device *phydev)
3924 ++{
3925 ++ return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
3926 ++}
3927 ++
3928 + static int ksz9021_load_values_from_of(struct phy_device *phydev,
3929 + const struct device_node *of_node,
3930 + u16 reg,
3931 +@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
3932 + .suspend = genphy_suspend,
3933 + .resume = genphy_resume,
3934 + }, {
3935 +- .phy_id = PHY_ID_KSZ8051,
3936 +- .phy_id_mask = MICREL_PHY_ID_MASK,
3937 + .name = "Micrel KSZ8051",
3938 + /* PHY_BASIC_FEATURES */
3939 + .driver_data = &ksz8051_type,
3940 +@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
3941 + .get_sset_count = kszphy_get_sset_count,
3942 + .get_strings = kszphy_get_strings,
3943 + .get_stats = kszphy_get_stats,
3944 ++ .match_phy_device = ksz8051_match_phy_device,
3945 + .suspend = genphy_suspend,
3946 + .resume = genphy_resume,
3947 + }, {
3948 +@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
3949 + .suspend = genphy_suspend,
3950 + .resume = genphy_resume,
3951 + }, {
3952 +- .phy_id = PHY_ID_KSZ8795,
3953 +- .phy_id_mask = MICREL_PHY_ID_MASK,
3954 +- .name = "Micrel KSZ8795",
3955 ++ .name = "Micrel KSZ87XX Switch",
3956 + /* PHY_BASIC_FEATURES */
3957 + .config_init = kszphy_config_init,
3958 + .config_aneg = ksz8873mll_config_aneg,
3959 + .read_status = ksz8873mll_read_status,
3960 ++ .match_phy_device = ksz8795_match_phy_device,
3961 + .suspend = genphy_suspend,
3962 + .resume = genphy_resume,
3963 + }, {
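Because the KSZ8051 PHY and the KSZ8794/8795/8765 switch report the same ID, the driver entries above drop the static phy_id match and install match_phy_device callbacks that also probe BMSR's extended-capability bit, which only the PHY sets. A sketch of ID-plus-capability matching; the register accessors and mask values below are stubs, not MICREL_PHY_ID_MASK or the real reads.

    #include <stdbool.h>

    #define ID_MASK   0x00FFFFF0u   /* illustrative ID mask */
    #define ERCAP_BIT 0x0001u       /* illustrative BMSR_ERCAP analogue */

    extern unsigned read_id(void);   /* stand-ins for the phy_read()s */
    extern unsigned read_bmsr(void);

    /* Same ID, two devices: the PHY has extended caps, the switch does not. */
    static bool matches(unsigned wanted_id, bool wants_ercap)
    {
        if ((read_id() & ID_MASK) != wanted_id)
            return false;
        return !!(read_bmsr() & ERCAP_BIT) == wants_ercap;
    }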
3964 +diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
3965 +index 7935593debb1..a1caeee12236 100644
3966 +--- a/drivers/net/phy/phy-c45.c
3967 ++++ b/drivers/net/phy/phy-c45.c
3968 +@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
3969 + {
3970 + int val;
3971 +
3972 ++ linkmode_zero(phydev->lp_advertising);
3973 ++
3974 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
3975 + if (val < 0)
3976 + return val;
3977 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
3978 +index 6b0f89369b46..0ff8df35c779 100644
3979 +--- a/drivers/net/phy/phy.c
3980 ++++ b/drivers/net/phy/phy.c
3981 +@@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
3982 + val);
3983 + change_autoneg = true;
3984 + break;
3985 ++ case MII_CTRL1000:
3986 ++ mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
3987 ++ val);
3988 ++ change_autoneg = true;
3989 ++ break;
3990 + default:
3991 + /* do nothing */
3992 + break;
3993 +@@ -561,9 +566,6 @@ int phy_start_aneg(struct phy_device *phydev)
3994 + if (AUTONEG_DISABLE == phydev->autoneg)
3995 + phy_sanitize_settings(phydev);
3996 +
3997 +- /* Invalidate LP advertising flags */
3998 +- linkmode_zero(phydev->lp_advertising);
3999 +-
4000 + err = phy_config_aneg(phydev);
4001 + if (err < 0)
4002 + goto out_unlock;
4003 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
4004 +index 27ebc2c6c2d0..d6c9350b65bf 100644
4005 +--- a/drivers/net/phy/phy_device.c
4006 ++++ b/drivers/net/phy/phy_device.c
4007 +@@ -1823,7 +1823,14 @@ int genphy_read_status(struct phy_device *phydev)
4008 +
4009 + linkmode_zero(phydev->lp_advertising);
4010 +
4011 +- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
4012 ++ if (phydev->autoneg == AUTONEG_ENABLE) {
4013 ++ if (!phydev->autoneg_complete) {
4014 ++ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
4015 ++ 0);
4016 ++ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
4017 ++ return 0;
4018 ++ }
4019 ++
4020 + if (phydev->is_gigabit_capable) {
4021 + lpagb = phy_read(phydev, MII_STAT1000);
4022 + if (lpagb < 0)
4023 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
4024 +index 04137ac373b0..9eedc0714422 100644
4025 +--- a/drivers/net/usb/r8152.c
4026 ++++ b/drivers/net/usb/r8152.c
4027 +@@ -4533,10 +4533,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
4028 + struct r8152 *tp = usb_get_intfdata(intf);
4029 +
4030 + clear_bit(SELECTIVE_SUSPEND, &tp->flags);
4031 +- mutex_lock(&tp->control);
4032 + tp->rtl_ops.init(tp);
4033 + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
4034 +- mutex_unlock(&tp->control);
4035 ++ set_ethernet_addr(tp);
4036 + return rtl8152_resume(intf);
4037 + }
4038 +
4039 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4040 +index 3b12e7ad35e1..acbadfdbdd3f 100644
4041 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4042 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4043 +@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4044 + {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
4045 +
4046 + /* 9000 Series */
4047 +- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4048 +- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4049 +- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4050 +- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4051 +- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
4052 +- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
4053 +- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4054 +- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4055 +- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4056 +- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4057 +- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4058 +- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4059 +- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
4060 +- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
4061 +- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4062 +- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4063 +- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
4064 +- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
4065 +- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4066 +- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4067 +- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4068 +- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4069 +- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4070 +- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4071 +- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
4072 ++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4073 ++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4074 ++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4075 ++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4076 ++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4077 ++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4078 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4079 ++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4080 ++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4081 ++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4082 ++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4083 ++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4084 ++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4085 ++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4086 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4087 ++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4088 ++ {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4089 ++ {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4090 ++ {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4091 ++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4092 ++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4093 ++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4094 ++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4095 ++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4096 ++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4097 ++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4098 ++
4099 + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4100 + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
4101 + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
4102 +@@ -643,34 +645,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4103 + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
4104 + {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
4105 + {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
4106 +- {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
4107 +- {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
4108 +- {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
4109 +- {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
4110 +- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
4111 +- {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
4112 +- {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
4113 +- {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
4114 +- {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
4115 +- {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
4116 +- {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
4117 +- {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
4118 +- {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
4119 +- {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
4120 +- {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
4121 +- {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
4122 +- {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
4123 +- {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
4124 +- {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
4125 +- {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4126 +- {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4127 +- {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
4128 +- {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
4129 +- {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
4130 +- {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
4131 +- {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
4132 +- {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
4133 +- {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
4134 ++
4135 ++ {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4136 ++ {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4137 ++ {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4138 ++ {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4139 ++ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4140 ++ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4141 ++ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4142 ++ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4143 ++ {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4144 ++ {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4145 ++ {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4146 ++ {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4147 ++ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4148 ++ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4149 ++ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4150 ++ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4151 ++ {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4152 ++ {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4153 ++ {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4154 ++ {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4155 ++ {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4156 ++ {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4157 ++ {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4158 ++ {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4159 ++ {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4160 ++ {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4161 ++
4162 + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
4163 + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
4164 + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
4165 +@@ -726,62 +728,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4166 + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4167 + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4168 +
4169 +- {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
4170 +- {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
4171 +- {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
4172 +- {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
4173 +- {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
4174 +- {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
4175 +- {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
4176 +- {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
4177 +- {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
4178 +- {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
4179 +- {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
4180 +- {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
4181 +- {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
4182 +- {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
4183 +- {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
4184 +- {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
4185 +- {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
4186 +- {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
4187 +- {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
4188 +- {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4189 +- {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4190 +- {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
4191 +- {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
4192 +- {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
4193 +- {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
4194 +- {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
4195 +- {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
4196 +- {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
4197 +- {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
4198 +- {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
4199 +- {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
4200 +- {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
4201 +- {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
4202 +- {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
4203 +- {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
4204 +- {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
4205 +- {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
4206 +- {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
4207 +- {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
4208 +- {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
4209 +- {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
4210 +- {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
4211 +- {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
4212 +- {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4213 +- {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
4214 +- {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
4215 +- {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
4216 +- {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4217 +- {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4218 +- {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
4219 +- {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
4220 +- {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
4221 +- {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
4222 +- {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
4223 +- {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
4224 +- {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
4225 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4226 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4227 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4228 ++ {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4229 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4230 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4231 ++ {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4232 ++ {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4233 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4234 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4235 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4236 ++ {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4237 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4238 ++ {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4239 ++ {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4240 ++ {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4241 ++ {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4242 ++ {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4243 ++ {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4244 ++ {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4245 ++ {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4246 ++ {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4247 ++ {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4248 ++ {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4249 ++ {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4250 ++ {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4251 ++
4252 ++ {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4253 ++ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4254 ++ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4255 ++ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4256 ++ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4257 ++ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4258 ++ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4259 ++ {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4260 ++ {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4261 ++ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4262 ++ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4263 ++ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4264 ++ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4265 ++ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4266 ++ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4267 ++ {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4268 ++ {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4269 ++ {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4270 ++ {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4271 ++ {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4272 ++ {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4273 ++ {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4274 ++ {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4275 ++ {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4276 ++ {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4277 ++ {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4278 ++
4279 + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
4280 + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
4281 + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
4282 +@@ -821,34 +821,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4283 + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
4284 + {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
4285 + {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
4286 +- {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
4287 +- {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
4288 +- {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
4289 +- {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
4290 +- {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
4291 +- {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
4292 +- {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
4293 +- {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
4294 +- {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
4295 +- {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
4296 +- {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
4297 +- {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
4298 +- {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
4299 +- {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
4300 +- {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
4301 +- {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4302 +- {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
4303 +- {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
4304 +- {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
4305 +- {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4306 +- {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4307 +- {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
4308 +- {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
4309 +- {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
4310 +- {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
4311 +- {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
4312 +- {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
4313 +- {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
4314 ++
4315 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4316 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4317 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4318 ++ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4319 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4320 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4321 ++ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4322 ++ {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4323 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4324 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4325 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4326 ++ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4327 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4328 ++ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4329 ++ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4330 ++ {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4331 ++ {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4332 ++ {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4333 ++ {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4334 ++ {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4335 ++ {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4336 ++ {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4337 ++ {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
4338 ++ {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4339 ++ {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4340 ++ {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4341 ++
4342 + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
4343 + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
4344 + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
4345 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
4346 +index 240f762b3749..103ed00775eb 100644
4347 +--- a/drivers/net/xen-netback/interface.c
4348 ++++ b/drivers/net/xen-netback/interface.c
4349 +@@ -719,7 +719,6 @@ err_unmap:
4350 + xenvif_unmap_frontend_data_rings(queue);
4351 + netif_napi_del(&queue->napi);
4352 + err:
4353 +- module_put(THIS_MODULE);
4354 + return err;
4355 + }
4356 +
4357 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
4358 +index d3d6b7bd6903..36a5ed1eacbe 100644
4359 +--- a/drivers/nvme/host/core.c
4360 ++++ b/drivers/nvme/host/core.c
4361 +@@ -103,10 +103,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
4362 + */
4363 + if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
4364 + return;
4365 +- revalidate_disk(ns->disk);
4366 + blk_set_queue_dying(ns->queue);
4367 + /* Forcibly unquiesce queues to avoid blocking dispatch */
4368 + blk_mq_unquiesce_queue(ns->queue);
4369 ++ /*
4370 ++ * Revalidate after unblocking dispatchers that may be holding bd_mutex
4371 ++ */
4372 ++ revalidate_disk(ns->disk);
4373 + }
4374 +
4375 + static void nvme_queue_scan(struct nvme_ctrl *ctrl)
4376 +@@ -849,7 +852,7 @@ out:
4377 + static int nvme_submit_user_cmd(struct request_queue *q,
4378 + struct nvme_command *cmd, void __user *ubuffer,
4379 + unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
4380 +- u32 meta_seed, u32 *result, unsigned timeout)
4381 ++ u32 meta_seed, u64 *result, unsigned timeout)
4382 + {
4383 + bool write = nvme_is_write(cmd);
4384 + struct nvme_ns *ns = q->queuedata;
4385 +@@ -890,7 +893,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
4386 + else
4387 + ret = nvme_req(req)->status;
4388 + if (result)
4389 +- *result = le32_to_cpu(nvme_req(req)->result.u32);
4390 ++ *result = le64_to_cpu(nvme_req(req)->result.u64);
4391 + if (meta && !ret && !write) {
4392 + if (copy_to_user(meta_buffer, meta, meta_len))
4393 + ret = -EFAULT;
4394 +@@ -1336,6 +1339,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
4395 + struct nvme_command c;
4396 + unsigned timeout = 0;
4397 + u32 effects;
4398 ++ u64 result;
4399 ++ int status;
4400 ++
4401 ++ if (!capable(CAP_SYS_ADMIN))
4402 ++ return -EACCES;
4403 ++ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
4404 ++ return -EFAULT;
4405 ++ if (cmd.flags)
4406 ++ return -EINVAL;
4407 ++
4408 ++ memset(&c, 0, sizeof(c));
4409 ++ c.common.opcode = cmd.opcode;
4410 ++ c.common.flags = cmd.flags;
4411 ++ c.common.nsid = cpu_to_le32(cmd.nsid);
4412 ++ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
4413 ++ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
4414 ++ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
4415 ++ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
4416 ++ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
4417 ++ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
4418 ++ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
4419 ++ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
4420 ++
4421 ++ if (cmd.timeout_ms)
4422 ++ timeout = msecs_to_jiffies(cmd.timeout_ms);
4423 ++
4424 ++ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
4425 ++ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
4426 ++ (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
4427 ++ (void __user *)(uintptr_t)cmd.metadata,
4428 ++ cmd.metadata_len, 0, &result, timeout);
4429 ++ nvme_passthru_end(ctrl, effects);
4430 ++
4431 ++ if (status >= 0) {
4432 ++ if (put_user(result, &ucmd->result))
4433 ++ return -EFAULT;
4434 ++ }
4435 ++
4436 ++ return status;
4437 ++}
4438 ++
4439 ++static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
4440 ++ struct nvme_passthru_cmd64 __user *ucmd)
4441 ++{
4442 ++ struct nvme_passthru_cmd64 cmd;
4443 ++ struct nvme_command c;
4444 ++ unsigned timeout = 0;
4445 ++ u32 effects;
4446 + int status;
4447 +
4448 + if (!capable(CAP_SYS_ADMIN))
4449 +@@ -1406,6 +1457,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
4450 + srcu_read_unlock(&head->srcu, idx);
4451 + }
4452 +
4453 ++static bool is_ctrl_ioctl(unsigned int cmd)
4454 ++{
4455 ++ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
4456 ++ return true;
4457 ++ if (is_sed_ioctl(cmd))
4458 ++ return true;
4459 ++ return false;
4460 ++}
4461 ++
4462 ++static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
4463 ++ void __user *argp,
4464 ++ struct nvme_ns_head *head,
4465 ++ int srcu_idx)
4466 ++{
4467 ++ struct nvme_ctrl *ctrl = ns->ctrl;
4468 ++ int ret;
4469 ++
4470 ++ nvme_get_ctrl(ns->ctrl);
4471 ++ nvme_put_ns_from_disk(head, srcu_idx);
4472 ++
4473 ++ switch (cmd) {
4474 ++ case NVME_IOCTL_ADMIN_CMD:
4475 ++ ret = nvme_user_cmd(ctrl, NULL, argp);
4476 ++ break;
4477 ++ case NVME_IOCTL_ADMIN64_CMD:
4478 ++ ret = nvme_user_cmd64(ctrl, NULL, argp);
4479 ++ break;
4480 ++ default:
4481 ++ ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
4482 ++ break;
4483 ++ }
4484 ++ nvme_put_ctrl(ctrl);
4485 ++ return ret;
4486 ++}
4487 ++
4488 + static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
4489 + unsigned int cmd, unsigned long arg)
4490 + {
4491 +@@ -1423,20 +1509,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
4492 + * separately and drop the ns SRCU reference early. This avoids a
4493 + * deadlock when deleting namespaces using the passthrough interface.
4494 + */
4495 +- if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
4496 +- struct nvme_ctrl *ctrl = ns->ctrl;
4497 +-
4498 +- nvme_get_ctrl(ns->ctrl);
4499 +- nvme_put_ns_from_disk(head, srcu_idx);
4500 +-
4501 +- if (cmd == NVME_IOCTL_ADMIN_CMD)
4502 +- ret = nvme_user_cmd(ctrl, NULL, argp);
4503 +- else
4504 +- ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
4505 +-
4506 +- nvme_put_ctrl(ctrl);
4507 +- return ret;
4508 +- }
4509 ++ if (is_ctrl_ioctl(cmd))
4510 ++ return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
4511 +
4512 + switch (cmd) {
4513 + case NVME_IOCTL_ID:
4514 +@@ -1449,6 +1523,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
4515 + case NVME_IOCTL_SUBMIT_IO:
4516 + ret = nvme_submit_io(ns, argp);
4517 + break;
4518 ++ case NVME_IOCTL_IO64_CMD:
4519 ++ ret = nvme_user_cmd64(ns->ctrl, ns, argp);
4520 ++ break;
4521 + default:
4522 + if (ns->ndev)
4523 + ret = nvme_nvm_ioctl(ns, cmd, arg);
4524 +@@ -2267,6 +2344,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
4525 + .vid = 0x14a4,
4526 + .fr = "22301111",
4527 + .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
4528 ++ },
4529 ++ {
4530 ++ /*
4531 ++ * Kingston drives with firmware version E8FK11.T generate no
4532 ++ * interrupt after resume from suspend-to-idle; see
4533 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=204887
4534 ++ */
4535 ++ .vid = 0x2646,
4536 ++ .fr = "E8FK11.T",
4537 ++ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
4538 + }
4539 + };
4540 +
4541 +@@ -2510,8 +2597,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
4542 + list_add_tail(&subsys->entry, &nvme_subsystems);
4543 + }
4544 +
4545 +- if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
4546 +- dev_name(ctrl->device))) {
4547 ++ ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
4548 ++ dev_name(ctrl->device));
4549 ++ if (ret) {
4550 + dev_err(ctrl->device,
4551 + "failed to create sysfs link from subsystem.\n");
4552 + goto out_put_subsystem;
4553 +@@ -2812,6 +2900,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
4554 + switch (cmd) {
4555 + case NVME_IOCTL_ADMIN_CMD:
4556 + return nvme_user_cmd(ctrl, NULL, argp);
4557 ++ case NVME_IOCTL_ADMIN64_CMD:
4558 ++ return nvme_user_cmd64(ctrl, NULL, argp);
4559 + case NVME_IOCTL_IO_CMD:
4560 + return nvme_dev_user_cmd(ctrl, argp);
4561 + case NVME_IOCTL_RESET:
4562 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
4563 +index 732d5b63ec05..2303d44fc3cb 100644
4564 +--- a/drivers/nvme/host/pci.c
4565 ++++ b/drivers/nvme/host/pci.c
4566 +@@ -769,7 +769,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
4567 + struct bio_vec *bv)
4568 + {
4569 + struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
4570 +- unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
4571 ++ unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
4572 ++ unsigned int first_prp_len = dev->ctrl.page_size - offset;
4573 +
4574 + iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
4575 + if (dma_mapping_error(dev->dev, iod->first_dma))
4576 +@@ -2894,11 +2895,21 @@ static int nvme_suspend(struct device *dev)
4577 + if (ret < 0)
4578 + goto unfreeze;
4579 +
4580 ++ /*
4581 ++ * A saved state prevents pci pm from generically controlling the
4582 ++ * device's power. If we're using protocol specific settings, we don't
4583 ++ * want pci interfering.
4584 ++ */
4585 ++ pci_save_state(pdev);
4586 ++
4587 + ret = nvme_set_power_state(ctrl, ctrl->npss);
4588 + if (ret < 0)
4589 + goto unfreeze;
4590 +
4591 + if (ret) {
4592 ++ /* discard the saved state */
4593 ++ pci_load_saved_state(pdev, NULL);
4594 ++
4595 + /*
4596 + * Clearing npss forces a controller reset on resume. The
4597 + * correct value will be rediscovered then.
4598 +@@ -2906,14 +2917,7 @@ static int nvme_suspend(struct device *dev)
4599 + nvme_dev_disable(ndev, true);
4600 + ctrl->npss = 0;
4601 + ret = 0;
4602 +- goto unfreeze;
4603 + }
4604 +- /*
4605 +- * A saved state prevents pci pm from generically controlling the
4606 +- * device's power. If we're using protocol specific settings, we don't
4607 +- * want pci interfering.
4608 +- */
4609 +- pci_save_state(pdev);
4610 + unfreeze:
4611 + nvme_unfreeze(ctrl);
4612 + return ret;
4613 +@@ -3038,6 +3042,9 @@ static const struct pci_device_id nvme_id_table[] = {
4614 + .driver_data = NVME_QUIRK_LIGHTNVM, },
4615 + { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
4616 + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4617 ++ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
4618 ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
4619 ++ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
4620 + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
4621 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
4622 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
4623 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
4624 +index 1a6449bc547b..842ef876724f 100644
4625 +--- a/drivers/nvme/host/rdma.c
4626 ++++ b/drivers/nvme/host/rdma.c
4627 +@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
4628 + static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
4629 + {
4630 + return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
4631 +- ibdev->attrs.max_fast_reg_page_list_len);
4632 ++ ibdev->attrs.max_fast_reg_page_list_len - 1);
4633 + }
4634 +
4635 + static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
4636 +@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
4637 + const int cq_factor = send_wr_factor + 1; /* + RECV */
4638 + int comp_vector, idx = nvme_rdma_queue_idx(queue);
4639 + enum ib_poll_context poll_ctx;
4640 +- int ret;
4641 ++ int ret, pages_per_mr;
4642 +
4643 + queue->device = nvme_rdma_find_get_device(queue->cm_id);
4644 + if (!queue->device) {
4645 +@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
4646 + goto out_destroy_qp;
4647 + }
4648 +
4649 ++ /*
4650 ++ * Currently we don't use SG_GAPS MRs, so if the first entry is
4651 ++ * misaligned we'll end up using two entries for a single data page;
4652 ++ * one additional entry is therefore required.
4653 ++ */
4654 ++ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
4655 + ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
4656 + queue->queue_size,
4657 + IB_MR_TYPE_MEM_REG,
4658 +- nvme_rdma_get_max_fr_pages(ibdev), 0);
4659 ++ pages_per_mr, 0);
4660 + if (ret) {
4661 + dev_err(queue->ctrl->ctrl.device,
4662 + "failed to initialize MR pool sized %d for QID %d\n",
4663 +@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
4664 + if (!ret) {
4665 + set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
4666 + } else {
4667 +- __nvme_rdma_stop_queue(queue);
4668 ++ if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
4669 ++ __nvme_rdma_stop_queue(queue);
4670 + dev_info(ctrl->ctrl.device,
4671 + "failed to connect queue: %d ret=%d\n", idx, ret);
4672 + }
4673 +@@ -824,8 +831,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
4674 + if (error)
4675 + goto out_stop_queue;
4676 +
4677 +- ctrl->ctrl.max_hw_sectors =
4678 +- (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
4679 ++ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
4680 ++ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
4681 +
4682 + error = nvme_init_identify(&ctrl->ctrl);
4683 + if (error)
4684 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
4685 +index 606b13d35d16..bdadb27b28bb 100644
4686 +--- a/drivers/nvme/host/tcp.c
4687 ++++ b/drivers/nvme/host/tcp.c
4688 +@@ -1039,7 +1039,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
4689 + {
4690 + struct nvme_tcp_queue *queue =
4691 + container_of(w, struct nvme_tcp_queue, io_work);
4692 +- unsigned long start = jiffies + msecs_to_jiffies(1);
4693 ++ unsigned long deadline = jiffies + msecs_to_jiffies(1);
4694 +
4695 + do {
4696 + bool pending = false;
4697 +@@ -1064,7 +1064,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
4698 + if (!pending)
4699 + return;
4700 +
4701 +- } while (time_after(jiffies, start)); /* quota is exhausted */
4702 ++ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
4703 +
4704 + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
4705 + }
4706 +diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
4707 +index 7989703b883c..6bd610ee2cd7 100644
4708 +--- a/drivers/of/of_reserved_mem.c
4709 ++++ b/drivers/of/of_reserved_mem.c
4710 +@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
4711 + if (!target)
4712 + return -ENODEV;
4713 +
4714 +- if (!of_device_is_available(target))
4715 ++ if (!of_device_is_available(target)) {
4716 ++ of_node_put(target);
4717 + return 0;
4718 ++ }
4719 +
4720 + rmem = __find_rmem(target);
4721 + of_node_put(target);
4722 +diff --git a/drivers/opp/of.c b/drivers/opp/of.c
4723 +index b313aca9894f..4c7feb3ac4cd 100644
4724 +--- a/drivers/opp/of.c
4725 ++++ b/drivers/opp/of.c
4726 +@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
4727 + {
4728 + struct dev_pm_opp *opp;
4729 +
4730 +- lockdep_assert_held(&opp_table_lock);
4731 +-
4732 + mutex_lock(&opp_table->lock);
4733 +
4734 + list_for_each_entry(opp, &opp_table->opp_list, node) {
4735 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4736 +index b97d9e10c9cc..57f15a7e6f0b 100644
4737 +--- a/drivers/pci/pci.c
4738 ++++ b/drivers/pci/pci.c
4739 +@@ -958,19 +958,6 @@ void pci_refresh_power_state(struct pci_dev *dev)
4740 + pci_update_current_state(dev, dev->current_state);
4741 + }
4742 +
4743 +-/**
4744 +- * pci_power_up - Put the given device into D0 forcibly
4745 +- * @dev: PCI device to power up
4746 +- */
4747 +-void pci_power_up(struct pci_dev *dev)
4748 +-{
4749 +- if (platform_pci_power_manageable(dev))
4750 +- platform_pci_set_power_state(dev, PCI_D0);
4751 +-
4752 +- pci_raw_set_power_state(dev, PCI_D0);
4753 +- pci_update_current_state(dev, PCI_D0);
4754 +-}
4755 +-
4756 + /**
4757 + * pci_platform_power_transition - Use platform to change device power state
4758 + * @dev: PCI device to handle.
4759 +@@ -1153,6 +1140,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
4760 + }
4761 + EXPORT_SYMBOL(pci_set_power_state);
4762 +
4763 ++/**
4764 ++ * pci_power_up - Put the given device into D0 forcibly
4765 ++ * @dev: PCI device to power up
4766 ++ */
4767 ++void pci_power_up(struct pci_dev *dev)
4768 ++{
4769 ++ __pci_start_power_transition(dev, PCI_D0);
4770 ++ pci_raw_set_power_state(dev, PCI_D0);
4771 ++ pci_update_current_state(dev, PCI_D0);
4772 ++}
4773 ++
4774 + /**
4775 + * pci_choose_state - Choose the power state of a PCI device
4776 + * @dev: PCI device to be suspended
4777 +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
4778 +index 03ec7a5d9d0b..bf049d1bbb87 100644
4779 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
4780 ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
4781 +@@ -1513,7 +1513,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
4782 + .matches = {
4783 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
4784 + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
4785 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
4786 + },
4787 + },
4788 + {
4789 +@@ -1521,7 +1520,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
4790 + .matches = {
4791 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
4792 + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
4793 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
4794 + },
4795 + },
4796 + {
4797 +@@ -1529,7 +1527,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
4798 + .matches = {
4799 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
4800 + DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
4801 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
4802 + },
4803 + },
4804 + {
4805 +@@ -1537,7 +1534,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
4806 + .matches = {
4807 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
4808 + DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
4809 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
4810 + },
4811 + },
4812 + {}
4813 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4814 +index 6462d3ca7ceb..f2f5fcd9a237 100644
4815 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4816 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
4817 +@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
4818 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
4819 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
4820 + 18, 2, "gpio", "uart"),
4821 +- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
4822 +- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
4823 +- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
4824 +- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
4825 ++ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
4826 ++ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
4827 ++ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
4828 ++ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
4829 +
4830 + };
4831 +
4832 +@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
4833 + };
4834 +
4835 + static inline void armada_37xx_update_reg(unsigned int *reg,
4836 +- unsigned int offset)
4837 ++ unsigned int *offset)
4838 + {
4839 + /* We never have more than 2 registers */
4840 +- if (offset >= GPIO_PER_REG) {
4841 +- offset -= GPIO_PER_REG;
4842 ++ if (*offset >= GPIO_PER_REG) {
4843 ++ *offset -= GPIO_PER_REG;
4844 + *reg += sizeof(u32);
4845 + }
4846 + }
4847 +@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
4848 + {
4849 + int offset = irqd_to_hwirq(d);
4850 +
4851 +- armada_37xx_update_reg(reg, offset);
4852 ++ armada_37xx_update_reg(reg, &offset);
4853 + }
4854 +
4855 + static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
4856 +@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
4857 + unsigned int reg = OUTPUT_EN;
4858 + unsigned int mask;
4859 +
4860 +- armada_37xx_update_reg(&reg, offset);
4861 ++ armada_37xx_update_reg(&reg, &offset);
4862 + mask = BIT(offset);
4863 +
4864 + return regmap_update_bits(info->regmap, reg, mask, 0);
4865 +@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
4866 + unsigned int reg = OUTPUT_EN;
4867 + unsigned int val, mask;
4868 +
4869 +- armada_37xx_update_reg(&reg, offset);
4870 ++ armada_37xx_update_reg(&reg, &offset);
4871 + mask = BIT(offset);
4872 + regmap_read(info->regmap, reg, &val);
4873 +
4874 +@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
4875 + unsigned int reg = OUTPUT_EN;
4876 + unsigned int mask, val, ret;
4877 +
4878 +- armada_37xx_update_reg(&reg, offset);
4879 ++ armada_37xx_update_reg(&reg, &offset);
4880 + mask = BIT(offset);
4881 +
4882 + ret = regmap_update_bits(info->regmap, reg, mask, mask);
4883 +@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
4884 + unsigned int reg = INPUT_VAL;
4885 + unsigned int val, mask;
4886 +
4887 +- armada_37xx_update_reg(&reg, offset);
4888 ++ armada_37xx_update_reg(&reg, &offset);
4889 + mask = BIT(offset);
4890 +
4891 + regmap_read(info->regmap, reg, &val);
4892 +@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
4893 + unsigned int reg = OUTPUT_VAL;
4894 + unsigned int mask, val;
4895 +
4896 +- armada_37xx_update_reg(&reg, offset);
4897 ++ armada_37xx_update_reg(&reg, &offset);
4898 + mask = BIT(offset);
4899 + val = value ? mask : 0;
4900 +
4901 +diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
4902 +index 1058b4b5cc1e..35a0e9569239 100644
4903 +--- a/drivers/s390/crypto/zcrypt_api.c
4904 ++++ b/drivers/s390/crypto/zcrypt_api.c
4905 +@@ -539,8 +539,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
4906 + if (filp->f_inode->i_cdev == &zcrypt_cdev) {
4907 + struct zcdn_device *zcdndev;
4908 +
4909 +- if (mutex_lock_interruptible(&ap_perms_mutex))
4910 +- return -ERESTARTSYS;
4911 ++ mutex_lock(&ap_perms_mutex);
4912 + zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
4913 + mutex_unlock(&ap_perms_mutex);
4914 + if (zcdndev) {
4915 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
4916 +index 296bbc3c4606..cf63916814cc 100644
4917 +--- a/drivers/s390/scsi/zfcp_fsf.c
4918 ++++ b/drivers/s390/scsi/zfcp_fsf.c
4919 +@@ -27,6 +27,11 @@
4920 +
4921 + struct kmem_cache *zfcp_fsf_qtcb_cache;
4922 +
4923 ++static bool ber_stop = true;
4924 ++module_param(ber_stop, bool, 0600);
4925 ++MODULE_PARM_DESC(ber_stop,
4926 ++ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
4927 ++
4928 + static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
4929 + {
4930 + struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
4931 +@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
4932 + case FSF_STATUS_READ_SENSE_DATA_AVAIL:
4933 + break;
4934 + case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
4935 +- dev_warn(&adapter->ccw_device->dev,
4936 +- "The error threshold for checksum statistics "
4937 +- "has been exceeded\n");
4938 + zfcp_dbf_hba_bit_err("fssrh_3", req);
4939 ++ if (ber_stop) {
4940 ++ dev_warn(&adapter->ccw_device->dev,
4941 ++ "All paths over this FCP device are disused because of excessive bit errors\n");
4942 ++ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
4943 ++ } else {
4944 ++ dev_warn(&adapter->ccw_device->dev,
4945 ++ "The error threshold for checksum statistics has been exceeded\n");
4946 ++ }
4947 + break;
4948 + case FSF_STATUS_READ_LINK_DOWN:
4949 + zfcp_fsf_status_read_link_down(req);
4950 +diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
4951 +index 5f8153c37f77..76751d6c7f0d 100644
4952 +--- a/drivers/scsi/ch.c
4953 ++++ b/drivers/scsi/ch.c
4954 +@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
4955 + scsi_changer *ch = file->private_data;
4956 +
4957 + scsi_device_put(ch->device);
4958 +- ch->device = NULL;
4959 + file->private_data = NULL;
4960 + kref_put(&ch->ref, ch_destroy);
4961 + return 0;
4962 +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
4963 +index 45a66048801b..ff6d4aa92421 100644
4964 +--- a/drivers/scsi/megaraid.c
4965 ++++ b/drivers/scsi/megaraid.c
4966 +@@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4967 + */
4968 + if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4969 + pdev->subsystem_device == 0xC000)
4970 +- return -ENODEV;
4971 ++ goto out_disable_device;
4972 + /* Now check the magic signature byte */
4973 + pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4974 + if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4975 +- return -ENODEV;
4976 ++ goto out_disable_device;
4977 + /* Ok it is probably a megaraid */
4978 + }
4979 +
4980 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
4981 +index bad2b12604f1..a2922b17b55b 100644
4982 +--- a/drivers/scsi/qla2xxx/qla_def.h
4983 ++++ b/drivers/scsi/qla2xxx/qla_def.h
4984 +@@ -2338,6 +2338,7 @@ typedef struct fc_port {
4985 + unsigned int query:1;
4986 + unsigned int id_changed:1;
4987 + unsigned int scan_needed:1;
4988 ++ unsigned int n2n_flag:1;
4989 +
4990 + struct completion nvme_del_done;
4991 + uint32_t nvme_prli_service_param;
4992 +@@ -2388,7 +2389,6 @@ typedef struct fc_port {
4993 + uint8_t fc4_type;
4994 + uint8_t fc4f_nvme;
4995 + uint8_t scan_state;
4996 +- uint8_t n2n_flag;
4997 +
4998 + unsigned long last_queue_full;
4999 + unsigned long last_ramp_up;
5000 +@@ -2979,6 +2979,7 @@ enum scan_flags_t {
5001 + enum fc4type_t {
5002 + FS_FC4TYPE_FCP = BIT_0,
5003 + FS_FC4TYPE_NVME = BIT_1,
5004 ++ FS_FCP_IS_N2N = BIT_7,
5005 + };
5006 +
5007 + struct fab_scan_rp {
5008 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
5009 +index afcd9a885884..cd74cc9651de 100644
5010 +--- a/drivers/scsi/qla2xxx/qla_init.c
5011 ++++ b/drivers/scsi/qla2xxx/qla_init.c
5012 +@@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
5013 + break;
5014 + default:
5015 + if ((id.b24 != fcport->d_id.b24 &&
5016 +- fcport->d_id.b24) ||
5017 ++ fcport->d_id.b24 &&
5018 ++ fcport->loop_id != FC_NO_LOOP_ID) ||
5019 + (fcport->loop_id != FC_NO_LOOP_ID &&
5020 + fcport->loop_id != loop_id)) {
5021 + ql_dbg(ql_dbg_disc, vha, 0x20e3,
5022 + "%s %d %8phC post del sess\n",
5023 + __func__, __LINE__, fcport->port_name);
5024 ++ if (fcport->n2n_flag)
5025 ++ fcport->d_id.b24 = 0;
5026 + qlt_schedule_sess_for_deletion(fcport);
5027 + return;
5028 + }
5029 +@@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
5030 + }
5031 +
5032 + fcport->loop_id = loop_id;
5033 ++ if (fcport->n2n_flag)
5034 ++ fcport->d_id.b24 = id.b24;
5035 +
5036 + wwn = wwn_to_u64(fcport->port_name);
5037 + qlt_find_sess_invalidate_other(vha, wwn,
5038 +@@ -966,7 +971,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
5039 + wwn = wwn_to_u64(e->port_name);
5040 +
5041 + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
5042 +- "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
5043 ++ "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
5044 + __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
5045 + e->port_id[0], e->current_login_state, e->last_login_state,
5046 + (loop_id & 0x7fff));
5047 +@@ -1498,7 +1503,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
5048 + (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
5049 + return 0;
5050 +
5051 +- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
5052 ++ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
5053 ++ !N2N_TOPO(vha->hw)) {
5054 + if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
5055 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5056 + return 0;
5057 +@@ -1569,8 +1575,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
5058 + qla24xx_post_gpdb_work(vha, fcport, 0);
5059 + } else {
5060 + ql_dbg(ql_dbg_disc, vha, 0x2118,
5061 +- "%s %d %8phC post NVMe PRLI\n",
5062 +- __func__, __LINE__, fcport->port_name);
5063 ++ "%s %d %8phC post %s PRLI\n",
5064 ++ __func__, __LINE__, fcport->port_name,
5065 ++ fcport->fc4f_nvme ? "NVME" : "FC");
5066 + qla24xx_post_prli_work(vha, fcport);
5067 + }
5068 + break;
5069 +@@ -1924,17 +1931,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
5070 + break;
5071 + }
5072 +
5073 +- if (ea->fcport->n2n_flag) {
5074 ++ if (ea->fcport->fc4f_nvme) {
5075 + ql_dbg(ql_dbg_disc, vha, 0x2118,
5076 + "%s %d %8phC post fc4 prli\n",
5077 + __func__, __LINE__, ea->fcport->port_name);
5078 + ea->fcport->fc4f_nvme = 0;
5079 +- ea->fcport->n2n_flag = 0;
5080 + qla24xx_post_prli_work(vha, ea->fcport);
5081 ++ return;
5082 ++ }
5083 ++
5084 ++ /* at this point both PRLI NVME & PRLI FCP failed */
5085 ++ if (N2N_TOPO(vha->hw)) {
5086 ++ if (ea->fcport->n2n_link_reset_cnt < 3) {
5087 ++ ea->fcport->n2n_link_reset_cnt++;
5088 ++ /*
5089 ++ * The remote port is not sending a PLOGI. Reset the
5090 ++ * link to kick-start its state machine.
5091 ++ */
5092 ++ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
5093 ++ } else {
5094 ++ ql_log(ql_log_warn, vha, 0x2119,
5095 ++ "%s %d %8phC Unable to reconnect\n",
5096 ++ __func__, __LINE__, ea->fcport->port_name);
5097 ++ }
5098 ++ } else {
5099 ++ /*
5100 ++ * Switch-connected topology: login failed. Take the
5101 ++ * connection down and allow relogin to retrigger.
5102 ++ */
5103 ++ ea->fcport->flags &= ~FCF_ASYNC_SENT;
5104 ++ ea->fcport->keep_nport_handle = 0;
5105 ++ qlt_schedule_sess_for_deletion(ea->fcport);
5106 + }
5107 +- ql_dbg(ql_dbg_disc, vha, 0x2119,
5108 +- "%s %d %8phC unhandle event of %x\n",
5109 +- __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
5110 + break;
5111 + }
5112 + }
5113 +@@ -3268,7 +3296,7 @@ try_eft:
5114 +
5115 + for (j = 0; j < 2; j++, fwdt++) {
5116 + if (!fwdt->template) {
5117 +- ql_log(ql_log_warn, vha, 0x00ba,
5118 ++ ql_dbg(ql_dbg_init, vha, 0x00ba,
5119 + "-> fwdt%u no template\n", j);
5120 + continue;
5121 + }
5122 +@@ -5078,28 +5106,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5123 + unsigned long flags;
5124 +
5125 + /* Initiate N2N login. */
5126 +- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
5127 +- /* borrowing */
5128 +- u32 *bp, i, sz;
5129 +-
5130 +- memset(ha->init_cb, 0, ha->init_cb_size);
5131 +- sz = min_t(int, sizeof(struct els_plogi_payload),
5132 +- ha->init_cb_size);
5133 +- rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5134 +- (void *)ha->init_cb, sz);
5135 +- if (rval == QLA_SUCCESS) {
5136 +- bp = (uint32_t *)ha->init_cb;
5137 +- for (i = 0; i < sz/4 ; i++, bp++)
5138 +- *bp = cpu_to_be32(*bp);
5139 ++ if (N2N_TOPO(ha)) {
5140 ++ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
5141 ++ /* borrowing */
5142 ++ u32 *bp, i, sz;
5143 ++
5144 ++ memset(ha->init_cb, 0, ha->init_cb_size);
5145 ++ sz = min_t(int, sizeof(struct els_plogi_payload),
5146 ++ ha->init_cb_size);
5147 ++ rval = qla24xx_get_port_login_templ(vha,
5148 ++ ha->init_cb_dma, (void *)ha->init_cb, sz);
5149 ++ if (rval == QLA_SUCCESS) {
5150 ++ bp = (uint32_t *)ha->init_cb;
5151 ++ for (i = 0; i < sz/4 ; i++, bp++)
5152 ++ *bp = cpu_to_be32(*bp);
5153 +
5154 +- memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
5155 +- sizeof(ha->plogi_els_payld.data));
5156 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5157 +- } else {
5158 +- ql_dbg(ql_dbg_init, vha, 0x00d1,
5159 +- "PLOGI ELS param read fail.\n");
5160 ++ memcpy(&ha->plogi_els_payld.data,
5161 ++ (void *)ha->init_cb,
5162 ++ sizeof(ha->plogi_els_payld.data));
5163 ++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5164 ++ } else {
5165 ++ ql_dbg(ql_dbg_init, vha, 0x00d1,
5166 ++ "PLOGI ELS param read fail.\n");
5167 ++ goto skip_login;
5168 ++ }
5169 ++ }
5170 ++
5171 ++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
5172 ++ if (fcport->n2n_flag) {
5173 ++ qla24xx_fcport_handle_login(vha, fcport);
5174 ++ return QLA_SUCCESS;
5175 ++ }
5176 ++ }
5177 ++skip_login:
5178 ++ spin_lock_irqsave(&vha->work_lock, flags);
5179 ++ vha->scan.scan_retry++;
5180 ++ spin_unlock_irqrestore(&vha->work_lock, flags);
5181 ++
5182 ++ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5183 ++ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5184 ++ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5185 + }
5186 +- return QLA_SUCCESS;
5187 + }
5188 +
5189 + found_devs = 0;
5190 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
5191 +index 133f5f6270ff..abfb9c800ce2 100644
5192 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
5193 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
5194 +@@ -2257,7 +2257,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
5195 + mbx_cmd_t mc;
5196 + mbx_cmd_t *mcp = &mc;
5197 +
5198 +- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
5199 ++ ql_dbg(ql_dbg_disc, vha, 0x105a,
5200 + "Entered %s.\n", __func__);
5201 +
5202 + if (IS_CNA_CAPABLE(vha->hw)) {
5203 +@@ -3891,14 +3891,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
5204 + case TOPO_N2N:
5205 + ha->current_topology = ISP_CFG_N;
5206 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5207 ++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
5208 ++ fcport->scan_state = QLA_FCPORT_SCAN;
5209 ++ fcport->n2n_flag = 0;
5210 ++ }
5211 ++
5212 + fcport = qla2x00_find_fcport_by_wwpn(vha,
5213 + rptid_entry->u.f1.port_name, 1);
5214 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5215 +
5216 + if (fcport) {
5217 + fcport->plogi_nack_done_deadline = jiffies + HZ;
5218 +- fcport->dm_login_expire = jiffies + 3*HZ;
5219 ++ fcport->dm_login_expire = jiffies + 2*HZ;
5220 + fcport->scan_state = QLA_FCPORT_FOUND;
5221 ++ fcport->n2n_flag = 1;
5222 ++ fcport->keep_nport_handle = 1;
5223 ++ if (vha->flags.nvme_enabled)
5224 ++ fcport->fc4f_nvme = 1;
5225 ++
5226 + switch (fcport->disc_state) {
5227 + case DSC_DELETED:
5228 + set_bit(RELOGIN_NEEDED,
5229 +@@ -3932,7 +3942,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
5230 + rptid_entry->u.f1.port_name,
5231 + rptid_entry->u.f1.node_name,
5232 + NULL,
5233 +- FC4_TYPE_UNKNOWN);
5234 ++ FS_FCP_IS_N2N);
5235 + }
5236 +
5237 + /* if our portname is higher then initiate N2N login */
5238 +@@ -4031,6 +4041,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
5239 +
5240 + list_for_each_entry(fcport, &vha->vp_fcports, list) {
5241 + fcport->scan_state = QLA_FCPORT_SCAN;
5242 ++ fcport->n2n_flag = 0;
5243 + }
5244 +
5245 + fcport = qla2x00_find_fcport_by_wwpn(vha,
5246 +@@ -4040,6 +4051,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
5247 + fcport->login_retry = vha->hw->login_retry_count;
5248 + fcport->plogi_nack_done_deadline = jiffies + HZ;
5249 + fcport->scan_state = QLA_FCPORT_FOUND;
5250 ++ fcport->keep_nport_handle = 1;
5251 ++ fcport->n2n_flag = 1;
5252 ++ fcport->d_id.b.domain =
5253 ++ rptid_entry->u.f2.remote_nport_id[2];
5254 ++ fcport->d_id.b.area =
5255 ++ rptid_entry->u.f2.remote_nport_id[1];
5256 ++ fcport->d_id.b.al_pa =
5257 ++ rptid_entry->u.f2.remote_nport_id[0];
5258 + }
5259 + }
5260 + }
5261 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
5262 +index 4fda308c3ef5..2835afbd2edc 100644
5263 +--- a/drivers/scsi/qla2xxx/qla_os.c
5264 ++++ b/drivers/scsi/qla2xxx/qla_os.c
5265 +@@ -1153,6 +1153,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
5266 + qla2x00_mark_all_devices_lost(vha, 0);
5267 +
5268 + wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
5269 ++ flush_workqueue(vha->hw->wq);
5270 + }
5271 +
5272 + /*
5273 +@@ -5049,6 +5050,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5274 +
5275 + memcpy(fcport->port_name, e->u.new_sess.port_name,
5276 + WWN_SIZE);
5277 ++
5278 ++ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
5279 ++ fcport->n2n_flag = 1;
5280 ++
5281 + } else {
5282 + ql_dbg(ql_dbg_disc, vha, 0xffff,
5283 + "%s %8phC mem alloc fail.\n",
5284 +@@ -5145,11 +5150,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5285 + if (dfcp)
5286 + qlt_schedule_sess_for_deletion(tfcp);
5287 +
5288 +-
5289 +- if (N2N_TOPO(vha->hw))
5290 +- fcport->flags &= ~FCF_FABRIC_DEVICE;
5291 +-
5292 + if (N2N_TOPO(vha->hw)) {
5293 ++ fcport->flags &= ~FCF_FABRIC_DEVICE;
5294 ++ fcport->keep_nport_handle = 1;
5295 + if (vha->flags.nvme_enabled) {
5296 + fcport->fc4f_nvme = 1;
5297 + fcport->n2n_flag = 1;
5298 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
5299 +index 459c28aa3b94..1bb0fc9324ea 100644
5300 +--- a/drivers/scsi/qla2xxx/qla_target.c
5301 ++++ b/drivers/scsi/qla2xxx/qla_target.c
5302 +@@ -954,7 +954,7 @@ void qlt_free_session_done(struct work_struct *work)
5303 + struct qla_hw_data *ha = vha->hw;
5304 + unsigned long flags;
5305 + bool logout_started = false;
5306 +- scsi_qla_host_t *base_vha;
5307 ++ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5308 + struct qlt_plogi_ack_t *own =
5309 + sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
5310 +
5311 +@@ -1021,6 +1021,7 @@ void qlt_free_session_done(struct work_struct *work)
5312 +
5313 + if (logout_started) {
5314 + bool traced = false;
5315 ++ u16 cnt = 0;
5316 +
5317 + while (!READ_ONCE(sess->logout_completed)) {
5318 + if (!traced) {
5319 +@@ -1030,6 +1031,9 @@ void qlt_free_session_done(struct work_struct *work)
5320 + traced = true;
5321 + }
5322 + msleep(100);
5323 ++ cnt++;
5324 ++ if (cnt > 200)
5325 ++ break;
5326 + }
5327 +
5328 + ql_dbg(ql_dbg_disc, vha, 0xf087,
5329 +@@ -1102,6 +1106,7 @@ void qlt_free_session_done(struct work_struct *work)
5330 + }
5331 +
5332 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5333 ++ sess->free_pending = 0;
5334 +
5335 + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
5336 + "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
5337 +@@ -1110,17 +1115,8 @@ void qlt_free_session_done(struct work_struct *work)
5338 + if (tgt && (tgt->sess_count == 0))
5339 + wake_up_all(&tgt->waitQ);
5340 +
5341 +- if (vha->fcport_count == 0)
5342 +- wake_up_all(&vha->fcport_waitQ);
5343 +-
5344 +- base_vha = pci_get_drvdata(ha->pdev);
5345 +-
5346 +- sess->free_pending = 0;
5347 +-
5348 +- if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
5349 +- return;
5350 +-
5351 +- if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
5352 ++ if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
5353 ++ (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
5354 + switch (vha->host->active_mode) {
5355 + case MODE_INITIATOR:
5356 + case MODE_DUAL:
5357 +@@ -1133,6 +1129,9 @@ void qlt_free_session_done(struct work_struct *work)
5358 + break;
5359 + }
5360 + }
5361 ++
5362 ++ if (vha->fcport_count == 0)
5363 ++ wake_up_all(&vha->fcport_waitQ);
5364 + }
5365 +
5366 + /* ha->tgt.sess_lock supposed to be held on entry */
5367 +@@ -1162,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
5368 + sess->last_login_gen = sess->login_gen;
5369 +
5370 + INIT_WORK(&sess->free_work, qlt_free_session_done);
5371 +- schedule_work(&sess->free_work);
5372 ++ queue_work(sess->vha->hw->wq, &sess->free_work);
5373 + }
5374 + EXPORT_SYMBOL(qlt_unreg_sess);
5375 +
5376 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
5377 +index 1c470e31ae81..ae2fa170f6ad 100644
5378 +--- a/drivers/scsi/scsi_error.c
5379 ++++ b/drivers/scsi/scsi_error.c
5380 +@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
5381 + ses->data_direction = scmd->sc_data_direction;
5382 + ses->sdb = scmd->sdb;
5383 + ses->result = scmd->result;
5384 ++ ses->resid_len = scmd->req.resid_len;
5385 + ses->underflow = scmd->underflow;
5386 + ses->prot_op = scmd->prot_op;
5387 + ses->eh_eflags = scmd->eh_eflags;
5388 +@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
5389 + memset(scmd->cmnd, 0, BLK_MAX_CDB);
5390 + memset(&scmd->sdb, 0, sizeof(scmd->sdb));
5391 + scmd->result = 0;
5392 ++ scmd->req.resid_len = 0;
5393 +
5394 + if (sense_bytes) {
5395 + scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
5396 +@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
5397 + scmd->sc_data_direction = ses->data_direction;
5398 + scmd->sdb = ses->sdb;
5399 + scmd->result = ses->result;
5400 ++ scmd->req.resid_len = ses->resid_len;
5401 + scmd->underflow = ses->underflow;
5402 + scmd->prot_op = ses->prot_op;
5403 + scmd->eh_eflags = ses->eh_eflags;
5404 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
5405 +index 64c96c7828ee..6d7362e7367e 100644
5406 +--- a/drivers/scsi/scsi_sysfs.c
5407 ++++ b/drivers/scsi/scsi_sysfs.c
5408 +@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
5409 + const char *buf, size_t count)
5410 + {
5411 + struct kernfs_node *kn;
5412 ++ struct scsi_device *sdev = to_scsi_device(dev);
5413 ++
5414 ++ /*
5415 ++ * We need to try to get the module, to avoid the module being
5416 ++ * removed during the delete.
5417 ++ */
5418 ++ if (scsi_device_get(sdev))
5419 ++ return -ENODEV;
5420 +
5421 + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
5422 + WARN_ON_ONCE(!kn);
5423 +@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
5424 + * state into SDEV_DEL.
5425 + */
5426 + device_remove_file(dev, attr);
5427 +- scsi_remove_device(to_scsi_device(dev));
5428 ++ scsi_remove_device(sdev);
5429 + if (kn)
5430 + sysfs_unbreak_active_protection(kn);
5431 ++ scsi_device_put(sdev);
5432 + return count;
5433 + };
5434 + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
5435 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
5436 +index 149d406aacc9..2d77f32e13d5 100644
5437 +--- a/drivers/scsi/sd.c
5438 ++++ b/drivers/scsi/sd.c
5439 +@@ -1655,7 +1655,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
5440 + /* we need to evaluate the error return */
5441 + if (scsi_sense_valid(sshdr) &&
5442 + (sshdr->asc == 0x3a || /* medium not present */
5443 +- sshdr->asc == 0x20)) /* invalid command */
5444 ++ sshdr->asc == 0x20 || /* invalid command */
5445 ++ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
5446 + /* this is no error here */
5447 + return 0;
5448 +
5449 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
5450 +index 029da74bb2f5..e674f6148f69 100644
5451 +--- a/drivers/scsi/ufs/ufshcd.c
5452 ++++ b/drivers/scsi/ufs/ufshcd.c
5453 +@@ -8095,6 +8095,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
5454 + {
5455 + int ret = 0;
5456 +
5457 ++ if (!hba->is_powered)
5458 ++ goto out;
5459 ++
5460 + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5461 + goto out;
5462 +
5463 +diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
5464 +index eee1998c4b18..fac38c842ac5 100644
5465 +--- a/drivers/staging/wlan-ng/cfg80211.c
5466 ++++ b/drivers/staging/wlan-ng/cfg80211.c
5467 +@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
5468 + /* Set the encryption - we only support wep */
5469 + if (is_wep) {
5470 + if (sme->key) {
5471 +- if (sme->key_idx >= NUM_WEPKEYS) {
5472 +- err = -EINVAL;
5473 +- goto exit;
5474 +- }
5475 ++ if (sme->key_idx >= NUM_WEPKEYS)
5476 ++ return -EINVAL;
5477 +
5478 + result = prism2_domibset_uint32(wlandev,
5479 + DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
5480 +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
5481 +index 502e9bf1746f..4a80103675d5 100644
5482 +--- a/drivers/usb/class/usblp.c
5483 ++++ b/drivers/usb/class/usblp.c
5484 +@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
5485 + kfree(usblp->readbuf);
5486 + kfree(usblp->device_id_string);
5487 + kfree(usblp->statusbuf);
5488 ++ usb_put_intf(usblp->intf);
5489 + kfree(usblp);
5490 + }
5491 +
5492 +@@ -1107,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
5493 + init_waitqueue_head(&usblp->wwait);
5494 + init_usb_anchor(&usblp->urbs);
5495 + usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
5496 +- usblp->intf = intf;
5497 ++ usblp->intf = usb_get_intf(intf);
5498 +
5499 + /* Malloc device ID string buffer to the largest expected length,
5500 + * since we can re-query it on an ioctl and a dynamic string
5501 +@@ -1196,6 +1197,7 @@ abort:
5502 + kfree(usblp->readbuf);
5503 + kfree(usblp->statusbuf);
5504 + kfree(usblp->device_id_string);
5505 ++ usb_put_intf(usblp->intf);
5506 + kfree(usblp);
5507 + abort_ret:
5508 + return retval;
5509 +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
5510 +index bb6af6b5ac97..4f1ac9f59f1c 100644
5511 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
5512 ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
5513 +@@ -1180,11 +1180,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
5514 + tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
5515 +
5516 + bl = bytes - n;
5517 +- if (bl > 3)
5518 +- bl = 3;
5519 ++ if (bl > 4)
5520 ++ bl = 4;
5521 +
5522 + for (i = 0; i < bl; i++)
5523 +- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
5524 ++ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
5525 + }
5526 + break;
5527 +
5528 +diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
5529 +index f3108d85e768..15b5f06fb0b3 100644
5530 +--- a/drivers/usb/misc/ldusb.c
5531 ++++ b/drivers/usb/misc/ldusb.c
5532 +@@ -380,10 +380,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
5533 + goto exit;
5534 + }
5535 +
5536 +- if (mutex_lock_interruptible(&dev->mutex)) {
5537 +- retval = -ERESTARTSYS;
5538 +- goto exit;
5539 +- }
5540 ++ mutex_lock(&dev->mutex);
5541 +
5542 + if (dev->open_count != 1) {
5543 + retval = -ENODEV;
5544 +@@ -467,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
5545 +
5546 + /* wait for data */
5547 + spin_lock_irq(&dev->rbsl);
5548 +- if (dev->ring_head == dev->ring_tail) {
5549 ++ while (dev->ring_head == dev->ring_tail) {
5550 + dev->interrupt_in_done = 0;
5551 + spin_unlock_irq(&dev->rbsl);
5552 + if (file->f_flags & O_NONBLOCK) {
5553 +@@ -477,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
5554 + retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
5555 + if (retval < 0)
5556 + goto unlock_exit;
5557 +- } else {
5558 +- spin_unlock_irq(&dev->rbsl);
5559 ++
5560 ++ spin_lock_irq(&dev->rbsl);
5561 + }
5562 ++ spin_unlock_irq(&dev->rbsl);
5563 +
5564 + /* actual_buffer contains actual_length + interrupt_in_buffer */
5565 + actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
5566 ++ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
5567 ++ retval = -EIO;
5568 ++ goto unlock_exit;
5569 ++ }
5570 + bytes_to_read = min(count, *actual_buffer);
5571 + if (bytes_to_read < *actual_buffer)
5572 + dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
5573 +@@ -693,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
5574 + dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
5575 +
5576 + dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
5577 +- dev->ring_buffer =
5578 +- kmalloc_array(ring_buffer_size,
5579 +- sizeof(size_t) + dev->interrupt_in_endpoint_size,
5580 +- GFP_KERNEL);
5581 ++ dev->ring_buffer = kcalloc(ring_buffer_size,
5582 ++ sizeof(size_t) + dev->interrupt_in_endpoint_size,
5583 ++ GFP_KERNEL);
5584 + if (!dev->ring_buffer)
5585 + goto error;
5586 + dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
5587 +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
5588 +index 9d4c52a7ebe0..62dab2441ec4 100644
5589 +--- a/drivers/usb/misc/legousbtower.c
5590 ++++ b/drivers/usb/misc/legousbtower.c
5591 +@@ -419,10 +419,7 @@ static int tower_release (struct inode *inode, struct file *file)
5592 + goto exit;
5593 + }
5594 +
5595 +- if (mutex_lock_interruptible(&dev->lock)) {
5596 +- retval = -ERESTARTSYS;
5597 +- goto exit;
5598 +- }
5599 ++ mutex_lock(&dev->lock);
5600 +
5601 + if (dev->open_count != 1) {
5602 + dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
5603 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
5604 +index dd0ad67aa71e..9174ba2e06da 100644
5605 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
5606 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
5607 +@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
5608 + struct ti_port *tport;
5609 + int port_number;
5610 + int status;
5611 +- int do_unlock;
5612 + unsigned long flags;
5613 +
5614 + tdev = usb_get_serial_data(port->serial);
5615 +@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
5616 + "%s - cannot send close port command, %d\n"
5617 + , __func__, status);
5618 +
5619 +- /* if mutex_lock is interrupted, continue anyway */
5620 +- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
5621 ++ mutex_lock(&tdev->td_open_close_lock);
5622 + --tport->tp_tdev->td_open_port_count;
5623 +- if (tport->tp_tdev->td_open_port_count <= 0) {
5624 ++ if (tport->tp_tdev->td_open_port_count == 0) {
5625 + /* last port is closed, shut down interrupt urb */
5626 + usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
5627 +- tport->tp_tdev->td_open_port_count = 0;
5628 + }
5629 +- if (do_unlock)
5630 +- mutex_unlock(&tdev->td_open_close_lock);
5631 ++ mutex_unlock(&tdev->td_open_close_lock);
5632 + }
5633 +
5634 +
5635 +diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
5636 +index f131651502b8..c62903290f3a 100644
5637 +--- a/fs/binfmt_elf.c
5638 ++++ b/fs/binfmt_elf.c
5639 +@@ -899,7 +899,7 @@ out_free_interp:
5640 + the correct location in memory. */
5641 + for(i = 0, elf_ppnt = elf_phdata;
5642 + i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
5643 +- int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
5644 ++ int elf_prot, elf_flags;
5645 + unsigned long k, vaddr;
5646 + unsigned long total_size = 0;
5647 +
5648 +@@ -931,13 +931,6 @@ out_free_interp:
5649 + */
5650 + }
5651 + }
5652 +-
5653 +- /*
5654 +- * Some binaries have overlapping elf segments and then
5655 +- * we have to forcefully map over an existing mapping
5656 +- * e.g. over this newly established brk mapping.
5657 +- */
5658 +- elf_fixed = MAP_FIXED;
5659 + }
5660 +
5661 + elf_prot = make_prot(elf_ppnt->p_flags);
5662 +@@ -950,7 +943,7 @@ out_free_interp:
5663 + * the ET_DYN load_addr calculations, proceed normally.
5664 + */
5665 + if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
5666 +- elf_flags |= elf_fixed;
5667 ++ elf_flags |= MAP_FIXED;
5668 + } else if (loc->elf_ex.e_type == ET_DYN) {
5669 + /*
5670 + * This logic is run once for the first LOAD Program
5671 +@@ -986,7 +979,7 @@ out_free_interp:
5672 + load_bias = ELF_ET_DYN_BASE;
5673 + if (current->flags & PF_RANDOMIZE)
5674 + load_bias += arch_mmap_rnd();
5675 +- elf_flags |= elf_fixed;
5676 ++ elf_flags |= MAP_FIXED;
5677 + } else
5678 + load_bias = 0;
5679 +
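Context for this revert: MAP_FIXED_NOREPLACE (Linux 4.17+) fails with EEXIST when anything is already mapped at the requested address, while MAP_FIXED silently replaces the existing mapping. Because some binaries carry legitimately overlapping PT_LOAD segments, the loader returns to plain MAP_FIXED. A small stand-alone demonstration of the difference, assuming a glibc new enough to define MAP_FIXED_NOREPLACE:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Refuses to clobber the existing mapping: fails with EEXIST. */
	void *a = mmap(p, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

	/* Silently replaces whatever was mapped at p. */
	void *b = mmap(p, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	printf("MAP_FIXED_NOREPLACE: %s\n", a == MAP_FAILED ? "EEXIST" : "mapped");
	printf("MAP_FIXED:           %s\n", b == MAP_FAILED ? "failed" : "replaced");
	return 0;
}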
5680 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
5681 +index d9541d58ce3d..e7a1ec075c65 100644
5682 +--- a/fs/btrfs/ctree.h
5683 ++++ b/fs/btrfs/ctree.h
5684 +@@ -908,8 +908,6 @@ struct btrfs_fs_info {
5685 + struct btrfs_workqueue *fixup_workers;
5686 + struct btrfs_workqueue *delayed_workers;
5687 +
5688 +- /* the extent workers do delayed refs on the extent allocation tree */
5689 +- struct btrfs_workqueue *extent_workers;
5690 + struct task_struct *transaction_kthread;
5691 + struct task_struct *cleaner_kthread;
5692 + u32 thread_pool_size;
5693 +diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
5694 +index 17f7c0d38768..934521fe7e71 100644
5695 +--- a/fs/btrfs/delalloc-space.c
5696 ++++ b/fs/btrfs/delalloc-space.c
5697 +@@ -371,7 +371,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
5698 + out_qgroup:
5699 + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
5700 + out_fail:
5701 +- btrfs_inode_rsv_release(inode, true);
5702 + if (delalloc_lock)
5703 + mutex_unlock(&inode->delalloc_mutex);
5704 + return ret;
5705 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
5706 +index 65af7eb3f7bd..46eac7ddf0f7 100644
5707 +--- a/fs/btrfs/disk-io.c
5708 ++++ b/fs/btrfs/disk-io.c
5709 +@@ -2036,7 +2036,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
5710 + btrfs_destroy_workqueue(fs_info->readahead_workers);
5711 + btrfs_destroy_workqueue(fs_info->flush_workers);
5712 + btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
5713 +- btrfs_destroy_workqueue(fs_info->extent_workers);
5714 + /*
5715 + * Now that all other work queues are destroyed, we can safely destroy
5716 + * the queues used for metadata I/O, since tasks from those other work
5717 +@@ -2242,10 +2241,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
5718 + max_active, 2);
5719 + fs_info->qgroup_rescan_workers =
5720 + btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
5721 +- fs_info->extent_workers =
5722 +- btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
5723 +- min_t(u64, fs_devices->num_devices,
5724 +- max_active), 8);
5725 +
5726 + if (!(fs_info->workers && fs_info->delalloc_workers &&
5727 + fs_info->submit_workers && fs_info->flush_workers &&
5728 +@@ -2256,7 +2251,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
5729 + fs_info->endio_freespace_worker && fs_info->rmw_workers &&
5730 + fs_info->caching_workers && fs_info->readahead_workers &&
5731 + fs_info->fixup_workers && fs_info->delayed_workers &&
5732 +- fs_info->extent_workers &&
5733 + fs_info->qgroup_rescan_workers)) {
5734 + return -ENOMEM;
5735 + }
5736 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
5737 +index ef2f80825c82..d5a3a66c8f1d 100644
5738 +--- a/fs/btrfs/extent-tree.c
5739 ++++ b/fs/btrfs/extent-tree.c
5740 +@@ -8117,6 +8117,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
5741 + btrfs_err(info,
5742 + "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
5743 + cache->key.objectid);
5744 ++ btrfs_put_block_group(cache);
5745 + ret = -EINVAL;
5746 + goto error;
5747 + }
5748 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
5749 +index abcda051eee2..d68add0bf346 100644
5750 +--- a/fs/btrfs/file.c
5751 ++++ b/fs/btrfs/file.c
5752 +@@ -2067,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
5753 + struct btrfs_trans_handle *trans;
5754 + struct btrfs_log_ctx ctx;
5755 + int ret = 0, err;
5756 +- u64 len;
5757 +
5758 +- /*
5759 +- * If the inode needs a full sync, make sure we use a full range to
5760 +- * avoid log tree corruption, due to hole detection racing with ordered
5761 +- * extent completion for adjacent ranges, and assertion failures during
5762 +- * hole detection.
5763 +- */
5764 +- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5765 +- &BTRFS_I(inode)->runtime_flags)) {
5766 +- start = 0;
5767 +- end = LLONG_MAX;
5768 +- }
5769 +-
5770 +- /*
5771 +- * The range length can be represented by u64, we have to do the typecasts
5772 +- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
5773 +- */
5774 +- len = (u64)end - (u64)start + 1;
5775 + trace_btrfs_sync_file(file, datasync);
5776 +
5777 + btrfs_init_log_ctx(&ctx, inode);
5778 +@@ -2111,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
5779 +
5780 + atomic_inc(&root->log_batch);
5781 +
5782 ++ /*
5783 ++ * If the inode needs a full sync, make sure we use a full range to
5784 ++ * avoid log tree corruption, due to hole detection racing with ordered
5785 ++ * extent completion for adjacent ranges, and assertion failures during
5786 ++ * hole detection. Do this while holding the inode lock, to avoid races
5787 ++ * with other tasks.
5788 ++ */
5789 ++ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5790 ++ &BTRFS_I(inode)->runtime_flags)) {
5791 ++ start = 0;
5792 ++ end = LLONG_MAX;
5793 ++ }
5794 ++
5795 + /*
5796 + * Before we acquired the inode's lock, someone may have dirtied more
5797 + * pages in the target range. We need to make sure that writeback for
5798 +@@ -2138,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
5799 + /*
5800 + * We have to do this here to avoid the priority inversion of waiting on
5801 + * IO of a lower priority task while holding a transaction open.
5802 ++ *
5803 ++ * Also, the range length can be represented by u64, we have to do the
5804 ++ * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
5805 + */
5806 +- ret = btrfs_wait_ordered_range(inode, start, len);
5807 ++ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
5808 + if (ret) {
5809 + up_write(&BTRFS_I(inode)->dio_sem);
5810 + inode_unlock(inode);
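The typecast the last hunk preserves is load-bearing: fsync() passes start = 0 and end = LLONG_MAX, so end - start + 1 overflows a signed 64-bit type, while the same arithmetic on u64 is well defined. A stand-alone illustration:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t start = 0, end = LLONG_MAX;

	/* Unsigned arithmetic is well defined and yields 2^63 here;
	 * the same expression evaluated in int64_t would be signed
	 * overflow, i.e. undefined behaviour in C. */
	uint64_t len = (uint64_t)end - (uint64_t)start + 1;

	printf("len = %llu\n", (unsigned long long)len);
	return 0;
}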
5811 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
5812 +index 001efc9ba1e7..60a00f6ca18f 100644
5813 +--- a/fs/btrfs/qgroup.c
5814 ++++ b/fs/btrfs/qgroup.c
5815 +@@ -3617,7 +3617,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
5816 + return 0;
5817 +
5818 + BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
5819 +- trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
5820 ++ trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
5821 + ret = qgroup_reserve(root, num_bytes, enforce, type);
5822 + if (ret < 0)
5823 + return ret;
5824 +@@ -3664,7 +3664,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
5825 + */
5826 + num_bytes = sub_root_meta_rsv(root, num_bytes, type);
5827 + BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
5828 +- trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
5829 ++ trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
5830 + btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
5831 + num_bytes, type);
5832 + }
5833 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
5834 +index fbd66c33dd63..074947bebd16 100644
5835 +--- a/fs/btrfs/relocation.c
5836 ++++ b/fs/btrfs/relocation.c
5837 +@@ -3276,6 +3276,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
5838 + if (!page) {
5839 + btrfs_delalloc_release_metadata(BTRFS_I(inode),
5840 + PAGE_SIZE, true);
5841 ++ btrfs_delalloc_release_extents(BTRFS_I(inode),
5842 ++ PAGE_SIZE, true);
5843 + ret = -ENOMEM;
5844 + goto out;
5845 + }
5846 +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
5847 +index b11af7d8e8e9..61282b77950f 100644
5848 +--- a/fs/ceph/mds_client.c
5849 ++++ b/fs/ceph/mds_client.c
5850 +@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
5851 + }
5852 +
5853 + done:
5854 +- if (*p != end)
5855 +- goto bad;
5856 ++ /* Skip over any unrecognized fields */
5857 ++ *p = end;
5858 + return 0;
5859 +
5860 + bad:
5861 +@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
5862 + goto bad;
5863 +
5864 + info->filelock_reply = *p;
5865 +- *p += sizeof(*info->filelock_reply);
5866 +
5867 +- if (unlikely(*p != end))
5868 +- goto bad;
5869 ++ /* Skip over any unrecognized fields */
5870 ++ *p = end;
5871 + return 0;
5872 +-
5873 + bad:
5874 + return -EIO;
5875 + }
5876 +@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
5877 + {
5878 + if (features == (u64)-1 ||
5879 + (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
5880 ++ /* Malformed reply? */
5881 + if (*p == end) {
5882 + info->has_create_ino = false;
5883 + } else {
5884 + info->has_create_ino = true;
5885 +- info->ino = ceph_decode_64(p);
5886 ++ ceph_decode_64_safe(p, end, info->ino, bad);
5887 + }
5888 ++ } else {
5889 ++ if (*p != end)
5890 ++ goto bad;
5891 + }
5892 +
5893 +- if (unlikely(*p != end))
5894 +- goto bad;
5895 ++ /* Skip over any unrecognized fields */
5896 ++ *p = end;
5897 + return 0;
5898 +-
5899 + bad:
5900 + return -EIO;
5901 + }
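All three ceph decoders now follow one rule: a truncated reply is a hard error, but unknown trailing fields are skipped (*p = end) so older clients stay compatible with newer servers. A user-space sketch of the same bounds-checked decode pattern (decode_u64 stands in for the kernel's ceph_decode_64_safe):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int decode_u64(const uint8_t **p, const uint8_t *end, uint64_t *v)
{
	if (end - *p < (ptrdiff_t)sizeof(*v))
		return -1;			/* truncated reply: real error */
	memcpy(v, *p, sizeof(*v));
	*p += sizeof(*v);
	return 0;
}

int main(void)
{
	uint8_t buf[12] = { 1 };		/* 8 known bytes + 4 unknown */
	const uint8_t *p = buf, *end = buf + sizeof(buf);
	uint64_t ino;

	if (decode_u64(&p, end, &ino))
		return 1;
	p = end;				/* skip unrecognized trailing fields */
	printf("ino=%llu\n", (unsigned long long)ino);
	return 0;
}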
5902 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5903 +index 4c1aeb2cf7f5..53dbb6e0d390 100644
5904 +--- a/fs/cifs/file.c
5905 ++++ b/fs/cifs/file.c
5906 +@@ -405,10 +405,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5907 + bool oplock_break_cancelled;
5908 +
5909 + spin_lock(&tcon->open_file_lock);
5910 +-
5911 ++ spin_lock(&cifsi->open_file_lock);
5912 + spin_lock(&cifs_file->file_info_lock);
5913 + if (--cifs_file->count > 0) {
5914 + spin_unlock(&cifs_file->file_info_lock);
5915 ++ spin_unlock(&cifsi->open_file_lock);
5916 + spin_unlock(&tcon->open_file_lock);
5917 + return;
5918 + }
5919 +@@ -421,9 +422,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5920 + cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
5921 +
5922 + /* remove it from the lists */
5923 +- spin_lock(&cifsi->open_file_lock);
5924 + list_del(&cifs_file->flist);
5925 +- spin_unlock(&cifsi->open_file_lock);
5926 + list_del(&cifs_file->tlist);
5927 + atomic_dec(&tcon->num_local_opens);
5928 +
5929 +@@ -440,6 +439,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
5930 + cifs_set_oplock_level(cifsi, 0);
5931 + }
5932 +
5933 ++ spin_unlock(&cifsi->open_file_lock);
5934 + spin_unlock(&tcon->open_file_lock);
5935 +
5936 + oplock_break_cancelled = wait_oplock_handler ?
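The cifs hunks widen cifsi->open_file_lock so the refcount check and the list removal happen under one critical section, with every path nesting the locks in the same global order (tcon, then inode, then per-file). Fixed ordering is the standard defence against ABBA deadlocks; a rough user-space analogue with pthreads (lock names illustrative, not from the patch):

#include <pthread.h>

static pthread_mutex_t tcon_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t file_lock  = PTHREAD_MUTEX_INITIALIZER;

static void put_file(void)
{
	pthread_mutex_lock(&tcon_lock);
	pthread_mutex_lock(&inode_lock);	/* always nested in this order */
	pthread_mutex_lock(&file_lock);
	/* ... drop the refcount and unlink from both lists atomically ... */
	pthread_mutex_unlock(&file_lock);
	pthread_mutex_unlock(&inode_lock);
	pthread_mutex_unlock(&tcon_lock);
}

int main(void)
{
	put_file();
	return 0;
}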
5937 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
5938 +index 79d9a60f21ba..3c952024e10f 100644
5939 +--- a/fs/cifs/inode.c
5940 ++++ b/fs/cifs/inode.c
5941 +@@ -2465,9 +2465,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
5942 + rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
5943 + cifsFileInfo_put(wfile);
5944 + if (rc)
5945 +- return rc;
5946 ++ goto cifs_setattr_exit;
5947 + } else if (rc != -EBADF)
5948 +- return rc;
5949 ++ goto cifs_setattr_exit;
5950 + else
5951 + rc = 0;
5952 + }
5953 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
5954 +index b7421a096319..514810694c0f 100644
5955 +--- a/fs/cifs/smb1ops.c
5956 ++++ b/fs/cifs/smb1ops.c
5957 +@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
5958 + /* we do not want to loop forever */
5959 + last_mid = cur_mid;
5960 + cur_mid++;
5961 ++ /* avoid 0xFFFF MID */
5962 ++ if (cur_mid == 0xffff)
5963 ++ cur_mid++;
5964 +
5965 + /*
5966 + * This nested loop looks more expensive than it is.
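The MID hunk makes the wrapping allocator skip 0xFFFF, which SMB1 reserves for its own use, so it must never be handed out as a request identifier. A stand-alone sketch of a wrapping ID allocator with a reserved sentinel:

#include <stdint.h>
#include <stdio.h>

static uint16_t next_mid(uint16_t cur)
{
	cur++;
	if (cur == 0xffff)	/* reserved by the protocol: never hand out */
		cur++;		/* wraps to 0 at the 16-bit boundary */
	return cur;
}

int main(void)
{
	printf("%#x -> %#x\n", 0xfffe, next_mid(0xfffe));	/* 0xfffe -> 0 */
	return 0;
}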
5967 +diff --git a/fs/dax.c b/fs/dax.c
5968 +index 6bf81f931de3..2cc43cd914eb 100644
5969 +--- a/fs/dax.c
5970 ++++ b/fs/dax.c
5971 +@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
5972 +
5973 + for (;;) {
5974 + entry = xas_find_conflict(xas);
5975 ++ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
5976 ++ return entry;
5977 + if (dax_entry_order(entry) < order)
5978 + return XA_RETRY_ENTRY;
5979 +- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
5980 +- !dax_is_locked(entry))
5981 ++ if (!dax_is_locked(entry))
5982 + return entry;
5983 +
5984 + wq = dax_entry_waitqueue(xas, entry, &ewait.key);
5985 +diff --git a/fs/io_uring.c b/fs/io_uring.c
5986 +index 30149652c379..ed223c33dd89 100644
5987 +--- a/fs/io_uring.c
5988 ++++ b/fs/io_uring.c
5989 +@@ -221,6 +221,7 @@ struct io_ring_ctx {
5990 + unsigned sq_entries;
5991 + unsigned sq_mask;
5992 + unsigned sq_thread_idle;
5993 ++ unsigned cached_sq_dropped;
5994 + struct io_uring_sqe *sq_sqes;
5995 +
5996 + struct list_head defer_list;
5997 +@@ -237,6 +238,7 @@ struct io_ring_ctx {
5998 + /* CQ ring */
5999 + struct io_cq_ring *cq_ring;
6000 + unsigned cached_cq_tail;
6001 ++ atomic_t cached_cq_overflow;
6002 + unsigned cq_entries;
6003 + unsigned cq_mask;
6004 + struct wait_queue_head cq_wait;
6005 +@@ -431,7 +433,8 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
6006 + if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
6007 + return false;
6008 +
6009 +- return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
6010 ++ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped
6011 ++ + atomic_read(&ctx->cached_cq_overflow);
6012 + }
6013 +
6014 + static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
6015 +@@ -511,9 +514,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
6016 + WRITE_ONCE(cqe->res, res);
6017 + WRITE_ONCE(cqe->flags, 0);
6018 + } else {
6019 +- unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
6020 +-
6021 +- WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
6022 ++ WRITE_ONCE(ctx->cq_ring->overflow,
6023 ++ atomic_inc_return(&ctx->cached_cq_overflow));
6024 + }
6025 + }
6026 +
6027 +@@ -687,6 +689,14 @@ static unsigned io_cqring_events(struct io_cq_ring *ring)
6028 + return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
6029 + }
6030 +
6031 ++static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
6032 ++{
6033 ++ struct io_sq_ring *ring = ctx->sq_ring;
6034 ++
6035 ++ /* make sure SQ entry isn't read before tail */
6036 ++ return smp_load_acquire(&ring->r.tail) - ctx->cached_sq_head;
6037 ++}
6038 ++
6039 + /*
6040 + * Find and free completed poll iocbs
6041 + */
6042 +@@ -816,19 +826,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
6043 + mutex_unlock(&ctx->uring_lock);
6044 + }
6045 +
6046 +-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
6047 +- long min)
6048 ++static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
6049 ++ long min)
6050 + {
6051 +- int iters, ret = 0;
6052 ++ int iters = 0, ret = 0;
6053 +
6054 +- /*
6055 +- * We disallow the app entering submit/complete with polling, but we
6056 +- * still need to lock the ring to prevent racing with polled issue
6057 +- * that got punted to a workqueue.
6058 +- */
6059 +- mutex_lock(&ctx->uring_lock);
6060 +-
6061 +- iters = 0;
6062 + do {
6063 + int tmin = 0;
6064 +
6065 +@@ -864,6 +866,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
6066 + ret = 0;
6067 + } while (min && !*nr_events && !need_resched());
6068 +
6069 ++ return ret;
6070 ++}
6071 ++
6072 ++static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
6073 ++ long min)
6074 ++{
6075 ++ int ret;
6076 ++
6077 ++ /*
6078 ++ * We disallow the app entering submit/complete with polling, but we
6079 ++ * still need to lock the ring to prevent racing with polled issue
6080 ++ * that got punted to a workqueue.
6081 ++ */
6082 ++ mutex_lock(&ctx->uring_lock);
6083 ++ ret = __io_iopoll_check(ctx, nr_events, min);
6084 + mutex_unlock(&ctx->uring_lock);
6085 + return ret;
6086 + }
6087 +@@ -2150,6 +2167,8 @@ err:
6088 + return;
6089 + }
6090 +
6091 ++ req->user_data = s->sqe->user_data;
6092 ++
6093 + /*
6094 + * If we already have a head request, queue this one for async
6095 + * submittal once the head completes. If we don't have a head but
6096 +@@ -2255,12 +2274,13 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
6097 +
6098 + /* drop invalid entries */
6099 + ctx->cached_sq_head++;
6100 +- ring->dropped++;
6101 ++ ctx->cached_sq_dropped++;
6102 ++ WRITE_ONCE(ring->dropped, ctx->cached_sq_dropped);
6103 + return false;
6104 + }
6105 +
6106 +-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
6107 +- unsigned int nr, bool has_user, bool mm_fault)
6108 ++static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
6109 ++ bool has_user, bool mm_fault)
6110 + {
6111 + struct io_submit_state state, *statep = NULL;
6112 + struct io_kiocb *link = NULL;
6113 +@@ -2273,6 +2293,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
6114 + }
6115 +
6116 + for (i = 0; i < nr; i++) {
6117 ++ struct sqe_submit s;
6118 ++
6119 ++ if (!io_get_sqring(ctx, &s))
6120 ++ break;
6121 ++
6122 + /*
6123 + * If previous wasn't linked and we have a linked command,
6124 + * that's the end of the chain. Submit the previous link.
6125 +@@ -2281,16 +2306,16 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
6126 + io_queue_sqe(ctx, link, &link->submit);
6127 + link = NULL;
6128 + }
6129 +- prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
6130 ++ prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
6131 +
6132 + if (unlikely(mm_fault)) {
6133 +- io_cqring_add_event(ctx, sqes[i].sqe->user_data,
6134 ++ io_cqring_add_event(ctx, s.sqe->user_data,
6135 + -EFAULT);
6136 + } else {
6137 +- sqes[i].has_user = has_user;
6138 +- sqes[i].needs_lock = true;
6139 +- sqes[i].needs_fixed_file = true;
6140 +- io_submit_sqe(ctx, &sqes[i], statep, &link);
6141 ++ s.has_user = has_user;
6142 ++ s.needs_lock = true;
6143 ++ s.needs_fixed_file = true;
6144 ++ io_submit_sqe(ctx, &s, statep, &link);
6145 + submitted++;
6146 + }
6147 + }
6148 +@@ -2305,7 +2330,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
6149 +
6150 + static int io_sq_thread(void *data)
6151 + {
6152 +- struct sqe_submit sqes[IO_IOPOLL_BATCH];
6153 + struct io_ring_ctx *ctx = data;
6154 + struct mm_struct *cur_mm = NULL;
6155 + mm_segment_t old_fs;
6156 +@@ -2320,14 +2344,27 @@ static int io_sq_thread(void *data)
6157 +
6158 + timeout = inflight = 0;
6159 + while (!kthread_should_park()) {
6160 +- bool all_fixed, mm_fault = false;
6161 +- int i;
6162 ++ bool mm_fault = false;
6163 ++ unsigned int to_submit;
6164 +
6165 + if (inflight) {
6166 + unsigned nr_events = 0;
6167 +
6168 + if (ctx->flags & IORING_SETUP_IOPOLL) {
6169 +- io_iopoll_check(ctx, &nr_events, 0);
6170 ++ /*
6171 ++ * inflight is the count of the maximum possible
6172 ++ * entries we submitted, but it can be smaller
6173 ++ * if we dropped some of them. If we don't have
6174 ++ * poll entries available, then we know that we
6175 ++ * have nothing left to poll for. Reset the
6176 ++ * inflight count to zero in that case.
6177 ++ */
6178 ++ mutex_lock(&ctx->uring_lock);
6179 ++ if (!list_empty(&ctx->poll_list))
6180 ++ __io_iopoll_check(ctx, &nr_events, 0);
6181 ++ else
6182 ++ inflight = 0;
6183 ++ mutex_unlock(&ctx->uring_lock);
6184 + } else {
6185 + /*
6186 + * Normal IO, just pretend everything completed.
6187 +@@ -2341,7 +2378,8 @@ static int io_sq_thread(void *data)
6188 + timeout = jiffies + ctx->sq_thread_idle;
6189 + }
6190 +
6191 +- if (!io_get_sqring(ctx, &sqes[0])) {
6192 ++ to_submit = io_sqring_entries(ctx);
6193 ++ if (!to_submit) {
6194 + /*
6195 + * We're polling. If we're within the defined idle
6196 + * period, then let us spin without work before going
6197 +@@ -2372,7 +2410,8 @@ static int io_sq_thread(void *data)
6198 + /* make sure to read SQ tail after writing flags */
6199 + smp_mb();
6200 +
6201 +- if (!io_get_sqring(ctx, &sqes[0])) {
6202 ++ to_submit = io_sqring_entries(ctx);
6203 ++ if (!to_submit) {
6204 + if (kthread_should_park()) {
6205 + finish_wait(&ctx->sqo_wait, &wait);
6206 + break;
6207 +@@ -2390,19 +2429,8 @@ static int io_sq_thread(void *data)
6208 + ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
6209 + }
6210 +
6211 +- i = 0;
6212 +- all_fixed = true;
6213 +- do {
6214 +- if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
6215 +- all_fixed = false;
6216 +-
6217 +- i++;
6218 +- if (i == ARRAY_SIZE(sqes))
6219 +- break;
6220 +- } while (io_get_sqring(ctx, &sqes[i]));
6221 +-
6222 + /* Unless all new commands are FIXED regions, grab mm */
6223 +- if (!all_fixed && !cur_mm) {
6224 ++ if (!cur_mm) {
6225 + mm_fault = !mmget_not_zero(ctx->sqo_mm);
6226 + if (!mm_fault) {
6227 + use_mm(ctx->sqo_mm);
6228 +@@ -2410,8 +2438,9 @@ static int io_sq_thread(void *data)
6229 + }
6230 + }
6231 +
6232 +- inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
6233 +- mm_fault);
6234 ++ to_submit = min(to_submit, ctx->sq_entries);
6235 ++ inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
6236 ++ mm_fault);
6237 +
6238 + /* Commit SQ ring head once we've consumed all SQEs */
6239 + io_commit_sqring(ctx);
6240 +@@ -2462,13 +2491,14 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
6241 + submit++;
6242 + io_submit_sqe(ctx, &s, statep, &link);
6243 + }
6244 +- io_commit_sqring(ctx);
6245 +
6246 + if (link)
6247 + io_queue_sqe(ctx, link, &link->submit);
6248 + if (statep)
6249 + io_submit_state_end(statep);
6250 +
6251 ++ io_commit_sqring(ctx);
6252 ++
6253 + return submit;
6254 + }
6255 +
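The new io_sqring_entries() helper pairs an acquire load of the SQ tail with the producer's release store, guaranteeing the kernel never reads an SQE slot before the tail update that published it. The same publish/consume discipline in portable C11 atomics (a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint32_t entries[256];
	_Atomic uint32_t tail;	/* written by the producer */
	uint32_t head;		/* consumer-private */
};

/* Producer: fill the slot first, then publish it with a release store. */
static void produce(struct ring *r, uint32_t v)
{
	uint32_t t = atomic_load_explicit(&r->tail, memory_order_relaxed);

	r->entries[t % 256] = v;
	atomic_store_explicit(&r->tail, t + 1, memory_order_release);
}

/* Consumer: acquire-load the tail so no entry is read before the store
 * that published it -- the analogue of smp_load_acquire(&ring->r.tail). */
static uint32_t pending(struct ring *r)
{
	return atomic_load_explicit(&r->tail, memory_order_acquire) - r->head;
}

int main(void)
{
	static struct ring r;

	produce(&r, 42);
	return pending(&r) == 1 ? 0 : 1;
}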
6256 +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
6257 +index 930e3d388579..699a560efbb0 100644
6258 +--- a/fs/ocfs2/journal.c
6259 ++++ b/fs/ocfs2/journal.c
6260 +@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
6261 + /* At this point, we know that no more recovery threads can be
6262 + * launched, so wait for any recovery completion work to
6263 + * complete. */
6264 +- flush_workqueue(osb->ocfs2_wq);
6265 ++ if (osb->ocfs2_wq)
6266 ++ flush_workqueue(osb->ocfs2_wq);
6267 +
6268 + /*
6269 + * Now that recovery is shut down, and the osb is about to be
6270 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
6271 +index 158e5af767fd..720e9f94957e 100644
6272 +--- a/fs/ocfs2/localalloc.c
6273 ++++ b/fs/ocfs2/localalloc.c
6274 +@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
6275 + struct ocfs2_dinode *alloc = NULL;
6276 +
6277 + cancel_delayed_work(&osb->la_enable_wq);
6278 +- flush_workqueue(osb->ocfs2_wq);
6279 ++ if (osb->ocfs2_wq)
6280 ++ flush_workqueue(osb->ocfs2_wq);
6281 +
6282 + if (osb->local_alloc_state == OCFS2_LA_UNUSED)
6283 + goto out;
6284 +diff --git a/fs/proc/page.c b/fs/proc/page.c
6285 +index 544d1ee15aee..7c952ee732e6 100644
6286 +--- a/fs/proc/page.c
6287 ++++ b/fs/proc/page.c
6288 +@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
6289 + return -EINVAL;
6290 +
6291 + while (count > 0) {
6292 +- if (pfn_valid(pfn))
6293 +- ppage = pfn_to_page(pfn);
6294 +- else
6295 +- ppage = NULL;
6296 ++ /*
6297 ++ * TODO: ZONE_DEVICE support requires identifying
6298 ++ * memmaps that were actually initialized.
6299 ++ */
6300 ++ ppage = pfn_to_online_page(pfn);
6301 ++
6302 + if (!ppage || PageSlab(ppage) || page_has_type(ppage))
6303 + pcount = 0;
6304 + else
6305 +@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
6306 + return -EINVAL;
6307 +
6308 + while (count > 0) {
6309 +- if (pfn_valid(pfn))
6310 +- ppage = pfn_to_page(pfn);
6311 +- else
6312 +- ppage = NULL;
6313 ++ /*
6314 ++ * TODO: ZONE_DEVICE support requires identifying
6315 ++ * memmaps that were actually initialized.
6316 ++ */
6317 ++ ppage = pfn_to_online_page(pfn);
6318 +
6319 + if (put_user(stable_page_flags(ppage), out)) {
6320 + ret = -EFAULT;
6321 +@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
6322 + return -EINVAL;
6323 +
6324 + while (count > 0) {
6325 +- if (pfn_valid(pfn))
6326 +- ppage = pfn_to_page(pfn);
6327 +- else
6328 +- ppage = NULL;
6329 ++ /*
6330 ++ * TODO: ZONE_DEVICE support requires identifying
6331 ++ * memmaps that were actually initialized.
6332 ++ */
6333 ++ ppage = pfn_to_online_page(pfn);
6334 +
6335 + if (ppage)
6336 + ino = page_cgroup_ino(ppage);
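All three procfs loops switch from pfn_valid() to pfn_to_online_page(): pfn_valid() only says a memmap entry exists, and for ZONE_DEVICE or offline sections that struct page may never have been initialized. A simplified sketch of what the real helper (a macro in include/linux/memory_hotplug.h) checks:

static inline struct page *pfn_to_online_page_sketch(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);

	/* A valid memmap entry *and* an online section, i.e. struct
	 * pages that were actually initialized by the hotplug code. */
	if (nr < NR_MEM_SECTIONS && online_section_nr(nr))
		return pfn_to_page(pfn);
	return NULL;
}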
6337 +diff --git a/fs/readdir.c b/fs/readdir.c
6338 +index 2f6a4534e0df..d26d5ea4de7b 100644
6339 +--- a/fs/readdir.c
6340 ++++ b/fs/readdir.c
6341 +@@ -20,9 +20,23 @@
6342 + #include <linux/syscalls.h>
6343 + #include <linux/unistd.h>
6344 + #include <linux/compat.h>
6345 +-
6346 + #include <linux/uaccess.h>
6347 +
6348 ++#include <asm/unaligned.h>
6349 ++
6350 ++/*
6351 ++ * Note the "unsafe_put_user() semantics: we goto a
6352 ++ * label for errors.
6353 ++ */
6354 ++#define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \
6355 ++ char __user *dst = (_dst); \
6356 ++ const char *src = (_src); \
6357 ++ size_t len = (_len); \
6358 ++ unsafe_put_user(0, dst+len, label); \
6359 ++ unsafe_copy_to_user(dst, src, len, label); \
6360 ++} while (0)
6361 ++
6362 ++
6363 + int iterate_dir(struct file *file, struct dir_context *ctx)
6364 + {
6365 + struct inode *inode = file_inode(file);
6366 +@@ -64,6 +78,40 @@ out:
6367 + }
6368 + EXPORT_SYMBOL(iterate_dir);
6369 +
6370 ++/*
6371 ++ * POSIX says that a dirent name cannot contain NULL or a '/'.
6372 ++ *
6373 ++ * It's not 100% clear what we should really do in this case.
6374 ++ * The filesystem is clearly corrupted, but returning a hard
6375 ++ * error means that you now don't see any of the other names
6376 ++ * either, so that isn't a perfect alternative.
6377 ++ *
6378 ++ * And if you return an error, what error do you use? Several
6379 ++ * filesystems seem to have decided on EUCLEAN being the error
6380 ++ * code for EFSCORRUPTED, and that may be the error to use. Or
6381 ++ * just EIO, which is perhaps more obvious to users.
6382 ++ *
6383 ++ * In order to see the other file names in the directory, the
6384 ++ * caller might want to make this a "soft" error: skip the
6385 ++ * entry, and return the error at the end instead.
6386 ++ *
6387 ++ * Note that this should likely do a "memchr(name, 0, len)"
6388 ++ * check too, since that would be filesystem corruption as
6389 ++ * well. However, that case can't actually confuse user space,
6390 ++ * which has to do a strlen() on the name anyway to find the
6391 ++ * filename length, and the above "soft error" worry means
6392 ++ * that it's probably better left alone until we have that
6393 ++ * issue clarified.
6394 ++ */
6395 ++static int verify_dirent_name(const char *name, int len)
6396 ++{
6397 ++ if (!len)
6398 ++ return -EIO;
6399 ++ if (memchr(name, '/', len))
6400 ++ return -EIO;
6401 ++ return 0;
6402 ++}
6403 ++
6404 + /*
6405 + * Traditional linux readdir() handling..
6406 + *
6407 +@@ -173,6 +221,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
6408 + int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
6409 + sizeof(long));
6410 +
6411 ++ buf->error = verify_dirent_name(name, namlen);
6412 ++ if (unlikely(buf->error))
6413 ++ return buf->error;
6414 + buf->error = -EINVAL; /* only used if we fail.. */
6415 + if (reclen > buf->count)
6416 + return -EINVAL;
6417 +@@ -182,28 +233,31 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
6418 + return -EOVERFLOW;
6419 + }
6420 + dirent = buf->previous;
6421 +- if (dirent) {
6422 +- if (signal_pending(current))
6423 +- return -EINTR;
6424 +- if (__put_user(offset, &dirent->d_off))
6425 +- goto efault;
6426 +- }
6427 +- dirent = buf->current_dir;
6428 +- if (__put_user(d_ino, &dirent->d_ino))
6429 +- goto efault;
6430 +- if (__put_user(reclen, &dirent->d_reclen))
6431 +- goto efault;
6432 +- if (copy_to_user(dirent->d_name, name, namlen))
6433 +- goto efault;
6434 +- if (__put_user(0, dirent->d_name + namlen))
6435 +- goto efault;
6436 +- if (__put_user(d_type, (char __user *) dirent + reclen - 1))
6437 ++ if (dirent && signal_pending(current))
6438 ++ return -EINTR;
6439 ++
6440 ++ /*
6441 ++ * Note! This range-checks 'previous' (which may be NULL).
6442 ++ * The real range was checked in getdents
6443 ++ */
6444 ++ if (!user_access_begin(dirent, sizeof(*dirent)))
6445 + goto efault;
6446 ++ if (dirent)
6447 ++ unsafe_put_user(offset, &dirent->d_off, efault_end);
6448 ++ dirent = buf->current_dir;
6449 ++ unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
6450 ++ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
6451 ++ unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
6452 ++ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
6453 ++ user_access_end();
6454 ++
6455 + buf->previous = dirent;
6456 + dirent = (void __user *)dirent + reclen;
6457 + buf->current_dir = dirent;
6458 + buf->count -= reclen;
6459 + return 0;
6460 ++efault_end:
6461 ++ user_access_end();
6462 + efault:
6463 + buf->error = -EFAULT;
6464 + return -EFAULT;
6465 +@@ -259,34 +313,38 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
6466 + int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
6467 + sizeof(u64));
6468 +
6469 ++ buf->error = verify_dirent_name(name, namlen);
6470 ++ if (unlikely(buf->error))
6471 ++ return buf->error;
6472 + buf->error = -EINVAL; /* only used if we fail.. */
6473 + if (reclen > buf->count)
6474 + return -EINVAL;
6475 + dirent = buf->previous;
6476 +- if (dirent) {
6477 +- if (signal_pending(current))
6478 +- return -EINTR;
6479 +- if (__put_user(offset, &dirent->d_off))
6480 +- goto efault;
6481 +- }
6482 +- dirent = buf->current_dir;
6483 +- if (__put_user(ino, &dirent->d_ino))
6484 +- goto efault;
6485 +- if (__put_user(0, &dirent->d_off))
6486 +- goto efault;
6487 +- if (__put_user(reclen, &dirent->d_reclen))
6488 +- goto efault;
6489 +- if (__put_user(d_type, &dirent->d_type))
6490 +- goto efault;
6491 +- if (copy_to_user(dirent->d_name, name, namlen))
6492 +- goto efault;
6493 +- if (__put_user(0, dirent->d_name + namlen))
6494 ++ if (dirent && signal_pending(current))
6495 ++ return -EINTR;
6496 ++
6497 ++ /*
6498 ++ * Note! This range-checks 'previous' (which may be NULL).
6499 ++ * The real range was checked in getdents
6500 ++ */
6501 ++ if (!user_access_begin(dirent, sizeof(*dirent)))
6502 + goto efault;
6503 ++ if (dirent)
6504 ++ unsafe_put_user(offset, &dirent->d_off, efault_end);
6505 ++ dirent = buf->current_dir;
6506 ++ unsafe_put_user(ino, &dirent->d_ino, efault_end);
6507 ++ unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
6508 ++ unsafe_put_user(d_type, &dirent->d_type, efault_end);
6509 ++ unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
6510 ++ user_access_end();
6511 ++
6512 + buf->previous = dirent;
6513 + dirent = (void __user *)dirent + reclen;
6514 + buf->current_dir = dirent;
6515 + buf->count -= reclen;
6516 + return 0;
6517 ++efault_end:
6518 ++ user_access_end();
6519 + efault:
6520 + buf->error = -EFAULT;
6521 + return -EFAULT;
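The rewritten filldir()/filldir64() are the kernel side of getdents(2)/getdents64(2); user space walks the returned buffer by d_reclen and relies on the NUL terminator that unsafe_copy_dirent_name() writes after each name. An illustrative caller (glibc exposes struct dirent64 under _GNU_SOURCE):

#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open(".", O_RDONLY | O_DIRECTORY);
	long n;

	if (fd < 0)
		return 1;
	n = syscall(SYS_getdents64, fd, buf, sizeof(buf));
	for (long off = 0; off < n; ) {
		struct dirent64 *d = (struct dirent64 *)(buf + off);

		/* d_name is NUL-terminated by filldir64(). */
		printf("%s\n", d->d_name);
		off += d->d_reclen;
	}
	close(fd);
	return 0;
}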
6522 +diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
6523 +index ad24554f11f9..75f880c25bb8 100644
6524 +--- a/include/linux/micrel_phy.h
6525 ++++ b/include/linux/micrel_phy.h
6526 +@@ -31,7 +31,7 @@
6527 + #define PHY_ID_KSZ886X 0x00221430
6528 + #define PHY_ID_KSZ8863 0x00221435
6529 +
6530 +-#define PHY_ID_KSZ8795 0x00221550
6531 ++#define PHY_ID_KSZ87XX 0x00221550
6532 +
6533 + #define PHY_ID_KSZ9477 0x00221631
6534 +
6535 +diff --git a/include/linux/mii.h b/include/linux/mii.h
6536 +index 5cd824c1c0ca..4ce8901a1af6 100644
6537 +--- a/include/linux/mii.h
6538 ++++ b/include/linux/mii.h
6539 +@@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
6540 + lp_advertising, lpa & LPA_LPACK);
6541 + }
6542 +
6543 ++static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
6544 ++ u32 ctrl1000)
6545 ++{
6546 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
6547 ++ ctrl1000 & ADVERTISE_1000HALF);
6548 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
6549 ++ ctrl1000 & ADVERTISE_1000FULL);
6550 ++}
6551 ++
6552 + /**
6553 + * linkmode_adv_to_lcl_adv_t
6554 + * @advertising:pointer to linkmode advertising
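The new helper folds the two ADVERTISE_1000* bits of a CTRL1000 register value into an ethtool linkmode bitmap. A hypothetical PHY-driver call site might look like this (a sketch, not from the patch):

	int ctrl1000 = phy_read(phydev, MII_CTRL1000);

	if (ctrl1000 >= 0)
		mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising, ctrl1000);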
6555 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
6556 +index ba5583522d24..9b18d33681c2 100644
6557 +--- a/include/linux/skbuff.h
6558 ++++ b/include/linux/skbuff.h
6559 +@@ -3465,8 +3465,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
6560 + int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
6561 + int skb_vlan_pop(struct sk_buff *skb);
6562 + int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
6563 +-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
6564 +-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
6565 ++int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
6566 ++ int mac_len);
6567 ++int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
6568 + int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
6569 + int skb_mpls_dec_ttl(struct sk_buff *skb);
6570 + struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
6571 +diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
6572 +index 34a038563d97..d38051dd414f 100644
6573 +--- a/include/linux/uaccess.h
6574 ++++ b/include/linux/uaccess.h
6575 +@@ -284,8 +284,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
6576 + #ifndef user_access_begin
6577 + #define user_access_begin(ptr,len) access_ok(ptr, len)
6578 + #define user_access_end() do { } while (0)
6579 +-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
6580 +-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
6581 ++#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
6582 ++#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
6583 ++#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
6584 ++#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
6585 + static inline unsigned long user_access_save(void) { return 0UL; }
6586 + static inline void user_access_restore(unsigned long flags) { }
6587 + #endif
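The fallback unsafe_op_wrap() keeps the calling convention uniform: every unsafe_* accessor runs between user_access_begin() and user_access_end() and branches to a caller-supplied label on a fault, so architectures with hardware user-access gating (x86 SMAP, arm64 PAN) pay the toggle cost once per window rather than once per access. A kernel-style sketch of a conforming caller (the function is hypothetical):

static int put_pair(int __user *up)
{
	if (!user_access_begin(up, 2 * sizeof(int)))
		return -EFAULT;
	unsafe_put_user(1, &up[0], fault);
	unsafe_put_user(2, &up[1], fault);
	user_access_end();
	return 0;
fault:
	user_access_end();	/* every exit path must close the window */
	return -EFAULT;
}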
6588 +diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
6589 +index 3810b340551c..6bd5ed695a5e 100644
6590 +--- a/include/scsi/scsi_eh.h
6591 ++++ b/include/scsi/scsi_eh.h
6592 +@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
6593 + struct scsi_eh_save {
6594 + /* saved state */
6595 + int result;
6596 ++ unsigned int resid_len;
6597 + int eh_eflags;
6598 + enum dma_data_direction data_direction;
6599 + unsigned underflow;
6600 +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
6601 +index 2f6a669408bb..e83dee3212bd 100644
6602 +--- a/include/trace/events/btrfs.h
6603 ++++ b/include/trace/events/btrfs.h
6604 +@@ -1687,6 +1687,7 @@ TRACE_EVENT(qgroup_update_reserve,
6605 + __entry->qgid = qgroup->qgroupid;
6606 + __entry->cur_reserved = qgroup->rsv.values[type];
6607 + __entry->diff = diff;
6608 ++ __entry->type = type;
6609 + ),
6610 +
6611 + TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
6612 +@@ -1709,6 +1710,7 @@ TRACE_EVENT(qgroup_meta_reserve,
6613 + TP_fast_assign_btrfs(root->fs_info,
6614 + __entry->refroot = root->root_key.objectid;
6615 + __entry->diff = diff;
6616 ++ __entry->type = type;
6617 + ),
6618 +
6619 + TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
6620 +@@ -1725,7 +1727,6 @@ TRACE_EVENT(qgroup_meta_convert,
6621 + TP_STRUCT__entry_btrfs(
6622 + __field( u64, refroot )
6623 + __field( s64, diff )
6624 +- __field( int, type )
6625 + ),
6626 +
6627 + TP_fast_assign_btrfs(root->fs_info,
6628 +diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
6629 +index 1c215ea1798e..e168dc59e9a0 100644
6630 +--- a/include/uapi/linux/nvme_ioctl.h
6631 ++++ b/include/uapi/linux/nvme_ioctl.h
6632 +@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
6633 + __u32 result;
6634 + };
6635 +
6636 ++struct nvme_passthru_cmd64 {
6637 ++ __u8 opcode;
6638 ++ __u8 flags;
6639 ++ __u16 rsvd1;
6640 ++ __u32 nsid;
6641 ++ __u32 cdw2;
6642 ++ __u32 cdw3;
6643 ++ __u64 metadata;
6644 ++ __u64 addr;
6645 ++ __u32 metadata_len;
6646 ++ __u32 data_len;
6647 ++ __u32 cdw10;
6648 ++ __u32 cdw11;
6649 ++ __u32 cdw12;
6650 ++ __u32 cdw13;
6651 ++ __u32 cdw14;
6652 ++ __u32 cdw15;
6653 ++ __u32 timeout_ms;
6654 ++ __u64 result;
6655 ++};
6656 ++
6657 + #define nvme_admin_cmd nvme_passthru_cmd
6658 +
6659 + #define NVME_IOCTL_ID _IO('N', 0x40)
6660 +@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
6661 + #define NVME_IOCTL_RESET _IO('N', 0x44)
6662 + #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
6663 + #define NVME_IOCTL_RESCAN _IO('N', 0x46)
6664 ++#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
6665 ++#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
6666 +
6667 + #endif /* _UAPI_LINUX_NVME_IOCTL_H */
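The 64-bit passthrough commands widen the result field to __u64, so commands whose completion result does not fit in 32 bits can be issued; user space opts in via the two new ioctl numbers. A hedged usage sketch, assuming kernel headers that ship the new definitions (device path and opcode are purely illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd64 cmd;
	int fd = open("/dev/nvme0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;	/* admin Identify, as an example only */
	/* cmd.addr / cmd.data_len would describe a 4 KiB buffer here. */

	if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) == 0)
		printf("64-bit result: %llu\n", (unsigned long long)cmd.result);
	return 0;
}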
6668 +diff --git a/kernel/events/core.c b/kernel/events/core.c
6669 +index 0463c1151bae..a2a50b668ef3 100644
6670 +--- a/kernel/events/core.c
6671 ++++ b/kernel/events/core.c
6672 +@@ -6839,7 +6839,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
6673 + static int __perf_pmu_output_stop(void *info)
6674 + {
6675 + struct perf_event *event = info;
6676 +- struct pmu *pmu = event->pmu;
6677 ++ struct pmu *pmu = event->ctx->pmu;
6678 + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6679 + struct remote_output ro = {
6680 + .rb = event->rb,
6681 +diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
6682 +index 0892e38ed6fb..a9dfa04ffa44 100644
6683 +--- a/kernel/trace/trace_event_perf.c
6684 ++++ b/kernel/trace/trace_event_perf.c
6685 +@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
6686 + goto out;
6687 + }
6688 +
6689 ++ mutex_lock(&event_mutex);
6690 + ret = perf_trace_event_init(tp_event, p_event);
6691 + if (ret)
6692 + destroy_local_trace_kprobe(tp_event);
6693 ++ mutex_unlock(&event_mutex);
6694 + out:
6695 + kfree(func);
6696 + return ret;
6697 +@@ -282,8 +284,10 @@ out:
6698 +
6699 + void perf_kprobe_destroy(struct perf_event *p_event)
6700 + {
6701 ++ mutex_lock(&event_mutex);
6702 + perf_trace_event_close(p_event);
6703 + perf_trace_event_unreg(p_event);
6704 ++ mutex_unlock(&event_mutex);
6705 +
6706 + destroy_local_trace_kprobe(p_event->tp_event);
6707 + }
6708 +diff --git a/lib/textsearch.c b/lib/textsearch.c
6709 +index 4f16eec5d554..f68dea8806be 100644
6710 +--- a/lib/textsearch.c
6711 ++++ b/lib/textsearch.c
6712 +@@ -89,9 +89,9 @@
6713 + * goto errout;
6714 + * }
6715 + *
6716 +- * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
6717 ++ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
6718 + * if (pos != UINT_MAX)
6719 +- * panic("Oh my god, dancing chickens at \%d\n", pos);
6720 ++ * panic("Oh my god, dancing chickens at %d\n", pos);
6721 + *
6722 + * textsearch_destroy(conf);
6723 + */
6724 +diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
6725 +index e630e7ff57f1..45f57fd2db64 100644
6726 +--- a/lib/vdso/gettimeofday.c
6727 ++++ b/lib/vdso/gettimeofday.c
6728 +@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
6729 + return -1;
6730 + }
6731 +
6732 +- res->tv_sec = 0;
6733 +- res->tv_nsec = ns;
6734 +-
6735 ++ if (likely(res)) {
6736 ++ res->tv_sec = 0;
6737 ++ res->tv_nsec = ns;
6738 ++ }
6739 + return 0;
6740 + }
6741 +
6742 +@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
6743 + ret = clock_getres_fallback(clock, &ts);
6744 + #endif
6745 +
6746 +- if (likely(!ret)) {
6747 ++ if (likely(!ret && res)) {
6748 + res->tv_sec = ts.tv_sec;
6749 + res->tv_nsec = ts.tv_nsec;
6750 + }
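POSIX permits calling clock_getres() with a NULL res pointer as a pure existence probe for a clock; the fix makes the vDSO path honour that instead of dereferencing NULL, matching the syscall path. A user-space check:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* NULL res is valid: only the return value is of interest. */
	if (clock_getres(CLOCK_MONOTONIC, NULL) == 0)
		printf("CLOCK_MONOTONIC exists\n");

	if (clock_getres(CLOCK_MONOTONIC, &ts) == 0)
		printf("resolution: %ld ns\n", ts.tv_nsec);
	return 0;
}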
6751 +diff --git a/mm/compaction.c b/mm/compaction.c
6752 +index 1e994920e6ff..5ab9c2b22693 100644
6753 +--- a/mm/compaction.c
6754 ++++ b/mm/compaction.c
6755 +@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
6756 +
6757 + /* Ensure the start of the pageblock or zone is online and valid */
6758 + block_pfn = pageblock_start_pfn(pfn);
6759 +- block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
6760 ++ block_pfn = max(block_pfn, zone->zone_start_pfn);
6761 ++ block_page = pfn_to_online_page(block_pfn);
6762 + if (block_page) {
6763 + page = block_page;
6764 + pfn = block_pfn;
6765 + }
6766 +
6767 + /* Ensure the end of the pageblock or zone is online and valid */
6768 +- block_pfn += pageblock_nr_pages;
6769 ++ block_pfn = pageblock_end_pfn(pfn) - 1;
6770 + block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
6771 + end_page = pfn_to_online_page(block_pfn);
6772 + if (!end_page)
6773 +@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
6774 +
6775 + page += (1 << PAGE_ALLOC_COSTLY_ORDER);
6776 + pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
6777 +- } while (page < end_page);
6778 ++ } while (page <= end_page);
6779 +
6780 + return false;
6781 + }
6782 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
6783 +index 6d7296dd11b8..843ee2f8d356 100644
6784 +--- a/mm/hugetlb.c
6785 ++++ b/mm/hugetlb.c
6786 +@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
6787 + struct page *page;
6788 +
6789 + for (i = start_pfn; i < end_pfn; i++) {
6790 +- if (!pfn_valid(i))
6791 ++ page = pfn_to_online_page(i);
6792 ++ if (!page)
6793 + return false;
6794 +
6795 +- page = pfn_to_page(i);
6796 +-
6797 + if (page_zone(page) != z)
6798 + return false;
6799 +
6800 +diff --git a/mm/memblock.c b/mm/memblock.c
6801 +index 7d4f61ae666a..c4b16cae2bc9 100644
6802 +--- a/mm/memblock.c
6803 ++++ b/mm/memblock.c
6804 +@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
6805 + align = SMP_CACHE_BYTES;
6806 + }
6807 +
6808 +- if (end > memblock.current_limit)
6809 +- end = memblock.current_limit;
6810 +-
6811 + again:
6812 + found = memblock_find_in_range_node(size, align, start, end, nid,
6813 + flags);
6814 +@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
6815 + if (WARN_ON_ONCE(slab_is_available()))
6816 + return kzalloc_node(size, GFP_NOWAIT, nid);
6817 +
6818 ++ if (max_addr > memblock.current_limit)
6819 ++ max_addr = memblock.current_limit;
6820 ++
6821 + alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
6822 +
6823 + /* retry allocation without lower limit */
6824 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
6825 +index 7ef849da8278..3151c87dff73 100644
6826 +--- a/mm/memory-failure.c
6827 ++++ b/mm/memory-failure.c
6828 +@@ -199,7 +199,6 @@ struct to_kill {
6829 + struct task_struct *tsk;
6830 + unsigned long addr;
6831 + short size_shift;
6832 +- char addr_valid;
6833 + };
6834 +
6835 + /*
6836 +@@ -324,22 +323,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
6837 + }
6838 + }
6839 + tk->addr = page_address_in_vma(p, vma);
6840 +- tk->addr_valid = 1;
6841 + if (is_zone_device_page(p))
6842 + tk->size_shift = dev_pagemap_mapping_shift(p, vma);
6843 + else
6844 + tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
6845 +
6846 + /*
6847 +- * In theory we don't have to kill when the page was
6848 +- * munmaped. But it could be also a mremap. Since that's
6849 +- * likely very rare kill anyways just out of paranoia, but use
6850 +- * a SIGKILL because the error is not contained anymore.
6851 ++ * Send SIGKILL if "tk->addr == -EFAULT". Also, as
6852 ++ * "tk->size_shift" is always non-zero for !is_zone_device_page(),
6853 ++ * so "tk->size_shift == 0" effectively checks no mapping on
6854 ++ * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
6855 ++ * to a process' address space, it's possible not all N VMAs
6856 ++ * contain mappings for the page, but at least one VMA does.
6857 ++ * Only deliver SIGBUS with payload derived from the VMA that
6858 ++ * has a mapping for the page.
6859 + */
6860 +- if (tk->addr == -EFAULT || tk->size_shift == 0) {
6861 ++ if (tk->addr == -EFAULT) {
6862 + pr_info("Memory failure: Unable to find user space address %lx in %s\n",
6863 + page_to_pfn(p), tsk->comm);
6864 +- tk->addr_valid = 0;
6865 ++ } else if (tk->size_shift == 0) {
6866 ++ kfree(tk);
6867 ++ return;
6868 + }
6869 + get_task_struct(tsk);
6870 + tk->tsk = tsk;
6871 +@@ -366,7 +370,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
6872 + * make sure the process doesn't catch the
6873 + * signal and then access the memory. Just kill it.
6874 + */
6875 +- if (fail || tk->addr_valid == 0) {
6876 ++ if (fail || tk->addr == -EFAULT) {
6877 + pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
6878 + pfn, tk->tsk->comm, tk->tsk->pid);
6879 + do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
6880 +@@ -1253,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
6881 + if (!sysctl_memory_failure_recovery)
6882 + panic("Memory failure on page %lx", pfn);
6883 +
6884 +- if (!pfn_valid(pfn)) {
6885 ++ p = pfn_to_online_page(pfn);
6886 ++ if (!p) {
6887 ++ if (pfn_valid(pfn)) {
6888 ++ pgmap = get_dev_pagemap(pfn, NULL);
6889 ++ if (pgmap)
6890 ++ return memory_failure_dev_pagemap(pfn, flags,
6891 ++ pgmap);
6892 ++ }
6893 + pr_err("Memory failure: %#lx: memory outside kernel control\n",
6894 + pfn);
6895 + return -ENXIO;
6896 + }
6897 +
6898 +- pgmap = get_dev_pagemap(pfn, NULL);
6899 +- if (pgmap)
6900 +- return memory_failure_dev_pagemap(pfn, flags, pgmap);
6901 +-
6902 +- p = pfn_to_page(pfn);
6903 + if (PageHuge(p))
6904 + return memory_failure_hugetlb(pfn, flags);
6905 + if (TestSetPageHWPoison(p)) {
6906 +diff --git a/mm/memremap.c b/mm/memremap.c
6907 +index ed70c4e8e52a..31f1b2953c64 100644
6908 +--- a/mm/memremap.c
6909 ++++ b/mm/memremap.c
6910 +@@ -104,6 +104,7 @@ static void devm_memremap_pages_release(void *data)
6911 + struct dev_pagemap *pgmap = data;
6912 + struct device *dev = pgmap->dev;
6913 + struct resource *res = &pgmap->res;
6914 ++ struct page *first_page;
6915 + unsigned long pfn;
6916 + int nid;
6917 +
6918 +@@ -112,14 +113,16 @@ static void devm_memremap_pages_release(void *data)
6919 + put_page(pfn_to_page(pfn));
6920 + dev_pagemap_cleanup(pgmap);
6921 +
6922 ++ /* make sure to access a memmap that was actually initialized */
6923 ++ first_page = pfn_to_page(pfn_first(pgmap));
6924 ++
6925 + /* pages are dead and unused, undo the arch mapping */
6926 +- nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
6927 ++ nid = page_to_nid(first_page);
6928 +
6929 + mem_hotplug_begin();
6930 + if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
6931 +- pfn = PHYS_PFN(res->start);
6932 +- __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
6933 +- PHYS_PFN(resource_size(res)), NULL);
6934 ++ __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
6935 ++ PHYS_PFN(resource_size(res)), NULL);
6936 + } else {
6937 + arch_remove_memory(nid, res->start, resource_size(res),
6938 + pgmap_altmap(pgmap));
6939 +diff --git a/mm/page_owner.c b/mm/page_owner.c
6940 +index addcbb2ae4e4..8088ab29bc2d 100644
6941 +--- a/mm/page_owner.c
6942 ++++ b/mm/page_owner.c
6943 +@@ -258,7 +258,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
6944 + * not matter as the mixed block count will still be correct
6945 + */
6946 + for (; pfn < end_pfn; ) {
6947 +- if (!pfn_valid(pfn)) {
6948 ++ page = pfn_to_online_page(pfn);
6949 ++ if (!page) {
6950 + pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
6951 + continue;
6952 + }
6953 +@@ -266,13 +267,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
6954 + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
6955 + block_end_pfn = min(block_end_pfn, end_pfn);
6956 +
6957 +- page = pfn_to_page(pfn);
6958 + pageblock_mt = get_pageblock_migratetype(page);
6959 +
6960 + for (; pfn < block_end_pfn; pfn++) {
6961 + if (!pfn_valid_within(pfn))
6962 + continue;
6963 +
6964 ++ /* The pageblock is online, no need to recheck. */
6965 + page = pfn_to_page(pfn);
6966 +
6967 + if (page_zone(page) != zone)
6968 +diff --git a/mm/slab_common.c b/mm/slab_common.c
6969 +index 807490fe217a..7f492e53a7db 100644
6970 +--- a/mm/slab_common.c
6971 ++++ b/mm/slab_common.c
6972 +@@ -178,10 +178,13 @@ static int init_memcg_params(struct kmem_cache *s,
6973 +
6974 + static void destroy_memcg_params(struct kmem_cache *s)
6975 + {
6976 +- if (is_root_cache(s))
6977 ++ if (is_root_cache(s)) {
6978 + kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
6979 +- else
6980 ++ } else {
6981 ++ mem_cgroup_put(s->memcg_params.memcg);
6982 ++ WRITE_ONCE(s->memcg_params.memcg, NULL);
6983 + percpu_ref_exit(&s->memcg_params.refcnt);
6984 ++ }
6985 + }
6986 +
6987 + static void free_memcg_params(struct rcu_head *rcu)
6988 +@@ -253,8 +256,6 @@ static void memcg_unlink_cache(struct kmem_cache *s)
6989 + } else {
6990 + list_del(&s->memcg_params.children_node);
6991 + list_del(&s->memcg_params.kmem_caches_node);
6992 +- mem_cgroup_put(s->memcg_params.memcg);
6993 +- WRITE_ONCE(s->memcg_params.memcg, NULL);
6994 + }
6995 + }
6996 + #else
6997 +diff --git a/mm/slub.c b/mm/slub.c
6998 +index 8834563cdb4b..dac41cf0b94a 100644
6999 +--- a/mm/slub.c
7000 ++++ b/mm/slub.c
7001 +@@ -4836,7 +4836,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
7002 + }
7003 + }
7004 +
7005 +- get_online_mems();
7006 ++ /*
7007 ++ * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
7008 ++ * already held which will conflict with an existing lock order:
7009 ++ *
7010 ++ * mem_hotplug_lock->slab_mutex->kernfs_mutex
7011 ++ *
7012 ++ * We don't really need mem_hotplug_lock (to hold off
7013 ++ * slab_mem_going_offline_callback) here because slab's memory hot
7014 ++ * unplug code doesn't destroy the kmem_cache->node[] data.
7015 ++ */
7016 ++
7017 + #ifdef CONFIG_SLUB_DEBUG
7018 + if (flags & SO_ALL) {
7019 + struct kmem_cache_node *n;
7020 +@@ -4877,7 +4887,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
7021 + x += sprintf(buf + x, " N%d=%lu",
7022 + node, nodes[node]);
7023 + #endif
7024 +- put_online_mems();
7025 + kfree(nodes);
7026 + return x + sprintf(buf + x, "\n");
7027 + }
7028 +diff --git a/mm/vmscan.c b/mm/vmscan.c
7029 +index a6c5d0b28321..8d03013b6c59 100644
7030 +--- a/mm/vmscan.c
7031 ++++ b/mm/vmscan.c
7032 +@@ -354,12 +354,13 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
7033 + */
7034 + unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
7035 + {
7036 +- unsigned long lru_size;
7037 ++ unsigned long lru_size = 0;
7038 + int zid;
7039 +
7040 +- if (!mem_cgroup_disabled())
7041 +- lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
7042 +- else
7043 ++ if (!mem_cgroup_disabled()) {
7044 ++ for (zid = 0; zid < MAX_NR_ZONES; zid++)
7045 ++ lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
7046 ++ } else
7047 + lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
7048 +
7049 + for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
7050 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
7051 +index 982d8d12830e..d4a47c44daf0 100644
7052 +--- a/net/core/skbuff.c
7053 ++++ b/net/core/skbuff.c
7054 +@@ -5465,12 +5465,14 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
7055 + * @skb: buffer
7056 + * @mpls_lse: MPLS label stack entry to push
7057 + * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
7058 ++ * @mac_len: length of the MAC header
7059 + *
7060 + * Expects skb->data at mac header.
7061 + *
7062 + * Returns 0 on success, -errno otherwise.
7063 + */
7064 +-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
7065 ++int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
7066 ++ int mac_len)
7067 + {
7068 + struct mpls_shim_hdr *lse;
7069 + int err;
7070 +@@ -5487,15 +5489,15 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
7071 + return err;
7072 +
7073 + if (!skb->inner_protocol) {
7074 +- skb_set_inner_network_header(skb, skb->mac_len);
7075 ++ skb_set_inner_network_header(skb, mac_len);
7076 + skb_set_inner_protocol(skb, skb->protocol);
7077 + }
7078 +
7079 + skb_push(skb, MPLS_HLEN);
7080 + memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
7081 +- skb->mac_len);
7082 ++ mac_len);
7083 + skb_reset_mac_header(skb);
7084 +- skb_set_network_header(skb, skb->mac_len);
7085 ++ skb_set_network_header(skb, mac_len);
7086 +
7087 + lse = mpls_hdr(skb);
7088 + lse->label_stack_entry = mpls_lse;
7089 +@@ -5514,29 +5516,30 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
7090 + *
7091 + * @skb: buffer
7092 + * @next_proto: ethertype of header after popped MPLS header
7093 ++ * @mac_len: length of the MAC header
7094 + *
7095 + * Expects skb->data at mac header.
7096 + *
7097 + * Returns 0 on success, -errno otherwise.
7098 + */
7099 +-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
7100 ++int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
7101 + {
7102 + int err;
7103 +
7104 + if (unlikely(!eth_p_mpls(skb->protocol)))
7105 +- return -EINVAL;
7106 ++ return 0;
7107 +
7108 +- err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
7109 ++ err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
7110 + if (unlikely(err))
7111 + return err;
7112 +
7113 + skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
7114 + memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
7115 +- skb->mac_len);
7116 ++ mac_len);
7117 +
7118 + __skb_pull(skb, MPLS_HLEN);
7119 + skb_reset_mac_header(skb);
7120 +- skb_set_network_header(skb, skb->mac_len);
7121 ++ skb_set_network_header(skb, mac_len);
7122 +
7123 + if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
7124 + struct ethhdr *hdr;
7125 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
7126 +index 14654876127e..621f83434b24 100644
7127 +--- a/net/ipv4/route.c
7128 ++++ b/net/ipv4/route.c
7129 +@@ -1482,7 +1482,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
7130 + prev = cmpxchg(p, orig, rt);
7131 + if (prev == orig) {
7132 + if (orig) {
7133 +- dst_dev_put(&orig->dst);
7134 ++ rt_add_uncached_list(orig);
7135 + dst_release(&orig->dst);
7136 + }
7137 + } else {
7138 +@@ -2470,14 +2470,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
7139 + int orig_oif = fl4->flowi4_oif;
7140 + unsigned int flags = 0;
7141 + struct rtable *rth;
7142 +- int err = -ENETUNREACH;
7143 ++ int err;
7144 +
7145 + if (fl4->saddr) {
7146 +- rth = ERR_PTR(-EINVAL);
7147 + if (ipv4_is_multicast(fl4->saddr) ||
7148 + ipv4_is_lbcast(fl4->saddr) ||
7149 +- ipv4_is_zeronet(fl4->saddr))
7150 ++ ipv4_is_zeronet(fl4->saddr)) {
7151 ++ rth = ERR_PTR(-EINVAL);
7152 + goto out;
7153 ++ }
7154 ++
7155 ++ rth = ERR_PTR(-ENETUNREACH);
7156 +
7157 + /* I removed check for oif == dev_out->oif here.
7158 + It was wrong for two reasons:
7159 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
7160 +index a593aaf25748..2bb0b66181a7 100644
7161 +--- a/net/ipv6/ip6_input.c
7162 ++++ b/net/ipv6/ip6_input.c
7163 +@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
7164 + {
7165 + struct sk_buff *skb, *next;
7166 +
7167 +- list_for_each_entry_safe(skb, next, head, list)
7168 ++ list_for_each_entry_safe(skb, next, head, list) {
7169 ++ skb_list_del_init(skb);
7170 + dst_input(skb);
7171 ++ }
7172 + }
7173 +
7174 + static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
7175 +diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
7176 +index b1438fd4d876..64b544ae9966 100644
7177 +--- a/net/mac80211/debugfs_netdev.c
7178 ++++ b/net/mac80211/debugfs_netdev.c
7179 +@@ -487,9 +487,14 @@ static ssize_t ieee80211_if_fmt_aqm(
7180 + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
7181 + {
7182 + struct ieee80211_local *local = sdata->local;
7183 +- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
7184 ++ struct txq_info *txqi;
7185 + int len;
7186 +
7187 ++ if (!sdata->vif.txq)
7188 ++ return 0;
7189 ++
7190 ++ txqi = to_txq_info(sdata->vif.txq);
7191 ++
7192 + spin_lock_bh(&local->fq.lock);
7193 + rcu_read_lock();
7194 +
7195 +@@ -658,7 +663,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
7196 + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
7197 + DEBUGFS_ADD(hw_queues);
7198 +
7199 +- if (sdata->local->ops->wake_tx_queue)
7200 ++ if (sdata->local->ops->wake_tx_queue &&
7201 ++ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
7202 ++ sdata->vif.type != NL80211_IFTYPE_NAN)
7203 + DEBUGFS_ADD(aqm);
7204 + }
7205 +
7206 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
7207 +index 4c888dc9bd81..a826f9ccc03f 100644
7208 +--- a/net/mac80211/mlme.c
7209 ++++ b/net/mac80211/mlme.c
7210 +@@ -2629,7 +2629,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
7211 +
7212 + rcu_read_lock();
7213 + ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
7214 +- if (WARN_ON_ONCE(ssid == NULL))
7215 ++ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
7216 ++ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
7217 + ssid_len = 0;
7218 + else
7219 + ssid_len = ssid[1];
7220 +@@ -5227,7 +5228,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
7221 +
7222 + rcu_read_lock();
7223 + ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
7224 +- if (!ssidie) {
7225 ++ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
7226 + rcu_read_unlock();
7227 + kfree(assoc_data);
7228 + return -EINVAL;
7229 +diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
7230 +index af1497ab9464..69d6173f91e2 100644
7231 +--- a/net/netfilter/nft_connlimit.c
7232 ++++ b/net/netfilter/nft_connlimit.c
7233 +@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
7234 + static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
7235 + {
7236 + struct nft_connlimit *priv = nft_expr_priv(expr);
7237 ++ bool ret;
7238 +
7239 +- return nf_conncount_gc_list(net, &priv->list);
7240 ++ local_bh_disable();
7241 ++ ret = nf_conncount_gc_list(net, &priv->list);
7242 ++ local_bh_enable();
7243 ++
7244 ++ return ret;
7245 + }
7246 +
7247 + static struct nft_expr_type nft_connlimit_type;
7248 +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
7249 +index 3572e11b6f21..1c77f520f474 100644
7250 +--- a/net/openvswitch/actions.c
7251 ++++ b/net/openvswitch/actions.c
7252 +@@ -165,7 +165,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
7253 + {
7254 + int err;
7255 +
7256 +- err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
7257 ++ err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
7258 ++ skb->mac_len);
7259 + if (err)
7260 + return err;
7261 +
7262 +@@ -178,7 +179,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
7263 + {
7264 + int err;
7265 +
7266 +- err = skb_mpls_pop(skb, ethertype);
7267 ++ err = skb_mpls_pop(skb, ethertype, skb->mac_len);
7268 + if (err)
7269 + return err;
7270 +
7271 +diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
7272 +index c97ebdc043e4..48f67a9b1037 100644
7273 +--- a/net/rxrpc/peer_event.c
7274 ++++ b/net/rxrpc/peer_event.c
7275 +@@ -147,10 +147,16 @@ void rxrpc_error_report(struct sock *sk)
7276 + {
7277 + struct sock_exterr_skb *serr;
7278 + struct sockaddr_rxrpc srx;
7279 +- struct rxrpc_local *local = sk->sk_user_data;
7280 ++ struct rxrpc_local *local;
7281 + struct rxrpc_peer *peer;
7282 + struct sk_buff *skb;
7283 +
7284 ++ rcu_read_lock();
7285 ++ local = rcu_dereference_sk_user_data(sk);
7286 ++ if (unlikely(!local)) {
7287 ++ rcu_read_unlock();
7288 ++ return;
7289 ++ }
7290 + _enter("%p{%d}", sk, local->debug_id);
7291 +
7292 + /* Clear the outstanding error value on the socket so that it doesn't
7293 +@@ -160,6 +166,7 @@ void rxrpc_error_report(struct sock *sk)
7294 +
7295 + skb = sock_dequeue_err_skb(sk);
7296 + if (!skb) {
7297 ++ rcu_read_unlock();
7298 + _leave("UDP socket errqueue empty");
7299 + return;
7300 + }
7301 +@@ -167,11 +174,11 @@ void rxrpc_error_report(struct sock *sk)
7302 + serr = SKB_EXT_ERR(skb);
7303 + if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
7304 + _leave("UDP empty message");
7305 ++ rcu_read_unlock();
7306 + rxrpc_free_skb(skb, rxrpc_skb_freed);
7307 + return;
7308 + }
7309 +
7310 +- rcu_read_lock();
7311 + peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
7312 + if (peer && !rxrpc_get_peer_maybe(peer))
7313 + peer = NULL;
7314 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
7315 +index 2558f00f6b3e..69d4676a402f 100644
7316 +--- a/net/sched/act_api.c
7317 ++++ b/net/sched/act_api.c
7318 +@@ -832,8 +832,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
7319 + }
7320 +
7321 + static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
7322 +- [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
7323 +- .len = IFNAMSIZ - 1 },
7324 ++ [TCA_ACT_KIND] = { .type = NLA_STRING },
7325 + [TCA_ACT_INDEX] = { .type = NLA_U32 },
7326 + [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
7327 + .len = TC_COOKIE_MAX_SIZE },
7328 +@@ -865,8 +864,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
7329 + NL_SET_ERR_MSG(extack, "TC action kind must be specified");
7330 + goto err_out;
7331 + }
7332 +- nla_strlcpy(act_name, kind, IFNAMSIZ);
7333 +-
7334 ++ if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
7335 ++ NL_SET_ERR_MSG(extack, "TC action name too long");
7336 ++ goto err_out;
7337 ++ }
7338 + if (tb[TCA_ACT_COOKIE]) {
7339 + cookie = nla_memdup_cookie(tb);
7340 + if (!cookie) {
7341 +@@ -1352,11 +1353,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
7342 + struct netlink_ext_ack *extack)
7343 + {
7344 + size_t attr_size = 0;
7345 +- int ret = 0;
7346 ++ int loop, ret;
7347 + struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
7348 +
7349 +- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
7350 +- &attr_size, true, extack);
7351 ++ for (loop = 0; loop < 10; loop++) {
7352 ++ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
7353 ++ actions, &attr_size, true, extack);
7354 ++ if (ret != -EAGAIN)
7355 ++ break;
7356 ++ }
7357 ++
7358 + if (ret < 0)
7359 + return ret;
7360 + ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
7361 +@@ -1406,11 +1412,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
7362 + */
7363 + if (n->nlmsg_flags & NLM_F_REPLACE)
7364 + ovr = 1;
7365 +-replay:
7366 + ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
7367 + extack);
7368 +- if (ret == -EAGAIN)
7369 +- goto replay;
7370 + break;
7371 + case RTM_DELACTION:
7372 + ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
7373 +diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
7374 +index e168df0e008a..4cf6c553bb0b 100644
7375 +--- a/net/sched/act_mpls.c
7376 ++++ b/net/sched/act_mpls.c
7377 +@@ -55,7 +55,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
7378 + struct tcf_mpls *m = to_mpls(a);
7379 + struct tcf_mpls_params *p;
7380 + __be32 new_lse;
7381 +- int ret;
7382 ++ int ret, mac_len;
7383 +
7384 + tcf_lastuse_update(&m->tcf_tm);
7385 + bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
7386 +@@ -63,8 +63,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
7387 + /* Ensure 'data' points at mac_header prior calling mpls manipulating
7388 + * functions.
7389 + */
7390 +- if (skb_at_tc_ingress(skb))
7391 ++ if (skb_at_tc_ingress(skb)) {
7392 + skb_push_rcsum(skb, skb->mac_len);
7393 ++ mac_len = skb->mac_len;
7394 ++ } else {
7395 ++ mac_len = skb_network_header(skb) - skb_mac_header(skb);
7396 ++ }
7397 +
7398 + ret = READ_ONCE(m->tcf_action);
7399 +
7400 +@@ -72,12 +76,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
7401 +
7402 + switch (p->tcfm_action) {
7403 + case TCA_MPLS_ACT_POP:
7404 +- if (skb_mpls_pop(skb, p->tcfm_proto))
7405 ++ if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
7406 + goto drop;
7407 + break;
7408 + case TCA_MPLS_ACT_PUSH:
7409 + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
7410 +- if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
7411 ++ if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
7412 + goto drop;
7413 + break;
7414 + case TCA_MPLS_ACT_MODIFY:
7415 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
7416 +index 9aef93300f1c..6b12883e04b8 100644
7417 +--- a/net/sched/cls_api.c
7418 ++++ b/net/sched/cls_api.c
7419 +@@ -160,11 +160,22 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
7420 + return TC_H_MAJ(first);
7421 + }
7422 +
7423 ++static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
7424 ++{
7425 ++ if (kind)
7426 ++ return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
7427 ++ memset(name, 0, IFNAMSIZ);
7428 ++ return false;
7429 ++}
7430 ++
7431 + static bool tcf_proto_is_unlocked(const char *kind)
7432 + {
7433 + const struct tcf_proto_ops *ops;
7434 + bool ret;
7435 +
7436 ++ if (strlen(kind) == 0)
7437 ++ return false;
7438 ++
7439 + ops = tcf_proto_lookup_ops(kind, false, NULL);
7440 + /* On error return false to take rtnl lock. Proto lookup/create
7441 + * functions will perform lookup again and properly handle errors.
7442 +@@ -1976,6 +1987,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
7443 + {
7444 + struct net *net = sock_net(skb->sk);
7445 + struct nlattr *tca[TCA_MAX + 1];
7446 ++ char name[IFNAMSIZ];
7447 + struct tcmsg *t;
7448 + u32 protocol;
7449 + u32 prio;
7450 +@@ -2032,13 +2044,19 @@ replay:
7451 + if (err)
7452 + return err;
7453 +
7454 ++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
7455 ++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
7456 ++ err = -EINVAL;
7457 ++ goto errout;
7458 ++ }
7459 ++
7460 + /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
7461 + * block is shared (no qdisc found), qdisc is not unlocked, classifier
7462 + * type is not specified, classifier is not unlocked.
7463 + */
7464 + if (rtnl_held ||
7465 + (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
7466 +- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
7467 ++ !tcf_proto_is_unlocked(name)) {
7468 + rtnl_held = true;
7469 + rtnl_lock();
7470 + }
7471 +@@ -2196,6 +2214,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
7472 + {
7473 + struct net *net = sock_net(skb->sk);
7474 + struct nlattr *tca[TCA_MAX + 1];
7475 ++ char name[IFNAMSIZ];
7476 + struct tcmsg *t;
7477 + u32 protocol;
7478 + u32 prio;
7479 +@@ -2235,13 +2254,18 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
7480 + if (err)
7481 + return err;
7482 +
7483 ++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
7484 ++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
7485 ++ err = -EINVAL;
7486 ++ goto errout;
7487 ++ }
7488 + /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
7489 + * found), qdisc is not unlocked, classifier type is not specified,
7490 + * classifier is not unlocked.
7491 + */
7492 + if (!prio ||
7493 + (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
7494 +- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
7495 ++ !tcf_proto_is_unlocked(name)) {
7496 + rtnl_held = true;
7497 + rtnl_lock();
7498 + }
7499 +@@ -2349,6 +2373,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
7500 + {
7501 + struct net *net = sock_net(skb->sk);
7502 + struct nlattr *tca[TCA_MAX + 1];
7503 ++ char name[IFNAMSIZ];
7504 + struct tcmsg *t;
7505 + u32 protocol;
7506 + u32 prio;
7507 +@@ -2385,12 +2410,17 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
7508 + if (err)
7509 + return err;
7510 +
7511 ++ if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
7512 ++ NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
7513 ++ err = -EINVAL;
7514 ++ goto errout;
7515 ++ }
7516 + /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
7517 + * unlocked, classifier type is not specified, classifier is not
7518 + * unlocked.
7519 + */
7520 + if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
7521 +- !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
7522 ++ !tcf_proto_is_unlocked(name)) {
7523 + rtnl_held = true;
7524 + rtnl_lock();
7525 + }
7526 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
7527 +index 81d58b280612..1047825d9f48 100644
7528 +--- a/net/sched/sch_api.c
7529 ++++ b/net/sched/sch_api.c
7530 +@@ -1390,8 +1390,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
7531 + }
7532 +
7533 + const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
7534 +- [TCA_KIND] = { .type = NLA_NUL_STRING,
7535 +- .len = IFNAMSIZ - 1 },
7536 ++ [TCA_KIND] = { .type = NLA_STRING },
7537 + [TCA_RATE] = { .type = NLA_BINARY,
7538 + .len = sizeof(struct tc_estimator) },
7539 + [TCA_STAB] = { .type = NLA_NESTED },
7540 +diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
7541 +index cebfb65d8556..b1da5589a0c6 100644
7542 +--- a/net/sched/sch_etf.c
7543 ++++ b/net/sched/sch_etf.c
7544 +@@ -177,7 +177,7 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
7545 +
7546 + parent = *p;
7547 + skb = rb_to_skb(parent);
7548 +- if (ktime_after(txtime, skb->tstamp)) {
7549 ++ if (ktime_compare(txtime, skb->tstamp) >= 0) {
7550 + p = &parent->rb_right;
7551 + leftmost = false;
7552 + } else {
7553 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
7554 +index b083d4e66230..8fd7b0e6ce9f 100644
7555 +--- a/net/sctp/socket.c
7556 ++++ b/net/sctp/socket.c
7557 +@@ -9353,7 +9353,7 @@ struct proto sctp_prot = {
7558 + .backlog_rcv = sctp_backlog_rcv,
7559 + .hash = sctp_hash,
7560 + .unhash = sctp_unhash,
7561 +- .get_port = sctp_get_port,
7562 ++ .no_autobind = true,
7563 + .obj_size = sizeof(struct sctp_sock),
7564 + .useroffset = offsetof(struct sctp_sock, subscribe),
7565 + .usersize = offsetof(struct sctp_sock, initmsg) -
7566 +@@ -9395,7 +9395,7 @@ struct proto sctpv6_prot = {
7567 + .backlog_rcv = sctp_backlog_rcv,
7568 + .hash = sctp_hash,
7569 + .unhash = sctp_unhash,
7570 +- .get_port = sctp_get_port,
7571 ++ .no_autobind = true,
7572 + .obj_size = sizeof(struct sctp6_sock),
7573 + .useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
7574 + .usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
7575 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
7576 +index f03459ddc840..c2ce582ea143 100644
7577 +--- a/net/wireless/nl80211.c
7578 ++++ b/net/wireless/nl80211.c
7579 +@@ -6184,6 +6184,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
7580 + if (!rdev->ops->del_mpath)
7581 + return -EOPNOTSUPP;
7582 +
7583 ++ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
7584 ++ return -EOPNOTSUPP;
7585 ++
7586 + return rdev_del_mpath(rdev, dev, dst);
7587 + }
7588 +
7589 +diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
7590 +index c67d7a82ab13..73fd0eae08ca 100644
7591 +--- a/net/wireless/wext-sme.c
7592 ++++ b/net/wireless/wext-sme.c
7593 +@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
7594 + struct iw_point *data, char *ssid)
7595 + {
7596 + struct wireless_dev *wdev = dev->ieee80211_ptr;
7597 ++ int ret = 0;
7598 +
7599 + /* call only for station! */
7600 + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
7601 +@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
7602 + if (ie) {
7603 + data->flags = 1;
7604 + data->length = ie[1];
7605 +- memcpy(ssid, ie + 2, data->length);
7606 ++ if (data->length > IW_ESSID_MAX_SIZE)
7607 ++ ret = -EINVAL;
7608 ++ else
7609 ++ memcpy(ssid, ie + 2, data->length);
7610 + }
7611 + rcu_read_unlock();
7612 + } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
7613 +@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
7614 + }
7615 + wdev_unlock(wdev);
7616 +
7617 +- return 0;
7618 ++ return ret;
7619 + }
7620 +
7621 + int cfg80211_mgd_wext_siwap(struct net_device *dev,
7622 +diff --git a/scripts/namespace.pl b/scripts/namespace.pl
7623 +index 6135574a6f39..1da7bca201a4 100755
7624 +--- a/scripts/namespace.pl
7625 ++++ b/scripts/namespace.pl
7626 +@@ -65,13 +65,14 @@
7627 + use warnings;
7628 + use strict;
7629 + use File::Find;
7630 ++use File::Spec;
7631 +
7632 + my $nm = ($ENV{'NM'} || "nm") . " -p";
7633 + my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
7634 +-my $srctree = "";
7635 +-my $objtree = "";
7636 +-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
7637 +-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
7638 ++my $srctree = File::Spec->curdir();
7639 ++my $objtree = File::Spec->curdir();
7640 ++$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
7641 ++$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
7642 +
7643 + if ($#ARGV != -1) {
7644 + print STDERR "usage: $0 takes no parameters\n";
7645 +@@ -231,9 +232,9 @@ sub do_nm
7646 + }
7647 + ($source = $basename) =~ s/\.o$//;
7648 + if (-e "$source.c" || -e "$source.S") {
7649 +- $source = "$objtree$File::Find::dir/$source";
7650 ++ $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
7651 + } else {
7652 +- $source = "$srctree$File::Find::dir/$source";
7653 ++ $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
7654 + }
7655 + if (! -e "$source.c" && ! -e "$source.S") {
7656 + # No obvious source, exclude the object if it is conglomerate
7657 +diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
7658 +index d568e17dd773..74a13d432ed8 100644
7659 +--- a/security/safesetid/securityfs.c
7660 ++++ b/security/safesetid/securityfs.c
7661 +@@ -187,7 +187,8 @@ out_free_rule:
7662 + out_free_buf:
7663 + kfree(buf);
7664 + out_free_pol:
7665 +- release_ruleset(pol);
7666 ++ if (pol)
7667 ++ release_ruleset(pol);
7668 + return err;
7669 + }
7670 +
7671 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
7672 +index 36240def9bf5..00796c7727ea 100644
7673 +--- a/sound/pci/hda/patch_hdmi.c
7674 ++++ b/sound/pci/hda/patch_hdmi.c
7675 +@@ -3307,6 +3307,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
7676 + nvhdmi_chmap_cea_alloc_validate_get_type;
7677 + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
7678 +
7679 ++ codec->link_down_at_suspend = 1;
7680 ++
7681 + return 0;
7682 + }
7683 +
7684 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7685 +index 36aee8ad2054..26249c607f2c 100644
7686 +--- a/sound/pci/hda/patch_realtek.c
7687 ++++ b/sound/pci/hda/patch_realtek.c
7688 +@@ -393,6 +393,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
7689 + case 0x10ec0700:
7690 + case 0x10ec0701:
7691 + case 0x10ec0703:
7692 ++ case 0x10ec0711:
7693 + alc_update_coef_idx(codec, 0x10, 1<<15, 0);
7694 + break;
7695 + case 0x10ec0662:
7696 +@@ -5867,6 +5868,7 @@ enum {
7697 + ALC225_FIXUP_WYSE_AUTO_MUTE,
7698 + ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
7699 + ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
7700 ++ ALC256_FIXUP_ASUS_HEADSET_MIC,
7701 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7702 + ALC299_FIXUP_PREDATOR_SPK,
7703 + ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
7704 +@@ -6901,6 +6903,15 @@ static const struct hda_fixup alc269_fixups[] = {
7705 + .chained = true,
7706 + .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
7707 + },
7708 ++ [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
7709 ++ .type = HDA_FIXUP_PINS,
7710 ++ .v.pins = (const struct hda_pintbl[]) {
7711 ++ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
7712 ++ { }
7713 ++ },
7714 ++ .chained = true,
7715 ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
7716 ++ },
7717 + [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
7718 + .type = HDA_FIXUP_PINS,
7719 + .v.pins = (const struct hda_pintbl[]) {
7720 +@@ -7097,6 +7108,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7721 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
7722 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
7723 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
7724 ++ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
7725 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
7726 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
7727 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
7728 +@@ -7965,6 +7977,7 @@ static int patch_alc269(struct hda_codec *codec)
7729 + case 0x10ec0700:
7730 + case 0x10ec0701:
7731 + case 0x10ec0703:
7732 ++ case 0x10ec0711:
7733 + spec->codec_variant = ALC269_TYPE_ALC700;
7734 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7735 + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7736 +@@ -9105,6 +9118,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7737 + HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
7738 + HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
7739 + HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
7740 ++ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
7741 + HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
7742 + HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
7743 + HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
7744 +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
7745 +index 56e8dae9a15c..217f2aa06139 100644
7746 +--- a/sound/soc/sh/rcar/core.c
7747 ++++ b/sound/soc/sh/rcar/core.c
7748 +@@ -761,6 +761,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7749 + }
7750 +
7751 + /* set format */
7752 ++ rdai->bit_clk_inv = 0;
7753 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
7754 + case SND_SOC_DAIFMT_I2S:
7755 + rdai->sys_delay = 0;
7756 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
7757 +index 33cd26763c0e..ff5ab24f3bd1 100644
7758 +--- a/sound/usb/pcm.c
7759 ++++ b/sound/usb/pcm.c
7760 +@@ -348,6 +348,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
7761 + ep = 0x84;
7762 + ifnum = 0;
7763 + goto add_sync_ep_from_ifnum;
7764 ++ case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
7765 ++ /* BOSS Katana amplifiers do not need quirks */
7766 ++ return 0;
7767 + }
7768 +
7769 + if (attr == USB_ENDPOINT_SYNC_ASYNC &&
7770 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
7771 +index ba7849751989..fc8aeb224c03 100644
7772 +--- a/tools/testing/selftests/kvm/Makefile
7773 ++++ b/tools/testing/selftests/kvm/Makefile
7774 +@@ -46,7 +46,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
7775 + -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
7776 +
7777 + no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
7778 +- $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
7779 ++ $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
7780 +
7781 + # On s390, build the testcases KVM-enabled
7782 + pgste-option = $(call try-run, echo 'int main() { return 0; }' | \