Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Wed, 15 Sep 2021 11:58:36
Message-Id: 1631707100.756955cf3ec599943c85ce5eed917d9441d0d6a9.mpagano@gentoo
1 commit: 756955cf3ec599943c85ce5eed917d9441d0d6a9
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 15 11:58:20 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 15 11:58:20 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=756955cf
7
8 Linux patch 5.14.4
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +
13 1003_linux-5.14.4.patch | 13171 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 13179 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f4fbe66..79faaf3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -55,6 +55,14 @@ Patch: 1002_linux-5.14.3.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.14.3
23
24 +Patch: 1002_linux-5.14.3.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.14.3
27 +
28 +Patch: 1003_linux-5.14.4.patch
29 +From: http://www.kernel.org
30 +Desc: Linux 5.14.4
31 +
32 Patch: 1500_XATTR_USER_PREFIX.patch
33 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
34 Desc: Support for namespace user.pax.* on tmpfs.
35
36 diff --git a/1003_linux-5.14.4.patch b/1003_linux-5.14.4.patch
37 new file mode 100644
38 index 0000000..2f4c377
39 --- /dev/null
40 +++ b/1003_linux-5.14.4.patch
41 @@ -0,0 +1,13171 @@
42 +diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst
43 +index a20ba5d939320..18de17354206a 100644
44 +--- a/Documentation/fault-injection/provoke-crashes.rst
45 ++++ b/Documentation/fault-injection/provoke-crashes.rst
46 +@@ -29,7 +29,7 @@ recur_count
47 + cpoint_name
48 + Where in the kernel to trigger the action. It can be
49 + one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
50 +- FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
51 ++ FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ,
52 + IDE_CORE_CP, or DIRECT
53 +
54 + cpoint_type
55 +diff --git a/Makefile b/Makefile
56 +index 8715942fccb4a..e16a1a80074cd 100644
57 +--- a/Makefile
58 ++++ b/Makefile
59 +@@ -1,7 +1,7 @@
60 + # SPDX-License-Identifier: GPL-2.0
61 + VERSION = 5
62 + PATCHLEVEL = 14
63 +-SUBLEVEL = 3
64 ++SUBLEVEL = 4
65 + EXTRAVERSION =
66 + NAME = Opossums on Parade
67 +
68 +diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
69 +index aa24cac8e5be5..44b03a5e24166 100644
70 +--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
71 ++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
72 +@@ -2832,7 +2832,7 @@
73 +
74 + &emmc {
75 + status = "okay";
76 +- clk-phase-mmc-hs200 = <180>, <180>;
77 ++ clk-phase-mmc-hs200 = <210>, <228>;
78 + };
79 +
80 + &fsim0 {
81 +diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
82 +index 7e90d713f5e58..6dde51c2aed3f 100644
83 +--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
84 ++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
85 +@@ -208,12 +208,12 @@
86 + };
87 +
88 + pinctrl_hvi3c3_default: hvi3c3_default {
89 +- function = "HVI3C3";
90 ++ function = "I3C3";
91 + groups = "HVI3C3";
92 + };
93 +
94 + pinctrl_hvi3c4_default: hvi3c4_default {
95 +- function = "HVI3C4";
96 ++ function = "I3C4";
97 + groups = "HVI3C4";
98 + };
99 +
100 +diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
101 +index edca66c232c15..ebbc9b23aef1c 100644
102 +--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
103 ++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
104 +@@ -92,6 +92,8 @@
105 +
106 + leds {
107 + compatible = "gpio-leds";
108 ++ pinctrl-names = "default";
109 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
110 + status = "okay"; /* Conflict with pwm0. */
111 +
112 + red {
113 +@@ -537,6 +539,10 @@
114 + AT91_PIOA 19 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA19 DAT2 periph A with pullup */
115 + AT91_PIOA 20 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>; /* PA20 DAT3 periph A with pullup */
116 + };
117 ++ pinctrl_sdmmc0_cd: sdmmc0_cd {
118 ++ atmel,pins =
119 ++ <AT91_PIOA 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
120 ++ };
121 + };
122 +
123 + sdmmc1 {
124 +@@ -569,6 +575,14 @@
125 + AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
126 + };
127 + };
128 ++
129 ++ leds {
130 ++ pinctrl_gpio_leds: gpio_leds {
131 ++ atmel,pins = <AT91_PIOB 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
132 ++ AT91_PIOB 12 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
133 ++ AT91_PIOB 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
134 ++ };
135 ++ };
136 + }; /* pinctrl */
137 +
138 + &pwm0 {
139 +@@ -580,7 +594,7 @@
140 + &sdmmc0 {
141 + bus-width = <4>;
142 + pinctrl-names = "default";
143 +- pinctrl-0 = <&pinctrl_sdmmc0_default>;
144 ++ pinctrl-0 = <&pinctrl_sdmmc0_default &pinctrl_sdmmc0_cd>;
145 + status = "okay";
146 + cd-gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
147 + disable-wp;
148 +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
149 +index 9c55a921263bd..cc55d1684322b 100644
150 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
151 ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
152 +@@ -57,6 +57,8 @@
153 + };
154 +
155 + spi0: spi@f0004000 {
156 ++ pinctrl-names = "default";
157 ++ pinctrl-0 = <&pinctrl_spi0_cs>;
158 + cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
159 + status = "okay";
160 + };
161 +@@ -169,6 +171,8 @@
162 + };
163 +
164 + spi1: spi@f8008000 {
165 ++ pinctrl-names = "default";
166 ++ pinctrl-0 = <&pinctrl_spi1_cs>;
167 + cs-gpios = <&pioC 25 0>;
168 + status = "okay";
169 + };
170 +@@ -248,6 +252,26 @@
171 + <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
172 + AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
173 + };
174 ++
175 ++ pinctrl_gpio_leds: gpio_leds_default {
176 ++ atmel,pins =
177 ++ <AT91_PIOE 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
178 ++ AT91_PIOE 24 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
179 ++ };
180 ++
181 ++ pinctrl_spi0_cs: spi0_cs_default {
182 ++ atmel,pins =
183 ++ <AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
184 ++ AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
185 ++ };
186 ++
187 ++ pinctrl_spi1_cs: spi1_cs_default {
188 ++ atmel,pins = <AT91_PIOC 25 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
189 ++ };
190 ++
191 ++ pinctrl_vcc_mmc0_reg_gpio: vcc_mmc0_reg_gpio_default {
192 ++ atmel,pins = <AT91_PIOE 2 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
193 ++ };
194 + };
195 + };
196 + };
197 +@@ -339,6 +363,8 @@
198 +
199 + vcc_mmc0_reg: fixedregulator_mmc0 {
200 + compatible = "regulator-fixed";
201 ++ pinctrl-names = "default";
202 ++ pinctrl-0 = <&pinctrl_vcc_mmc0_reg_gpio>;
203 + gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
204 + regulator-name = "mmc0-card-supply";
205 + regulator-min-microvolt = <3300000>;
206 +@@ -362,6 +388,9 @@
207 +
208 + leds {
209 + compatible = "gpio-leds";
210 ++ pinctrl-names = "default";
211 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
212 ++ status = "okay";
213 +
214 + d2 {
215 + label = "d2";
216 +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
217 +index 0b3ad1b580b83..e42dae06b5826 100644
218 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
219 ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
220 +@@ -90,6 +90,8 @@
221 + };
222 +
223 + spi1: spi@fc018000 {
224 ++ pinctrl-names = "default";
225 ++ pinctrl-0 = <&pinctrl_spi0_cs>;
226 + cs-gpios = <&pioB 21 0>;
227 + status = "okay";
228 + };
229 +@@ -147,6 +149,19 @@
230 + atmel,pins =
231 + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
232 + };
233 ++ pinctrl_spi0_cs: spi0_cs_default {
234 ++ atmel,pins =
235 ++ <AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
236 ++ };
237 ++ pinctrl_gpio_leds: gpio_leds_default {
238 ++ atmel,pins =
239 ++ <AT91_PIOD 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
240 ++ AT91_PIOE 15 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
241 ++ };
242 ++ pinctrl_vcc_mmc1_reg: vcc_mmc1_reg {
243 ++ atmel,pins =
244 ++ <AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
245 ++ };
246 + };
247 + };
248 + };
249 +@@ -252,6 +267,8 @@
250 +
251 + leds {
252 + compatible = "gpio-leds";
253 ++ pinctrl-names = "default";
254 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
255 + status = "okay";
256 +
257 + d8 {
258 +@@ -278,6 +295,8 @@
259 +
260 + vcc_mmc1_reg: fixedregulator_mmc1 {
261 + compatible = "regulator-fixed";
262 ++ pinctrl-names = "default";
263 ++ pinctrl-0 = <&pinctrl_vcc_mmc1_reg>;
264 + gpio = <&pioE 4 GPIO_ACTIVE_LOW>;
265 + regulator-name = "VDD MCI1";
266 + regulator-min-microvolt = <3300000>;
267 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
268 +index 157a950a55d38..686c7b7c79d55 100644
269 +--- a/arch/arm/boot/dts/meson8.dtsi
270 ++++ b/arch/arm/boot/dts/meson8.dtsi
271 +@@ -304,8 +304,13 @@
272 + "pp2", "ppmmu2", "pp4", "ppmmu4",
273 + "pp5", "ppmmu5", "pp6", "ppmmu6";
274 + resets = <&reset RESET_MALI>;
275 ++
276 + clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
277 + clock-names = "bus", "core";
278 ++
279 ++ assigned-clocks = <&clkc CLKID_MALI>;
280 ++ assigned-clock-rates = <318750000>;
281 ++
282 + operating-points-v2 = <&gpu_opp_table>;
283 + #cooling-cells = <2>; /* min followed by max */
284 + };
285 +diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
286 +index 8e48ccc6b634e..7e8ddc6f1252b 100644
287 +--- a/arch/arm/boot/dts/meson8b-ec100.dts
288 ++++ b/arch/arm/boot/dts/meson8b-ec100.dts
289 +@@ -148,7 +148,7 @@
290 + regulator-min-microvolt = <860000>;
291 + regulator-max-microvolt = <1140000>;
292 +
293 +- vin-supply = <&vcc_5v>;
294 ++ pwm-supply = <&vcc_5v>;
295 +
296 + pwms = <&pwm_cd 0 1148 0>;
297 + pwm-dutycycle-range = <100 0>;
298 +@@ -232,7 +232,7 @@
299 + regulator-min-microvolt = <860000>;
300 + regulator-max-microvolt = <1140000>;
301 +
302 +- vin-supply = <&vcc_5v>;
303 ++ pwm-supply = <&vcc_5v>;
304 +
305 + pwms = <&pwm_cd 1 1148 0>;
306 + pwm-dutycycle-range = <100 0>;
307 +diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
308 +index f3937d55472d4..7adedd3258c33 100644
309 +--- a/arch/arm/boot/dts/meson8b-mxq.dts
310 ++++ b/arch/arm/boot/dts/meson8b-mxq.dts
311 +@@ -34,6 +34,8 @@
312 + regulator-min-microvolt = <860000>;
313 + regulator-max-microvolt = <1140000>;
314 +
315 ++ pwm-supply = <&vcc_5v>;
316 ++
317 + pwms = <&pwm_cd 0 1148 0>;
318 + pwm-dutycycle-range = <100 0>;
319 +
320 +@@ -79,7 +81,7 @@
321 + regulator-min-microvolt = <860000>;
322 + regulator-max-microvolt = <1140000>;
323 +
324 +- vin-supply = <&vcc_5v>;
325 ++ pwm-supply = <&vcc_5v>;
326 +
327 + pwms = <&pwm_cd 1 1148 0>;
328 + pwm-dutycycle-range = <100 0>;
329 +diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
330 +index c440ef94e0820..04356bc639faf 100644
331 +--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
332 ++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
333 +@@ -131,7 +131,7 @@
334 + regulator-min-microvolt = <860000>;
335 + regulator-max-microvolt = <1140000>;
336 +
337 +- vin-supply = <&p5v0>;
338 ++ pwm-supply = <&p5v0>;
339 +
340 + pwms = <&pwm_cd 0 12218 0>;
341 + pwm-dutycycle-range = <91 0>;
342 +@@ -163,7 +163,7 @@
343 + regulator-min-microvolt = <860000>;
344 + regulator-max-microvolt = <1140000>;
345 +
346 +- vin-supply = <&p5v0>;
347 ++ pwm-supply = <&p5v0>;
348 +
349 + pwms = <&pwm_cd 1 12218 0>;
350 + pwm-dutycycle-range = <91 0>;
351 +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
352 +index 10244e59d56dd..56a0bb7eb0e69 100644
353 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
354 ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
355 +@@ -102,7 +102,7 @@
356 + #address-cells = <0>;
357 + interrupt-controller;
358 + reg = <0x11001000 0x1000>,
359 +- <0x11002000 0x1000>,
360 ++ <0x11002000 0x2000>,
361 + <0x11004000 0x2000>,
362 + <0x11006000 0x2000>;
363 + };
364 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
365 +index a05b1ab2dd12c..04da07ae44208 100644
366 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
367 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
368 +@@ -135,6 +135,23 @@
369 + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
370 + status = "okay";
371 + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
372 ++ /*
373 ++ * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
374 ++ * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
375 ++ * 2 size cells and also expects that the second range starts at 16 MB offset. If these
376 ++ * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
377 ++ * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
378 ++ * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
379 ++ * This bug is not present in U-Boot ports for other Armada 3700 devices and is fixed in
380 ++ * U-Boot version 2021.07. See relevant U-Boot commits (the last one contains fix):
381 ++ * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
382 ++ * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
383 ++ * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
384 ++ */
385 ++ #address-cells = <3>;
386 ++ #size-cells = <2>;
387 ++ ranges = <0x81000000 0 0xe8000000 0 0xe8000000 0 0x01000000 /* Port 0 IO */
388 ++ 0x82000000 0 0xe9000000 0 0xe9000000 0 0x07000000>; /* Port 0 MEM */
389 +
390 + /* enabled by U-Boot if PCIe module is present */
391 + status = "disabled";
392 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
393 +index 5db81a416cd65..9acc5d2b5a002 100644
394 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
395 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
396 +@@ -489,8 +489,15 @@
397 + #interrupt-cells = <1>;
398 + msi-parent = <&pcie0>;
399 + msi-controller;
400 +- ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
401 +- 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
402 ++ /*
403 ++ * The 128 MiB address range [0xe8000000-0xf0000000] is
404 ++ * dedicated for PCIe and can be assigned to 8 windows
405 ++ * with size a power of two. Use one 64 KiB window for
406 ++ * IO at the end and the remaining seven windows
407 ++ * (totaling 127 MiB) for MEM.
408 ++ */
409 ++ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
410 ++ 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
411 + interrupt-map-mask = <0 0 0 7>;
412 + interrupt-map = <0 0 0 1 &pcie_intc 0>,
413 + <0 0 0 2 &pcie_intc 1>,
414 +diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
415 +index 6f9c071475513..a758e4d226122 100644
416 +--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
417 ++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
418 +@@ -23,7 +23,7 @@ ap_h1_spi: &spi0 {};
419 + adau7002: audio-codec-1 {
420 + compatible = "adi,adau7002";
421 + IOVDD-supply = <&pp1800_l15a>;
422 +- wakeup-delay-ms = <15>;
423 ++ wakeup-delay-ms = <80>;
424 + #sound-dai-cells = <0>;
425 + };
426 +
427 +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
428 +index 188c5768a55ae..c08f074106994 100644
429 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
430 ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
431 +@@ -1437,9 +1437,9 @@
432 +
433 + cpufreq_hw: cpufreq@18591000 {
434 + compatible = "qcom,cpufreq-epss";
435 +- reg = <0 0x18591000 0 0x1000>,
436 +- <0 0x18592000 0 0x1000>,
437 +- <0 0x18593000 0 0x1000>;
438 ++ reg = <0 0x18591100 0 0x900>,
439 ++ <0 0x18592100 0 0x900>,
440 ++ <0 0x18593100 0 0x900>;
441 + clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
442 + clock-names = "xo", "alternate";
443 + #freq-domain-cells = <1>;
444 +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
445 +index 4798368b02efb..9a6eff1813a68 100644
446 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
447 ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
448 +@@ -2210,7 +2210,7 @@
449 + <&gcc GCC_USB3_PHY_SEC_BCR>;
450 + reset-names = "phy", "common";
451 +
452 +- usb_2_ssphy: lane@88eb200 {
453 ++ usb_2_ssphy: lanes@88eb200 {
454 + reg = <0 0x088eb200 0 0x200>,
455 + <0 0x088eb400 0 0x200>,
456 + <0 0x088eb800 0 0x800>;
457 +diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
458 +index 0d16392bb9767..dbc174d424e26 100644
459 +--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
460 ++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
461 +@@ -666,12 +666,10 @@
462 + clocks = <&rpmhcc RPMH_IPA_CLK>;
463 + clock-names = "core";
464 +
465 +- interconnects = <&aggre2_noc MASTER_IPA &gem_noc SLAVE_LLCC>,
466 +- <&mc_virt MASTER_LLCC &mc_virt SLAVE_EBI1>,
467 ++ interconnects = <&aggre2_noc MASTER_IPA &mc_virt SLAVE_EBI1>,
468 + <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
469 +- interconnect-names = "ipa_to_llcc",
470 +- "llcc_to_ebi1",
471 +- "appss_to_ipa";
472 ++ interconnect-names = "memory",
473 ++ "config";
474 +
475 + qcom,smem-states = <&ipa_smp2p_out 0>,
476 + <&ipa_smp2p_out 1>;
477 +diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
478 +index 202c4fc88bd51..dde3a07bc417c 100644
479 +--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
480 ++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
481 +@@ -20,6 +20,7 @@
482 + pinctrl-names = "default";
483 + phy-handle = <&phy0>;
484 + tx-internal-delay-ps = <2000>;
485 ++ rx-internal-delay-ps = <1800>;
486 + status = "okay";
487 +
488 + phy0: ethernet-phy@0 {
489 +diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
490 +index 6783c3ad08567..57784341f39d7 100644
491 +--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
492 ++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
493 +@@ -277,10 +277,6 @@
494 + interrupt-parent = <&gpio1>;
495 + interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
496 +
497 +- /* Depends on LVDS */
498 +- max-clock = <135000000>;
499 +- min-vrefresh = <50>;
500 +-
501 + adi,input-depth = <8>;
502 + adi,input-colorspace = "rgb";
503 + adi,input-clock = "1x";
504 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
505 +index 0ca72f5cda41b..5d1fc9c4bca5e 100644
506 +--- a/arch/arm64/kvm/arm.c
507 ++++ b/arch/arm64/kvm/arm.c
508 +@@ -15,6 +15,7 @@
509 + #include <linux/fs.h>
510 + #include <linux/mman.h>
511 + #include <linux/sched.h>
512 ++#include <linux/kmemleak.h>
513 + #include <linux/kvm.h>
514 + #include <linux/kvm_irqfd.h>
515 + #include <linux/irqbypass.h>
516 +@@ -1986,6 +1987,12 @@ static int finalize_hyp_mode(void)
517 + if (ret)
518 + return ret;
519 +
520 ++ /*
521 ++ * Exclude HYP BSS from kmemleak so that it doesn't get peeked
522 ++ * at, which would end badly once the section is inaccessible.
523 ++ * None of other sections should ever be introspected.
524 ++ */
525 ++ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
526 + ret = pkvm_mark_hyp_section(__hyp_bss);
527 + if (ret)
528 + return ret;
529 +diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
530 +index 2c580204f1dc9..95a18cec14a35 100644
531 +--- a/arch/arm64/kvm/vgic/vgic-v2.c
532 ++++ b/arch/arm64/kvm/vgic/vgic-v2.c
533 +@@ -60,6 +60,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
534 + u32 val = cpuif->vgic_lr[lr];
535 + u32 cpuid, intid = val & GICH_LR_VIRTUALID;
536 + struct vgic_irq *irq;
537 ++ bool deactivated;
538 +
539 + /* Extract the source vCPU id from the LR */
540 + cpuid = val & GICH_LR_PHYSID_CPUID;
541 +@@ -75,7 +76,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
542 +
543 + raw_spin_lock(&irq->irq_lock);
544 +
545 +- /* Always preserve the active bit */
546 ++ /* Always preserve the active bit, note deactivation */
547 ++ deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
548 + irq->active = !!(val & GICH_LR_ACTIVE_BIT);
549 +
550 + if (irq->active && vgic_irq_is_sgi(intid))
551 +@@ -96,36 +98,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
552 + if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
553 + irq->pending_latch = false;
554 +
555 +- /*
556 +- * Level-triggered mapped IRQs are special because we only
557 +- * observe rising edges as input to the VGIC.
558 +- *
559 +- * If the guest never acked the interrupt we have to sample
560 +- * the physical line and set the line level, because the
561 +- * device state could have changed or we simply need to
562 +- * process the still pending interrupt later.
563 +- *
564 +- * If this causes us to lower the level, we have to also clear
565 +- * the physical active state, since we will otherwise never be
566 +- * told when the interrupt becomes asserted again.
567 +- *
568 +- * Another case is when the interrupt requires a helping hand
569 +- * on deactivation (no HW deactivation, for example).
570 +- */
571 +- if (vgic_irq_is_mapped_level(irq)) {
572 +- bool resample = false;
573 +-
574 +- if (val & GICH_LR_PENDING_BIT) {
575 +- irq->line_level = vgic_get_phys_line_level(irq);
576 +- resample = !irq->line_level;
577 +- } else if (vgic_irq_needs_resampling(irq) &&
578 +- !(irq->active || irq->pending_latch)) {
579 +- resample = true;
580 +- }
581 +-
582 +- if (resample)
583 +- vgic_irq_set_phys_active(irq, false);
584 +- }
585 ++ /* Handle resampling for mapped interrupts if required */
586 ++ vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
587 +
588 + raw_spin_unlock(&irq->irq_lock);
589 + vgic_put_irq(vcpu->kvm, irq);
590 +diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
591 +index 66004f61cd83d..21a6207fb2eed 100644
592 +--- a/arch/arm64/kvm/vgic/vgic-v3.c
593 ++++ b/arch/arm64/kvm/vgic/vgic-v3.c
594 +@@ -46,6 +46,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
595 + u32 intid, cpuid;
596 + struct vgic_irq *irq;
597 + bool is_v2_sgi = false;
598 ++ bool deactivated;
599 +
600 + cpuid = val & GICH_LR_PHYSID_CPUID;
601 + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
602 +@@ -68,7 +69,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
603 +
604 + raw_spin_lock(&irq->irq_lock);
605 +
606 +- /* Always preserve the active bit */
607 ++ /* Always preserve the active bit, note deactivation */
608 ++ deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
609 + irq->active = !!(val & ICH_LR_ACTIVE_BIT);
610 +
611 + if (irq->active && is_v2_sgi)
612 +@@ -89,36 +91,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
613 + if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
614 + irq->pending_latch = false;
615 +
616 +- /*
617 +- * Level-triggered mapped IRQs are special because we only
618 +- * observe rising edges as input to the VGIC.
619 +- *
620 +- * If the guest never acked the interrupt we have to sample
621 +- * the physical line and set the line level, because the
622 +- * device state could have changed or we simply need to
623 +- * process the still pending interrupt later.
624 +- *
625 +- * If this causes us to lower the level, we have to also clear
626 +- * the physical active state, since we will otherwise never be
627 +- * told when the interrupt becomes asserted again.
628 +- *
629 +- * Another case is when the interrupt requires a helping hand
630 +- * on deactivation (no HW deactivation, for example).
631 +- */
632 +- if (vgic_irq_is_mapped_level(irq)) {
633 +- bool resample = false;
634 +-
635 +- if (val & ICH_LR_PENDING_BIT) {
636 +- irq->line_level = vgic_get_phys_line_level(irq);
637 +- resample = !irq->line_level;
638 +- } else if (vgic_irq_needs_resampling(irq) &&
639 +- !(irq->active || irq->pending_latch)) {
640 +- resample = true;
641 +- }
642 +-
643 +- if (resample)
644 +- vgic_irq_set_phys_active(irq, false);
645 +- }
646 ++ /* Handle resampling for mapped interrupts if required */
647 ++ vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
648 +
649 + raw_spin_unlock(&irq->irq_lock);
650 + vgic_put_irq(vcpu->kvm, irq);
651 +diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
652 +index 111bff47e4710..42a6ac78fe959 100644
653 +--- a/arch/arm64/kvm/vgic/vgic.c
654 ++++ b/arch/arm64/kvm/vgic/vgic.c
655 +@@ -1022,3 +1022,41 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
656 +
657 + return map_is_active;
658 + }
659 ++
660 ++/*
661 ++ * Level-triggered mapped IRQs are special because we only observe rising
662 ++ * edges as input to the VGIC.
663 ++ *
664 ++ * If the guest never acked the interrupt we have to sample the physical
665 ++ * line and set the line level, because the device state could have changed
666 ++ * or we simply need to process the still pending interrupt later.
667 ++ *
668 ++ * We could also have entered the guest with the interrupt active+pending.
669 ++ * On the next exit, we need to re-evaluate the pending state, as it could
670 ++ * otherwise result in a spurious interrupt by injecting a now potentially
671 ++ * stale pending state.
672 ++ *
673 ++ * If this causes us to lower the level, we have to also clear the physical
674 ++ * active state, since we will otherwise never be told when the interrupt
675 ++ * becomes asserted again.
676 ++ *
677 ++ * Another case is when the interrupt requires a helping hand on
678 ++ * deactivation (no HW deactivation, for example).
679 ++ */
680 ++void vgic_irq_handle_resampling(struct vgic_irq *irq,
681 ++ bool lr_deactivated, bool lr_pending)
682 ++{
683 ++ if (vgic_irq_is_mapped_level(irq)) {
684 ++ bool resample = false;
685 ++
686 ++ if (unlikely(vgic_irq_needs_resampling(irq))) {
687 ++ resample = !(irq->active || irq->pending_latch);
688 ++ } else if (lr_pending || (lr_deactivated && irq->line_level)) {
689 ++ irq->line_level = vgic_get_phys_line_level(irq);
690 ++ resample = !irq->line_level;
691 ++ }
692 ++
693 ++ if (resample)
694 ++ vgic_irq_set_phys_active(irq, false);
695 ++ }
696 ++}
697 +diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
698 +index dc1f3d1657ee9..14a9218641f57 100644
699 +--- a/arch/arm64/kvm/vgic/vgic.h
700 ++++ b/arch/arm64/kvm/vgic/vgic.h
701 +@@ -169,6 +169,8 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
702 + bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
703 + unsigned long flags);
704 + void vgic_kick_vcpus(struct kvm *kvm);
705 ++void vgic_irq_handle_resampling(struct vgic_irq *irq,
706 ++ bool lr_deactivated, bool lr_pending);
707 +
708 + int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
709 + phys_addr_t addr, phys_addr_t alignment);
710 +diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
711 +index 29e946394fdb4..277d61a094637 100644
712 +--- a/arch/m68k/Kconfig.cpu
713 ++++ b/arch/m68k/Kconfig.cpu
714 +@@ -26,6 +26,7 @@ config COLDFIRE
715 + bool "Coldfire CPU family support"
716 + select ARCH_HAVE_CUSTOM_GPIO_H
717 + select CPU_HAS_NO_BITFIELDS
718 ++ select CPU_HAS_NO_CAS
719 + select CPU_HAS_NO_MULDIV64
720 + select GENERIC_CSUM
721 + select GPIOLIB
722 +@@ -39,6 +40,7 @@ config M68000
723 + bool
724 + depends on !MMU
725 + select CPU_HAS_NO_BITFIELDS
726 ++ select CPU_HAS_NO_CAS
727 + select CPU_HAS_NO_MULDIV64
728 + select CPU_HAS_NO_UNALIGNED
729 + select GENERIC_CSUM
730 +@@ -54,6 +56,7 @@ config M68000
731 + config MCPU32
732 + bool
733 + select CPU_HAS_NO_BITFIELDS
734 ++ select CPU_HAS_NO_CAS
735 + select CPU_HAS_NO_UNALIGNED
736 + select CPU_NO_EFFICIENT_FFS
737 + help
738 +@@ -383,7 +386,7 @@ config ADVANCED
739 +
740 + config RMW_INSNS
741 + bool "Use read-modify-write instructions"
742 +- depends on ADVANCED
743 ++ depends on ADVANCED && !CPU_HAS_NO_CAS
744 + help
745 + This allows to use certain instructions that work with indivisible
746 + read-modify-write bus cycles. While this is faster than the
747 +@@ -450,6 +453,9 @@ config M68K_L2_CACHE
748 + config CPU_HAS_NO_BITFIELDS
749 + bool
750 +
751 ++config CPU_HAS_NO_CAS
752 ++ bool
753 ++
754 + config CPU_HAS_NO_MULDIV64
755 + bool
756 +
757 +diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
758 +index 2ed841e941113..d03b6c4aa86b4 100644
759 +--- a/arch/m68k/coldfire/clk.c
760 ++++ b/arch/m68k/coldfire/clk.c
761 +@@ -78,7 +78,7 @@ int clk_enable(struct clk *clk)
762 + unsigned long flags;
763 +
764 + if (!clk)
765 +- return -EINVAL;
766 ++ return 0;
767 +
768 + spin_lock_irqsave(&clk_lock, flags);
769 + if ((clk->enabled++ == 0) && clk->clk_ops)
770 +diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
771 +index d2875e32abfca..79e55421cfb18 100644
772 +--- a/arch/m68k/emu/nfeth.c
773 ++++ b/arch/m68k/emu/nfeth.c
774 +@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
775 +
776 + for (i = 0; i < MAX_UNIT; i++) {
777 + if (nfeth_dev[i]) {
778 +- unregister_netdev(nfeth_dev[0]);
779 +- free_netdev(nfeth_dev[0]);
780 ++ unregister_netdev(nfeth_dev[i]);
781 ++ free_netdev(nfeth_dev[i]);
782 + }
783 + }
784 + free_irq(nfEtherIRQ, nfeth_interrupt);
785 +diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
786 +index 8637bf8a2f652..cfba83d230fde 100644
787 +--- a/arch/m68k/include/asm/atomic.h
788 ++++ b/arch/m68k/include/asm/atomic.h
789 +@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
790 + " casl %2,%1,%0\n" \
791 + " jne 1b" \
792 + : "+m" (*v), "=&d" (t), "=&d" (tmp) \
793 +- : "g" (i), "2" (arch_atomic_read(v))); \
794 ++ : "di" (i), "2" (arch_atomic_read(v))); \
795 + return t; \
796 + }
797 +
798 +@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
799 + " casl %2,%1,%0\n" \
800 + " jne 1b" \
801 + : "+m" (*v), "=&d" (t), "=&d" (tmp) \
802 +- : "g" (i), "2" (arch_atomic_read(v))); \
803 ++ : "di" (i), "2" (arch_atomic_read(v))); \
804 + return tmp; \
805 + }
806 +
807 +diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
808 +index 2d395998f524a..7ee49f5881d15 100644
809 +--- a/arch/parisc/boot/compressed/misc.c
810 ++++ b/arch/parisc/boot/compressed/misc.c
811 +@@ -26,7 +26,7 @@
812 + extern char input_data[];
813 + extern int input_len;
814 + /* output_len is inserted by the linker possibly at an unaligned address */
815 +-extern __le32 output_len __aligned(1);
816 ++extern char output_len;
817 + extern char _text, _end;
818 + extern char _bss, _ebss;
819 + extern char _startcode_end;
820 +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
821 +index 161a9e12bfb86..630eab0fa1760 100644
822 +--- a/arch/s390/include/asm/kvm_host.h
823 ++++ b/arch/s390/include/asm/kvm_host.h
824 +@@ -957,6 +957,7 @@ struct kvm_arch{
825 + atomic64_t cmma_dirty_pages;
826 + /* subset of available cpu features enabled by user space */
827 + DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
828 ++ /* indexed by vcpu_idx */
829 + DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
830 + struct kvm_s390_gisa_interrupt gisa_int;
831 + struct kvm_s390_pv pv;
832 +diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
833 +index 47bde5a20a41c..11213c8bfca56 100644
834 +--- a/arch/s390/include/asm/lowcore.h
835 ++++ b/arch/s390/include/asm/lowcore.h
836 +@@ -124,7 +124,8 @@ struct lowcore {
837 + /* Restart function and parameter. */
838 + __u64 restart_fn; /* 0x0370 */
839 + __u64 restart_data; /* 0x0378 */
840 +- __u64 restart_source; /* 0x0380 */
841 ++ __u32 restart_source; /* 0x0380 */
842 ++ __u32 restart_flags; /* 0x0384 */
843 +
844 + /* Address space pointer. */
845 + __u64 kernel_asce; /* 0x0388 */
846 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
847 +index ddc7858bbce40..879b8e3f609cd 100644
848 +--- a/arch/s390/include/asm/processor.h
849 ++++ b/arch/s390/include/asm/processor.h
850 +@@ -26,6 +26,8 @@
851 + #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
852 + #define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
853 +
854 ++#define RESTART_FLAG_CTLREGS _AC(1 << 0, U)
855 ++
856 + #ifndef __ASSEMBLY__
857 +
858 + #include <linux/cpumask.h>
859 +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
860 +index 77ff2130cb045..dc53b0452ce2f 100644
861 +--- a/arch/s390/kernel/asm-offsets.c
862 ++++ b/arch/s390/kernel/asm-offsets.c
863 +@@ -116,6 +116,7 @@ int main(void)
864 + OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
865 + OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
866 + OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
867 ++ OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
868 + OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
869 + OFFSET(__LC_USER_ASCE, lowcore, user_asce);
870 + OFFSET(__LC_LPP, lowcore, lpp);
871 +diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
872 +index 09b6c6402f9b7..05b765b8038eb 100644
873 +--- a/arch/s390/kernel/debug.c
874 ++++ b/arch/s390/kernel/debug.c
875 +@@ -24,6 +24,7 @@
876 + #include <linux/export.h>
877 + #include <linux/init.h>
878 + #include <linux/fs.h>
879 ++#include <linux/minmax.h>
880 + #include <linux/debugfs.h>
881 +
882 + #include <asm/debug.h>
883 +@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
884 + char *out_buf, const char *in_buf);
885 + static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
886 + char *out_buf, debug_sprintf_entry_t *curr_event);
887 ++static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
888 ++static void debug_events_append(debug_info_t *dest, debug_info_t *src);
889 +
890 + /* globals */
891 +
892 +@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
893 + goto out;
894 +
895 + rc->mode = mode & ~S_IFMT;
896 +-
897 +- /* create root directory */
898 +- rc->debugfs_root_entry = debugfs_create_dir(rc->name,
899 +- debug_debugfs_root_entry);
900 +-
901 +- /* append new element to linked list */
902 +- if (!debug_area_first) {
903 +- /* first element in list */
904 +- debug_area_first = rc;
905 +- rc->prev = NULL;
906 +- } else {
907 +- /* append element to end of list */
908 +- debug_area_last->next = rc;
909 +- rc->prev = debug_area_last;
910 +- }
911 +- debug_area_last = rc;
912 +- rc->next = NULL;
913 +-
914 + refcount_set(&rc->ref_count, 1);
915 + out:
916 + return rc;
917 +@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
918 + */
919 + static void debug_info_put(debug_info_t *db_info)
920 + {
921 +- int i;
922 +-
923 + if (!db_info)
924 + return;
925 +- if (refcount_dec_and_test(&db_info->ref_count)) {
926 +- for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
927 +- if (!db_info->views[i])
928 +- continue;
929 +- debugfs_remove(db_info->debugfs_entries[i]);
930 +- }
931 +- debugfs_remove(db_info->debugfs_root_entry);
932 +- if (db_info == debug_area_first)
933 +- debug_area_first = db_info->next;
934 +- if (db_info == debug_area_last)
935 +- debug_area_last = db_info->prev;
936 +- if (db_info->prev)
937 +- db_info->prev->next = db_info->next;
938 +- if (db_info->next)
939 +- db_info->next->prev = db_info->prev;
940 ++ if (refcount_dec_and_test(&db_info->ref_count))
941 + debug_info_free(db_info);
942 +- }
943 + }
944 +
945 + /*
946 +@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
947 + return 0; /* success */
948 + }
949 +
950 ++/* Create debugfs entries and add to internal list. */
951 ++static void _debug_register(debug_info_t *id)
952 ++{
953 ++ /* create root directory */
954 ++ id->debugfs_root_entry = debugfs_create_dir(id->name,
955 ++ debug_debugfs_root_entry);
956 ++
957 ++ /* append new element to linked list */
958 ++ if (!debug_area_first) {
959 ++ /* first element in list */
960 ++ debug_area_first = id;
961 ++ id->prev = NULL;
962 ++ } else {
963 ++ /* append element to end of list */
964 ++ debug_area_last->next = id;
965 ++ id->prev = debug_area_last;
966 ++ }
967 ++ debug_area_last = id;
968 ++ id->next = NULL;
969 ++
970 ++ debug_register_view(id, &debug_level_view);
971 ++ debug_register_view(id, &debug_flush_view);
972 ++ debug_register_view(id, &debug_pages_view);
973 ++}
974 ++
975 + /**
976 + * debug_register_mode() - creates and initializes debug area.
977 + *
978 +@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
979 + if ((uid != 0) || (gid != 0))
980 + pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
981 + BUG_ON(!initialized);
982 +- mutex_lock(&debug_mutex);
983 +
984 + /* create new debug_info */
985 + rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
986 +- if (!rc)
987 +- goto out;
988 +- debug_register_view(rc, &debug_level_view);
989 +- debug_register_view(rc, &debug_flush_view);
990 +- debug_register_view(rc, &debug_pages_view);
991 +-out:
992 +- if (!rc)
993 ++ if (rc) {
994 ++ mutex_lock(&debug_mutex);
995 ++ _debug_register(rc);
996 ++ mutex_unlock(&debug_mutex);
997 ++ } else {
998 + pr_err("Registering debug feature %s failed\n", name);
999 +- mutex_unlock(&debug_mutex);
1000 ++ }
1001 + return rc;
1002 + }
1003 + EXPORT_SYMBOL(debug_register_mode);
1004 +@@ -702,6 +692,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
1005 + }
1006 + EXPORT_SYMBOL(debug_register);
1007 +
1008 ++/* Remove debugfs entries and remove from internal list. */
1009 ++static void _debug_unregister(debug_info_t *id)
1010 ++{
1011 ++ int i;
1012 ++
1013 ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
1014 ++ if (!id->views[i])
1015 ++ continue;
1016 ++ debugfs_remove(id->debugfs_entries[i]);
1017 ++ }
1018 ++ debugfs_remove(id->debugfs_root_entry);
1019 ++ if (id == debug_area_first)
1020 ++ debug_area_first = id->next;
1021 ++ if (id == debug_area_last)
1022 ++ debug_area_last = id->prev;
1023 ++ if (id->prev)
1024 ++ id->prev->next = id->next;
1025 ++ if (id->next)
1026 ++ id->next->prev = id->prev;
1027 ++}
1028 ++
1029 + /**
1030 + * debug_unregister() - give back debug area.
1031 + *
1032 +@@ -715,8 +726,10 @@ void debug_unregister(debug_info_t *id)
1033 + if (!id)
1034 + return;
1035 + mutex_lock(&debug_mutex);
1036 +- debug_info_put(id);
1037 ++ _debug_unregister(id);
1038 + mutex_unlock(&debug_mutex);
1039 ++
1040 ++ debug_info_put(id);
1041 + }
1042 + EXPORT_SYMBOL(debug_unregister);
1043 +
1044 +@@ -726,35 +739,28 @@ EXPORT_SYMBOL(debug_unregister);
1045 + */
1046 + static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
1047 + {
1048 +- debug_entry_t ***new_areas;
1049 ++ debug_info_t *new_id;
1050 + unsigned long flags;
1051 +- int rc = 0;
1052 +
1053 + if (!id || (nr_areas <= 0) || (pages_per_area < 0))
1054 + return -EINVAL;
1055 +- if (pages_per_area > 0) {
1056 +- new_areas = debug_areas_alloc(pages_per_area, nr_areas);
1057 +- if (!new_areas) {
1058 +- pr_info("Allocating memory for %i pages failed\n",
1059 +- pages_per_area);
1060 +- rc = -ENOMEM;
1061 +- goto out;
1062 +- }
1063 +- } else {
1064 +- new_areas = NULL;
1065 ++
1066 ++ new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
1067 ++ id->level, ALL_AREAS);
1068 ++ if (!new_id) {
1069 ++ pr_info("Allocating memory for %i pages failed\n",
1070 ++ pages_per_area);
1071 ++ return -ENOMEM;
1072 + }
1073 ++
1074 + spin_lock_irqsave(&id->lock, flags);
1075 +- debug_areas_free(id);
1076 +- id->areas = new_areas;
1077 +- id->nr_areas = nr_areas;
1078 +- id->pages_per_area = pages_per_area;
1079 +- id->active_area = 0;
1080 +- memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
1081 +- memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
1082 ++ debug_events_append(new_id, id);
1083 ++ debug_areas_swap(new_id, id);
1084 ++ debug_info_free(new_id);
1085 + spin_unlock_irqrestore(&id->lock, flags);
1086 + pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
1087 +-out:
1088 +- return rc;
1089 ++
1090 ++ return 0;
1091 + }
1092 +
1093 + /**
1094 +@@ -821,6 +827,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
1095 + id->active_entries[id->active_area]);
1096 + }
1097 +
1098 ++/* Swap debug areas of a and b. */
1099 ++static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
1100 ++{
1101 ++ swap(a->nr_areas, b->nr_areas);
1102 ++ swap(a->pages_per_area, b->pages_per_area);
1103 ++ swap(a->areas, b->areas);
1104 ++ swap(a->active_area, b->active_area);
1105 ++ swap(a->active_pages, b->active_pages);
1106 ++ swap(a->active_entries, b->active_entries);
1107 ++}
1108 ++
1109 ++/* Append all debug events in active area from source to destination log. */
1110 ++static void debug_events_append(debug_info_t *dest, debug_info_t *src)
1111 ++{
1112 ++ debug_entry_t *from, *to, *last;
1113 ++
1114 ++ if (!src->areas || !dest->areas)
1115 ++ return;
1116 ++
1117 ++ /* Loop over all entries in src, starting with oldest. */
1118 ++ from = get_active_entry(src);
1119 ++ last = from;
1120 ++ do {
1121 ++ if (from->clock != 0LL) {
1122 ++ to = get_active_entry(dest);
1123 ++ memset(to, 0, dest->entry_size);
1124 ++ memcpy(to, from, min(src->entry_size,
1125 ++ dest->entry_size));
1126 ++ proceed_active_entry(dest);
1127 ++ }
1128 ++
1129 ++ proceed_active_entry(src);
1130 ++ from = get_active_entry(src);
1131 ++ } while (from != last);
1132 ++}
1133 ++
1134 + /*
1135 + * debug_finish_entry:
1136 + * - set timestamp, caller address, cpu number etc.
1137 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1138 +index 5a2f70cbd3a9d..b9716a7e326d0 100644
1139 +--- a/arch/s390/kernel/entry.S
1140 ++++ b/arch/s390/kernel/entry.S
1141 +@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
1142 + 4: j 4b
1143 + ENDPROC(mcck_int_handler)
1144 +
1145 +-#
1146 +-# PSW restart interrupt handler
1147 +-#
1148 + ENTRY(restart_int_handler)
1149 + ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
1150 + stg %r15,__LC_SAVE_AREA_RESTART
1151 ++ TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
1152 ++ jz 0f
1153 ++ la %r15,4095
1154 ++ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
1155 ++0: larl %r15,.Lstosm_tmp
1156 ++ stosm 0(%r15),0x04 # turn dat on, keep irqs off
1157 + lg %r15,__LC_RESTART_STACK
1158 + xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
1159 + stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
1160 +@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
1161 + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
1162 + lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
1163 + lg %r2,__LC_RESTART_DATA
1164 +- lg %r3,__LC_RESTART_SOURCE
1165 ++ lgf %r3,__LC_RESTART_SOURCE
1166 + ltgr %r3,%r3 # test source cpu address
1167 + jm 1f # negative -> skip source stop
1168 + 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
1169 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1170 +index 50e2c21e0ec94..911cd39123514 100644
1171 +--- a/arch/s390/kernel/ipl.c
1172 ++++ b/arch/s390/kernel/ipl.c
1173 +@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
1174 +
1175 + int diag308(unsigned long subcode, void *addr)
1176 + {
1177 +- if (IS_ENABLED(CONFIG_KASAN))
1178 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1179 + diag_stat_inc(DIAG_STAT_X308);
1180 + return __diag308(subcode, addr);
1181 + }
1182 +@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
1183 +
1184 + static void __do_restart(void *ignore)
1185 + {
1186 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1187 + smp_send_stop();
1188 + #ifdef CONFIG_CRASH_DUMP
1189 + crash_kexec(NULL);
1190 +diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
1191 +index 1005a6935fbe3..c1fbc979e0e8b 100644
1192 +--- a/arch/s390/kernel/machine_kexec.c
1193 ++++ b/arch/s390/kernel/machine_kexec.c
1194 +@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
1195 + */
1196 + static void __machine_kexec(void *data)
1197 + {
1198 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1199 + pfault_fini();
1200 + tracing_off();
1201 + debug_locks_off();
1202 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1203 +index ff0f9e8389162..ee23908f1b960 100644
1204 +--- a/arch/s390/kernel/setup.c
1205 ++++ b/arch/s390/kernel/setup.c
1206 +@@ -421,7 +421,7 @@ static void __init setup_lowcore_dat_off(void)
1207 + lc->restart_stack = (unsigned long) restart_stack;
1208 + lc->restart_fn = (unsigned long) do_restart;
1209 + lc->restart_data = 0;
1210 +- lc->restart_source = -1UL;
1211 ++ lc->restart_source = -1U;
1212 +
1213 + mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
1214 + if (!mcck_stack)
1215 +@@ -450,12 +450,19 @@ static void __init setup_lowcore_dat_off(void)
1216 +
1217 + static void __init setup_lowcore_dat_on(void)
1218 + {
1219 ++ struct lowcore *lc = lowcore_ptr[0];
1220 ++
1221 + __ctl_clear_bit(0, 28);
1222 + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
1223 + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
1224 + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
1225 + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
1226 ++ __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
1227 + __ctl_set_bit(0, 28);
1228 ++ mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
1229 ++ mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
1230 ++ memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
1231 ++ sizeof(S390_lowcore.cregs_save_area));
1232 + }
1233 +
1234 + static struct resource code_resource = {
1235 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1236 +index 8984711f72ede..8e8ace899407c 100644
1237 +--- a/arch/s390/kernel/smp.c
1238 ++++ b/arch/s390/kernel/smp.c
1239 +@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1240 + cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
1241 + cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1242 + lc->cpu_nr = cpu;
1243 ++ lc->restart_flags = RESTART_FLAG_CTLREGS;
1244 + lc->spinlock_lockval = arch_spin_lockval(cpu);
1245 + lc->spinlock_index = 0;
1246 + lc->percpu_offset = __per_cpu_offset[cpu];
1247 +@@ -297,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
1248 + lc->restart_stack = lc->nodat_stack;
1249 + lc->restart_fn = (unsigned long) func;
1250 + lc->restart_data = (unsigned long) data;
1251 +- lc->restart_source = -1UL;
1252 ++ lc->restart_source = -1U;
1253 + pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
1254 + }
1255 +
1256 +@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
1257 + func(data); /* should not return */
1258 + }
1259 +
1260 +-static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
1261 +- pcpu_delegate_fn *func,
1262 +- void *data, unsigned long stack)
1263 ++static void pcpu_delegate(struct pcpu *pcpu,
1264 ++ pcpu_delegate_fn *func,
1265 ++ void *data, unsigned long stack)
1266 + {
1267 + struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
1268 +- unsigned long source_cpu = stap();
1269 ++ unsigned int source_cpu = stap();
1270 +
1271 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
1272 + if (pcpu->address == source_cpu) {
1273 +@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
1274 + __ctl_load(cregs, 0, 15);
1275 + }
1276 +
1277 ++static DEFINE_SPINLOCK(ctl_lock);
1278 ++static unsigned long ctlreg;
1279 ++
1280 + /*
1281 + * Set a bit in a control register of all cpus
1282 + */
1283 +@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
1284 + {
1285 + struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
1286 +
1287 ++ spin_lock(&ctl_lock);
1288 ++ memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
1289 ++ __set_bit(bit, &ctlreg);
1290 ++ memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
1291 ++ spin_unlock(&ctl_lock);
1292 + on_each_cpu(smp_ctl_bit_callback, &parms, 1);
1293 + }
1294 + EXPORT_SYMBOL(smp_ctl_set_bit);
1295 +@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
1296 + {
1297 + struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
1298 +
1299 ++ spin_lock(&ctl_lock);
1300 ++ memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
1301 ++ __clear_bit(bit, &ctlreg);
1302 ++ memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
1303 ++ spin_unlock(&ctl_lock);
1304 + on_each_cpu(smp_ctl_bit_callback, &parms, 1);
1305 + }
1306 + EXPORT_SYMBOL(smp_ctl_clear_bit);
1307 +@@ -895,14 +909,13 @@ static void smp_init_secondary(void)
1308 + /*
1309 + * Activate a secondary processor.
1310 + */
1311 +-static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
1312 ++static void smp_start_secondary(void *cpuvoid)
1313 + {
1314 + S390_lowcore.restart_stack = (unsigned long) restart_stack;
1315 + S390_lowcore.restart_fn = (unsigned long) do_restart;
1316 + S390_lowcore.restart_data = 0;
1317 +- S390_lowcore.restart_source = -1UL;
1318 +- __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
1319 +- __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
1320 ++ S390_lowcore.restart_source = -1U;
1321 ++ S390_lowcore.restart_flags = 0;
1322 + call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
1323 + }
1324 +
1325 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
1326 +index d548d60caed25..16256e17a544a 100644
1327 +--- a/arch/s390/kvm/interrupt.c
1328 ++++ b/arch/s390/kvm/interrupt.c
1329 +@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
1330 + static void __set_cpu_idle(struct kvm_vcpu *vcpu)
1331 + {
1332 + kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1333 +- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1334 ++ set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1335 + }
1336 +
1337 + static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
1338 + {
1339 + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1340 +- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1341 ++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1342 + }
1343 +
1344 + static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
1345 +@@ -3050,18 +3050,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
1346 +
1347 + static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
1348 + {
1349 +- int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
1350 ++ int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
1351 + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1352 + struct kvm_vcpu *vcpu;
1353 +
1354 +- for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
1355 +- vcpu = kvm_get_vcpu(kvm, vcpu_id);
1356 ++ for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
1357 ++ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1358 + if (psw_ioint_disabled(vcpu))
1359 + continue;
1360 + deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
1361 + if (deliverable_mask) {
1362 + /* lately kicked but not yet running */
1363 +- if (test_and_set_bit(vcpu_id, gi->kicked_mask))
1364 ++ if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
1365 + return;
1366 + kvm_s390_vcpu_wakeup(vcpu);
1367 + return;
1368 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1369 +index 4527ac7b5961d..8580543c5bc33 100644
1370 +--- a/arch/s390/kvm/kvm-s390.c
1371 ++++ b/arch/s390/kvm/kvm-s390.c
1372 +@@ -4044,7 +4044,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1373 + kvm_s390_patch_guest_per_regs(vcpu);
1374 + }
1375 +
1376 +- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
1377 ++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
1378 +
1379 + vcpu->arch.sie_block->icptcode = 0;
1380 + cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1381 +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
1382 +index 9fad25109b0dd..ecd741ee3276e 100644
1383 +--- a/arch/s390/kvm/kvm-s390.h
1384 ++++ b/arch/s390/kvm/kvm-s390.h
1385 +@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
1386 +
1387 + static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
1388 + {
1389 +- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1390 ++ return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1391 + }
1392 +
1393 + static inline int kvm_is_ucontrol(struct kvm *kvm)
1394 +diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
1395 +index a0fdc6dc5f9d0..cc3af046c14e5 100644
1396 +--- a/arch/s390/mm/kasan_init.c
1397 ++++ b/arch/s390/mm/kasan_init.c
1398 +@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
1399 + sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
1400 + }
1401 +
1402 ++ /*
1403 ++ * The first 1MB of 1:1 mapping is mapped with 4KB pages
1404 ++ */
1405 + while (address < end) {
1406 + pg_dir = pgd_offset_k(address);
1407 + if (pgd_none(*pg_dir)) {
1408 +@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
1409 +
1410 + pm_dir = pmd_offset(pu_dir, address);
1411 + if (pmd_none(*pm_dir)) {
1412 +- if (mode == POPULATE_ZERO_SHADOW &&
1413 +- IS_ALIGNED(address, PMD_SIZE) &&
1414 ++ if (IS_ALIGNED(address, PMD_SIZE) &&
1415 + end - address >= PMD_SIZE) {
1416 +- pmd_populate(&init_mm, pm_dir,
1417 +- kasan_early_shadow_pte);
1418 +- address = (address + PMD_SIZE) & PMD_MASK;
1419 +- continue;
1420 +- }
1421 +- /* the first megabyte of 1:1 is mapped with 4k pages */
1422 +- if (has_edat && address && end - address >= PMD_SIZE &&
1423 +- mode != POPULATE_ZERO_SHADOW) {
1424 +- void *page;
1425 +-
1426 +- if (mode == POPULATE_ONE2ONE) {
1427 +- page = (void *)address;
1428 +- } else {
1429 +- page = kasan_early_alloc_segment();
1430 +- memset(page, 0, _SEGMENT_SIZE);
1431 ++ if (mode == POPULATE_ZERO_SHADOW) {
1432 ++ pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
1433 ++ address = (address + PMD_SIZE) & PMD_MASK;
1434 ++ continue;
1435 ++ } else if (has_edat && address) {
1436 ++ void *page;
1437 ++
1438 ++ if (mode == POPULATE_ONE2ONE) {
1439 ++ page = (void *)address;
1440 ++ } else {
1441 ++ page = kasan_early_alloc_segment();
1442 ++ memset(page, 0, _SEGMENT_SIZE);
1443 ++ }
1444 ++ pmd_val(*pm_dir) = __pa(page) | sgt_prot;
1445 ++ address = (address + PMD_SIZE) & PMD_MASK;
1446 ++ continue;
1447 + }
1448 +- pmd_val(*pm_dir) = __pa(page) | sgt_prot;
1449 +- address = (address + PMD_SIZE) & PMD_MASK;
1450 +- continue;
1451 + }
1452 +-
1453 + pt_dir = kasan_early_pte_alloc();
1454 + pmd_populate(&init_mm, pm_dir, pt_dir);
1455 + } else if (pmd_large(*pm_dir)) {
1456 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
1457 +index 8fcb7ecb7225a..77cd965cffefa 100644
1458 +--- a/arch/s390/pci/pci.c
1459 ++++ b/arch/s390/pci/pci.c
1460 +@@ -661,9 +661,10 @@ int zpci_enable_device(struct zpci_dev *zdev)
1461 + {
1462 + int rc;
1463 +
1464 +- rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
1465 +- if (rc)
1466 ++ if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
1467 ++ rc = -EIO;
1468 + goto out;
1469 ++ }
1470 +
1471 + rc = zpci_dma_init_device(zdev);
1472 + if (rc)
1473 +@@ -684,7 +685,7 @@ int zpci_disable_device(struct zpci_dev *zdev)
1474 + * The zPCI function may already be disabled by the platform, this is
1475 + * detected in clp_disable_fh() which becomes a no-op.
1476 + */
1477 +- return clp_disable_fh(zdev);
1478 ++ return clp_disable_fh(zdev) ? -EIO : 0;
1479 + }
1480 +
1481 + /**
1482 +diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
1483 +index d3331596ddbe1..0a0e8b8293bef 100644
1484 +--- a/arch/s390/pci/pci_clp.c
1485 ++++ b/arch/s390/pci/pci_clp.c
1486 +@@ -213,15 +213,19 @@ out:
1487 + }
1488 +
1489 + static int clp_refresh_fh(u32 fid);
1490 +-/*
1491 +- * Enable/Disable a given PCI function and update its function handle if
1492 +- * necessary
1493 ++/**
1494 ++ * clp_set_pci_fn() - Execute a command on a PCI function
1495 ++ * @zdev: Function that will be affected
1496 ++ * @nr_dma_as: DMA address space number
1497 ++ * @command: The command code to execute
1498 ++ *
1499 ++ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
1500 ++ * > 0 for non-success platform responses
1501 + */
1502 + static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
1503 + {
1504 + struct clp_req_rsp_set_pci *rrb;
1505 + int rc, retries = 100;
1506 +- u32 fid = zdev->fid;
1507 +
1508 + rrb = clp_alloc_block(GFP_KERNEL);
1509 + if (!rrb)
1510 +@@ -245,17 +249,16 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
1511 + }
1512 + } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
1513 +
1514 +- if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
1515 +- zpci_err("Set PCI FN:\n");
1516 +- zpci_err_clp(rrb->response.hdr.rsp, rc);
1517 +- }
1518 +-
1519 + if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
1520 + zdev->fh = rrb->response.fh;
1521 +- } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
1522 +- rrb->response.fh == 0) {
1523 ++ } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
1524 + /* Function is already in desired state - update handle */
1525 +- rc = clp_refresh_fh(fid);
1526 ++ rc = clp_refresh_fh(zdev->fid);
1527 ++ } else {
1528 ++ zpci_err("Set PCI FN:\n");
1529 ++ zpci_err_clp(rrb->response.hdr.rsp, rc);
1530 ++ if (!rc)
1531 ++ rc = rrb->response.hdr.rsp;
1532 + }
1533 + clp_free_block(rrb);
1534 + return rc;
1535 +@@ -301,17 +304,13 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
1536 +
1537 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
1538 + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
1539 +- if (rc)
1540 +- goto out;
1541 +-
1542 +- if (zpci_use_mio(zdev)) {
1543 ++ if (!rc && zpci_use_mio(zdev)) {
1544 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
1545 + zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
1546 + zdev->fid, zdev->fh, rc);
1547 + if (rc)
1548 + clp_disable_fh(zdev);
1549 + }
1550 +-out:
1551 + return rc;
1552 + }
1553 +
1554 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
1555 +index 2144e54a6c892..388643ca2177e 100644
1556 +--- a/arch/x86/crypto/aesni-intel_glue.c
1557 ++++ b/arch/x86/crypto/aesni-intel_glue.c
1558 +@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
1559 + return -EINVAL;
1560 +
1561 + err = skcipher_walk_virt(&walk, req, false);
1562 ++ if (err)
1563 ++ return err;
1564 +
1565 + if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
1566 + int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
1567 +@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
1568 + skcipher_request_set_crypt(&subreq, req->src, req->dst,
1569 + blocks * AES_BLOCK_SIZE, req->iv);
1570 + req = &subreq;
1571 ++
1572 + err = skcipher_walk_virt(&walk, req, false);
1573 ++ if (err)
1574 ++ return err;
1575 + } else {
1576 + tail = 0;
1577 + }
1578 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
1579 +index c682b09b18fa0..482a9931d1e65 100644
1580 +--- a/arch/x86/events/intel/uncore_snbep.c
1581 ++++ b/arch/x86/events/intel/uncore_snbep.c
1582 +@@ -3838,26 +3838,32 @@ clear_attr_update:
1583 + return ret;
1584 + }
1585 +
1586 +-static int skx_iio_set_mapping(struct intel_uncore_type *type)
1587 +-{
1588 +- return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
1589 +-}
1590 +-
1591 +-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
1592 ++static void
1593 ++pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
1594 + {
1595 +- struct attribute **attr = skx_iio_mapping_group.attrs;
1596 ++ struct attribute **attr = ag->attrs;
1597 +
1598 + if (!attr)
1599 + return;
1600 +
1601 + for (; *attr; attr++)
1602 + kfree((*attr)->name);
1603 +- kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
1604 +- kfree(skx_iio_mapping_group.attrs);
1605 +- skx_iio_mapping_group.attrs = NULL;
1606 ++ kfree(attr_to_ext_attr(*ag->attrs));
1607 ++ kfree(ag->attrs);
1608 ++ ag->attrs = NULL;
1609 + kfree(type->topology);
1610 + }
1611 +
1612 ++static int skx_iio_set_mapping(struct intel_uncore_type *type)
1613 ++{
1614 ++ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
1615 ++}
1616 ++
1617 ++static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
1618 ++{
1619 ++ pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
1620 ++}
1621 ++
1622 + static struct intel_uncore_type skx_uncore_iio = {
1623 + .name = "iio",
1624 + .num_counters = 4,
1625 +@@ -4501,6 +4507,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
1626 + return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
1627 + }
1628 +
1629 ++static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
1630 ++{
1631 ++ pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
1632 ++}
1633 ++
1634 + static struct intel_uncore_type snr_uncore_iio = {
1635 + .name = "iio",
1636 + .num_counters = 4,
1637 +@@ -4517,7 +4528,7 @@ static struct intel_uncore_type snr_uncore_iio = {
1638 + .attr_update = snr_iio_attr_update,
1639 + .get_topology = snr_iio_get_topology,
1640 + .set_mapping = snr_iio_set_mapping,
1641 +- .cleanup_mapping = skx_iio_cleanup_mapping,
1642 ++ .cleanup_mapping = snr_iio_cleanup_mapping,
1643 + };
1644 +
1645 + static struct intel_uncore_type snr_uncore_irp = {
1646 +@@ -5092,6 +5103,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
1647 + return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
1648 + }
1649 +
1650 ++static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
1651 ++{
1652 ++ pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
1653 ++}
1654 ++
1655 + static struct intel_uncore_type icx_uncore_iio = {
1656 + .name = "iio",
1657 + .num_counters = 4,
1658 +@@ -5109,7 +5125,7 @@ static struct intel_uncore_type icx_uncore_iio = {
1659 + .attr_update = icx_iio_attr_update,
1660 + .get_topology = icx_iio_get_topology,
1661 + .set_mapping = icx_iio_set_mapping,
1662 +- .cleanup_mapping = skx_iio_cleanup_mapping,
1663 ++ .cleanup_mapping = icx_iio_cleanup_mapping,
1664 + };
1665 +
1666 + static struct intel_uncore_type icx_uncore_irp = {
1667 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
1668 +index 0607ec4f50914..da9321548f6f1 100644
1669 +--- a/arch/x86/include/asm/mce.h
1670 ++++ b/arch/x86/include/asm/mce.h
1671 +@@ -265,6 +265,7 @@ enum mcp_flags {
1672 + MCP_TIMESTAMP = BIT(0), /* log time stamp */
1673 + MCP_UC = BIT(1), /* log uncorrected errors */
1674 + MCP_DONTLOG = BIT(2), /* only clear, don't log */
1675 ++ MCP_QUEUE_LOG = BIT(3), /* only queue to genpool */
1676 + };
1677 + bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
1678 +
1679 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
1680 +index 22791aadc085c..8cb7816d03b4c 100644
1681 +--- a/arch/x86/kernel/cpu/mce/core.c
1682 ++++ b/arch/x86/kernel/cpu/mce/core.c
1683 +@@ -817,7 +817,10 @@ log_it:
1684 + if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
1685 + goto clear_it;
1686 +
1687 +- mce_log(&m);
1688 ++ if (flags & MCP_QUEUE_LOG)
1689 ++ mce_gen_pool_add(&m);
1690 ++ else
1691 ++ mce_log(&m);
1692 +
1693 + clear_it:
1694 + /*
1695 +@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
1696 + m_fl = MCP_DONTLOG;
1697 +
1698 + /*
1699 +- * Log the machine checks left over from the previous reset.
1700 ++ * Log the machine checks left over from the previous reset. Log them
1701 ++ * only, do not start processing them. That will happen in mcheck_late_init()
1702 ++ * when all consumers have been registered on the notifier chain.
1703 + */
1704 + bitmap_fill(all_banks, MAX_NR_BANKS);
1705 +- machine_check_poll(MCP_UC | m_fl, &all_banks);
1706 ++ machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1707 +
1708 + cr4_set_bits(X86_CR4_MCE);
1709 +
1710 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
1711 +index 47b7652702397..c268fb59f7794 100644
1712 +--- a/arch/x86/kvm/mmu/mmu.c
1713 ++++ b/arch/x86/kvm/mmu/mmu.c
1714 +@@ -323,12 +323,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
1715 + static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1716 + struct x86_exception *exception)
1717 + {
1718 +- /* Check if guest physical address doesn't exceed guest maximum */
1719 +- if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
1720 +- exception->error_code |= PFERR_RSVD_MASK;
1721 +- return UNMAPPED_GVA;
1722 +- }
1723 +-
1724 + return gpa;
1725 + }
1726 +
1727 +@@ -2852,6 +2846,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
1728 + kvm_pfn_t pfn, int max_level)
1729 + {
1730 + struct kvm_lpage_info *linfo;
1731 ++ int host_level;
1732 +
1733 + max_level = min(max_level, max_huge_page_level);
1734 + for ( ; max_level > PG_LEVEL_4K; max_level--) {
1735 +@@ -2863,7 +2858,8 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
1736 + if (max_level == PG_LEVEL_4K)
1737 + return PG_LEVEL_4K;
1738 +
1739 +- return host_pfn_mapping_level(kvm, gfn, pfn, slot);
1740 ++ host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
1741 ++ return min(host_level, max_level);
1742 + }
1743 +
1744 + int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
1745 +@@ -2887,17 +2883,12 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
1746 + if (!slot)
1747 + return PG_LEVEL_4K;
1748 +
1749 +- level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
1750 +- if (level == PG_LEVEL_4K)
1751 +- return level;
1752 +-
1753 +- *req_level = level = min(level, max_level);
1754 +-
1755 + /*
1756 + * Enforce the iTLB multihit workaround after capturing the requested
1757 + * level, which will be used to do precise, accurate accounting.
1758 + */
1759 +- if (huge_page_disallowed)
1760 ++ *req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
1761 ++ if (level == PG_LEVEL_4K || huge_page_disallowed)
1762 + return PG_LEVEL_4K;
1763 +
1764 + /*
1765 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
1766 +index d80cb122b5f38..0a1fa42d03aa6 100644
1767 +--- a/arch/x86/kvm/mmu/tdp_mmu.c
1768 ++++ b/arch/x86/kvm/mmu/tdp_mmu.c
1769 +@@ -412,6 +412,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1770 + bool was_leaf = was_present && is_last_spte(old_spte, level);
1771 + bool is_leaf = is_present && is_last_spte(new_spte, level);
1772 + bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
1773 ++ bool was_large, is_large;
1774 +
1775 + WARN_ON(level > PT64_ROOT_MAX_LEVEL);
1776 + WARN_ON(level < PG_LEVEL_4K);
1777 +@@ -445,13 +446,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1778 +
1779 + trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
1780 +
1781 +- if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
1782 +- if (is_large_pte(old_spte))
1783 +- atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
1784 +- else
1785 +- atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
1786 +- }
1787 +-
1788 + /*
1789 + * The only times a SPTE should be changed from a non-present to
1790 + * non-present state is when an MMIO entry is installed/modified/
1791 +@@ -477,6 +471,18 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1792 + return;
1793 + }
1794 +
1795 ++ /*
1796 ++ * Update large page stats if a large page is being zapped, created, or
1797 ++ * is replacing an existing shadow page.
1798 ++ */
1799 ++ was_large = was_leaf && is_large_pte(old_spte);
1800 ++ is_large = is_leaf && is_large_pte(new_spte);
1801 ++ if (was_large != is_large) {
1802 ++ if (was_large)
1803 ++ atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
1804 ++ else
1805 ++ atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
1806 ++ }
1807 +
1808 + if (was_leaf && is_dirty_spte(old_spte) &&
1809 + (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
1810 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1811 +index b3f77d18eb5aa..ac1803dac4357 100644
1812 +--- a/arch/x86/kvm/vmx/nested.c
1813 ++++ b/arch/x86/kvm/vmx/nested.c
1814 +@@ -2223,12 +2223,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1815 + ~PIN_BASED_VMX_PREEMPTION_TIMER);
1816 +
1817 + /* Posted interrupts setting is only taken from vmcs12. */
1818 +- if (nested_cpu_has_posted_intr(vmcs12)) {
1819 ++ vmx->nested.pi_pending = false;
1820 ++ if (nested_cpu_has_posted_intr(vmcs12))
1821 + vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
1822 +- vmx->nested.pi_pending = false;
1823 +- } else {
1824 ++ else
1825 + exec_control &= ~PIN_BASED_POSTED_INTR;
1826 +- }
1827 + pin_controls_set(vmx, exec_control);
1828 +
1829 + /*
1830 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1831 +index 927a552393b96..256f8cab4b8b4 100644
1832 +--- a/arch/x86/kvm/vmx/vmx.c
1833 ++++ b/arch/x86/kvm/vmx/vmx.c
1834 +@@ -6368,6 +6368,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
1835 + {
1836 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1837 +
1838 ++ if (vmx->emulation_required)
1839 ++ return;
1840 ++
1841 + if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
1842 + handle_external_interrupt_irqoff(vcpu);
1843 + else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
1844 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1845 +index e5d5c5ed7dd43..7ec7c2dce5065 100644
1846 +--- a/arch/x86/kvm/x86.c
1847 ++++ b/arch/x86/kvm/x86.c
1848 +@@ -3316,6 +3316,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1849 + if (!msr_info->host_initiated) {
1850 + s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
1851 + adjust_tsc_offset_guest(vcpu, adj);
1852 ++ /* Before back to guest, tsc_timestamp must be adjusted
1853 ++ * as well, otherwise guest's percpu pvclock time could jump.
1854 ++ */
1855 ++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1856 + }
1857 + vcpu->arch.ia32_tsc_adjust_msr = data;
1858 + }
1859 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1860 +index 7279559185630..673a634eadd9f 100644
1861 +--- a/block/bfq-iosched.c
1862 ++++ b/block/bfq-iosched.c
1863 +@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
1864 + __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1865 + if (__rq && elv_bio_merge_ok(__rq, bio)) {
1866 + *req = __rq;
1867 ++
1868 ++ if (blk_discard_mergable(__rq))
1869 ++ return ELEVATOR_DISCARD_MERGE;
1870 + return ELEVATOR_FRONT_MERGE;
1871 + }
1872 +
1873 +diff --git a/block/bio.c b/block/bio.c
1874 +index 1fab762e079be..d95e3456ba0c5 100644
1875 +--- a/block/bio.c
1876 ++++ b/block/bio.c
1877 +@@ -979,6 +979,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
1878 + return 0;
1879 + }
1880 +
1881 ++static void bio_put_pages(struct page **pages, size_t size, size_t off)
1882 ++{
1883 ++ size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
1884 ++
1885 ++ for (i = 0; i < nr; i++)
1886 ++ put_page(pages[i]);
1887 ++}
1888 ++
1889 + #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
1890 +
1891 + /**
1892 +@@ -1023,8 +1031,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1893 + if (same_page)
1894 + put_page(page);
1895 + } else {
1896 +- if (WARN_ON_ONCE(bio_full(bio, len)))
1897 +- return -EINVAL;
1898 ++ if (WARN_ON_ONCE(bio_full(bio, len))) {
1899 ++ bio_put_pages(pages + i, left, offset);
1900 ++ return -EINVAL;
1901 ++ }
1902 + __bio_add_page(bio, page, len, offset);
1903 + }
1904 + offset = 0;
1905 +@@ -1069,6 +1079,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1906 + len = min_t(size_t, PAGE_SIZE - offset, left);
1907 + if (bio_add_hw_page(q, bio, page, len, offset,
1908 + max_append_sectors, &same_page) != len) {
1909 ++ bio_put_pages(pages + i, left, offset);
1910 + ret = -EINVAL;
1911 + break;
1912 + }
1913 +diff --git a/block/blk-crypto.c b/block/blk-crypto.c
1914 +index c5bdaafffa29f..103c2e2d50d67 100644
1915 +--- a/block/blk-crypto.c
1916 ++++ b/block/blk-crypto.c
1917 +@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
1918 + if (mode->keysize == 0)
1919 + return -EINVAL;
1920 +
1921 +- if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
1922 ++ if (dun_bytes == 0 || dun_bytes > mode->ivsize)
1923 + return -EINVAL;
1924 +
1925 + if (!is_power_of_2(data_unit_size))
1926 +diff --git a/block/blk-merge.c b/block/blk-merge.c
1927 +index a11b3b53717ef..eeba8422ae823 100644
1928 +--- a/block/blk-merge.c
1929 ++++ b/block/blk-merge.c
1930 +@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
1931 + trace_block_split(split, (*bio)->bi_iter.bi_sector);
1932 + submit_bio_noacct(*bio);
1933 + *bio = split;
1934 ++
1935 ++ blk_throtl_charge_bio_split(*bio);
1936 + }
1937 + }
1938 +
1939 +@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
1940 + }
1941 + }
1942 +
1943 +-/*
1944 +- * Two cases of handling DISCARD merge:
1945 +- * If max_discard_segments > 1, the driver takes every bio
1946 +- * as a range and send them to controller together. The ranges
1947 +- * needn't to be contiguous.
1948 +- * Otherwise, the bios/requests will be handled as same as
1949 +- * others which should be contiguous.
1950 +- */
1951 +-static inline bool blk_discard_mergable(struct request *req)
1952 +-{
1953 +- if (req_op(req) == REQ_OP_DISCARD &&
1954 +- queue_max_discard_segments(req->q) > 1)
1955 +- return true;
1956 +- return false;
1957 +-}
1958 +-
1959 + static enum elv_merge blk_try_req_merge(struct request *req,
1960 + struct request *next)
1961 + {
1962 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
1963 +index b1b22d863bdf8..55c49015e5333 100644
1964 +--- a/block/blk-throttle.c
1965 ++++ b/block/blk-throttle.c
1966 +@@ -178,6 +178,9 @@ struct throtl_grp {
1967 + unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
1968 + unsigned long bio_cnt_reset_time;
1969 +
1970 ++ atomic_t io_split_cnt[2];
1971 ++ atomic_t last_io_split_cnt[2];
1972 ++
1973 + struct blkg_rwstat stat_bytes;
1974 + struct blkg_rwstat stat_ios;
1975 + };
1976 +@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
1977 + tg->bytes_disp[rw] = 0;
1978 + tg->io_disp[rw] = 0;
1979 +
1980 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1981 ++
1982 + /*
1983 + * Previous slice has expired. We must have trimmed it after last
1984 + * bio dispatch. That means since start of last slice, we never used
1985 +@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
1986 + tg->io_disp[rw] = 0;
1987 + tg->slice_start[rw] = jiffies;
1988 + tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
1989 ++
1990 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1991 ++
1992 + throtl_log(&tg->service_queue,
1993 + "[%c] new slice start=%lu end=%lu jiffies=%lu",
1994 + rw == READ ? 'R' : 'W', tg->slice_start[rw],
1995 +@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1996 + jiffies + tg->td->throtl_slice);
1997 + }
1998 +
1999 ++ if (iops_limit != UINT_MAX)
2000 ++ tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
2001 ++
2002 + if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
2003 + tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
2004 + if (wait)
2005 +@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
2006 + }
2007 +
2008 + if (tg->iops[READ][LIMIT_LOW]) {
2009 ++ tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
2010 + iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2011 + if (iops >= tg->iops[READ][LIMIT_LOW])
2012 + tg->last_low_overflow_time[READ] = now;
2013 + }
2014 +
2015 + if (tg->iops[WRITE][LIMIT_LOW]) {
2016 ++ tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
2017 + iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2018 + if (iops >= tg->iops[WRITE][LIMIT_LOW])
2019 + tg->last_low_overflow_time[WRITE] = now;
2020 +@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
2021 + }
2022 + #endif
2023 +
2024 ++void blk_throtl_charge_bio_split(struct bio *bio)
2025 ++{
2026 ++ struct blkcg_gq *blkg = bio->bi_blkg;
2027 ++ struct throtl_grp *parent = blkg_to_tg(blkg);
2028 ++ struct throtl_service_queue *parent_sq;
2029 ++ bool rw = bio_data_dir(bio);
2030 ++
2031 ++ do {
2032 ++ if (!parent->has_rules[rw])
2033 ++ break;
2034 ++
2035 ++ atomic_inc(&parent->io_split_cnt[rw]);
2036 ++ atomic_inc(&parent->last_io_split_cnt[rw]);
2037 ++
2038 ++ parent_sq = parent->service_queue.parent_sq;
2039 ++ parent = sq_to_tg(parent_sq);
2040 ++ } while (parent);
2041 ++}
2042 ++
2043 + bool blk_throtl_bio(struct bio *bio)
2044 + {
2045 + struct request_queue *q = bio->bi_bdev->bd_disk->queue;
2046 +diff --git a/block/blk.h b/block/blk.h
2047 +index cb01429c162c6..f10cc9b2c27f7 100644
2048 +--- a/block/blk.h
2049 ++++ b/block/blk.h
2050 +@@ -289,11 +289,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
2051 + extern int blk_throtl_init(struct request_queue *q);
2052 + extern void blk_throtl_exit(struct request_queue *q);
2053 + extern void blk_throtl_register_queue(struct request_queue *q);
2054 ++extern void blk_throtl_charge_bio_split(struct bio *bio);
2055 + bool blk_throtl_bio(struct bio *bio);
2056 + #else /* CONFIG_BLK_DEV_THROTTLING */
2057 + static inline int blk_throtl_init(struct request_queue *q) { return 0; }
2058 + static inline void blk_throtl_exit(struct request_queue *q) { }
2059 + static inline void blk_throtl_register_queue(struct request_queue *q) { }
2060 ++static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
2061 + static inline bool blk_throtl_bio(struct bio *bio) { return false; }
2062 + #endif /* CONFIG_BLK_DEV_THROTTLING */
2063 + #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2064 +diff --git a/block/elevator.c b/block/elevator.c
2065 +index 52ada14cfe452..a5fe2615ec0f1 100644
2066 +--- a/block/elevator.c
2067 ++++ b/block/elevator.c
2068 +@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
2069 + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
2070 + if (__rq && elv_bio_merge_ok(__rq, bio)) {
2071 + *req = __rq;
2072 ++
2073 ++ if (blk_discard_mergable(__rq))
2074 ++ return ELEVATOR_DISCARD_MERGE;
2075 + return ELEVATOR_BACK_MERGE;
2076 + }
2077 +
2078 +diff --git a/block/mq-deadline.c b/block/mq-deadline.c
2079 +index 36920670dccc3..3c3693c34f061 100644
2080 +--- a/block/mq-deadline.c
2081 ++++ b/block/mq-deadline.c
2082 +@@ -629,6 +629,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
2083 +
2084 + if (elv_bio_merge_ok(__rq, bio)) {
2085 + *rq = __rq;
2086 ++ if (blk_discard_mergable(__rq))
2087 ++ return ELEVATOR_DISCARD_MERGE;
2088 + return ELEVATOR_FRONT_MERGE;
2089 + }
2090 + }
2091 +diff --git a/certs/Makefile b/certs/Makefile
2092 +index 359239a0ee9e3..f9344e52ecdae 100644
2093 +--- a/certs/Makefile
2094 ++++ b/certs/Makefile
2095 +@@ -57,11 +57,19 @@ endif
2096 + redirect_openssl = 2>&1
2097 + quiet_redirect_openssl = 2>&1
2098 + silent_redirect_openssl = 2>/dev/null
2099 ++openssl_available = $(shell openssl help 2>/dev/null && echo yes)
2100 +
2101 + # We do it this way rather than having a boolean option for enabling an
2102 + # external private key, because 'make randconfig' might enable such a
2103 + # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
2104 + ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
2105 ++
2106 ++ifeq ($(openssl_available),yes)
2107 ++X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
2108 ++
2109 ++$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
2110 ++endif
2111 ++
2112 + $(obj)/signing_key.pem: $(obj)/x509.genkey
2113 + @$(kecho) "###"
2114 + @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
2115 +diff --git a/crypto/ecc.h b/crypto/ecc.h
2116 +index a006132646a43..1350e8eb6ac23 100644
2117 +--- a/crypto/ecc.h
2118 ++++ b/crypto/ecc.h
2119 +@@ -27,6 +27,7 @@
2120 + #define _CRYPTO_ECC_H
2121 +
2122 + #include <crypto/ecc_curve.h>
2123 ++#include <asm/unaligned.h>
2124 +
2125 + /* One digit is u64 qword. */
2126 + #define ECC_CURVE_NIST_P192_DIGITS 3
2127 +@@ -46,13 +47,13 @@
2128 + * @out: Output array
2129 + * @ndigits: Number of digits to copy
2130 + */
2131 +-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
2132 ++static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
2133 + {
2134 + const __be64 *src = (__force __be64 *)in;
2135 + int i;
2136 +
2137 + for (i = 0; i < ndigits; i++)
2138 +- out[i] = be64_to_cpu(src[ndigits - 1 - i]);
2139 ++ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
2140 + }
2141 +
2142 + /**
2143 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
2144 +index f8d06da78e4f3..6863e57b088d5 100644
2145 +--- a/crypto/tcrypt.c
2146 ++++ b/crypto/tcrypt.c
2147 +@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
2148 + }
2149 +
2150 + ret = crypto_aead_setauthsize(tfm, authsize);
2151 ++ if (ret) {
2152 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
2153 ++ ret);
2154 ++ goto out_free_tfm;
2155 ++ }
2156 +
2157 + for (i = 0; i < num_mb; ++i)
2158 + if (testmgr_alloc_buf(data[i].xbuf)) {
2159 +@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
2160 + for (i = 0; i < num_mb; ++i) {
2161 + data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
2162 + if (!data[i].req) {
2163 +- pr_err("alg: skcipher: Failed to allocate request for %s\n",
2164 ++ pr_err("alg: aead: Failed to allocate request for %s\n",
2165 + algo);
2166 + while (i--)
2167 + aead_request_free(data[i].req);
2168 +@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2169 + sgout = &sg[9];
2170 +
2171 + tfm = crypto_alloc_aead(algo, 0, 0);
2172 +-
2173 + if (IS_ERR(tfm)) {
2174 + pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
2175 + PTR_ERR(tfm));
2176 + goto out_notfm;
2177 + }
2178 +
2179 ++ ret = crypto_aead_setauthsize(tfm, authsize);
2180 ++ if (ret) {
2181 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
2182 ++ ret);
2183 ++ goto out_noreq;
2184 ++ }
2185 ++
2186 + crypto_init_wait(&wait);
2187 + printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
2188 + get_driver_name(crypto_aead, tfm), e);
2189 +@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2190 + break;
2191 + }
2192 + }
2193 ++
2194 + ret = crypto_aead_setkey(tfm, key, *keysize);
2195 +- ret = crypto_aead_setauthsize(tfm, authsize);
2196 ++ if (ret) {
2197 ++ pr_err("setkey() failed flags=%x: %d\n",
2198 ++ crypto_aead_get_flags(tfm), ret);
2199 ++ goto out;
2200 ++ }
2201 +
2202 + iv_len = crypto_aead_ivsize(tfm);
2203 + if (iv_len)
2204 +@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2205 + printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
2206 + i, *keysize * 8, bs);
2207 +
2208 +-
2209 + memset(tvmem[0], 0xff, PAGE_SIZE);
2210 +
2211 +- if (ret) {
2212 +- pr_err("setkey() failed flags=%x\n",
2213 +- crypto_aead_get_flags(tfm));
2214 +- goto out;
2215 +- }
2216 +-
2217 + sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
2218 + assoc, aad_size);
2219 +
2220 +diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
2221 +index 1f6007abcf18e..89c22bc550570 100644
2222 +--- a/drivers/acpi/prmt.c
2223 ++++ b/drivers/acpi/prmt.c
2224 +@@ -288,10 +288,18 @@ invalid_guid:
2225 +
2226 + void __init init_prmt(void)
2227 + {
2228 ++ struct acpi_table_header *tbl;
2229 + acpi_status status;
2230 +- int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
2231 ++ int mc;
2232 ++
2233 ++ status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
2234 ++ if (ACPI_FAILURE(status))
2235 ++ return;
2236 ++
2237 ++ mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
2238 + sizeof (struct acpi_table_prmt_header),
2239 + 0, acpi_parse_prmt, 0);
2240 ++ acpi_put_table(tbl);
2241 + /*
2242 + * Return immediately if PRMT table is not present or no PRM module found.
2243 + */
2244 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2245 +index 61c762961ca8e..44f434acfce08 100644
2246 +--- a/drivers/ata/libata-core.c
2247 ++++ b/drivers/ata/libata-core.c
2248 +@@ -5573,7 +5573,7 @@ int ata_host_start(struct ata_host *host)
2249 + have_stop = 1;
2250 + }
2251 +
2252 +- if (host->ops->host_stop)
2253 ++ if (host->ops && host->ops->host_stop)
2254 + have_stop = 1;
2255 +
2256 + if (have_stop) {
2257 +diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
2258 +index 2e5e7c9939334..8b2a0eb3f32a4 100644
2259 +--- a/drivers/auxdisplay/hd44780.c
2260 ++++ b/drivers/auxdisplay/hd44780.c
2261 +@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
2262 + {
2263 + struct charlcd *lcd = platform_get_drvdata(pdev);
2264 +
2265 +- kfree(lcd->drvdata);
2266 + charlcd_unregister(lcd);
2267 ++ kfree(lcd->drvdata);
2268 +
2269 + kfree(lcd);
2270 + return 0;
2271 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
2272 +index 437cd61343b26..68ea1f949daa9 100644
2273 +--- a/drivers/base/dd.c
2274 ++++ b/drivers/base/dd.c
2275 +@@ -580,7 +580,8 @@ re_probe:
2276 + goto probe_failed;
2277 + }
2278 +
2279 +- if (driver_sysfs_add(dev)) {
2280 ++ ret = driver_sysfs_add(dev);
2281 ++ if (ret) {
2282 + pr_err("%s: driver_sysfs_add(%s) failed\n",
2283 + __func__, dev_name(dev));
2284 + goto probe_failed;
2285 +@@ -602,15 +603,18 @@ re_probe:
2286 + goto probe_failed;
2287 + }
2288 +
2289 +- if (device_add_groups(dev, drv->dev_groups)) {
2290 ++ ret = device_add_groups(dev, drv->dev_groups);
2291 ++ if (ret) {
2292 + dev_err(dev, "device_add_groups() failed\n");
2293 + goto dev_groups_failed;
2294 + }
2295 +
2296 +- if (dev_has_sync_state(dev) &&
2297 +- device_create_file(dev, &dev_attr_state_synced)) {
2298 +- dev_err(dev, "state_synced sysfs add failed\n");
2299 +- goto dev_sysfs_state_synced_failed;
2300 ++ if (dev_has_sync_state(dev)) {
2301 ++ ret = device_create_file(dev, &dev_attr_state_synced);
2302 ++ if (ret) {
2303 ++ dev_err(dev, "state_synced sysfs add failed\n");
2304 ++ goto dev_sysfs_state_synced_failed;
2305 ++ }
2306 + }
2307 +
2308 + if (test_remove) {
2309 +diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
2310 +index 68c549d712304..bdbedc6660a87 100644
2311 +--- a/drivers/base/firmware_loader/main.c
2312 ++++ b/drivers/base/firmware_loader/main.c
2313 +@@ -165,7 +165,7 @@ static inline int fw_state_wait(struct fw_priv *fw_priv)
2314 + return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
2315 + }
2316 +
2317 +-static int fw_cache_piggyback_on_request(const char *name);
2318 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
2319 +
2320 + static struct fw_priv *__allocate_fw_priv(const char *fw_name,
2321 + struct firmware_cache *fwc,
2322 +@@ -707,10 +707,8 @@ int assign_fw(struct firmware *fw, struct device *device)
2323 + * on request firmware.
2324 + */
2325 + if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
2326 +- fw_priv->fwc->state == FW_LOADER_START_CACHE) {
2327 +- if (fw_cache_piggyback_on_request(fw_priv->fw_name))
2328 +- kref_get(&fw_priv->ref);
2329 +- }
2330 ++ fw_priv->fwc->state == FW_LOADER_START_CACHE)
2331 ++ fw_cache_piggyback_on_request(fw_priv);
2332 +
2333 + /* pass the pages buffer to driver at the last minute */
2334 + fw_set_page_data(fw_priv, fw);
2335 +@@ -1259,11 +1257,11 @@ static int __fw_entry_found(const char *name)
2336 + return 0;
2337 + }
2338 +
2339 +-static int fw_cache_piggyback_on_request(const char *name)
2340 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
2341 + {
2342 +- struct firmware_cache *fwc = &fw_cache;
2343 ++ const char *name = fw_priv->fw_name;
2344 ++ struct firmware_cache *fwc = fw_priv->fwc;
2345 + struct fw_cache_entry *fce;
2346 +- int ret = 0;
2347 +
2348 + spin_lock(&fwc->name_lock);
2349 + if (__fw_entry_found(name))
2350 +@@ -1271,13 +1269,12 @@ static int fw_cache_piggyback_on_request(const char *name)
2351 +
2352 + fce = alloc_fw_cache_entry(name);
2353 + if (fce) {
2354 +- ret = 1;
2355 + list_add(&fce->list, &fwc->fw_names);
2356 ++ kref_get(&fw_priv->ref);
2357 + pr_debug("%s: fw: %s\n", __func__, name);
2358 + }
2359 + found:
2360 + spin_unlock(&fwc->name_lock);
2361 +- return ret;
2362 + }
2363 +
2364 + static void free_fw_cache_entry(struct fw_cache_entry *fce)
2365 +@@ -1508,9 +1505,8 @@ static inline void unregister_fw_pm_ops(void)
2366 + unregister_pm_notifier(&fw_cache.pm_notify);
2367 + }
2368 + #else
2369 +-static int fw_cache_piggyback_on_request(const char *name)
2370 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
2371 + {
2372 +- return 0;
2373 + }
2374 + static inline int register_fw_pm_ops(void)
2375 + {
2376 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2377 +index fe3e38dd5324f..2fc826e97591e 100644
2378 +--- a/drivers/base/regmap/regmap.c
2379 ++++ b/drivers/base/regmap/regmap.c
2380 +@@ -1667,7 +1667,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
2381 + if (ret) {
2382 + dev_err(map->dev,
2383 + "Error in caching of register: %x ret: %d\n",
2384 +- reg + i, ret);
2385 ++ reg + regmap_get_offset(map, i), ret);
2386 + return ret;
2387 + }
2388 + }
2389 +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
2390 +index 6535614a7dc13..1df2b5801c3bc 100644
2391 +--- a/drivers/bcma/main.c
2392 ++++ b/drivers/bcma/main.c
2393 +@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
2394 +
2395 + void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
2396 + {
2397 ++ device_initialize(&core->dev);
2398 + core->dev.release = bcma_release_core_dev;
2399 + core->dev.bus = &bcma_bus_type;
2400 + dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
2401 +@@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
2402 + {
2403 + int err;
2404 +
2405 +- err = device_register(&core->dev);
2406 ++ err = device_add(&core->dev);
2407 + if (err) {
2408 + bcma_err(bus, "Could not register dev for core 0x%03X\n",
2409 + core->id.id);
2410 +- put_device(&core->dev);
2411 + return;
2412 + }
2413 + core->dev_registered = true;
2414 +@@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
2415 + /* Now noone uses internally-handled cores, we can free them */
2416 + list_for_each_entry_safe(core, tmp, &bus->cores, list) {
2417 + list_del(&core->list);
2418 +- kfree(core);
2419 ++ put_device(&core->dev);
2420 + }
2421 + }
2422 +
2423 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2424 +index 19f5d5a8b16a3..93708b1938e80 100644
2425 +--- a/drivers/block/nbd.c
2426 ++++ b/drivers/block/nbd.c
2427 +@@ -49,6 +49,7 @@
2428 +
2429 + static DEFINE_IDR(nbd_index_idr);
2430 + static DEFINE_MUTEX(nbd_index_mutex);
2431 ++static struct workqueue_struct *nbd_del_wq;
2432 + static int nbd_total_devices = 0;
2433 +
2434 + struct nbd_sock {
2435 +@@ -113,6 +114,7 @@ struct nbd_device {
2436 + struct mutex config_lock;
2437 + struct gendisk *disk;
2438 + struct workqueue_struct *recv_workq;
2439 ++ struct work_struct remove_work;
2440 +
2441 + struct list_head list;
2442 + struct task_struct *task_recv;
2443 +@@ -233,7 +235,7 @@ static const struct device_attribute backend_attr = {
2444 + .show = backend_show,
2445 + };
2446 +
2447 +-static void nbd_dev_remove(struct nbd_device *nbd)
2448 ++static void nbd_del_disk(struct nbd_device *nbd)
2449 + {
2450 + struct gendisk *disk = nbd->disk;
2451 +
2452 +@@ -242,24 +244,60 @@ static void nbd_dev_remove(struct nbd_device *nbd)
2453 + blk_cleanup_disk(disk);
2454 + blk_mq_free_tag_set(&nbd->tag_set);
2455 + }
2456 ++}
2457 ++
2458 ++/*
2459 ++ * Place this in the last just before the nbd is freed to
2460 ++ * make sure that the disk and the related kobject are also
2461 ++ * totally removed to avoid duplicate creation of the same
2462 ++ * one.
2463 ++ */
2464 ++static void nbd_notify_destroy_completion(struct nbd_device *nbd)
2465 ++{
2466 ++ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
2467 ++ nbd->destroy_complete)
2468 ++ complete(nbd->destroy_complete);
2469 ++}
2470 +
2471 ++static void nbd_dev_remove_work(struct work_struct *work)
2472 ++{
2473 ++ struct nbd_device *nbd =
2474 ++ container_of(work, struct nbd_device, remove_work);
2475 ++
2476 ++ nbd_del_disk(nbd);
2477 ++
2478 ++ mutex_lock(&nbd_index_mutex);
2479 + /*
2480 +- * Place this in the last just before the nbd is freed to
2481 +- * make sure that the disk and the related kobject are also
2482 +- * totally removed to avoid duplicate creation of the same
2483 +- * one.
2484 ++ * Remove from idr after del_gendisk() completes,
2485 ++ * so if the same id is reused, the following
2486 ++ * add_disk() will succeed.
2487 + */
2488 +- if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
2489 +- complete(nbd->destroy_complete);
2490 ++ idr_remove(&nbd_index_idr, nbd->index);
2491 ++
2492 ++ nbd_notify_destroy_completion(nbd);
2493 ++ mutex_unlock(&nbd_index_mutex);
2494 +
2495 + kfree(nbd);
2496 + }
2497 +
2498 ++static void nbd_dev_remove(struct nbd_device *nbd)
2499 ++{
2500 ++ /* Call del_gendisk() asynchrounously to prevent deadlock */
2501 ++ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) {
2502 ++ queue_work(nbd_del_wq, &nbd->remove_work);
2503 ++ return;
2504 ++ }
2505 ++
2506 ++ nbd_del_disk(nbd);
2507 ++ idr_remove(&nbd_index_idr, nbd->index);
2508 ++ nbd_notify_destroy_completion(nbd);
2509 ++ kfree(nbd);
2510 ++}
2511 ++
2512 + static void nbd_put(struct nbd_device *nbd)
2513 + {
2514 + if (refcount_dec_and_mutex_lock(&nbd->refs,
2515 + &nbd_index_mutex)) {
2516 +- idr_remove(&nbd_index_idr, nbd->index);
2517 + nbd_dev_remove(nbd);
2518 + mutex_unlock(&nbd_index_mutex);
2519 + }
2520 +@@ -1388,6 +1426,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2521 + unsigned int cmd, unsigned long arg)
2522 + {
2523 + struct nbd_config *config = nbd->config;
2524 ++ loff_t bytesize;
2525 +
2526 + switch (cmd) {
2527 + case NBD_DISCONNECT:
2528 +@@ -1402,8 +1441,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2529 + case NBD_SET_SIZE:
2530 + return nbd_set_size(nbd, arg, config->blksize);
2531 + case NBD_SET_SIZE_BLOCKS:
2532 +- return nbd_set_size(nbd, arg * config->blksize,
2533 +- config->blksize);
2534 ++ if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
2535 ++ return -EINVAL;
2536 ++ return nbd_set_size(nbd, bytesize, config->blksize);
2537 + case NBD_SET_TIMEOUT:
2538 + nbd_set_cmd_timeout(nbd, arg);
2539 + return 0;
2540 +@@ -1683,6 +1723,7 @@ static int nbd_dev_add(int index)
2541 + nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
2542 + BLK_MQ_F_BLOCKING;
2543 + nbd->tag_set.driver_data = nbd;
2544 ++ INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
2545 + nbd->destroy_complete = NULL;
2546 + nbd->backend = NULL;
2547 +
2548 +@@ -1729,7 +1770,17 @@ static int nbd_dev_add(int index)
2549 + refcount_set(&nbd->refs, 1);
2550 + INIT_LIST_HEAD(&nbd->list);
2551 + disk->major = NBD_MAJOR;
2552