Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Wed, 13 Mar 2019 22:09:28 +0000 (UTC)
Message-Id: 1552514943.6d334d71d619bf689f1adf37e57d1717ebb1ee66.mpagano@gentoo
1 commit: 6d334d71d619bf689f1adf37e57d1717ebb1ee66
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Mar 13 22:09:03 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Mar 13 22:09:03 2019 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6d334d71d619bf689f1adf37e57d1717ebb1ee66
7
8 proj/linux-patches: Linux patch 4.20.16
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1015_linux-4.20.16.patch | 7205 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 7209 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index dd61e24..516aaa4 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -103,6 +103,10 @@ Patch: 1014_linux-4.20.15.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.20.15
23
24 +Patch: 1015_linux-4.20.16.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.20.16
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1015_linux-4.20.16.patch b/1015_linux-4.20.16.patch
33 new file mode 100644
34 index 0000000..68b917a
35 --- /dev/null
36 +++ b/1015_linux-4.20.16.patch
37 @@ -0,0 +1,7205 @@
38 +diff --git a/Makefile b/Makefile
39 +index 25b45c24bac0..2979ad27e16a 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 20
46 +-SUBLEVEL = 15
47 ++SUBLEVEL = 16
48 + EXTRAVERSION =
49 + NAME = Shy Crocodile
50 +
51 +diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
52 +index 1d925ed2b102..8fbbad11a80c 100644
53 +--- a/arch/arm/boot/dts/am335x-shc.dts
54 ++++ b/arch/arm/boot/dts/am335x-shc.dts
55 +@@ -215,7 +215,7 @@
56 + pinctrl-names = "default";
57 + pinctrl-0 = <&mmc1_pins>;
58 + bus-width = <0x4>;
59 +- cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
60 ++ cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
61 + cd-inverted;
62 + max-frequency = <26000000>;
63 + vmmc-supply = <&vmmcsd_fixed>;
64 +diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
65 +index 27a1ee28c3bb..94efca78c42f 100644
66 +--- a/arch/arm/boot/dts/exynos3250.dtsi
67 ++++ b/arch/arm/boot/dts/exynos3250.dtsi
68 +@@ -168,6 +168,9 @@
69 + interrupt-controller;
70 + #interrupt-cells = <3>;
71 + interrupt-parent = <&gic>;
72 ++ clock-names = "clkout8";
73 ++ clocks = <&cmu CLK_FIN_PLL>;
74 ++ #clock-cells = <1>;
75 + };
76 +
77 + mipi_phy: video-phy {
78 +diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
79 +index 2caa3132f34e..fe91b6828da3 100644
80 +--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
81 ++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
82 +@@ -49,7 +49,7 @@
83 + };
84 +
85 + emmc_pwrseq: pwrseq {
86 +- pinctrl-0 = <&sd1_cd>;
87 ++ pinctrl-0 = <&emmc_rstn>;
88 + pinctrl-names = "default";
89 + compatible = "mmc-pwrseq-emmc";
90 + reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
91 +@@ -161,12 +161,6 @@
92 + cpu0-supply = <&buck2_reg>;
93 + };
94 +
95 +-/* RSTN signal for eMMC */
96 +-&sd1_cd {
97 +- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
98 +- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
99 +-};
100 +-
101 + &pinctrl_1 {
102 + gpio_power_key: power_key {
103 + samsung,pins = "gpx1-3";
104 +@@ -184,6 +178,11 @@
105 + samsung,pins = "gpx3-7";
106 + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
107 + };
108 ++
109 ++ emmc_rstn: emmc-rstn {
110 ++ samsung,pins = "gpk1-2";
111 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
112 ++ };
113 + };
114 +
115 + &ehci {
116 +diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
117 +index 2fac4baf1eb4..934cec60577a 100644
118 +--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
119 ++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
120 +@@ -467,7 +467,7 @@
121 + buck8_reg: BUCK8 {
122 + regulator-name = "vdd_1.8v_ldo";
123 + regulator-min-microvolt = <800000>;
124 +- regulator-max-microvolt = <1500000>;
125 ++ regulator-max-microvolt = <2000000>;
126 + regulator-always-on;
127 + regulator-boot-on;
128 + };
129 +diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
130 +index 95a3c1cb877d..89ba48f4273b 100644
131 +--- a/arch/arm/boot/dts/imx6sx.dtsi
132 ++++ b/arch/arm/boot/dts/imx6sx.dtsi
133 +@@ -462,7 +462,7 @@
134 + };
135 +
136 + gpt: gpt@2098000 {
137 +- compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
138 ++ compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
139 + reg = <0x02098000 0x4000>;
140 + interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
141 + clocks = <&clks IMX6SX_CLK_GPT_BUS>,
142 +diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
143 +index 0d9faf1a51ea..a86b89086334 100644
144 +--- a/arch/arm/boot/dts/meson.dtsi
145 ++++ b/arch/arm/boot/dts/meson.dtsi
146 +@@ -263,7 +263,7 @@
147 + compatible = "amlogic,meson6-dwmac", "snps,dwmac";
148 + reg = <0xc9410000 0x10000
149 + 0xc1108108 0x4>;
150 +- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
151 ++ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
152 + interrupt-names = "macirq";
153 + status = "disabled";
154 + };
155 +diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
156 +index 0872f6e3abf5..d50fc2f60fa3 100644
157 +--- a/arch/arm/boot/dts/meson8b-ec100.dts
158 ++++ b/arch/arm/boot/dts/meson8b-ec100.dts
159 +@@ -205,8 +205,7 @@
160 + cap-sd-highspeed;
161 + disable-wp;
162 +
163 +- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
164 +- cd-inverted;
165 ++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
166 +
167 + vmmc-supply = <&vcc_3v3>;
168 + };
169 +diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
170 +index 58669abda259..0f0a46ddf3ff 100644
171 +--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
172 ++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
173 +@@ -221,7 +221,6 @@
174 + /* Realtek RTL8211F (0x001cc916) */
175 + eth_phy: ethernet-phy@0 {
176 + reg = <0>;
177 +- eee-broken-1000t;
178 + interrupt-parent = <&gpio_intc>;
179 + /* GPIOH_3 */
180 + interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
181 +@@ -273,8 +272,7 @@
182 + cap-sd-highspeed;
183 + disable-wp;
184 +
185 +- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
186 +- cd-inverted;
187 ++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
188 +
189 + vmmc-supply = <&tflash_vdd>;
190 + vqmmc-supply = <&tf_io>;
191 +diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
192 +index f5853610b20b..6ac02beb5fa7 100644
193 +--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
194 ++++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
195 +@@ -206,8 +206,7 @@
196 + cap-sd-highspeed;
197 + disable-wp;
198 +
199 +- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
200 +- cd-inverted;
201 ++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
202 +
203 + vmmc-supply = <&vcc_3v3>;
204 + };
205 +diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
206 +index ddc7a7bb33c0..f57acf8f66b9 100644
207 +--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
208 ++++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
209 +@@ -105,7 +105,7 @@
210 + interrupts-extended = <
211 + &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
212 + &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
213 +- &cpcap 48 1
214 ++ &cpcap 48 0
215 + >;
216 + interrupt-names =
217 + "id_ground", "id_float", "se0conn", "vbusvld",
218 +diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
219 +index d5fe55392230..68e675258906 100644
220 +--- a/arch/arm/boot/dts/omap3-gta04.dtsi
221 ++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
222 +@@ -714,11 +714,7 @@
223 +
224 + vdda-supply = <&vdac>;
225 +
226 +- #address-cells = <1>;
227 +- #size-cells = <0>;
228 +-
229 + port {
230 +- reg = <0>;
231 + venc_out: endpoint {
232 + remote-endpoint = <&opa_in>;
233 + ti,channels = <1>;
234 +diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
235 +index 182a53991c90..826920e6b878 100644
236 +--- a/arch/arm/boot/dts/omap3-n900.dts
237 ++++ b/arch/arm/boot/dts/omap3-n900.dts
238 +@@ -814,7 +814,7 @@
239 + /* For debugging, it is often good idea to remove this GPIO.
240 + It means you can remove back cover (to reboot by removing
241 + battery) and still use the MMC card. */
242 +- cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
243 ++ cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
244 + };
245 +
246 + /* most boards use vaux3, only some old versions use vmmc2 instead */
247 +diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
248 +index 0d9b85317529..e142e6c70a59 100644
249 +--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
250 ++++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
251 +@@ -370,6 +370,19 @@
252 + compatible = "ti,omap2-onenand";
253 + reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
254 +
255 ++ /*
256 ++ * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
257 ++ * bootloader set values when booted with v4.19 using both N950
258 ++ * and N9 devices (OneNAND Manufacturer: Samsung):
259 ++ *
260 ++ * gpmc cs0 before gpmc_cs_program_settings:
261 ++ * cs0 GPMC_CS_CONFIG1: 0xfd001202
262 ++ * cs0 GPMC_CS_CONFIG2: 0x00181800
263 ++ * cs0 GPMC_CS_CONFIG3: 0x00030300
264 ++ * cs0 GPMC_CS_CONFIG4: 0x18001804
265 ++ * cs0 GPMC_CS_CONFIG5: 0x03171d1d
266 ++ * cs0 GPMC_CS_CONFIG6: 0x97080000
267 ++ */
268 + gpmc,sync-read;
269 + gpmc,sync-write;
270 + gpmc,burst-length = <16>;
271 +@@ -379,26 +392,27 @@
272 + gpmc,device-width = <2>;
273 + gpmc,mux-add-data = <2>;
274 + gpmc,cs-on-ns = <0>;
275 +- gpmc,cs-rd-off-ns = <87>;
276 +- gpmc,cs-wr-off-ns = <87>;
277 ++ gpmc,cs-rd-off-ns = <122>;
278 ++ gpmc,cs-wr-off-ns = <122>;
279 + gpmc,adv-on-ns = <0>;
280 +- gpmc,adv-rd-off-ns = <10>;
281 +- gpmc,adv-wr-off-ns = <10>;
282 +- gpmc,oe-on-ns = <15>;
283 +- gpmc,oe-off-ns = <87>;
284 ++ gpmc,adv-rd-off-ns = <15>;
285 ++ gpmc,adv-wr-off-ns = <15>;
286 ++ gpmc,oe-on-ns = <20>;
287 ++ gpmc,oe-off-ns = <122>;
288 + gpmc,we-on-ns = <0>;
289 +- gpmc,we-off-ns = <87>;
290 +- gpmc,rd-cycle-ns = <112>;
291 +- gpmc,wr-cycle-ns = <112>;
292 +- gpmc,access-ns = <81>;
293 ++ gpmc,we-off-ns = <122>;
294 ++ gpmc,rd-cycle-ns = <148>;
295 ++ gpmc,wr-cycle-ns = <148>;
296 ++ gpmc,access-ns = <117>;
297 + gpmc,page-burst-access-ns = <15>;
298 + gpmc,bus-turnaround-ns = <0>;
299 + gpmc,cycle2cycle-delay-ns = <0>;
300 + gpmc,wait-monitoring-ns = <0>;
301 +- gpmc,clk-activation-ns = <5>;
302 +- gpmc,wr-data-mux-bus-ns = <30>;
303 +- gpmc,wr-access-ns = <81>;
304 +- gpmc,sync-clk-ps = <15000>;
305 ++ gpmc,clk-activation-ns = <10>;
306 ++ gpmc,wr-data-mux-bus-ns = <40>;
307 ++ gpmc,wr-access-ns = <117>;
308 ++
309 ++ gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
310 +
311 + /*
312 + * MTD partition table corresponding to Nokia's MeeGo 1.2
313 +diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
314 +index 5d23667dc2d2..25540b7694d5 100644
315 +--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
316 ++++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
317 +@@ -53,7 +53,7 @@
318 +
319 + aliases {
320 + serial0 = &uart0;
321 +- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
322 ++ ethernet0 = &emac;
323 + ethernet1 = &sdiowifi;
324 + };
325 +
326 +diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
327 +index ed36dcab80f1..f51919974183 100644
328 +--- a/arch/arm/plat-pxa/ssp.c
329 ++++ b/arch/arm/plat-pxa/ssp.c
330 +@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
331 + if (ssp == NULL)
332 + return -ENODEV;
333 +
334 +- iounmap(ssp->mmio_base);
335 +-
336 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
337 + release_mem_region(res->start, resource_size(res));
338 +
339 +@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
340 + list_del(&ssp->node);
341 + mutex_unlock(&ssp_lock);
342 +
343 +- kfree(ssp);
344 + return 0;
345 + }
346 +
347 +diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
348 +index f4964bee6a1a..e80a792827ed 100644
349 +--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
350 ++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
351 +@@ -118,6 +118,7 @@
352 + reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
353 + clocks = <&pmic>;
354 + clock-names = "ext_clock";
355 ++ post-power-on-delay-ms = <10>;
356 + power-off-delay-us = <10>;
357 + };
358 +
359 +@@ -300,7 +301,6 @@
360 +
361 + dwmmc_0: dwmmc0@f723d000 {
362 + cap-mmc-highspeed;
363 +- mmc-hs200-1_8v;
364 + non-removable;
365 + bus-width = <0x8>;
366 + vmmc-supply = <&ldo19>;
367 +diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
368 +index b29fe80d7288..461612a5ab5e 100644
369 +--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
370 ++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
371 +@@ -397,7 +397,7 @@
372 + };
373 +
374 + intc: interrupt-controller@9bc0000 {
375 +- compatible = "arm,gic-v3";
376 ++ compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
377 + #interrupt-cells = <3>;
378 + interrupt-controller;
379 + #redistributor-regions = <1>;
380 +diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
381 +index 1ec6aaa520c1..09320caea54e 100644
382 +--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
383 ++++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
384 +@@ -1160,6 +1160,9 @@
385 + <&cpg CPG_CORE R8A7796_CLK_S3D1>,
386 + <&scif_clk>;
387 + clock-names = "fck", "brg_int", "scif_clk";
388 ++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
389 ++ <&dmac2 0x13>, <&dmac2 0x12>;
390 ++ dma-names = "tx", "rx", "tx", "rx";
391 + power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
392 + resets = <&cpg 310>;
393 + status = "disabled";
394 +diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
395 +index 83946ca2eba5..d59dada13722 100644
396 +--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
397 ++++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
398 +@@ -1028,6 +1028,9 @@
399 + <&cpg CPG_CORE R8A77965_CLK_S3D1>,
400 + <&scif_clk>;
401 + clock-names = "fck", "brg_int", "scif_clk";
402 ++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
403 ++ <&dmac2 0x13>, <&dmac2 0x12>;
404 ++ dma-names = "tx", "rx", "tx", "rx";
405 + power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
406 + resets = <&cpg 310>;
407 + status = "disabled";
408 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
409 +index eb5e8bddb610..8954c8c6f547 100644
410 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
411 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
412 +@@ -101,6 +101,7 @@
413 + sdio_pwrseq: sdio_pwrseq {
414 + compatible = "mmc-pwrseq-simple";
415 + reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
416 ++ post-power-on-delay-ms = <10>;
417 + };
418 + };
419 +
420 +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
421 +index 2a5b338b2542..f17afb99890c 100644
422 +--- a/arch/arm64/kernel/probes/kprobes.c
423 ++++ b/arch/arm64/kernel/probes/kprobes.c
424 +@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
425 + addr < (unsigned long)__entry_text_end) ||
426 + (addr >= (unsigned long)__idmap_text_start &&
427 + addr < (unsigned long)__idmap_text_end) ||
428 ++ (addr >= (unsigned long)__hyp_text_start &&
429 ++ addr < (unsigned long)__hyp_text_end) ||
430 + !!search_exception_tables(addr))
431 + return true;
432 +
433 + if (!is_kernel_in_hyp_mode()) {
434 +- if ((addr >= (unsigned long)__hyp_text_start &&
435 +- addr < (unsigned long)__hyp_text_end) ||
436 +- (addr >= (unsigned long)__hyp_idmap_text_start &&
437 ++ if ((addr >= (unsigned long)__hyp_idmap_text_start &&
438 + addr < (unsigned long)__hyp_idmap_text_end))
439 + return true;
440 + }
441 +diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
442 +index 50cff3cbcc6d..4f7b1fa31cf5 100644
443 +--- a/arch/mips/boot/dts/ingenic/ci20.dts
444 ++++ b/arch/mips/boot/dts/ingenic/ci20.dts
445 +@@ -76,7 +76,7 @@
446 + status = "okay";
447 +
448 + pinctrl-names = "default";
449 +- pinctrl-0 = <&pins_uart2>;
450 ++ pinctrl-0 = <&pins_uart3>;
451 + };
452 +
453 + &uart4 {
454 +@@ -196,9 +196,9 @@
455 + bias-disable;
456 + };
457 +
458 +- pins_uart2: uart2 {
459 +- function = "uart2";
460 +- groups = "uart2-data", "uart2-hwflow";
461 ++ pins_uart3: uart3 {
462 ++ function = "uart3";
463 ++ groups = "uart3-data", "uart3-hwflow";
464 + bias-disable;
465 + };
466 +
467 +diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
468 +index 6fb16fd24035..2beb78a62b7d 100644
469 +--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
470 ++++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
471 +@@ -161,7 +161,7 @@
472 + #dma-cells = <2>;
473 +
474 + interrupt-parent = <&intc>;
475 +- interrupts = <29>;
476 ++ interrupts = <20>;
477 +
478 + clocks = <&cgu JZ4740_CLK_DMA>;
479 +
480 +diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
481 +index d4f7fd4550e1..85522c137f19 100644
482 +--- a/arch/mips/kernel/process.c
483 ++++ b/arch/mips/kernel/process.c
484 +@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
485 + static int get_frame_info(struct mips_frame_info *info)
486 + {
487 + bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
488 +- union mips_instruction insn, *ip, *ip_end;
489 ++ union mips_instruction insn, *ip;
490 + const unsigned int max_insns = 128;
491 + unsigned int last_insn_size = 0;
492 + unsigned int i;
493 +@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
494 + if (!ip)
495 + goto err;
496 +
497 +- ip_end = (void *)ip + info->func_size;
498 +-
499 +- for (i = 0; i < max_insns && ip < ip_end; i++) {
500 ++ for (i = 0; i < max_insns; i++) {
501 + ip = (void *)ip + last_insn_size;
502 ++
503 + if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
504 + insn.word = ip->halfword[0] << 16;
505 + last_insn_size = 2;
506 +diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
507 +index 0531f49af5c3..ce70bceb8872 100644
508 +--- a/arch/riscv/include/asm/processor.h
509 ++++ b/arch/riscv/include/asm/processor.h
510 +@@ -22,7 +22,7 @@
511 + * This decides where the kernel will search for a free chunk of vm
512 + * space during mmap's.
513 + */
514 +-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
515 ++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
516 +
517 + #define STACK_TOP TASK_SIZE
518 + #define STACK_TOP_MAX STACK_TOP
519 +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
520 +index 2c290e6aaa6e..6d652826b5cb 100644
521 +--- a/arch/riscv/kernel/setup.c
522 ++++ b/arch/riscv/kernel/setup.c
523 +@@ -196,7 +196,7 @@ static void __init setup_bootmem(void)
524 + BUG_ON(mem_size == 0);
525 +
526 + set_max_mapnr(PFN_DOWN(mem_size));
527 +- max_low_pfn = memblock_end_of_DRAM();
528 ++ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
529 +
530 + #ifdef CONFIG_BLK_DEV_INITRD
531 + setup_initrd();
532 +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
533 +index 1d9bfaff60bc..658ebf645f42 100644
534 +--- a/arch/riscv/mm/init.c
535 ++++ b/arch/riscv/mm/init.c
536 +@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
537 + unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
538 +
539 + #ifdef CONFIG_ZONE_DMA32
540 +- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
541 ++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
542 ++ (unsigned long) PFN_PHYS(max_low_pfn)));
543 + #endif
544 + max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
545 +
546 +diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
547 +index 64037895b085..f105ae8651c9 100644
548 +--- a/arch/x86/boot/compressed/head_64.S
549 ++++ b/arch/x86/boot/compressed/head_64.S
550 +@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
551 + leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
552 + movl %eax, %cr3
553 + 3:
554 + /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
555 ++ pushl %ecx
556 ++ movl $MSR_EFER, %ecx
557 ++ rdmsr
558 ++ btsl $_EFER_LME, %eax
559 ++ wrmsr
560 ++ popl %ecx
561 ++
562 + /* Enable PAE and LA57 (if required) paging modes */
563 + movl $X86_CR4_PAE, %eax
564 + cmpl $0, %edx
565 +diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
566 +index 91f75638f6e6..6ff7e81b5628 100644
567 +--- a/arch/x86/boot/compressed/pgtable.h
568 ++++ b/arch/x86/boot/compressed/pgtable.h
569 +@@ -6,7 +6,7 @@
570 + #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
571 +
572 + #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
573 +-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
574 ++#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
575 +
576 + #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
577 +
578 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
579 +index b684f0294f35..e2b1447192a8 100644
580 +--- a/arch/x86/events/core.c
581 ++++ b/arch/x86/events/core.c
582 +@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
583 + */
584 + static void free_fake_cpuc(struct cpu_hw_events *cpuc)
585 + {
586 +- kfree(cpuc->shared_regs);
587 ++ intel_cpuc_finish(cpuc);
588 + kfree(cpuc);
589 + }
590 +
591 +@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
592 + cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
593 + if (!cpuc)
594 + return ERR_PTR(-ENOMEM);
595 +-
596 +- /* only needed, if we have extra_regs */
597 +- if (x86_pmu.extra_regs) {
598 +- cpuc->shared_regs = allocate_shared_regs(cpu);
599 +- if (!cpuc->shared_regs)
600 +- goto error;
601 +- }
602 + cpuc->is_fake = 1;
603 ++
604 ++ if (intel_cpuc_prepare(cpuc, cpu))
605 ++ goto error;
606 ++
607 + return cpuc;
608 + error:
609 + free_fake_cpuc(cpuc);
610 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
611 +index ede20c44cc69..3d77c736299f 100644
612 +--- a/arch/x86/events/intel/core.c
613 ++++ b/arch/x86/events/intel/core.c
614 +@@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added)
615 + intel_pmu_enable_all(added);
616 + }
617 +
618 ++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
619 ++{
620 ++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
621 ++
622 ++ if (cpuc->tfa_shadow != val) {
623 ++ cpuc->tfa_shadow = val;
624 ++ wrmsrl(MSR_TSX_FORCE_ABORT, val);
625 ++ }
626 ++}
627 ++
628 ++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
629 ++{
630 ++ /*
631 ++ * We're going to use PMC3, make sure TFA is set before we touch it.
632 ++ */
633 ++ if (cntr == 3 && !cpuc->is_fake)
634 ++ intel_set_tfa(cpuc, true);
635 ++}
636 ++
637 ++static void intel_tfa_pmu_enable_all(int added)
638 ++{
639 ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
640 ++
641 ++ /*
642 ++ * If we find PMC3 is no longer used when we enable the PMU, we can
643 ++ * clear TFA.
644 ++ */
645 ++ if (!test_bit(3, cpuc->active_mask))
646 ++ intel_set_tfa(cpuc, false);
647 ++
648 ++ intel_pmu_enable_all(added);
649 ++}
650 ++
651 + static void enable_counter_freeze(void)
652 + {
653 + update_debugctlmsr(get_debugctlmsr() |
654 +@@ -2768,6 +2801,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
655 + raw_spin_unlock(&excl_cntrs->lock);
656 + }
657 +
658 ++static struct event_constraint *
659 ++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
660 ++{
661 ++ WARN_ON_ONCE(!cpuc->constraint_list);
662 ++
663 ++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
664 ++ struct event_constraint *cx;
665 ++
666 ++ /*
667 ++ * grab pre-allocated constraint entry
668 ++ */
669 ++ cx = &cpuc->constraint_list[idx];
670 ++
671 ++ /*
672 ++ * initialize dynamic constraint
673 ++ * with static constraint
674 ++ */
675 ++ *cx = *c;
676 ++
677 ++ /*
678 ++ * mark constraint as dynamic
679 ++ */
680 ++ cx->flags |= PERF_X86_EVENT_DYNAMIC;
681 ++ c = cx;
682 ++ }
683 ++
684 ++ return c;
685 ++}
686 ++
687 + static struct event_constraint *
688 + intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
689 + int idx, struct event_constraint *c)
690 +@@ -2798,27 +2860,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
691 + * only needed when constraint has not yet
692 + * been cloned (marked dynamic)
693 + */
694 +- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
695 +- struct event_constraint *cx;
696 +-
697 +- /*
698 +- * grab pre-allocated constraint entry
699 +- */
700 +- cx = &cpuc->constraint_list[idx];
701 +-
702 +- /*
703 +- * initialize dynamic constraint
704 +- * with static constraint
705 +- */
706 +- *cx = *c;
707 +-
708 +- /*
709 +- * mark constraint as dynamic, so we
710 +- * can free it later on
711 +- */
712 +- cx->flags |= PERF_X86_EVENT_DYNAMIC;
713 +- c = cx;
714 +- }
715 ++ c = dyn_constraint(cpuc, c, idx);
716 +
717 + /*
718 + * From here on, the constraint is dynamic.
719 +@@ -3345,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
720 + return c;
721 + }
722 +
723 ++static bool allow_tsx_force_abort = true;
724 ++
725 ++static struct event_constraint *
726 ++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
727 ++ struct perf_event *event)
728 ++{
729 ++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
730 ++
731 ++ /*
732 ++ * Without TFA we must not use PMC3.
733 ++ */
734 ++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
735 ++ c = dyn_constraint(cpuc, c, idx);
736 ++ c->idxmsk64 &= ~(1ULL << 3);
737 ++ c->weight--;
738 ++ }
739 ++
740 ++ return c;
741 ++}
742 ++
743 + /*
744 + * Broadwell:
745 + *
746 +@@ -3398,7 +3460,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
747 + return x86_event_sysfs_show(page, config, event);
748 + }
749 +
750 +-struct intel_shared_regs *allocate_shared_regs(int cpu)
751 ++static struct intel_shared_regs *allocate_shared_regs(int cpu)
752 + {
753 + struct intel_shared_regs *regs;
754 + int i;
755 +@@ -3430,23 +3492,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
756 + return c;
757 + }
758 +
759 +-static int intel_pmu_cpu_prepare(int cpu)
760 +-{
761 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
762 +
763 ++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
764 ++{
765 + if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
766 + cpuc->shared_regs = allocate_shared_regs(cpu);
767 + if (!cpuc->shared_regs)
768 + goto err;
769 + }
770 +
771 +- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
772 ++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
773 + size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
774 +
775 +- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
776 ++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
777 + if (!cpuc->constraint_list)
778 + goto err_shared_regs;
779 ++ }
780 +
781 ++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
782 + cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
783 + if (!cpuc->excl_cntrs)
784 + goto err_constraint_list;
785 +@@ -3468,6 +3531,11 @@ err:
786 + return -ENOMEM;
787 + }
788 +
789 ++static int intel_pmu_cpu_prepare(int cpu)
790 ++{
791 ++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
792 ++}
793 ++
794 + static void flip_smm_bit(void *data)
795 + {
796 + unsigned long set = *(unsigned long *)data;
797 +@@ -3542,9 +3610,8 @@ static void intel_pmu_cpu_starting(int cpu)
798 + }
799 + }
800 +
801 +-static void free_excl_cntrs(int cpu)
802 ++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
803 + {
804 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
805 + struct intel_excl_cntrs *c;
806 +
807 + c = cpuc->excl_cntrs;
808 +@@ -3552,9 +3619,10 @@ static void free_excl_cntrs(int cpu)
809 + if (c->core_id == -1 || --c->refcnt == 0)
810 + kfree(c);
811 + cpuc->excl_cntrs = NULL;
812 +- kfree(cpuc->constraint_list);
813 +- cpuc->constraint_list = NULL;
814 + }
815 ++
816 ++ kfree(cpuc->constraint_list);
817 ++ cpuc->constraint_list = NULL;
818 + }
819 +
820 + static void intel_pmu_cpu_dying(int cpu)
821 +@@ -3565,9 +3633,8 @@ static void intel_pmu_cpu_dying(int cpu)
822 + disable_counter_freeze();
823 + }
824 +
825 +-static void intel_pmu_cpu_dead(int cpu)
826 ++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
827 + {
828 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
829 + struct intel_shared_regs *pc;
830 +
831 + pc = cpuc->shared_regs;
832 +@@ -3577,7 +3644,12 @@ static void intel_pmu_cpu_dead(int cpu)
833 + cpuc->shared_regs = NULL;
834 + }
835 +
836 +- free_excl_cntrs(cpu);
837 ++ free_excl_cntrs(cpuc);
838 ++}
839 ++
840 ++static void intel_pmu_cpu_dead(int cpu)
841 ++{
842 ++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
843 + }
844 +
845 + static void intel_pmu_sched_task(struct perf_event_context *ctx,
846 +@@ -4070,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
847 + NULL
848 + };
849 +
850 ++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
851 ++
852 + static struct attribute *intel_pmu_attrs[] = {
853 + &dev_attr_freeze_on_smi.attr,
854 ++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
855 + NULL,
856 + };
857 +
858 +@@ -4564,6 +4639,15 @@ __init int intel_pmu_init(void)
859 + tsx_attr = hsw_tsx_events_attrs;
860 + intel_pmu_pebs_data_source_skl(
861 + boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
862 ++
863 ++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
864 ++ x86_pmu.flags |= PMU_FL_TFA;
865 ++ x86_pmu.get_event_constraints = tfa_get_event_constraints;
866 ++ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
867 ++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
868 ++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
869 ++ }
870 ++
871 + pr_cont("Skylake events, ");
872 + name = "skylake";
873 + break;
874 +@@ -4715,7 +4799,7 @@ static __init int fixup_ht_bug(void)
875 + hardlockup_detector_perf_restart();
876 +
877 + for_each_online_cpu(c)
878 +- free_excl_cntrs(c);
879 ++ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
880 +
881 + cpus_read_unlock();
882 + pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
883 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
884 +index d46fd6754d92..a345d079f876 100644
885 +--- a/arch/x86/events/perf_event.h
886 ++++ b/arch/x86/events/perf_event.h
887 +@@ -242,6 +242,11 @@ struct cpu_hw_events {
888 + struct intel_excl_cntrs *excl_cntrs;
889 + int excl_thread_id; /* 0 or 1 */
890 +
891 ++ /*
892 ++ * SKL TSX_FORCE_ABORT shadow
893 ++ */
894 ++ u64 tfa_shadow;
895 ++
896 + /*
897 + * AMD specific bits
898 + */
899 +@@ -681,6 +686,7 @@ do { \
900 + #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
901 + #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
902 + #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
903 ++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
904 +
905 + #define EVENT_VAR(_id) event_attr_##_id
906 + #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
907 +@@ -889,7 +895,8 @@ struct event_constraint *
908 + x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
909 + struct perf_event *event);
910 +
911 +-struct intel_shared_regs *allocate_shared_regs(int cpu);
912 ++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
913 ++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
914 +
915 + int intel_pmu_init(void);
916 +
917 +@@ -1025,9 +1032,13 @@ static inline int intel_pmu_init(void)
918 + return 0;
919 + }
920 +
921 +-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
922 ++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
923 ++{
924 ++ return 0;
925 ++}
926 ++
927 ++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
928 + {
929 +- return NULL;
930 + }
931 +
932 + static inline int is_ht_workaround_enabled(void)
933 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
934 +index 28c4a502b419..9246a6715cf2 100644
935 +--- a/arch/x86/include/asm/cpufeatures.h
936 ++++ b/arch/x86/include/asm/cpufeatures.h
937 +@@ -342,6 +342,7 @@
938 + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
939 + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
940 + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
941 ++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
942 + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
943 + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
944 + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
945 +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
946 +index 0dd6b0f4000e..d9a9993af882 100644
947 +--- a/arch/x86/include/asm/intel-family.h
948 ++++ b/arch/x86/include/asm/intel-family.h
949 +@@ -6,7 +6,7 @@
950 + * "Big Core" Processors (Branded as Core, Xeon, etc...)
951 + *
952 + * The "_X" parts are generally the EP and EX Xeons, or the
953 +- * "Extreme" ones, like Broadwell-E.
954 ++ * "Extreme" ones, like Broadwell-E, or Atom microserver.
955 + *
956 + * While adding a new CPUID for a new microarchitecture, add a new
957 + * group to keep logically sorted out in chronological order. Within
958 +@@ -71,6 +71,7 @@
959 + #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
960 + #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
961 + #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
962 ++#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
963 +
964 + /* Xeon Phi */
965 +
966 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
967 +index 9e39cc8bd989..ea192e402abe 100644
968 +--- a/arch/x86/include/asm/msr-index.h
969 ++++ b/arch/x86/include/asm/msr-index.h
970 +@@ -630,6 +630,12 @@
971 +
972 + #define MSR_IA32_TSC_DEADLINE 0x000006E0
973 +
974 ++
975 ++#define MSR_TSX_FORCE_ABORT 0x0000010F
976 ++
977 ++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
978 ++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
979 ++
980 + /* P4/Xeon+ specific */
981 + #define MSR_IA32_MCG_EAX 0x00000180
982 + #define MSR_IA32_MCG_EBX 0x00000181
983 +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
984 +index 8f657286d599..0ce558a8150d 100644
985 +--- a/arch/x86/include/asm/page_64_types.h
986 ++++ b/arch/x86/include/asm/page_64_types.h
987 +@@ -7,7 +7,11 @@
988 + #endif
989 +
990 + #ifdef CONFIG_KASAN
991 ++#ifdef CONFIG_KASAN_EXTRA
992 ++#define KASAN_STACK_ORDER 2
993 ++#else
994 + #define KASAN_STACK_ORDER 1
995 ++#endif
996 + #else
997 + #define KASAN_STACK_ORDER 0
998 + #endif
999 +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
1000 +index 07b5fc00b188..a4e7e100ed26 100644
1001 +--- a/arch/x86/kernel/cpu/microcode/amd.c
1002 ++++ b/arch/x86/kernel/cpu/microcode/amd.c
1003 +@@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
1004 + if (!p) {
1005 + return ret;
1006 + } else {
1007 +- if (boot_cpu_data.microcode == p->patch_id)
1008 ++ if (boot_cpu_data.microcode >= p->patch_id)
1009 + return ret;
1010 +
1011 + ret = UCODE_NEW;
1012 +diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
1013 +index 278cd07228dd..9490a2845f14 100644
1014 +--- a/arch/x86/kernel/kexec-bzimage64.c
1015 ++++ b/arch/x86/kernel/kexec-bzimage64.c
1016 +@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
1017 + struct efi_info *current_ei = &boot_params.efi_info;
1018 + struct efi_info *ei = &params->efi_info;
1019 +
1020 ++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
1021 ++ return 0;
1022 ++
1023 + if (!current_ei->efi_memmap_size)
1024 + return 0;
1025 +
1026 +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
1027 +index 30a5111ae5fd..527e69b12002 100644
1028 +--- a/arch/x86/pci/fixup.c
1029 ++++ b/arch/x86/pci/fixup.c
1030 +@@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev)
1031 + DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1032 + PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
1033 +
1034 ++static void quirk_intel_th_dnv(struct pci_dev *dev)
1035 ++{
1036 ++ struct resource *r = &dev->resource[4];
1037 ++
1038 ++ /*
1039 ++ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
1040 ++ * appears to be 4 MB in reality.
1041 ++ */
1042 ++ if (r->end == r->start + 0x7ff) {
1043 ++ r->start = 0;
1044 ++ r->end = 0x3fffff;
1045 ++ r->flags |= IORESOURCE_UNSET;
1046 ++ }
1047 ++}
1048 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
1049 ++
1050 + #ifdef CONFIG_PHYS_ADDR_T_64BIT
1051 +
1052 + #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
1053 +diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
1054 +index 11fed6c06a7c..b5938160fb3d 100644
1055 +--- a/arch/xtensa/configs/smp_lx200_defconfig
1056 ++++ b/arch/xtensa/configs/smp_lx200_defconfig
1057 +@@ -33,6 +33,7 @@ CONFIG_SMP=y
1058 + CONFIG_HOTPLUG_CPU=y
1059 + # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
1060 + # CONFIG_PCI is not set
1061 ++CONFIG_VECTORS_OFFSET=0x00002000
1062 + CONFIG_XTENSA_PLATFORM_XTFPGA=y
1063 + CONFIG_CMDLINE_BOOL=y
1064 + CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
1065 +diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
1066 +index 9053a5622d2c..5bd38ea2da38 100644
1067 +--- a/arch/xtensa/kernel/head.S
1068 ++++ b/arch/xtensa/kernel/head.S
1069 +@@ -280,12 +280,13 @@ should_never_return:
1070 +
1071 + movi a2, cpu_start_ccount
1072 + 1:
1073 ++ memw
1074 + l32i a3, a2, 0
1075 + beqi a3, 0, 1b
1076 + movi a3, 0
1077 + s32i a3, a2, 0
1078 +- memw
1079 + 1:
1080 ++ memw
1081 + l32i a3, a2, 0
1082 + beqi a3, 0, 1b
1083 + wsr a3, ccount
1084 +@@ -321,11 +322,13 @@ ENTRY(cpu_restart)
1085 + rsr a0, prid
1086 + neg a2, a0
1087 + movi a3, cpu_start_id
1088 ++ memw
1089 + s32i a2, a3, 0
1090 + #if XCHAL_DCACHE_IS_WRITEBACK
1091 + dhwbi a3, 0
1092 + #endif
1093 + 1:
1094 ++ memw
1095 + l32i a2, a3, 0
1096 + dhi a3, 0
1097 + bne a2, a0, 1b
1098 +diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
1099 +index 932d64689bac..be1f280c322c 100644
1100 +--- a/arch/xtensa/kernel/smp.c
1101 ++++ b/arch/xtensa/kernel/smp.c
1102 +@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1103 + {
1104 + unsigned i;
1105 +
1106 +- for (i = 0; i < max_cpus; ++i)
1107 ++ for_each_possible_cpu(i)
1108 + set_cpu_present(i, true);
1109 + }
1110 +
1111 +@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
1112 + pr_info("%s: Core Count = %d\n", __func__, ncpus);
1113 + pr_info("%s: Core Id = %d\n", __func__, core_id);
1114 +
1115 ++ if (ncpus > NR_CPUS) {
1116 ++ ncpus = NR_CPUS;
1117 ++ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
1118 ++ }
1119 ++
1120 + for (i = 0; i < ncpus; ++i)
1121 + set_cpu_possible(i, true);
1122 + }
1123 +@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
1124 + int i;
1125 +
1126 + #ifdef CONFIG_HOTPLUG_CPU
1127 +- cpu_start_id = cpu;
1128 +- system_flush_invalidate_dcache_range(
1129 +- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
1130 ++ WRITE_ONCE(cpu_start_id, cpu);
1131 ++ /* Pairs with the third memw in the cpu_restart */
1132 ++ mb();
1133 ++ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
1134 ++ sizeof(cpu_start_id));
1135 + #endif
1136 + smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
1137 +
1138 +@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
1139 + ccount = get_ccount();
1140 + while (!ccount);
1141 +
1142 +- cpu_start_ccount = ccount;
1143 ++ WRITE_ONCE(cpu_start_ccount, ccount);
1144 +
1145 +- while (time_before(jiffies, timeout)) {
1146 ++ do {
1147 ++ /*
1148 ++ * Pairs with the first two memws in the
1149 ++ * .Lboot_secondary.
1150 ++ */
1151 + mb();
1152 +- if (!cpu_start_ccount)
1153 +- break;
1154 +- }
1155 ++ ccount = READ_ONCE(cpu_start_ccount);
1156 ++ } while (ccount && time_before(jiffies, timeout));
1157 +
1158 +- if (cpu_start_ccount) {
1159 ++ if (ccount) {
1160 + smp_call_function_single(0, mx_cpu_stop,
1161 +- (void *)cpu, 1);
1162 +- cpu_start_ccount = 0;
1163 ++ (void *)cpu, 1);
1164 ++ WRITE_ONCE(cpu_start_ccount, 0);
1165 + return -EIO;
1166 + }
1167 + }
1168 +@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
1169 + pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
1170 + __func__, cpu, idle, start_info.stack);
1171 +
1172 ++ init_completion(&cpu_running);
1173 + ret = boot_secondary(cpu, idle);
1174 + if (ret == 0) {
1175 + wait_for_completion_timeout(&cpu_running,
1176 +@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
1177 + unsigned long timeout = jiffies + msecs_to_jiffies(1000);
1178 + while (time_before(jiffies, timeout)) {
1179 + system_invalidate_dcache_range((unsigned long)&cpu_start_id,
1180 +- sizeof(cpu_start_id));
1181 +- if (cpu_start_id == -cpu) {
1182 ++ sizeof(cpu_start_id));
1183 ++ /* Pairs with the second memw in the cpu_restart */
1184 ++ mb();
1185 ++ if (READ_ONCE(cpu_start_id) == -cpu) {
1186 + platform_cpu_kill(cpu);
1187 + return;
1188 + }
1189 +diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
1190 +index fd524a54d2ab..378186b5eb40 100644
1191 +--- a/arch/xtensa/kernel/time.c
1192 ++++ b/arch/xtensa/kernel/time.c
1193 +@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
1194 + container_of(evt, struct ccount_timer, evt);
1195 +
1196 + if (timer->irq_enabled) {
1197 +- disable_irq(evt->irq);
1198 ++ disable_irq_nosync(evt->irq);
1199 + timer->irq_enabled = 0;
1200 + }
1201 + return 0;
1202 +diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
1203 +index 38c35c32aff2..c1c72b42dda0 100644
1204 +--- a/block/blk-iolatency.c
1205 ++++ b/block/blk-iolatency.c
1206 +@@ -72,6 +72,7 @@
1207 + #include <linux/sched/loadavg.h>
1208 + #include <linux/sched/signal.h>
1209 + #include <trace/events/block.h>
1210 ++#include <linux/blk-mq.h>
1211 + #include "blk-rq-qos.h"
1212 + #include "blk-stat.h"
1213 +
1214 +@@ -648,6 +649,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
1215 + return;
1216 +
1217 + enabled = blk_iolatency_enabled(iolat->blkiolat);
1218 ++ if (!enabled)
1219 ++ return;
1220 ++
1221 + while (blkg && blkg->parent) {
1222 + iolat = blkg_to_lat(blkg);
1223 + if (!iolat) {
1224 +@@ -657,7 +661,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
1225 + rqw = &iolat->rq_wait;
1226 +
1227 + atomic_dec(&rqw->inflight);
1228 +- if (!enabled || iolat->min_lat_nsec == 0)
1229 ++ if (iolat->min_lat_nsec == 0)
1230 + goto next;
1231 + iolatency_record_time(iolat, &bio->bi_issue, now,
1232 + issue_as_root);
1233 +@@ -801,10 +805,13 @@ int blk_iolatency_init(struct request_queue *q)
1234 + return 0;
1235 + }
1236 +
1237 +-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1238 ++/*
1239 ++ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
1240 ++ * return 0.
1241 ++ */
1242 ++static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1243 + {
1244 + struct iolatency_grp *iolat = blkg_to_lat(blkg);
1245 +- struct blk_iolatency *blkiolat = iolat->blkiolat;
1246 + u64 oldval = iolat->min_lat_nsec;
1247 +
1248 + iolat->min_lat_nsec = val;
1249 +@@ -813,9 +820,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1250 + BLKIOLATENCY_MAX_WIN_SIZE);
1251 +
1252 + if (!oldval && val)
1253 +- atomic_inc(&blkiolat->enabled);
1254 ++ return 1;
1255 + if (oldval && !val)
1256 +- atomic_dec(&blkiolat->enabled);
1257 ++ return -1;
1258 ++ return 0;
1259 + }
1260 +
1261 + static void iolatency_clear_scaling(struct blkcg_gq *blkg)
1262 +@@ -847,6 +855,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1263 + u64 lat_val = 0;
1264 + u64 oldval;
1265 + int ret;
1266 ++ int enable = 0;
1267 +
1268 + ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
1269 + if (ret)
1270 +@@ -881,7 +890,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1271 + blkg = ctx.blkg;
1272 + oldval = iolat->min_lat_nsec;
1273 +
1274 +- iolatency_set_min_lat_nsec(blkg, lat_val);
1275 ++ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
1276 ++ if (enable) {
1277 ++ WARN_ON_ONCE(!blk_get_queue(blkg->q));
1278 ++ blkg_get(blkg);
1279 ++ }
1280 ++
1281 + if (oldval != iolat->min_lat_nsec) {
1282 + iolatency_clear_scaling(blkg);
1283 + }
1284 +@@ -889,6 +903,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1285 + ret = 0;
1286 + out:
1287 + blkg_conf_finish(&ctx);
1288 ++ if (ret == 0 && enable) {
1289 ++ struct iolatency_grp *tmp = blkg_to_lat(blkg);
1290 ++ struct blk_iolatency *blkiolat = tmp->blkiolat;
1291 ++
1292 ++ blk_mq_freeze_queue(blkg->q);
1293 ++
1294 ++ if (enable == 1)
1295 ++ atomic_inc(&blkiolat->enabled);
1296 ++ else if (enable == -1)
1297 ++ atomic_dec(&blkiolat->enabled);
1298 ++ else
1299 ++ WARN_ON_ONCE(1);
1300 ++
1301 ++ blk_mq_unfreeze_queue(blkg->q);
1302 ++
1303 ++ blkg_put(blkg);
1304 ++ blk_put_queue(blkg->q);
1305 ++ }
1306 + return ret ?: nbytes;
1307 + }
1308 +
1309 +@@ -1024,8 +1056,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
1310 + {
1311 + struct iolatency_grp *iolat = pd_to_lat(pd);
1312 + struct blkcg_gq *blkg = lat_to_blkg(iolat);
1313 ++ struct blk_iolatency *blkiolat = iolat->blkiolat;
1314 ++ int ret;
1315 +
1316 +- iolatency_set_min_lat_nsec(blkg, 0);
1317 ++ ret = iolatency_set_min_lat_nsec(blkg, 0);
1318 ++ if (ret == 1)
1319 ++ atomic_inc(&blkiolat->enabled);
1320 ++ if (ret == -1)
1321 ++ atomic_dec(&blkiolat->enabled);
1322 + iolatency_clear_scaling(blkg);
1323 + }
1324 +
1325 +diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
1326 +index f133b7f5652f..26110e74e086 100644
1327 +--- a/drivers/clk/qcom/gcc-sdm845.c
1328 ++++ b/drivers/clk/qcom/gcc-sdm845.c
1329 +@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
1330 + "core_bi_pll_test_se",
1331 + };
1332 +
1333 +-static const char * const gcc_parent_names_7[] = {
1334 +- "bi_tcxo",
1335 ++static const char * const gcc_parent_names_7_ao[] = {
1336 ++ "bi_tcxo_ao",
1337 + "gpll0",
1338 + "gpll0_out_even",
1339 + "core_bi_pll_test_se",
1340 +@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
1341 + "core_bi_pll_test_se",
1342 + };
1343 +
1344 ++static const char * const gcc_parent_names_8_ao[] = {
1345 ++ "bi_tcxo_ao",
1346 ++ "gpll0",
1347 ++ "core_bi_pll_test_se",
1348 ++};
1349 ++
1350 + static const struct parent_map gcc_parent_map_10[] = {
1351 + { P_BI_TCXO, 0 },
1352 + { P_GPLL0_OUT_MAIN, 1 },
1353 +@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
1354 + .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
1355 + .clkr.hw.init = &(struct clk_init_data){
1356 + .name = "gcc_cpuss_ahb_clk_src",
1357 +- .parent_names = gcc_parent_names_7,
1358 ++ .parent_names = gcc_parent_names_7_ao,
1359 + .num_parents = 4,
1360 + .ops = &clk_rcg2_ops,
1361 + },
1362 +@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
1363 + .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
1364 + .clkr.hw.init = &(struct clk_init_data){
1365 + .name = "gcc_cpuss_rbcpr_clk_src",
1366 +- .parent_names = gcc_parent_names_8,
1367 ++ .parent_names = gcc_parent_names_8_ao,
1368 + .num_parents = 3,
1369 + .ops = &clk_rcg2_ops,
1370 + },
1371 +diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
1372 +index 8d77090ad94a..0241450f3eb3 100644
1373 +--- a/drivers/clk/ti/divider.c
1374 ++++ b/drivers/clk/ti/divider.c
1375 +@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
1376 + num_dividers = i;
1377 +
1378 + tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
1379 +- if (!tmp)
1380 ++ if (!tmp) {
1381 ++ *table = ERR_PTR(-ENOMEM);
1382 + return -ENOMEM;
1383 ++ }
1384 +
1385 + valid_div = 0;
1386 + *width = 0;
1387 +@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
1388 + {
1389 + struct clk_omap_divider *div;
1390 + struct clk_omap_reg *reg;
1391 ++ int ret;
1392 +
1393 + if (!setup)
1394 + return NULL;
1395 +@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
1396 + div->flags |= CLK_DIVIDER_POWER_OF_TWO;
1397 +
1398 + div->table = _get_div_table_from_setup(setup, &div->width);
1399 ++ if (IS_ERR(div->table)) {
1400 ++ ret = PTR_ERR(div->table);
1401 ++ kfree(div);
1402 ++ return ERR_PTR(ret);
1403 ++ }
1404 ++
1405 +
1406 + div->shift = setup->bit_shift;
1407 + div->latch = -EINVAL;
1408 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1409 +index 4e557684f792..fe69dccfa0c0 100644
1410 +--- a/drivers/dma/at_xdmac.c
1411 ++++ b/drivers/dma/at_xdmac.c
1412 +@@ -203,6 +203,7 @@ struct at_xdmac_chan {
1413 + u32 save_cim;
1414 + u32 save_cnda;
1415 + u32 save_cndc;
1416 ++ u32 irq_status;
1417 + unsigned long status;
1418 + struct tasklet_struct tasklet;
1419 + struct dma_slave_config sconfig;
1420 +@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
1421 + struct at_xdmac_desc *desc;
1422 + u32 error_mask;
1423 +
1424 +- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
1425 +- __func__, atchan->status);
1426 ++ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1427 ++ __func__, atchan->irq_status);
1428 +
1429 + error_mask = AT_XDMAC_CIS_RBEIS
1430 + | AT_XDMAC_CIS_WBEIS
1431 +@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
1432 +
1433 + if (at_xdmac_chan_is_cyclic(atchan)) {
1434 + at_xdmac_handle_cyclic(atchan);
1435 +- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
1436 +- || (atchan->status & error_mask)) {
1437 ++ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1438 ++ || (atchan->irq_status & error_mask)) {
1439 + struct dma_async_tx_descriptor *txd;
1440 +
1441 +- if (atchan->status & AT_XDMAC_CIS_RBEIS)
1442 ++ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1443 + dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1444 +- if (atchan->status & AT_XDMAC_CIS_WBEIS)
1445 ++ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1446 + dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1447 +- if (atchan->status & AT_XDMAC_CIS_ROIS)
1448 ++ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1449 + dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1450 +
1451 + spin_lock(&atchan->lock);
1452 +@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1453 + atchan = &atxdmac->chan[i];
1454 + chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1455 + chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1456 +- atchan->status = chan_status & chan_imr;
1457 ++ atchan->irq_status = chan_status & chan_imr;
1458 + dev_vdbg(atxdmac->dma.dev,
1459 + "%s: chan%d: imr=0x%x, status=0x%x\n",
1460 + __func__, i, chan_imr, chan_status);
1461 +@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1462 + at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1463 + at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1464 +
1465 +- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1466 ++ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1467 + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1468 +
1469 + tasklet_schedule(&atchan->tasklet);
1470 +diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
1471 +index aa1712beb0cc..7b7fba0c9253 100644
1472 +--- a/drivers/dma/dmatest.c
1473 ++++ b/drivers/dma/dmatest.c
1474 +@@ -642,11 +642,9 @@ static int dmatest_func(void *data)
1475 + srcs[i] = um->addr[i] + src_off;
1476 + ret = dma_mapping_error(dev->dev, um->addr[i]);
1477 + if (ret) {
1478 +- dmaengine_unmap_put(um);
1479 + result("src mapping error", total_tests,
1480 + src_off, dst_off, len, ret);
1481 +- failed_tests++;
1482 +- continue;
1483 ++ goto error_unmap_continue;
1484 + }
1485 + um->to_cnt++;
1486 + }
1487 +@@ -661,11 +659,9 @@ static int dmatest_func(void *data)
1488 + DMA_BIDIRECTIONAL);
1489 + ret = dma_mapping_error(dev->dev, dsts[i]);
1490 + if (ret) {
1491 +- dmaengine_unmap_put(um);
1492 + result("dst mapping error", total_tests,
1493 + src_off, dst_off, len, ret);
1494 +- failed_tests++;
1495 +- continue;
1496 ++ goto error_unmap_continue;
1497 + }
1498 + um->bidi_cnt++;
1499 + }
1500 +@@ -693,12 +689,10 @@ static int dmatest_func(void *data)
1501 + }
1502 +
1503 + if (!tx) {
1504 +- dmaengine_unmap_put(um);
1505 + result("prep error", total_tests, src_off,
1506 + dst_off, len, ret);
1507 + msleep(100);
1508 +- failed_tests++;
1509 +- continue;
1510 ++ goto error_unmap_continue;
1511 + }
1512 +
1513 + done->done = false;
1514 +@@ -707,12 +701,10 @@ static int dmatest_func(void *data)
1515 + cookie = tx->tx_submit(tx);
1516 +
1517 + if (dma_submit_error(cookie)) {
1518 +- dmaengine_unmap_put(um);
1519 + result("submit error", total_tests, src_off,
1520 + dst_off, len, ret);
1521 + msleep(100);
1522 +- failed_tests++;
1523 +- continue;
1524 ++ goto error_unmap_continue;
1525 + }
1526 + dma_async_issue_pending(chan);
1527 +
1528 +@@ -725,16 +717,14 @@ static int dmatest_func(void *data)
1529 + dmaengine_unmap_put(um);
1530 + result("test timed out", total_tests, src_off, dst_off,
1531 + len, 0);
1532 +- failed_tests++;
1533 +- continue;
1534 ++ goto error_unmap_continue;
1535 + } else if (status != DMA_COMPLETE) {
1536 + dmaengine_unmap_put(um);
1537 + result(status == DMA_ERROR ?
1538 + "completion error status" :
1539 + "completion busy status", total_tests, src_off,
1540 + dst_off, len, ret);
1541 +- failed_tests++;
1542 +- continue;
1543 ++ goto error_unmap_continue;
1544 + }
1545 +
1546 + dmaengine_unmap_put(um);
1547 +@@ -779,6 +769,12 @@ static int dmatest_func(void *data)
1548 + verbose_result("test passed", total_tests, src_off,
1549 + dst_off, len, 0);
1550 + }
1551 ++
1552 ++ continue;
1553 ++
1554 ++error_unmap_continue:
1555 ++ dmaengine_unmap_put(um);
1556 ++ failed_tests++;
1557 + }
1558 + ktime = ktime_sub(ktime_get(), ktime);
1559 + ktime = ktime_sub(ktime, comparetime);
1560 +diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
1561 +index 6bc8e6640d71..c51462f5aa1e 100644
1562 +--- a/drivers/firmware/iscsi_ibft.c
1563 ++++ b/drivers/firmware/iscsi_ibft.c
1564 +@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
1565 + case ISCSI_BOOT_TGT_NIC_ASSOC:
1566 + case ISCSI_BOOT_TGT_CHAP_TYPE:
1567 + rc = S_IRUGO;
1568 ++ break;
1569 + case ISCSI_BOOT_TGT_NAME:
1570 + if (tgt->tgt_name_len)
1571 + rc = S_IRUGO;
1572 +diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
1573 +index 5960396c8d9a..222170a1715f 100644
1574 +--- a/drivers/gpio/gpio-vf610.c
1575 ++++ b/drivers/gpio/gpio-vf610.c
1576 +@@ -250,6 +250,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1577 + struct vf610_gpio_port *port;
1578 + struct resource *iores;
1579 + struct gpio_chip *gc;
1580 ++ int i;
1581 + int ret;
1582 +
1583 + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
1584 +@@ -289,6 +290,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1585 + if (ret < 0)
1586 + return ret;
1587 +
1588 ++ /* Mask all GPIO interrupts */
1589 ++ for (i = 0; i < gc->ngpio; i++)
1590 ++ vf610_gpio_writel(0, port->base + PORT_PCR(i));
1591 ++
1592 + /* Clear the interrupt status register for all GPIO's */
1593 + vf610_gpio_writel(~0, port->base + PORT_ISFR);
1594 +
1595 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1596 +index 59cc678de8c1..bbac15fd8caa 100644
1597 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1598 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1599 +@@ -1671,7 +1671,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1600 + effective_mode &= ~S_IWUSR;
1601 +
1602 + if ((adev->flags & AMD_IS_APU) &&
1603 +- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1604 ++ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1605 ++ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1606 + attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1607 + attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1608 + return 0;
1609 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1610 +index e45e929aaab5..90a5970af4b7 100644
1611 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1612 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1613 +@@ -38,6 +38,7 @@
1614 + #include "amdgpu_gem.h"
1615 + #include <drm/amdgpu_drm.h>
1616 + #include <linux/dma-buf.h>
1617 ++#include <linux/dma-fence-array.h>
1618 +
1619 + static const struct dma_buf_ops amdgpu_dmabuf_ops;
1620 +
1621 +@@ -189,6 +190,48 @@ error:
1622 + return ERR_PTR(ret);
1623 + }
1624 +
1625 ++static int
1626 ++__reservation_object_make_exclusive(struct reservation_object *obj)
1627 ++{
1628 ++ struct dma_fence **fences;
1629 ++ unsigned int count;
1630 ++ int r;
1631 ++
1632 ++ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
1633 ++ return 0;
1634 ++
1635 ++ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
1636 ++ if (r)
1637 ++ return r;
1638 ++
1639 ++ if (count == 0) {
1640 ++ /* Now that was unexpected. */
1641 ++ } else if (count == 1) {
1642 ++ reservation_object_add_excl_fence(obj, fences[0]);
1643 ++ dma_fence_put(fences[0]);
1644 ++ kfree(fences);
1645 ++ } else {
1646 ++ struct dma_fence_array *array;
1647 ++
1648 ++ array = dma_fence_array_create(count, fences,
1649 ++ dma_fence_context_alloc(1), 0,
1650 ++ false);
1651 ++ if (!array)
1652 ++ goto err_fences_put;
1653 ++
1654 ++ reservation_object_add_excl_fence(obj, &array->base);
1655 ++ dma_fence_put(&array->base);
1656 ++ }
1657 ++
1658 ++ return 0;
1659 ++
1660 ++err_fences_put:
1661 ++ while (count--)
1662 ++ dma_fence_put(fences[count]);
1663 ++ kfree(fences);
1664 ++ return -ENOMEM;
1665 ++}
1666 ++
1667 + /**
1668 + * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
1669 + * @dma_buf: Shared DMA buffer
1670 +@@ -220,16 +263,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
1671 +
1672 + if (attach->dev->driver != adev->dev->driver) {
1673 + /*
1674 +- * Wait for all shared fences to complete before we switch to future
1675 +- * use of exclusive fence on this prime shared bo.
1676 ++ * We only create shared fences for internal use, but importers
1677 ++ * of the dmabuf rely on exclusive fences for implicitly
1678 ++ * tracking write hazards. As any of the current fences may
1679 ++ * correspond to a write, we need to convert all existing
1680 ++ * fences on the reservation object into a single exclusive
1681 ++ * fence.
1682 + */
1683 +- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
1684 +- true, false,
1685 +- MAX_SCHEDULE_TIMEOUT);
1686 +- if (unlikely(r < 0)) {
1687 +- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
1688 ++ r = __reservation_object_make_exclusive(bo->tbo.resv);
1689 ++ if (r)
1690 + goto error_unreserve;
1691 +- }
1692 + }
1693 +
1694 + /* pin buffer into GTT */
1695 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1696 +index 62df4bd0a0fc..16c83155ef5c 100644
1697 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1698 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1699 +@@ -3405,14 +3405,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
1700 + struct amdgpu_task_info *task_info)
1701 + {
1702 + struct amdgpu_vm *vm;
1703 ++ unsigned long flags;
1704 +
1705 +- spin_lock(&adev->vm_manager.pasid_lock);
1706 ++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
1707 +
1708 + vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
1709 + if (vm)
1710 + *task_info = vm->task_info;
1711 +
1712 +- spin_unlock(&adev->vm_manager.pasid_lock);
1713 ++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
1714 + }
1715 +
1716 + /**
1717 +diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
1718 +index f8cee95d61cc..7d5cbadbe1cb 100644
1719 +--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
1720 ++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
1721 +@@ -92,7 +92,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
1722 + static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
1723 + bool enable)
1724 + {
1725 ++ u32 tmp = 0;
1726 +
1727 ++ if (enable) {
1728 ++ tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
1729 ++ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
1730 ++ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
1731 ++
1732 ++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
1733 ++ lower_32_bits(adev->doorbell.base));
1734 ++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
1735 ++ upper_32_bits(adev->doorbell.base));
1736 ++ }
1737 ++
1738 ++ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
1739 + }
1740 +
1741 + static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
1742 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
1743 +index 4cc0dcb1a187..825d1cae85ab 100644
1744 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
1745 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
1746 +@@ -705,11 +705,13 @@ static int soc15_common_early_init(void *handle)
1747 + break;
1748 + case CHIP_RAVEN:
1749 + if (adev->rev_id >= 0x8)
1750 +- adev->external_rev_id = adev->rev_id + 0x81;
1751 ++ adev->external_rev_id = adev->rev_id + 0x79;
1752 + else if (adev->pdev->device == 0x15d8)
1753 + adev->external_rev_id = adev->rev_id + 0x41;
1754 ++ else if (adev->rev_id == 1)
1755 ++ adev->external_rev_id = adev->rev_id + 0x20;
1756 + else
1757 +- adev->external_rev_id = 0x1;
1758 ++ adev->external_rev_id = adev->rev_id + 0x01;
1759 +
1760 + if (adev->rev_id >= 0x8) {
1761 + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1762 +diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
1763 +index 00a9c2ab9e6c..64fb788b6647 100644
1764 +--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
1765 ++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
1766 +@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
1767 +
1768 + static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1769 + {
1770 +- struct dsi_data *dsi = p;
1771 ++ struct dsi_data *dsi = s->private;
1772 + struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1773 + enum dss_clk_source dispc_clk_src, dsi_clk_src;
1774 + int dsi_module = dsi->module_id;
1775 +@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
1776 + #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1777 + static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1778 + {
1779 +- struct dsi_data *dsi = p;
1780 ++ struct dsi_data *dsi = s->private;
1781 + unsigned long flags;
1782 + struct dsi_irq_stats stats;
1783 +
1784 +@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
1785 +
1786 + static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
1787 + {
1788 +- struct dsi_data *dsi = p;
1789 ++ struct dsi_data *dsi = s->private;
1790 +
1791 + if (dsi_runtime_get(dsi))
1792 + return 0;
1793 +@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
1794 + dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
1795 + dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
1796 + dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
1797 ++ /*
1798 ++ * HACK: These flags should be handled through the omap_dss_device bus
1799 ++ * flags, but this will only be possible once the DSI encoder is
1800 ++ * converted to the omapdrm-managed encoder model.
1801 ++ */
1802 ++ dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
1803 ++ dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
1804 ++ dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
1805 ++ dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
1806 ++ dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
1807 ++ dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
1808 +
1809 + dss_mgr_set_timings(&dsi->output, &dsi->vm);
1810 +
1811 +@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
1812 +
1813 + snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
1814 + dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
1815 +- dsi_dump_dsi_regs, &dsi);
1816 ++ dsi_dump_dsi_regs, dsi);
1817 + #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
1818 + snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
1819 + dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
1820 +- dsi_dump_dsi_irqs, &dsi);
1821 ++ dsi_dump_dsi_irqs, dsi);
1822 + #endif
1823 + snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
1824 + dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
1825 +- dsi_dump_dsi_clocks, &dsi);
1826 ++ dsi_dump_dsi_clocks, dsi);
1827 +
1828 + return 0;
1829 + }
1830 +@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
1831 + dss_debugfs_remove_file(dsi->debugfs.irqs);
1832 + dss_debugfs_remove_file(dsi->debugfs.regs);
1833 +
1834 +- of_platform_depopulate(dev);
1835 +-
1836 + WARN_ON(dsi->scp_clk_refcount > 0);
1837 +
1838 + dss_pll_unregister(&dsi->pll);
1839 +@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
1840 +
1841 + dsi_uninit_output(dsi);
1842 +
1843 ++ of_platform_depopulate(&pdev->dev);
1844 ++
1845 + pm_runtime_disable(&pdev->dev);
1846 +
1847 + if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
1848 +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
1849 +index d587779a80b4..a97294ac96d5 100644
1850 +--- a/drivers/gpu/drm/radeon/ci_dpm.c
1851 ++++ b/drivers/gpu/drm/radeon/ci_dpm.c
1852 +@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
1853 + u16 data_offset, size;
1854 + u8 frev, crev;
1855 + struct ci_power_info *pi;
1856 +- enum pci_bus_speed speed_cap;
1857 ++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1858 + struct pci_dev *root = rdev->pdev->bus->self;
1859 + int ret;
1860 +
1861 +@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
1862 + return -ENOMEM;
1863 + rdev->pm.dpm.priv = pi;
1864 +
1865 +- speed_cap = pcie_get_speed_cap(root);
1866 ++ if (!pci_is_root_bus(rdev->pdev->bus))
1867 ++ speed_cap = pcie_get_speed_cap(root);
1868 + if (speed_cap == PCI_SPEED_UNKNOWN) {
1869 + pi->sys_pcie_mask = 0;
1870 + } else {
1871 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1872 +index 8fb60b3af015..0a785ef0ab66 100644
1873 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1874 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1875 +@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
1876 + struct ni_power_info *ni_pi;
1877 + struct si_power_info *si_pi;
1878 + struct atom_clock_dividers dividers;
1879 +- enum pci_bus_speed speed_cap;
1880 ++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1881 + struct pci_dev *root = rdev->pdev->bus->self;
1882 + int ret;
1883 +
1884 +@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
1885 + eg_pi = &ni_pi->eg;
1886 + pi = &eg_pi->rv7xx;
1887 +
1888 +- speed_cap = pcie_get_speed_cap(root);
1889 ++ if (!pci_is_root_bus(rdev->pdev->bus))
1890 ++ speed_cap = pcie_get_speed_cap(root);
1891 + if (speed_cap == PCI_SPEED_UNKNOWN) {
1892 + si_pi->sys_pcie_mask = 0;
1893 + } else {
1894 +diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1895 +index f949287d926c..4e0562aa2cc9 100644
1896 +--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
1897 ++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1898 +@@ -760,6 +760,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1899 + return PTR_ERR(tcon->sclk0);
1900 + }
1901 + }
1902 ++ clk_prepare_enable(tcon->sclk0);
1903 +
1904 + if (tcon->quirks->has_channel_1) {
1905 + tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
1906 +@@ -774,6 +775,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1907 +
1908 + static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
1909 + {
1910 ++ clk_disable_unprepare(tcon->sclk0);
1911 + clk_disable_unprepare(tcon->clk);
1912 + }
1913 +
1914 +diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1915 +index b1086bfb0465..cd9c65f3d404 100644
1916 +--- a/drivers/i2c/busses/i2c-omap.c
1917 ++++ b/drivers/i2c/busses/i2c-omap.c
1918 +@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1919 + return 0;
1920 + }
1921 +
1922 +-#ifdef CONFIG_PM
1923 +-static int omap_i2c_runtime_suspend(struct device *dev)
1924 ++static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
1925 + {
1926 + struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1927 +
1928 +@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1929 + return 0;
1930 + }
1931 +
1932 +-static int omap_i2c_runtime_resume(struct device *dev)
1933 ++static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
1934 + {
1935 + struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1936 +
1937 +@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
1938 + }
1939 +
1940 + static const struct dev_pm_ops omap_i2c_pm_ops = {
1941 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1942 ++ pm_runtime_force_resume)
1943 + SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
1944 + omap_i2c_runtime_resume, NULL)
1945 + };
1946 +-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1947 +-#else
1948 +-#define OMAP_I2C_PM_OPS NULL
1949 +-#endif /* CONFIG_PM */
1950 +
1951 + static struct platform_driver omap_i2c_driver = {
1952 + .probe = omap_i2c_probe,
1953 + .remove = omap_i2c_remove,
1954 + .driver = {
1955 + .name = "omap_i2c",
1956 +- .pm = OMAP_I2C_PM_OPS,
1957 ++ .pm = &omap_i2c_pm_ops,
1958 + .of_match_table = of_match_ptr(omap_i2c_of_match),
1959 + },
1960 + };
1961 +diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
1962 +index 676c1fd1119d..7ef385db52c3 100644
1963 +--- a/drivers/infiniband/core/umem_odp.c
1964 ++++ b/drivers/infiniband/core/umem_odp.c
1965 +@@ -356,6 +356,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
1966 + umem->writable = 1;
1967 + umem->is_odp = 1;
1968 + odp_data->per_mm = per_mm;
1969 ++ umem->owning_mm = per_mm->mm;
1970 ++ mmgrab(umem->owning_mm);
1971 +
1972 + mutex_init(&odp_data->umem_mutex);
1973 + init_completion(&odp_data->notifier_completion);
1974 +@@ -388,6 +390,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
1975 + out_page_list:
1976 + vfree(odp_data->page_list);
1977 + out_odp_data:
1978 ++ mmdrop(umem->owning_mm);
1979 + kfree(odp_data);
1980 + return ERR_PTR(ret);
1981 + }
1982 +diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
1983 +index 4baa8f4d49de..46bf74375ea6 100644
1984 +--- a/drivers/infiniband/hw/hfi1/ud.c
1985 ++++ b/drivers/infiniband/hw/hfi1/ud.c
1986 +@@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
1987 + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
1988 + wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
1989 + wc.wc_flags = IB_WC_WITH_IMM;
1990 +- tlen -= sizeof(u32);
1991 + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
1992 + wc.ex.imm_data = 0;
1993 + wc.wc_flags = 0;
1994 +diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
1995 +index 4d4c31ea4e2d..90268b838d4e 100644
1996 +--- a/drivers/infiniband/hw/qib/qib_ud.c
1997 ++++ b/drivers/infiniband/hw/qib/qib_ud.c
1998 +@@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
1999 + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
2000 + wc.ex.imm_data = ohdr->u.ud.imm_data;
2001 + wc.wc_flags = IB_WC_WITH_IMM;
2002 +- tlen -= sizeof(u32);
2003 + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
2004 + wc.ex.imm_data = 0;
2005 + wc.wc_flags = 0;
2006 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
2007 +index 1da119d901a9..73e808c1e6ad 100644
2008 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h
2009 ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
2010 +@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
2011 + struct list_head list;
2012 + struct net_device *dev;
2013 + struct ipoib_neigh *neigh;
2014 +- struct ipoib_path *path;
2015 + struct ipoib_tx_buf *tx_ring;
2016 + unsigned int tx_head;
2017 + unsigned int tx_tail;
2018 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2019 +index 0428e01e8f69..aa9dcfc36cd3 100644
2020 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2021 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
2022 +@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
2023 +
2024 + neigh->cm = tx;
2025 + tx->neigh = neigh;
2026 +- tx->path = path;
2027 + tx->dev = dev;
2028 + list_add(&tx->list, &priv->cm.start_list);
2029 + set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
2030 +@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
2031 + neigh->daddr + QPN_AND_OPTIONS_OFFSET);
2032 + goto free_neigh;
2033 + }
2034 +- memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
2035 ++ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
2036 +
2037 + spin_unlock_irqrestore(&priv->lock, flags);
2038 + netif_tx_unlock_bh(dev);
2039 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
2040 +index 225ae6980182..628ef617bb2f 100644
2041 +--- a/drivers/input/mouse/elan_i2c_core.c
2042 ++++ b/drivers/input/mouse/elan_i2c_core.c
2043 +@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
2044 + { "ELAN0000", 0 },
2045 + { "ELAN0100", 0 },
2046 + { "ELAN0600", 0 },
2047 ++ { "ELAN0601", 0 },
2048 + { "ELAN0602", 0 },
2049 + { "ELAN0605", 0 },
2050 + { "ELAN0608", 0 },
2051 +diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
2052 +index 38bfaca48eab..150f9eecaca7 100644
2053 +--- a/drivers/input/tablet/wacom_serial4.c
2054 ++++ b/drivers/input/tablet/wacom_serial4.c
2055 +@@ -187,6 +187,7 @@ enum {
2056 + MODEL_DIGITIZER_II = 0x5544, /* UD */
2057 + MODEL_GRAPHIRE = 0x4554, /* ET */
2058 + MODEL_PENPARTNER = 0x4354, /* CT */
2059 ++ MODEL_ARTPAD_II = 0x4B54, /* KT */
2060 + };
2061 +
2062 + static void wacom_handle_model_response(struct wacom *wacom)
2063 +@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
2064 + wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
2065 + break;
2066 +
2067 ++ case MODEL_ARTPAD_II:
2068 + case MODEL_DIGITIZER_II:
2069 + wacom->dev->name = "Wacom Digitizer II";
2070 + wacom->dev->id.version = MODEL_DIGITIZER_II;
2071 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2072 +index 325f3bad118b..4d2c5d4f586f 100644
2073 +--- a/drivers/iommu/amd_iommu.c
2074 ++++ b/drivers/iommu/amd_iommu.c
2075 +@@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
2076 +
2077 + static void do_detach(struct iommu_dev_data *dev_data)
2078 + {
2079 ++ struct protection_domain *domain = dev_data->domain;
2080 + struct amd_iommu *iommu;
2081 + u16 alias;
2082 +
2083 + iommu = amd_iommu_rlookup_table[dev_data->devid];
2084 + alias = dev_data->alias;
2085 +
2086 +- /* decrease reference counters */
2087 +- dev_data->domain->dev_iommu[iommu->index] -= 1;
2088 +- dev_data->domain->dev_cnt -= 1;
2089 +-
2090 + /* Update data structures */
2091 + dev_data->domain = NULL;
2092 + list_del(&dev_data->list);
2093 +@@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
2094 +
2095 + /* Flush the DTE entry */
2096 + device_flush_dte(dev_data);
2097 ++
2098 ++ /* Flush IOTLB */
2099 ++ domain_flush_tlb_pde(domain);
2100 ++
2101 ++ /* Wait for the flushes to finish */
2102 ++ domain_flush_complete(domain);
2103 ++
2104 ++ /* decrease reference counters - needs to happen after the flushes */
2105 ++ domain->dev_iommu[iommu->index] -= 1;
2106 ++ domain->dev_cnt -= 1;
2107 + }
2108 +
2109 + /*
2110 +@@ -2555,13 +2562,13 @@ out_unmap:
2111 + bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2112 + iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
2113 +
2114 +- if (--mapped_pages)
2115 ++ if (--mapped_pages == 0)
2116 + goto out_free_iova;
2117 + }
2118 + }
2119 +
2120 + out_free_iova:
2121 +- free_iova_fast(&dma_dom->iovad, address, npages);
2122 ++ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
2123 +
2124 + out_err:
2125 + return 0;
2126 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
2127 +index 350f999d205b..c3aba3fc818d 100644
2128 +--- a/drivers/irqchip/irq-gic-v3-its.c
2129 ++++ b/drivers/irqchip/irq-gic-v3-its.c
2130 +@@ -1586,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
2131 + nr_irqs /= 2;
2132 + } while (nr_irqs > 0);
2133 +
2134 ++ if (!nr_irqs)
2135 ++ err = -ENOSPC;
2136 ++
2137 + if (err)
2138 + goto out;
2139 +
2140 +@@ -2065,6 +2068,29 @@ static int __init allocate_lpi_tables(void)
2141 + return 0;
2142 + }
2143 +
2144 ++static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2145 ++{
2146 ++ u32 count = 1000000; /* 1s! */
2147 ++ bool clean;
2148 ++ u64 val;
2149 ++
2150 ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2151 ++ val &= ~GICR_VPENDBASER_Valid;
2152 ++ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2153 ++
2154 ++ do {
2155 ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2156 ++ clean = !(val & GICR_VPENDBASER_Dirty);
2157 ++ if (!clean) {
2158 ++ count--;
2159 ++ cpu_relax();
2160 ++ udelay(1);
2161 ++ }
2162 ++ } while (!clean && count);
2163 ++
2164 ++ return val;
2165 ++}
2166 ++
2167 + static void its_cpu_init_lpis(void)
2168 + {
2169 + void __iomem *rbase = gic_data_rdist_rd_base();
2170 +@@ -2150,6 +2176,30 @@ static void its_cpu_init_lpis(void)
2171 + val |= GICR_CTLR_ENABLE_LPIS;
2172 + writel_relaxed(val, rbase + GICR_CTLR);
2173 +
2174 ++ if (gic_rdists->has_vlpis) {
2175 ++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2176 ++
2177 ++ /*
2178 ++ * It's possible for CPU to receive VLPIs before it is
2179 ++ * scheduled as a vPE, especially for the first CPU, and the
2180 ++ * VLPI with INTID larger than 2^(IDbits+1) will be considered
2181 ++ * as out of range and dropped by GIC.
2182 ++ * So we initialize IDbits to known value to avoid VLPI drop.
2183 ++ */
2184 ++ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2185 ++ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2186 ++ smp_processor_id(), val);
2187 ++ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2188 ++
2189 ++ /*
2190 ++ * Also clear Valid bit of GICR_VPENDBASER, in case some
2191 ++ * ancient programming was left in and could end up
2192 ++ * corrupting memory.
2193 ++ */
2194 ++ val = its_clear_vpend_valid(vlpi_base);
2195 ++ WARN_ON(val & GICR_VPENDBASER_Dirty);
2196 ++ }
2197 ++
2198 + /* Make sure the GIC has seen the above */
2199 + dsb(sy);
2200 + out:
2201 +@@ -2776,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
2202 + static void its_vpe_deschedule(struct its_vpe *vpe)
2203 + {
2204 + void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2205 +- u32 count = 1000000; /* 1s! */
2206 +- bool clean;
2207 + u64 val;
2208 +
2209 +- /* We're being scheduled out */
2210 +- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2211 +- val &= ~GICR_VPENDBASER_Valid;
2212 +- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2213 +-
2214 +- do {
2215 +- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2216 +- clean = !(val & GICR_VPENDBASER_Dirty);
2217 +- if (!clean) {
2218 +- count--;
2219 +- cpu_relax();
2220 +- udelay(1);
2221 +- }
2222 +- } while (!clean && count);
2223 ++ val = its_clear_vpend_valid(vlpi_base);
2224 +
2225 +- if (unlikely(!clean && !count)) {
2226 ++ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2227 + pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2228 + vpe->idai = false;
2229 + vpe->pending_last = true;
2230 +diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
2231 +index 25f32e1d7764..3496b61a312a 100644
2232 +--- a/drivers/irqchip/irq-mmp.c
2233 ++++ b/drivers/irqchip/irq-mmp.c
2234 +@@ -34,6 +34,9 @@
2235 + #define SEL_INT_PENDING (1 << 6)
2236 + #define SEL_INT_NUM_MASK 0x3f
2237 +
2238 ++#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
2239 ++#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
2240 ++
2241 + struct icu_chip_data {
2242 + int nr_irqs;
2243 + unsigned int virq_base;
2244 +@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
2245 + static const struct mmp_intc_conf mmp2_conf = {
2246 + .conf_enable = 0x20,
2247 + .conf_disable = 0x0,
2248 +- .conf_mask = 0x7f,
2249 ++ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
2250 ++ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
2251 + };
2252 +
2253 + static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
2254 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
2255 +index 66a174979b3c..81745644f720 100644
2256 +--- a/drivers/media/rc/rc-main.c
2257 ++++ b/drivers/media/rc/rc-main.c
2258 +@@ -274,6 +274,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
2259 + unsigned int new_keycode)
2260 + {
2261 + int old_keycode = rc_map->scan[index].keycode;
2262 ++ int i;
2263 +
2264 + /* Did the user wish to remove the mapping? */
2265 + if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
2266 +@@ -288,9 +289,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
2267 + old_keycode == KEY_RESERVED ? "New" : "Replacing",
2268 + rc_map->scan[index].scancode, new_keycode);
2269 + rc_map->scan[index].keycode = new_keycode;
2270 ++ __set_bit(new_keycode, dev->input_dev->keybit);
2271 + }
2272 +
2273 + if (old_keycode != KEY_RESERVED) {
2274 ++ /* A previous mapping was updated... */
2275 ++ __clear_bit(old_keycode, dev->input_dev->keybit);
2276 ++ /* ... but another scancode might use the same keycode */
2277 ++ for (i = 0; i < rc_map->len; i++) {
2278 ++ if (rc_map->scan[i].keycode == old_keycode) {
2279 ++ __set_bit(old_keycode, dev->input_dev->keybit);
2280 ++ break;
2281 ++ }
2282 ++ }
2283 ++
2284 + /* Possibly shrink the keytable, failure is not a problem */
2285 + ir_resize_table(dev, rc_map, GFP_ATOMIC);
2286 + }
2287 +@@ -1750,7 +1762,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
2288 + set_bit(EV_REP, dev->input_dev->evbit);
2289 + set_bit(EV_MSC, dev->input_dev->evbit);
2290 + set_bit(MSC_SCAN, dev->input_dev->mscbit);
2291 +- bitmap_fill(dev->input_dev->keybit, KEY_CNT);
2292 +
2293 + /* Pointer/mouse events */
2294 + set_bit(EV_REL, dev->input_dev->evbit);
2295 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
2296 +index 76dc3ee8ca21..5ad4f91662e6 100644
2297 +--- a/drivers/media/usb/uvc/uvc_driver.c
2298 ++++ b/drivers/media/usb/uvc/uvc_driver.c
2299 +@@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
2300 + return -EINVAL;
2301 + }
2302 +
2303 +- /* Make sure the terminal type MSB is not null, otherwise it
2304 +- * could be confused with a unit.
2305 ++ /*
2306 ++ * Reject invalid terminal types that would cause issues:
2307 ++ *
2308 ++ * - The high byte must be non-zero, otherwise it would be
2309 ++ * confused with a unit.
2310 ++ *
2311 ++ * - Bit 15 must be 0, as we use it internally as a terminal
2312 ++ * direction flag.
2313 ++ *
2314 ++ * Other unknown types are accepted.
2315 + */
2316 + type = get_unaligned_le16(&buffer[4]);
2317 +- if ((type & 0xff00) == 0) {
2318 ++ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
2319 + uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
2320 + "interface %d INPUT_TERMINAL %d has invalid "
2321 + "type 0x%04x, skipping\n", udev->devnum,
2322 +diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
2323 +index 0fb986ba3290..0ae723f75341 100644
2324 +--- a/drivers/net/ethernet/altera/altera_msgdma.c
2325 ++++ b/drivers/net/ethernet/altera/altera_msgdma.c
2326 +@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
2327 + & 0xffff;
2328 +
2329 + if (inuse) { /* Tx FIFO is not empty */
2330 +- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
2331 ++ ready = max_t(int,
2332 ++ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
2333 + } else {
2334 + /* Check for buffered last packet */
2335 + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
2336 +diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
2337 +index 3d45f4c92cf6..9bbaad9f3d63 100644
2338 +--- a/drivers/net/ethernet/cadence/macb.h
2339 ++++ b/drivers/net/ethernet/cadence/macb.h
2340 +@@ -643,6 +643,7 @@
2341 + #define MACB_CAPS_JUMBO 0x00000020
2342 + #define MACB_CAPS_GEM_HAS_PTP 0x00000040
2343 + #define MACB_CAPS_BD_RD_PREFETCH 0x00000080
2344 ++#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
2345 + #define MACB_CAPS_FIFO_MODE 0x10000000
2346 + #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
2347 + #define MACB_CAPS_SG_DISABLED 0x40000000
2348 +@@ -1214,6 +1215,8 @@ struct macb {
2349 +
2350 + int rx_bd_rd_prefetch;
2351 + int tx_bd_rd_prefetch;
2352 ++
2353 ++ u32 rx_intr_mask;
2354 + };
2355 +
2356 + #ifdef CONFIG_MACB_USE_HWSTAMP
2357 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2358 +index 4c816e5a841f..73aa7969db96 100644
2359 +--- a/drivers/net/ethernet/cadence/macb_main.c
2360 ++++ b/drivers/net/ethernet/cadence/macb_main.c
2361 +@@ -56,8 +56,7 @@
2362 + /* level of occupied TX descriptors under which we wake up TX process */
2363 + #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
2364 +
2365 +-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
2366 +- | MACB_BIT(ISR_ROVR))
2367 ++#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
2368 + #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
2369 + | MACB_BIT(ISR_RLE) \
2370 + | MACB_BIT(TXERR))
2371 +@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
2372 + queue_writel(queue, ISR, MACB_BIT(RCOMP));
2373 + napi_reschedule(napi);
2374 + } else {
2375 +- queue_writel(queue, IER, MACB_RX_INT_FLAGS);
2376 ++ queue_writel(queue, IER, bp->rx_intr_mask);
2377 + }
2378 + }
2379 +
2380 +@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
2381 + u32 ctrl;
2382 +
2383 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2384 +- queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
2385 ++ queue_writel(queue, IDR, bp->rx_intr_mask |
2386 + MACB_TX_INT_FLAGS |
2387 + MACB_BIT(HRESP));
2388 + }
2389 +@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
2390 +
2391 + /* Enable interrupts */
2392 + queue_writel(queue, IER,
2393 +- MACB_RX_INT_FLAGS |
2394 ++ bp->rx_intr_mask |
2395 + MACB_TX_INT_FLAGS |
2396 + MACB_BIT(HRESP));
2397 + }
2398 +@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2399 + (unsigned int)(queue - bp->queues),
2400 + (unsigned long)status);
2401 +
2402 +- if (status & MACB_RX_INT_FLAGS) {
2403 ++ if (status & bp->rx_intr_mask) {
2404 + /* There's no point taking any more interrupts
2405 + * until we have processed the buffers. The
2406 + * scheduling call may fail if the poll routine
2407 + * is already scheduled, so disable interrupts
2408 + * now.
2409 + */
2410 +- queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
2411 ++ queue_writel(queue, IDR, bp->rx_intr_mask);
2412 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2413 + queue_writel(queue, ISR, MACB_BIT(RCOMP));
2414 +
2415 +@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2416 + /* There is a hardware issue under heavy load where DMA can
2417 + * stop, this causes endless "used buffer descriptor read"
2418 + * interrupts but it can be cleared by re-enabling RX. See
2419 +- * the at91 manual, section 41.3.1 or the Zynq manual
2420 +- * section 16.7.4 for details.
2421 ++ * the at91rm9200 manual, section 41.3.1 or the Zynq manual
2422 ++ * section 16.7.4 for details. RXUBR is only enabled for
2423 ++ * these two versions.
2424 + */
2425 + if (status & MACB_BIT(RXUBR)) {
2426 + ctrl = macb_readl(bp, NCR);
2427 +@@ -2263,7 +2263,7 @@ static void macb_init_hw(struct macb *bp)
2428 +
2429 + /* Enable interrupts */
2430 + queue_writel(queue, IER,
2431 +- MACB_RX_INT_FLAGS |
2432 ++ bp->rx_intr_mask |
2433 + MACB_TX_INT_FLAGS |
2434 + MACB_BIT(HRESP));
2435 + }
2436 +@@ -3911,6 +3911,7 @@ static const struct macb_config sama5d4_config = {
2437 + };
2438 +
2439 + static const struct macb_config emac_config = {
2440 ++ .caps = MACB_CAPS_NEEDS_RSTONUBR,
2441 + .clk_init = at91ether_clk_init,
2442 + .init = at91ether_init,
2443 + };
2444 +@@ -3932,7 +3933,8 @@ static const struct macb_config zynqmp_config = {
2445 + };
2446 +
2447 + static const struct macb_config zynq_config = {
2448 +- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2449 ++ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
2450 ++ MACB_CAPS_NEEDS_RSTONUBR,
2451 + .dma_burst_length = 16,
2452 + .clk_init = macb_clk_init,
2453 + .init = macb_init,
2454 +@@ -4087,6 +4089,10 @@ static int macb_probe(struct platform_device *pdev)
2455 + macb_dma_desc_get_size(bp);
2456 + }
2457 +
2458 ++ bp->rx_intr_mask = MACB_RX_INT_FLAGS;
2459 ++ if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
2460 ++ bp->rx_intr_mask |= MACB_BIT(RXUBR);
2461 ++
2462 + mac = of_get_mac_address(np);
2463 + if (mac) {
2464 + ether_addr_copy(bp->dev->dev_addr, mac);
2465 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2466 +index 6242249c9f4c..b043370c2685 100644
2467 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2468 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2469 +@@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2470 + out_notify_fail:
2471 + (void)cancel_work_sync(&priv->service_task);
2472 + out_read_prop_fail:
2473 ++ /* safe for ACPI FW */
2474 ++ of_node_put(to_of_node(priv->fwnode));
2475 + free_netdev(ndev);
2476 + return ret;
2477 + }
2478 +@@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
2479 + set_bit(NIC_STATE_REMOVING, &priv->state);
2480 + (void)cancel_work_sync(&priv->service_task);
2481 +
2482 ++ /* safe for ACPI FW */
2483 ++ of_node_put(to_of_node(priv->fwnode));
2484 ++
2485 + free_netdev(ndev);
2486 + return 0;
2487 + }
2488 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2489 +index 774beda040a1..e2710ff48fb0 100644
2490 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2491 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2492 +@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
2493 + */
2494 + static int hns_nic_nway_reset(struct net_device *netdev)
2495 + {
2496 +- int ret = 0;
2497 + struct phy_device *phy = netdev->phydev;
2498 +
2499 +- if (netif_running(netdev)) {
2500 +- /* if autoneg is disabled, don't restart auto-negotiation */
2501 +- if (phy && phy->autoneg == AUTONEG_ENABLE)
2502 +- ret = genphy_restart_aneg(phy);
2503 +- }
2504 ++ if (!netif_running(netdev))
2505 ++ return 0;
2506 +
2507 +- return ret;
2508 ++ if (!phy)
2509 ++ return -EOPNOTSUPP;
2510 ++
2511 ++ if (phy->autoneg != AUTONEG_ENABLE)
2512 ++ return -EINVAL;
2513 ++
2514 ++ return genphy_restart_aneg(phy);
2515 + }
2516 +
2517 + static u32
2518 +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
2519 +index 017e08452d8c..baf5cc251f32 100644
2520 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
2521 ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
2522 +@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
2523 + }
2524 +
2525 + hns_mdio_cmd_write(mdio_dev, is_c45,
2526 +- MDIO_C45_WRITE_ADDR, phy_id, devad);
2527 ++ MDIO_C45_READ, phy_id, devad);
2528 + }
2529 +
2530 + /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
2531 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2532 +index 88a8576ca9ce..f81abed29a76 100644
2533 +--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
2534 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2535 +@@ -480,19 +480,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
2536 +
2537 + /* get pq index according to PQ_FLAGS */
2538 + static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
2539 +- u32 pq_flags)
2540 ++ unsigned long pq_flags)
2541 + {
2542 + struct qed_qm_info *qm_info = &p_hwfn->qm_info;
2543 +
2544 + /* Can't have multiple flags set here */
2545 +- if (bitmap_weight((unsigned long *)&pq_flags,
2546 ++ if (bitmap_weight(&pq_flags,
2547 + sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
2548 +- DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
2549 ++ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
2550 + goto err;
2551 + }
2552 +
2553 + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
2554 +- DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
2555 ++ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
2556 + goto err;
2557 + }
2558 +
2559 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2560 +index 67c02ea93906..64ac95ca4df2 100644
2561 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2562 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2563 +@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
2564 + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
2565 + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
2566 +
2567 ++ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
2568 ++ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
2569 ++ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
2570 ++
2571 + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
2572 + !!(accept_filter & QED_ACCEPT_BCAST));
2573 +
2574 +@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2575 + return rc;
2576 + }
2577 +
2578 ++ if (p_params->update_ctl_frame_check) {
2579 ++ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
2580 ++ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
2581 ++ }
2582 ++
2583 + /* Update mcast bins for VFs, PF doesn't use this functionality */
2584 + qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
2585 +
2586 +@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2587 + u16 num_queues = 0;
2588 +
2589 + /* Since the feature controls only queue-zones,
2590 +- * make sure we have the contexts [rx, tx, xdp] to
2591 ++ * make sure we have the contexts [rx, xdp, tcs] to
2592 + * match.
2593 + */
2594 + for_each_hwfn(cdev, i) {
2595 +@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2596 + u16 cids;
2597 +
2598 + cids = hwfn->pf_params.eth_pf_params.num_cons;
2599 +- num_queues += min_t(u16, l2_queues, cids / 3);
2600 ++ cids /= (2 + info->num_tc);
2601 ++ num_queues += min_t(u16, l2_queues, cids);
2602 + }
2603 +
2604 + /* queues might theoretically be >256, but interrupts'
2605 +@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2606 + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2607 + accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2608 + QED_ACCEPT_MCAST_UNMATCHED;
2609 +- accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2610 ++ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2611 ++ QED_ACCEPT_MCAST_UNMATCHED;
2612 + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2613 + accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2614 + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2615 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2616 +index 8d80f1095d17..7127d5aaac42 100644
2617 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
2618 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2619 +@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
2620 + struct qed_rss_params *rss_params;
2621 + struct qed_filter_accept_flags accept_flags;
2622 + struct qed_sge_tpa_params *sge_tpa_params;
2623 ++ u8 update_ctl_frame_check;
2624 ++ u8 mac_chk_en;
2625 ++ u8 ethtype_chk_en;
2626 + };
2627 +
2628 + int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2629 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2630 +index 9e728ec82c21..25f67c0d5c57 100644
2631 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2632 ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2633 +@@ -2441,19 +2441,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2634 + {
2635 + struct qed_ll2_tx_pkt_info pkt;
2636 + const skb_frag_t *frag;
2637 ++ u8 flags = 0, nr_frags;
2638 + int rc = -EINVAL, i;
2639 + dma_addr_t mapping;
2640 + u16 vlan = 0;
2641 +- u8 flags = 0;
2642 +
2643 + if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2644 + DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2645 + return -EINVAL;
2646 + }
2647 +
2648 +- if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2649 ++ /* Cache number of fragments from SKB since SKB may be freed by
2650 ++ * the completion routine after calling qed_ll2_prepare_tx_packet()
2651 ++ */
2652 ++ nr_frags = skb_shinfo(skb)->nr_frags;
2653 ++
2654 ++ if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2655 + DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2656 +- 1 + skb_shinfo(skb)->nr_frags);
2657 ++ 1 + nr_frags);
2658 + return -EINVAL;
2659 + }
2660 +
2661 +@@ -2475,7 +2480,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2662 + }
2663 +
2664 + memset(&pkt, 0, sizeof(pkt));
2665 +- pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2666 ++ pkt.num_of_bds = 1 + nr_frags;
2667 + pkt.vlan = vlan;
2668 + pkt.bd_flags = flags;
2669 + pkt.tx_dest = QED_LL2_TX_DEST_NW;
2670 +@@ -2486,12 +2491,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2671 + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2672 + pkt.remove_stag = true;
2673 +
2674 ++ /* qed_ll2_prepare_tx_packet() may actually send the packet if
2675 ++ * there are no fragments in the skb and subsequently the completion
2676 ++ * routine may run and free the SKB, so do not dereference the SKB
2677 ++ * beyond this point unless the skb has fragments.
2678 ++ */
2679 + rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2680 + &pkt, 1);
2681 + if (rc)
2682 + goto err;
2683 +
2684 +- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2685 ++ for (i = 0; i < nr_frags; i++) {
2686 + frag = &skb_shinfo(skb)->frags[i];
2687 +
2688 + mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
2689 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2690 +index 3157c0d99441..dae2896e1d8e 100644
2691 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
2692 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2693 +@@ -380,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
2694 + * @param p_hwfn
2695 + */
2696 + void qed_consq_free(struct qed_hwfn *p_hwfn);
2697 ++int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
2698 +
2699 + /**
2700 + * @file
2701 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2702 +index 0a9c5bb0fa48..a721b9348b6c 100644
2703 +--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
2704 ++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2705 +@@ -402,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
2706 +
2707 + qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
2708 +
2709 ++ /* Attempt to post pending requests */
2710 ++ spin_lock_bh(&p_hwfn->p_spq->lock);
2711 ++ rc = qed_spq_pend_post(p_hwfn);
2712 ++ spin_unlock_bh(&p_hwfn->p_spq->lock);
2713 ++
2714 + return rc;
2715 + }
2716 +
2717 +@@ -744,7 +749,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
2718 + return 0;
2719 + }
2720 +
2721 +-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2722 ++int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2723 + {
2724 + struct qed_spq *p_spq = p_hwfn->p_spq;
2725 + struct qed_spq_entry *p_ent = NULL;
2726 +@@ -882,7 +887,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2727 + struct qed_spq_entry *p_ent = NULL;
2728 + struct qed_spq_entry *tmp;
2729 + struct qed_spq_entry *found = NULL;
2730 +- int rc;
2731 +
2732 + if (!p_hwfn)
2733 + return -EINVAL;
2734 +@@ -940,12 +944,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2735 + */
2736 + qed_spq_return_entry(p_hwfn, found);
2737 +
2738 +- /* Attempt to post pending requests */
2739 +- spin_lock_bh(&p_spq->lock);
2740 +- rc = qed_spq_pend_post(p_hwfn);
2741 +- spin_unlock_bh(&p_spq->lock);
2742 +-
2743 +- return rc;
2744 ++ return 0;
2745 + }
2746 +
2747 + int qed_consq_alloc(struct qed_hwfn *p_hwfn)
2748 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2749 +index ca6290fa0f30..71a7af134dd8 100644
2750 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2751 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2752 +@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
2753 + params.vport_id = vf->vport_id;
2754 + params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2755 + params.mtu = vf->mtu;
2756 +- params.check_mac = true;
2757 ++
2758 ++ /* Non trusted VFs should enable control frame filtering */
2759 ++ params.check_mac = !vf->p_vf_info.is_trusted_configured;
2760 +
2761 + rc = qed_sp_eth_vport_start(p_hwfn, &params);
2762 + if (rc) {
2763 +@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2764 + params.opaque_fid = vf->opaque_fid;
2765 + params.vport_id = vf->vport_id;
2766 +
2767 ++ params.update_ctl_frame_check = 1;
2768 ++ params.mac_chk_en = !vf_info->is_trusted_configured;
2769 ++
2770 + if (vf_info->rx_accept_mode & mask) {
2771 + flags->update_rx_mode_config = 1;
2772 + flags->rx_accept_filter = vf_info->rx_accept_mode;
2773 +@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2774 + }
2775 +
2776 + if (flags->update_rx_mode_config ||
2777 +- flags->update_tx_mode_config)
2778 ++ flags->update_tx_mode_config ||
2779 ++ params.update_ctl_frame_check)
2780 + qed_sp_vport_update(hwfn, &params,
2781 + QED_SPQ_MODE_EBLOCK, NULL);
2782 + }
2783 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2784 +index b6cccf44bf40..5dda547772c1 100644
2785 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2786 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2787 +@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2788 + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
2789 + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
2790 + struct vf_pf_resc_request *p_resc;
2791 ++ u8 retry_cnt = VF_ACQUIRE_THRESH;
2792 + bool resources_acquired = false;
2793 + struct vfpf_acquire_tlv *req;
2794 + int rc = 0, attempts = 0;
2795 +@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2796 +
2797 + /* send acquire request */
2798 + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
2799 ++
2800 ++ /* Re-try acquire in case of vf-pf hw channel timeout */
2801 ++ if (retry_cnt && rc == -EBUSY) {
2802 ++ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2803 ++ "VF retrying to acquire due to VPC timeout\n");
2804 ++ retry_cnt--;
2805 ++ continue;
2806 ++ }
2807 ++
2808 + if (rc)
2809 + goto exit;
2810 +
2811 +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
2812 +index de98a974673b..4b875f652ecd 100644
2813 +--- a/drivers/net/ethernet/qlogic/qede/qede.h
2814 ++++ b/drivers/net/ethernet/qlogic/qede/qede.h
2815 +@@ -489,6 +489,9 @@ struct qede_reload_args {
2816 +
2817 + /* Datapath functions definition */
2818 + netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
2819 ++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2820 ++ struct net_device *sb_dev,
2821 ++ select_queue_fallback_t fallback);
2822 + netdev_features_t qede_features_check(struct sk_buff *skb,
2823 + struct net_device *dev,
2824 + netdev_features_t features);
2825 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2826 +index 1a78027de071..a96da16f3404 100644
2827 +--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
2828 ++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2829 +@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2830 + return NETDEV_TX_OK;
2831 + }
2832 +
2833 ++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2834 ++ struct net_device *sb_dev,
2835 ++ select_queue_fallback_t fallback)
2836 ++{
2837 ++ struct qede_dev *edev = netdev_priv(dev);
2838 ++ int total_txq;
2839 ++
2840 ++ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
2841 ++
2842 ++ return QEDE_TSS_COUNT(edev) ?
2843 ++ fallback(dev, skb, NULL) % total_txq : 0;
2844 ++}
2845 ++
2846 + /* 8B udp header + 8B base tunnel header + 32B option length */
2847 + #define QEDE_MAX_TUN_HDR_LEN 48
2848 +
2849 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
2850 +index 46d0f2eaa0c0..f3d9c40c4115 100644
2851 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
2852 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
2853 +@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
2854 + .ndo_open = qede_open,
2855 + .ndo_stop = qede_close,
2856 + .ndo_start_xmit = qede_start_xmit,
2857 ++ .ndo_select_queue = qede_select_queue,
2858 + .ndo_set_rx_mode = qede_set_rx_mode,
2859 + .ndo_set_mac_address = qede_set_mac_addr,
2860 + .ndo_validate_addr = eth_validate_addr,
2861 +@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
2862 + .ndo_open = qede_open,
2863 + .ndo_stop = qede_close,
2864 + .ndo_start_xmit = qede_start_xmit,
2865 ++ .ndo_select_queue = qede_select_queue,
2866 + .ndo_set_rx_mode = qede_set_rx_mode,
2867 + .ndo_set_mac_address = qede_set_mac_addr,
2868 + .ndo_validate_addr = eth_validate_addr,
2869 +@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
2870 + .ndo_open = qede_open,
2871 + .ndo_stop = qede_close,
2872 + .ndo_start_xmit = qede_start_xmit,
2873 ++ .ndo_select_queue = qede_select_queue,
2874 + .ndo_set_rx_mode = qede_set_rx_mode,
2875 + .ndo_set_mac_address = qede_set_mac_addr,
2876 + .ndo_validate_addr = eth_validate_addr,
2877 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2878 +index 7b923362ee55..3b174eae77c1 100644
2879 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2880 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2881 +@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
2882 + }
2883 +
2884 + ret = phy_power_on(bsp_priv, true);
2885 +- if (ret)
2886 ++ if (ret) {
2887 ++ gmac_clk_enable(bsp_priv, false);
2888 + return ret;
2889 ++ }
2890 +
2891 + pm_runtime_enable(dev);
2892 + pm_runtime_get_sync(dev);
2893 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2894 +index 9caf79ba5ef1..4d5fb4b51cc4 100644
2895 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2896 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2897 +@@ -719,8 +719,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
2898 + {
2899 + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2900 +
2901 +- if (!clk)
2902 +- return 0;
2903 ++ if (!clk) {
2904 ++ clk = priv->plat->clk_ref_rate;
2905 ++ if (!clk)
2906 ++ return 0;
2907 ++ }
2908 +
2909 + return (usec * (clk / 1000000)) / 256;
2910 + }
2911 +@@ -729,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
2912 + {
2913 + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2914 +
2915 +- if (!clk)
2916 +- return 0;
2917 ++ if (!clk) {
2918 ++ clk = priv->plat->clk_ref_rate;
2919 ++ if (!clk)
2920 ++ return 0;
2921 ++ }
2922 +
2923 + return (riwt * 256) / (clk / 1000000);
2924 + }
2925 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2926 +index 5d83d6a7694b..9340526d2a9a 100644
2927 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2928 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2929 +@@ -3031,10 +3031,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2930 +
2931 + tx_q = &priv->tx_queue[queue];
2932 +
2933 ++ if (priv->tx_path_in_lpi_mode)
2934 ++ stmmac_disable_eee_mode(priv);
2935 ++
2936 + /* Manage oversized TCP frames for GMAC4 device */
2937 + if (skb_is_gso(skb) && priv->tso) {
2938 +- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2939 ++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
2940 ++ /*
2941 ++ * There is no way to determine the number of TSO
2942 ++ * capable Queues. Let's always use Queue 0
2943 ++ * because if TSO is supported then at least this
2944 ++ * one will be capable.
2945 ++ */
2946 ++ skb_set_queue_mapping(skb, 0);
2947 ++
2948 + return stmmac_tso_xmit(skb, dev);
2949 ++ }
2950 + }
2951 +
2952 + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2953 +@@ -3049,9 +3061,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2954 + return NETDEV_TX_BUSY;
2955 + }
2956 +
2957 +- if (priv->tx_path_in_lpi_mode)
2958 +- stmmac_disable_eee_mode(priv);
2959 +-
2960 + entry = tx_q->cur_tx;
2961 + first_entry = entry;
2962 + WARN_ON(tx_q->tx_skbuff[first_entry]);
2963 +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
2964 +index 59fdda67f89f..0a3b2c45549e 100644
2965 +--- a/drivers/net/wireless/ath/ath10k/core.c
2966 ++++ b/drivers/net/wireless/ath/ath10k/core.c
2967 +@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
2968 + {
2969 + .id = WCN3990_HW_1_0_DEV_VERSION,
2970 + .dev_id = 0,
2971 +- .bus = ATH10K_BUS_PCI,
2972 ++ .bus = ATH10K_BUS_SNOC,
2973 + .name = "wcn3990 hw1.0",
2974 + .continuous_frag_desc = true,
2975 + .tx_chain_mask = 0x7,
2976 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2977 +index c070a9e51ebf..fae572b38416 100644
2978 +--- a/drivers/net/wireless/ath/ath9k/init.c
2979 ++++ b/drivers/net/wireless/ath/ath9k/init.c
2980 +@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
2981 + ret = ath9k_eeprom_request(sc, eeprom_name);
2982 + if (ret)
2983 + return ret;
2984 ++
2985 ++ ah->ah_flags &= ~AH_USE_EEPROM;
2986 ++ ah->ah_flags |= AH_NO_EEP_SWAP;
2987 + }
2988 +
2989 + mac = of_get_mac_address(np);
2990 + if (mac)
2991 + ether_addr_copy(common->macaddr, mac);
2992 +
2993 +- ah->ah_flags &= ~AH_USE_EEPROM;
2994 +- ah->ah_flags |= AH_NO_EEP_SWAP;
2995 +-
2996 + return 0;
2997 + }
2998 +
2999 +diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
3000 +index bd10165d7eec..4d4b07701149 100644
3001 +--- a/drivers/net/wireless/ti/wlcore/sdio.c
3002 ++++ b/drivers/net/wireless/ti/wlcore/sdio.c
3003 +@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
3004 + }
3005 +
3006 + sdio_claim_host(func);
3007 ++ /*
3008 ++ * To guarantee that the SDIO card is power cycled, as required for
3009 ++ * the FW programming to succeed, let's do a brute force HW reset.
3010 ++ */
3011 ++ mmc_hw_reset(card->host);
3012 ++
3013 + sdio_enable_func(func);
3014 + sdio_release_host(func);
3015 +
3016 +@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
3017 + {
3018 + struct sdio_func *func = dev_to_sdio_func(glue->dev);
3019 + struct mmc_card *card = func->card;
3020 +- int error;
3021 +
3022 + sdio_claim_host(func);
3023 + sdio_disable_func(func);
3024 + sdio_release_host(func);
3025 +
3026 + /* Let runtime PM know the card is powered off */
3027 +- error = pm_runtime_put(&card->dev);
3028 +- if (error < 0 && error != -EBUSY) {
3029 +- dev_err(&card->dev, "%s failed: %i\n", __func__, error);
3030 +-
3031 +- return error;
3032 +- }
3033 +-
3034 ++ pm_runtime_put(&card->dev);
3035 + return 0;
3036 + }
3037 +
3038 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
3039 +index 5f9a5ef93969..21d7b646c73d 100644
3040 +--- a/drivers/nvme/host/core.c
3041 ++++ b/drivers/nvme/host/core.c
3042 +@@ -1182,6 +1182,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
3043 + * effects say only one namespace is affected.
3044 + */
3045 + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
3046 ++ mutex_lock(&ctrl->scan_lock);
3047 + nvme_start_freeze(ctrl);
3048 + nvme_wait_freeze(ctrl);
3049 + }
3050 +@@ -1210,8 +1211,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
3051 + */
3052 + if (effects & NVME_CMD_EFFECTS_LBCC)
3053 + nvme_update_formats(ctrl);
3054 +- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
3055 ++ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
3056 + nvme_unfreeze(ctrl);
3057 ++ mutex_unlock(&ctrl->scan_lock);
3058 ++ }
3059 + if (effects & NVME_CMD_EFFECTS_CCC)
3060 + nvme_init_identify(ctrl);
3061 + if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
3062 +@@ -3300,6 +3303,7 @@ static void nvme_scan_work(struct work_struct *work)
3063 + if (nvme_identify_ctrl(ctrl, &id))
3064 + return;
3065 +
3066 ++ mutex_lock(&ctrl->scan_lock);
3067 + nn = le32_to_cpu(id->nn);
3068 + if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3069 + !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
3070 +@@ -3308,6 +3312,7 @@ static void nvme_scan_work(struct work_struct *work)
3071 + }
3072 + nvme_scan_ns_sequential(ctrl, nn);
3073 + out_free_id:
3074 ++ mutex_unlock(&ctrl->scan_lock);
3075 + kfree(id);
3076 + down_write(&ctrl->namespaces_rwsem);
3077 + list_sort(NULL, &ctrl->namespaces, ns_cmp);
3078 +@@ -3550,6 +3555,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3079 +
3080 + ctrl->state = NVME_CTRL_NEW;
3081 + spin_lock_init(&ctrl->lock);
3082 ++ mutex_init(&ctrl->scan_lock);
3083 + INIT_LIST_HEAD(&ctrl->namespaces);
3084 + init_rwsem(&ctrl->namespaces_rwsem);
3085 + ctrl->dev = dev;
3086 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
3087 +index 6ffa99a10a60..5274881f9141 100644
3088 +--- a/drivers/nvme/host/nvme.h
3089 ++++ b/drivers/nvme/host/nvme.h
3090 +@@ -153,6 +153,7 @@ struct nvme_ctrl {
3091 + enum nvme_ctrl_state state;
3092 + bool identified;
3093 + spinlock_t lock;
3094 ++ struct mutex scan_lock;
3095 + const struct nvme_ctrl_ops *ops;
3096 + struct request_queue *admin_q;
3097 + struct request_queue *connect_q;
3098 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
3099 +index c0d01048ce4d..5c58e0ffa3ac 100644
3100 +--- a/drivers/nvme/host/pci.c
3101 ++++ b/drivers/nvme/host/pci.c
3102 +@@ -2280,27 +2280,18 @@ static void nvme_reset_work(struct work_struct *work)
3103 + if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
3104 + nvme_dev_disable(dev, false);
3105 +
3106 +- /*
3107 +- * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
3108 +- * initializing procedure here.
3109 +- */
3110 +- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3111 +- dev_warn(dev->ctrl.device,
3112 +- "failed to mark controller CONNECTING\n");
3113 +- goto out;
3114 +- }
3115 +-
3116 ++ mutex_lock(&dev->shutdown_lock);
3117 + result = nvme_pci_enable(dev);
3118 + if (result)
3119 +- goto out;
3120 ++ goto out_unlock;
3121 +
3122 + result = nvme_pci_configure_admin_queue(dev);
3123 + if (result)
3124 +- goto out;
3125 ++ goto out_unlock;
3126 +
3127 + result = nvme_alloc_admin_tags(dev);
3128 + if (result)
3129 +- goto out;
3130 ++ goto out_unlock;
3131 +
3132 + /*
3133 + * Limit the max command size to prevent iod->sg allocations going
3134 +@@ -2308,6 +2299,17 @@ static void nvme_reset_work(struct work_struct *work)
3135 + */
3136 + dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
3137 + dev->ctrl.max_segments = NVME_MAX_SEGS;
3138 ++ mutex_unlock(&dev->shutdown_lock);
3139 ++
3140 ++ /*
3141 ++ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
3142 ++ * initializing procedure here.
3143 ++ */
3144 ++ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3145 ++ dev_warn(dev->ctrl.device,
3146 ++ "failed to mark controller CONNECTING\n");
3147 ++ goto out;
3148 ++ }
3149 +
3150 + result = nvme_init_identify(&dev->ctrl);
3151 + if (result)
3152 +@@ -2372,6 +2374,8 @@ static void nvme_reset_work(struct work_struct *work)
3153 + nvme_start_ctrl(&dev->ctrl);
3154 + return;
3155 +
3156 ++ out_unlock:
3157 ++ mutex_unlock(&dev->shutdown_lock);
3158 + out:
3159 + nvme_remove_dead_ctrl(dev, result);
3160 + }
3161 +diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
3162 +index 0dbcf429089f..1a8b85051b1b 100644
3163 +--- a/drivers/pci/pcie/pme.c
3164 ++++ b/drivers/pci/pcie/pme.c
3165 +@@ -432,31 +432,6 @@ static void pcie_pme_remove(struct pcie_device *srv)
3166 + kfree(get_service_data(srv));
3167 + }
3168 +
3169 +-static int pcie_pme_runtime_suspend(struct pcie_device *srv)
3170 +-{
3171 +- struct pcie_pme_service_data *data = get_service_data(srv);
3172 +-
3173 +- spin_lock_irq(&data->lock);
3174 +- pcie_pme_interrupt_enable(srv->port, false);
3175 +- pcie_clear_root_pme_status(srv->port);
3176 +- data->noirq = true;
3177 +- spin_unlock_irq(&data->lock);
3178 +-
3179 +- return 0;
3180 +-}
3181 +-
3182 +-static int pcie_pme_runtime_resume(struct pcie_device *srv)
3183 +-{
3184 +- struct pcie_pme_service_data *data = get_service_data(srv);
3185 +-
3186 +- spin_lock_irq(&data->lock);
3187 +- pcie_pme_interrupt_enable(srv->port, true);
3188 +- data->noirq = false;
3189 +- spin_unlock_irq(&data->lock);
3190 +-
3191 +- return 0;
3192 +-}
3193 +-
3194 + static struct pcie_port_service_driver pcie_pme_driver = {
3195 + .name = "pcie_pme",
3196 + .port_type = PCI_EXP_TYPE_ROOT_PORT,
3197 +@@ -464,8 +439,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
3198 +
3199 + .probe = pcie_pme_probe,
3200 + .suspend = pcie_pme_suspend,
3201 +- .runtime_suspend = pcie_pme_runtime_suspend,
3202 +- .runtime_resume = pcie_pme_runtime_resume,
3203 + .resume = pcie_pme_resume,
3204 + .remove = pcie_pme_remove,
3205 + };
3206 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
3207 +index b03481ef99a1..98905d4a79ca 100644
3208 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
3209 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
3210 +@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
3211 + break;
3212 +
3213 + case MCP_TYPE_S18:
3214 ++ one_regmap_config =
3215 ++ devm_kmemdup(dev, &mcp23x17_regmap,
3216 ++ sizeof(struct regmap_config), GFP_KERNEL);
3217 ++ if (!one_regmap_config)
3218 ++ return -ENOMEM;
3219 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
3220 +- &mcp23x17_regmap);
3221 ++ one_regmap_config);
3222 + mcp->reg_shift = 1;
3223 + mcp->chip.ngpio = 16;
3224 + mcp->chip.label = "mcp23s18";
3225 +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
3226 +index 54f6a40c75c6..0430cad6d84e 100644
3227 +--- a/drivers/platform/x86/Kconfig
3228 ++++ b/drivers/platform/x86/Kconfig
3229 +@@ -901,6 +901,7 @@ config TOSHIBA_WMI
3230 + config ACPI_CMPC
3231 + tristate "CMPC Laptop Extras"
3232 + depends on ACPI && INPUT
3233 ++ depends on BACKLIGHT_LCD_SUPPORT
3234 + depends on RFKILL || RFKILL=n
3235 + select BACKLIGHT_CLASS_DEVICE
3236 + help
3237 +@@ -1124,6 +1125,7 @@ config INTEL_OAKTRAIL
3238 + config SAMSUNG_Q10
3239 + tristate "Samsung Q10 Extras"
3240 + depends on ACPI
3241 ++ depends on BACKLIGHT_LCD_SUPPORT
3242 + select BACKLIGHT_CLASS_DEVICE
3243 + ---help---
3244 + This driver provides support for backlight control on Samsung Q10
3245 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
3246 +index 99af1a0a3314..8f2af450152f 100644
3247 +--- a/drivers/s390/net/qeth_core.h
3248 ++++ b/drivers/s390/net/qeth_core.h
3249 +@@ -22,6 +22,7 @@
3250 + #include <linux/hashtable.h>
3251 + #include <linux/ip.h>
3252 + #include <linux/refcount.h>
3253 ++#include <linux/workqueue.h>
3254 +
3255 + #include <net/ipv6.h>
3256 + #include <net/if_inet6.h>
3257 +@@ -790,6 +791,7 @@ struct qeth_card {
3258 + struct qeth_seqno seqno;
3259 + struct qeth_card_options options;
3260 +
3261 ++ struct workqueue_struct *event_wq;
3262 + wait_queue_head_t wait_q;
3263 + spinlock_t mclock;
3264 + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
3265 +@@ -968,7 +970,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
3266 + extern const struct attribute_group qeth_device_attr_group;
3267 + extern const struct attribute_group qeth_device_blkt_group;
3268 + extern const struct device_type qeth_generic_devtype;
3269 +-extern struct workqueue_struct *qeth_wq;
3270 +
3271 + int qeth_card_hw_is_reachable(struct qeth_card *);
3272 + const char *qeth_get_cardname_short(struct qeth_card *);
3273 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
3274 +index 0c9a5250dd93..ebbc3ad504f9 100644
3275 +--- a/drivers/s390/net/qeth_core_main.c
3276 ++++ b/drivers/s390/net/qeth_core_main.c
3277 +@@ -76,8 +76,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
3278 + static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
3279 + static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
3280 +
3281 +-struct workqueue_struct *qeth_wq;
3282 +-EXPORT_SYMBOL_GPL(qeth_wq);
3283 ++static struct workqueue_struct *qeth_wq;
3284 +
3285 + int qeth_card_hw_is_reachable(struct qeth_card *card)
3286 + {
3287 +@@ -568,6 +567,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
3288 + QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
3289 + rc, CARD_DEVID(card));
3290 + atomic_set(&channel->irq_pending, 0);
3291 ++ qeth_release_buffer(channel, iob);
3292 + card->read_or_write_problem = 1;
3293 + qeth_schedule_recovery(card);
3294 + wake_up(&card->wait_q);
3295 +@@ -1129,6 +1129,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
3296 + rc = qeth_get_problem(card, cdev, irb);
3297 + if (rc) {
3298 + card->read_or_write_problem = 1;
3299 ++ if (iob)
3300 ++ qeth_release_buffer(iob->channel, iob);
3301 + qeth_clear_ipacmd_list(card);
3302 + qeth_schedule_recovery(card);
3303 + goto out;
3304 +@@ -1468,6 +1470,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
3305 + CARD_RDEV(card) = gdev->cdev[0];
3306 + CARD_WDEV(card) = gdev->cdev[1];
3307 + CARD_DDEV(card) = gdev->cdev[2];
3308 ++
3309 ++ card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
3310 ++ if (!card->event_wq)
3311 ++ goto out_wq;
3312 + if (qeth_setup_channel(&card->read, true))
3313 + goto out_ip;
3314 + if (qeth_setup_channel(&card->write, true))
3315 +@@ -1483,6 +1489,8 @@ out_data:
3316 + out_channel:
3317 + qeth_clean_channel(&card->read);
3318 + out_ip:
3319 ++ destroy_workqueue(card->event_wq);
3320 ++out_wq:
3321 + dev_set_drvdata(&gdev->dev, NULL);
3322 + kfree(card);
3323 + out:
3324 +@@ -1811,6 +1819,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
3325 + QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
3326 + QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3327 + atomic_set(&channel->irq_pending, 0);
3328 ++ qeth_release_buffer(channel, iob);
3329 + wake_up(&card->wait_q);
3330 + return rc;
3331 + }
3332 +@@ -1880,6 +1889,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
3333 + rc);
3334 + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3335 + atomic_set(&channel->irq_pending, 0);
3336 ++ qeth_release_buffer(channel, iob);
3337 + wake_up(&card->wait_q);
3338 + return rc;
3339 + }
3340 +@@ -2060,6 +2070,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
3341 + }
3342 + reply = qeth_alloc_reply(card);
3343 + if (!reply) {
3344 ++ qeth_release_buffer(channel, iob);
3345 + return -ENOMEM;
3346 + }
3347 + reply->callback = reply_cb;
3348 +@@ -2391,11 +2402,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
3349 + return 0;
3350 + }
3351 +
3352 +-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
3353 ++static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
3354 + {
3355 + if (!q)
3356 + return;
3357 +
3358 ++ qeth_clear_outq_buffers(q, 1);
3359 + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3360 + kfree(q);
3361 + }
3362 +@@ -2469,10 +2481,8 @@ out_freeoutqbufs:
3363 + card->qdio.out_qs[i]->bufs[j] = NULL;
3364 + }
3365 + out_freeoutq:
3366 +- while (i > 0) {
3367 +- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
3368 +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
3369 +- }
3370 ++ while (i > 0)
3371 ++ qeth_free_output_queue(card->qdio.out_qs[--i]);
3372 + kfree(card->qdio.out_qs);
3373 + card->qdio.out_qs = NULL;
3374 + out_freepool:
3375 +@@ -2505,10 +2515,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
3376 + qeth_free_buffer_pool(card);
3377 + /* free outbound qdio_qs */
3378 + if (card->qdio.out_qs) {
3379 +- for (i = 0; i < card->qdio.no_out_queues; ++i) {
3380 +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
3381 +- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
3382 +- }
3383 ++ for (i = 0; i < card->qdio.no_out_queues; i++)
3384 ++ qeth_free_output_queue(card->qdio.out_qs[i]);
3385 + kfree(card->qdio.out_qs);
3386 + card->qdio.out_qs = NULL;
3387 + }
3388 +@@ -5035,6 +5043,7 @@ static void qeth_core_free_card(struct qeth_card *card)
3389 + qeth_clean_channel(&card->read);
3390 + qeth_clean_channel(&card->write);
3391 + qeth_clean_channel(&card->data);
3392 ++ destroy_workqueue(card->event_wq);
3393 + qeth_free_qdio_buffers(card);
3394 + unregister_service_level(&card->qeth_service_level);
3395 + dev_set_drvdata(&card->gdev->dev, NULL);
3396 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
3397 +index 8d3601891c62..a6c55cbf3d69 100644
3398 +--- a/drivers/s390/net/qeth_l2_main.c
3399 ++++ b/drivers/s390/net/qeth_l2_main.c
3400 +@@ -391,6 +391,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
3401 + qeth_clear_cmd_buffers(&card->read);
3402 + qeth_clear_cmd_buffers(&card->write);
3403 + }
3404 ++
3405 ++ flush_workqueue(card->event_wq);
3406 + }
3407 +
3408 + static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
3409 +@@ -823,6 +825,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
3410 +
3411 + if (cgdev->state == CCWGROUP_ONLINE)
3412 + qeth_l2_set_offline(cgdev);
3413 ++
3414 ++ cancel_work_sync(&card->close_dev_work);
3415 + if (qeth_netdev_is_registered(card->dev))
3416 + unregister_netdev(card->dev);
3417 + }
3418 +@@ -1453,7 +1457,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
3419 + data->card = card;
3420 + memcpy(&data->qports, qports,
3421 + sizeof(struct qeth_sbp_state_change) + extrasize);
3422 +- queue_work(qeth_wq, &data->worker);
3423 ++ queue_work(card->event_wq, &data->worker);
3424 + }
3425 +
3426 + struct qeth_bridge_host_data {
3427 +@@ -1525,7 +1529,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
3428 + data->card = card;
3429 + memcpy(&data->hostevs, hostevs,
3430 + sizeof(struct qeth_ipacmd_addr_change) + extrasize);
3431 +- queue_work(qeth_wq, &data->worker);
3432 ++ queue_work(card->event_wq, &data->worker);
3433 + }
3434 +
3435 + /* SETBRIDGEPORT support; sending commands */
3436 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
3437 +index f08b745c2007..29a2408b9282 100644
3438 +--- a/drivers/s390/net/qeth_l3_main.c
3439 ++++ b/drivers/s390/net/qeth_l3_main.c
3440 +@@ -1436,6 +1436,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
3441 + qeth_clear_cmd_buffers(&card->read);
3442 + qeth_clear_cmd_buffers(&card->write);
3443 + }
3444 ++
3445 ++ flush_workqueue(card->event_wq);
3446 + }
3447 +
3448 + /*
3449 +@@ -2428,6 +2430,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3450 + if (cgdev->state == CCWGROUP_ONLINE)
3451 + qeth_l3_set_offline(cgdev);
3452 +
3453 ++ cancel_work_sync(&card->close_dev_work);
3454 + if (qeth_netdev_is_registered(card->dev))
3455 + unregister_netdev(card->dev);
3456 + qeth_l3_clear_ip_htable(card, 0);
3457 +diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
3458 +index 6be77b3aa8a5..ac79f2088b31 100644
3459 +--- a/drivers/scsi/53c700.c
3460 ++++ b/drivers/scsi/53c700.c
3461 +@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
3462 + if(tpnt->sdev_attrs == NULL)
3463 + tpnt->sdev_attrs = NCR_700_dev_attrs;
3464 +
3465 +- memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
3466 ++ memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
3467 + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
3468 + if(memory == NULL) {
3469 + printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
3470 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
3471 +index 1e77d96a18f2..055fe61ea539 100644
3472 +--- a/drivers/scsi/aacraid/commsup.c
3473 ++++ b/drivers/scsi/aacraid/commsup.c
3474 +@@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
3475 + ADD : DELETE;
3476 + break;
3477 + }
3478 +- case AifBuManagerEvent:
3479 +- aac_handle_aif_bu(dev, aifcmd);
3480 ++ break;
3481 ++ case AifBuManagerEvent:
3482 ++ aac_handle_aif_bu(dev, aifcmd);
3483 + break;
3484 + }
3485 +
3486 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
3487 +index 350257c13a5b..bc9f2a2365f4 100644
3488 +--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
3489 ++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
3490 +@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3491 + return NULL;
3492 + }
3493 +
3494 ++ cmgr->hba = hba;
3495 + cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
3496 + GFP_KERNEL);
3497 + if (!cmgr->free_list) {
3498 +@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3499 + goto mem_err;
3500 + }
3501 +
3502 +- cmgr->hba = hba;
3503 + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
3504 +
3505 + for (i = 0; i < arr_sz; i++) {
3506 +@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3507 +
3508 + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
3509 + mem_size = num_ios * sizeof(struct io_bdt *);
3510 +- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
3511 ++ cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
3512 + if (!cmgr->io_bdt_pool) {
3513 + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
3514 + goto mem_err;
3515 +diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
3516 +index be83590ed955..ff943f477d6f 100644
3517 +--- a/drivers/scsi/libfc/fc_lport.c
3518 ++++ b/drivers/scsi/libfc/fc_lport.c
3519 +@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3520 + fc_frame_payload_op(fp) != ELS_LS_ACC) {
3521 + FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
3522 + fc_lport_error(lport, fp);
3523 +- goto err;
3524 ++ goto out;
3525 + }
3526 +
3527 + flp = fc_frame_payload_get(fp, sizeof(*flp));
3528 + if (!flp) {
3529 + FC_LPORT_DBG(lport, "FLOGI bad response\n");
3530 + fc_lport_error(lport, fp);
3531 +- goto err;
3532 ++ goto out;
3533 + }
3534 +
3535 + mfs = ntohs(flp->fl_csp.sp_bb_data) &
3536 +@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3537 + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
3538 + "lport->mfs:%hu\n", mfs, lport->mfs);
3539 + fc_lport_error(lport, fp);
3540 +- goto err;
3541 ++ goto out;
3542 + }
3543 +
3544 + if (mfs <= lport->mfs) {
3545 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
3546 +index 1e1c0f1b9e69..8ed2113f5a1e 100644
3547 +--- a/drivers/scsi/libfc/fc_rport.c
3548 ++++ b/drivers/scsi/libfc/fc_rport.c
3549 +@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
3550 + struct fc_rport_priv *rdata;
3551 +
3552 + rdata = container_of(kref, struct fc_rport_priv, kref);
3553 +- WARN_ON(!list_empty(&rdata->peers));
3554 + kfree_rcu(rdata, rcu);
3555 + }
3556 + EXPORT_SYMBOL(fc_rport_destroy);
3557 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
3558 +index 60bcc6df97a9..65305b3848bc 100644
3559 +--- a/drivers/scsi/scsi_debug.c
3560 ++++ b/drivers/scsi/scsi_debug.c
3561 +@@ -62,7 +62,7 @@
3562 +
3563 + /* make sure inq_product_rev string corresponds to this version */
3564 + #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
3565 +-static const char *sdebug_version_date = "20180128";
3566 ++static const char *sdebug_version_date = "20190125";
3567 +
3568 + #define MY_NAME "scsi_debug"
3569 +
3570 +@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
3571 + (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
3572 + }
3573 +
3574 +-static void *fake_store(unsigned long long lba)
3575 ++static void *lba2fake_store(unsigned long long lba)
3576 + {
3577 + lba = do_div(lba, sdebug_store_sectors);
3578 +
3579 +@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
3580 + return ret;
3581 + }
3582 +
3583 +-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
3584 +- * arr into fake_store(lba,num) and return true. If comparison fails then
3585 ++/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
3586 ++ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
3587 + * return false. */
3588 + static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
3589 + {
3590 +@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
3591 + if (sdt->app_tag == cpu_to_be16(0xffff))
3592 + continue;
3593 +
3594 +- ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
3595 ++ ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
3596 + if (ret) {
3597 + dif_errors++;
3598 + return ret;
3599 +@@ -3261,10 +3261,12 @@ err_out:
3600 + static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3601 + u32 ei_lba, bool unmap, bool ndob)
3602 + {
3603 ++ int ret;
3604 + unsigned long iflags;
3605 + unsigned long long i;
3606 +- int ret;
3607 +- u64 lba_off;
3608 ++ u32 lb_size = sdebug_sector_size;
3609 ++ u64 block, lbaa;
3610 ++ u8 *fs1p;
3611 +
3612 + ret = check_device_access_params(scp, lba, num);
3613 + if (ret)
3614 +@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3615 + unmap_region(lba, num);
3616 + goto out;
3617 + }
3618 +-
3619 +- lba_off = lba * sdebug_sector_size;
3620 ++ lbaa = lba;
3621 ++ block = do_div(lbaa, sdebug_store_sectors);
3622 + /* if ndob then zero 1 logical block, else fetch 1 logical block */
3623 ++ fs1p = fake_storep + (block * lb_size);
3624 + if (ndob) {
3625 +- memset(fake_storep + lba_off, 0, sdebug_sector_size);
3626 ++ memset(fs1p, 0, lb_size);
3627 + ret = 0;
3628 + } else
3629 +- ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3630 +- sdebug_sector_size);
3631 ++ ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3632 +
3633 + if (-1 == ret) {
3634 + write_unlock_irqrestore(&atomic_rw, iflags);
3635 + return DID_ERROR << 16;
3636 +- } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3637 ++ } else if (sdebug_verbose && !ndob && (ret < lb_size))
3638 + sdev_printk(KERN_INFO, scp->device,
3639 + "%s: %s: lb size=%u, IO sent=%d bytes\n",
3640 +- my_name, "write same",
3641 +- sdebug_sector_size, ret);
3642 ++ my_name, "write same", lb_size, ret);
3643 +
3644 + /* Copy first sector to remaining blocks */
3645 +- for (i = 1 ; i < num ; i++)
3646 +- memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3647 +- fake_storep + lba_off,
3648 +- sdebug_sector_size);
3649 +-
3650 ++ for (i = 1 ; i < num ; i++) {
3651 ++ lbaa = lba + i;
3652 ++ block = do_div(lbaa, sdebug_store_sectors);
3653 ++ memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3654 ++ }
3655 + if (scsi_debug_lbp())
3656 + map_region(lba, num);
3657 + out:
3658 +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
3659 +index 5ce24718c2fd..d8b3ba047c28 100644
3660 +--- a/drivers/soc/fsl/qbman/qman.c
3661 ++++ b/drivers/soc/fsl/qbman/qman.c
3662 +@@ -1124,18 +1124,19 @@ static void qm_mr_process_task(struct work_struct *work);
3663 + static irqreturn_t portal_isr(int irq, void *ptr)
3664 + {
3665 + struct qman_portal *p = ptr;
3666 +-
3667 +- u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
3668 + u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
3669 ++ u32 clear = 0;
3670 +
3671 + if (unlikely(!is))
3672 + return IRQ_NONE;
3673 +
3674 + /* DQRR-handling if it's interrupt-driven */
3675 +- if (is & QM_PIRQ_DQRI)
3676 ++ if (is & QM_PIRQ_DQRI) {
3677 + __poll_portal_fast(p, QMAN_POLL_LIMIT);
3678 ++ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
3679 ++ }
3680 + /* Handling of anything else that's interrupt-driven */
3681 +- clear |= __poll_portal_slow(p, is);
3682 ++ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
3683 + qm_out(&p->p, QM_REG_ISR, clear);
3684 + return IRQ_HANDLED;
3685 + }
3686 +diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
3687 +index 5596c52e246d..ecc51ef0753f 100644
3688 +--- a/drivers/staging/erofs/namei.c
3689 ++++ b/drivers/staging/erofs/namei.c
3690 +@@ -15,74 +15,77 @@
3691 +
3692 + #include <trace/events/erofs.h>
3693 +
3694 +-/* based on the value of qn->len is accurate */
3695 +-static inline int dirnamecmp(struct qstr *qn,
3696 +- struct qstr *qd, unsigned int *matched)
3697 ++struct erofs_qstr {
3698 ++ const unsigned char *name;
3699 ++ const unsigned char *end;
3700 ++};
3701 ++
3702 ++/* assumes the end of qn is accurate and that it has the trailing '\0' */
3703 ++static inline int dirnamecmp(const struct erofs_qstr *qn,
3704 ++ const struct erofs_qstr *qd,
3705 ++ unsigned int *matched)
3706 + {
3707 +- unsigned int i = *matched, len = min(qn->len, qd->len);
3708 +-loop:
3709 +- if (unlikely(i >= len)) {
3710 +- *matched = i;
3711 +- if (qn->len < qd->len) {
3712 +- /*
3713 +- * actually (qn->len == qd->len)
3714 +- * when qd->name[i] == '\0'
3715 +- */
3716 +- return qd->name[i] == '\0' ? 0 : -1;
3717 ++ unsigned int i = *matched;
3718 ++
3719 ++ /*
3720 ++	 * on-disk error: only BUG_ON in debugging mode.
3721 ++	 * Otherwise, return 1 to just skip the invalid name
3722 ++	 * and go on (in consideration of lookup performance).
3723 ++ */
3724 ++ DBG_BUGON(qd->name > qd->end);
3725 ++
3726 ++	/* qd may not have a trailing '\0' */
3727 ++	/* however, access is absolutely safe if < qd->end */
3728 ++ while (qd->name + i < qd->end && qd->name[i] != '\0') {
3729 ++ if (qn->name[i] != qd->name[i]) {
3730 ++ *matched = i;
3731 ++ return qn->name[i] > qd->name[i] ? 1 : -1;
3732 + }
3733 +- return (qn->len > qd->len);
3734 ++ ++i;
3735 + }
3736 +-
3737 +- if (qn->name[i] != qd->name[i]) {
3738 +- *matched = i;
3739 +- return qn->name[i] > qd->name[i] ? 1 : -1;
3740 +- }
3741 +-
3742 +- ++i;
3743 +- goto loop;
3744 ++ *matched = i;
3745 ++ /* See comments in __d_alloc on the terminating NUL character */
3746 ++ return qn->name[i] == '\0' ? 0 : 1;
3747 + }
3748 +
3749 +-static struct erofs_dirent *find_target_dirent(
3750 +- struct qstr *name,
3751 +- u8 *data, int maxsize)
3752 ++#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
3753 ++
3754 ++static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
3755 ++ u8 *data,
3756 ++ unsigned int dirblksize,
3757 ++ const int ndirents)
3758 + {
3759 +- unsigned int ndirents, head, back;
3760 ++ int head, back;
3761 + unsigned int startprfx, endprfx;
3762 + struct erofs_dirent *const de = (struct erofs_dirent *)data;
3763 +
3764 +- /* make sure that maxsize is valid */
3765 +- BUG_ON(maxsize < sizeof(struct erofs_dirent));
3766 +-
3767 +- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
3768 +-
3769 +- /* corrupted dir (may be unnecessary...) */
3770 +- BUG_ON(!ndirents);
3771 +-
3772 +- head = 0;
3773 ++ /* since the 1st dirent has been evaluated previously */
3774 ++ head = 1;
3775 + back = ndirents - 1;
3776 + startprfx = endprfx = 0;
3777 +
3778 + while (head <= back) {
3779 +- unsigned int mid = head + (back - head) / 2;
3780 +- unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
3781 ++ const int mid = head + (back - head) / 2;
3782 ++ const int nameoff = nameoff_from_disk(de[mid].nameoff,
3783 ++ dirblksize);
3784 + unsigned int matched = min(startprfx, endprfx);
3785 +-
3786 +- struct qstr dname = QSTR_INIT(data + nameoff,
3787 +- unlikely(mid >= ndirents - 1) ?
3788 +- maxsize - nameoff :
3789 +- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
3790 ++ struct erofs_qstr dname = {
3791 ++ .name = data + nameoff,
3792 ++ .end = unlikely(mid >= ndirents - 1) ?
3793 ++ data + dirblksize :
3794 ++ data + nameoff_from_disk(de[mid + 1].nameoff,
3795 ++ dirblksize)
3796 ++ };
3797 +
3798 + /* string comparison without already matched prefix */
3799 + int ret = dirnamecmp(name, &dname, &matched);
3800 +
3801 +- if (unlikely(!ret))
3802 ++ if (unlikely(!ret)) {
3803 + return de + mid;
3804 +- else if (ret > 0) {
3805 ++ } else if (ret > 0) {
3806 + head = mid + 1;
3807 + startprfx = matched;
3808 +- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
3809 +- break;
3810 +- else {
3811 ++ } else {
3812 + back = mid - 1;
3813 + endprfx = matched;
3814 + }
3815 +@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
3816 + return ERR_PTR(-ENOENT);
3817 + }
3818 +
3819 +-static struct page *find_target_block_classic(
3820 +- struct inode *dir,
3821 +- struct qstr *name, int *_diff)
3822 ++static struct page *find_target_block_classic(struct inode *dir,
3823 ++ struct erofs_qstr *name,
3824 ++ int *_ndirents)
3825 + {
3826 + unsigned int startprfx, endprfx;
3827 +- unsigned int head, back;
3828 ++ int head, back;
3829 + struct address_space *const mapping = dir->i_mapping;
3830 + struct page *candidate = ERR_PTR(-ENOENT);
3831 +
3832 +@@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
3833 + back = inode_datablocks(dir) - 1;
3834 +
3835 + while (head <= back) {
3836 +- unsigned int mid = head + (back - head) / 2;
3837 ++ const int mid = head + (back - head) / 2;
3838 + struct page *page = read_mapping_page(mapping, mid, NULL);
3839 +
3840 +- if (IS_ERR(page)) {
3841 +-exact_out:
3842 +- if (!IS_ERR(candidate)) /* valid candidate */
3843 +- put_page(candidate);
3844 +- return page;
3845 +- } else {
3846 +- int diff;
3847 +- unsigned int ndirents, matched;
3848 +- struct qstr dname;
3849 ++ if (!IS_ERR(page)) {
3850 + struct erofs_dirent *de = kmap_atomic(page);
3851 +- unsigned int nameoff = le16_to_cpu(de->nameoff);
3852 +-
3853 +- ndirents = nameoff / sizeof(*de);
3854 ++ const int nameoff = nameoff_from_disk(de->nameoff,
3855 ++ EROFS_BLKSIZ);
3856 ++ const int ndirents = nameoff / sizeof(*de);
3857 ++ int diff;
3858 ++ unsigned int matched;
3859 ++ struct erofs_qstr dname;
3860 +
3861 +- /* corrupted dir (should have one entry at least) */
3862 +- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
3863 ++ if (unlikely(!ndirents)) {
3864 ++ DBG_BUGON(1);
3865 ++ kunmap_atomic(de);
3866 ++ put_page(page);
3867 ++ page = ERR_PTR(-EIO);
3868 ++ goto out;
3869 ++ }
3870 +
3871 + matched = min(startprfx, endprfx);
3872 +
3873 + dname.name = (u8 *)de + nameoff;
3874 +- dname.len = ndirents == 1 ?
3875 +- /* since the rest of the last page is 0 */
3876 +- EROFS_BLKSIZ - nameoff
3877 +- : le16_to_cpu(de[1].nameoff) - nameoff;
3878 ++ if (ndirents == 1)
3879 ++ dname.end = (u8 *)de + EROFS_BLKSIZ;
3880 ++ else
3881 ++ dname.end = (u8 *)de +
3882 ++ nameoff_from_disk(de[1].nameoff,
3883 ++ EROFS_BLKSIZ);
3884 +
3885 + /* string comparison without already matched prefix */
3886 + diff = dirnamecmp(name, &dname, &matched);
3887 + kunmap_atomic(de);
3888 +
3889 + if (unlikely(!diff)) {
3890 +- *_diff = 0;
3891 +- goto exact_out;
3892 ++ *_ndirents = 0;
3893 ++ goto out;
3894 + } else if (diff > 0) {
3895 + head = mid + 1;
3896 + startprfx = matched;
3897 +@@ -147,45 +152,51 @@ exact_out:
3898 + if (likely(!IS_ERR(candidate)))
3899 + put_page(candidate);
3900 + candidate = page;
3901 ++ *_ndirents = ndirents;
3902 + } else {
3903 + put_page(page);
3904 +
3905 +- if (unlikely(mid < 1)) /* fix "mid" overflow */
3906 +- break;
3907 +-
3908 + back = mid - 1;
3909 + endprfx = matched;
3910 + }
3911 ++ continue;
3912 + }
3913 ++out: /* free if the candidate is valid */
3914 ++ if (!IS_ERR(candidate))
3915 ++ put_page(candidate);
3916 ++ return page;
3917 + }
3918 +- *_diff = 1;
3919 + return candidate;
3920 + }
3921 +
3922 + int erofs_namei(struct inode *dir,
3923 +- struct qstr *name,
3924 +- erofs_nid_t *nid, unsigned int *d_type)
3925 ++ struct qstr *name,
3926 ++ erofs_nid_t *nid, unsigned int *d_type)
3927 + {
3928 +- int diff;
3929 ++ int ndirents;
3930 + struct page *page;
3931 +- u8 *data;
3932 ++ void *data;
3933 + struct erofs_dirent *de;
3934 ++ struct erofs_qstr qn;
3935 +
3936 + if (unlikely(!dir->i_size))
3937 + return -ENOENT;
3938 +
3939 +- diff = 1;
3940 +- page = find_target_block_classic(dir, name, &diff);
3941 ++ qn.name = name->name;
3942 ++ qn.end = name->name + name->len;
3943 ++
3944 ++ ndirents = 0;
3945 ++ page = find_target_block_classic(dir, &qn, &ndirents);
3946 +
3947 + if (unlikely(IS_ERR(page)))
3948 + return PTR_ERR(page);
3949 +
3950 + data = kmap_atomic(page);
3951 + /* the target page has been mapped */
3952 +- de = likely(diff) ?
3953 +- /* since the rest of the last page is 0 */
3954 +- find_target_dirent(name, data, EROFS_BLKSIZ) :
3955 +- (struct erofs_dirent *)data;
3956 ++ if (ndirents)
3957 ++ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
3958 ++ else
3959 ++ de = (struct erofs_dirent *)data;
3960 +
3961 + if (likely(!IS_ERR(de))) {
3962 + *nid = le64_to_cpu(de->nid);
3963 +diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
3964 +index cb7fcd7c0ad8..c1e9ea621f41 100644
3965 +--- a/drivers/usb/dwc3/dwc3-exynos.c
3966 ++++ b/drivers/usb/dwc3/dwc3-exynos.c
3967 +@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
3968 + for (i = 0; i < exynos->num_clks; i++) {
3969 + ret = clk_prepare_enable(exynos->clks[i]);
3970 + if (ret) {
3971 +- while (--i > 0)
3972 ++ while (i-- > 0)
3973 + clk_disable_unprepare(exynos->clks[i]);
3974 + return ret;
3975 + }
3976 +@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
3977 + for (i = 0; i < exynos->num_clks; i++) {
3978 + ret = clk_prepare_enable(exynos->clks[i]);
3979 + if (ret) {
3980 +- while (--i > 0)
3981 ++ while (i-- > 0)
3982 + clk_disable_unprepare(exynos->clks[i]);
3983 + return ret;
3984 + }
3985 +diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
3986 +index d7312eed6088..91ea3083e7ad 100644
3987 +--- a/drivers/usb/phy/Kconfig
3988 ++++ b/drivers/usb/phy/Kconfig
3989 +@@ -21,7 +21,7 @@ config AB8500_USB
3990 +
3991 + config FSL_USB2_OTG
3992 + bool "Freescale USB OTG Transceiver Driver"
3993 +- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
3994 ++ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
3995 + depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
3996 + select USB_PHY
3997 + help
3998 +diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
3999 +index d441244b79df..28d9c2b1b3bb 100644
4000 +--- a/fs/autofs/expire.c
4001 ++++ b/fs/autofs/expire.c
4002 +@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
4003 + pkt.len = dentry->d_name.len;
4004 + memcpy(pkt.name, dentry->d_name.name, pkt.len);
4005 + pkt.name[pkt.len] = '\0';
4006 +- dput(dentry);
4007 +
4008 + if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
4009 + ret = -EFAULT;
4010 +@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
4011 + complete_all(&ino->expire_complete);
4012 + spin_unlock(&sbi->fs_lock);
4013 +
4014 ++ dput(dentry);
4015 ++
4016 + return ret;
4017 + }
4018 +
4019 +diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
4020 +index 846c052569dd..3c14a8e45ffb 100644
4021 +--- a/fs/autofs/inode.c
4022 ++++ b/fs/autofs/inode.c
4023 +@@ -255,8 +255,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
4024 + }
4025 + root_inode = autofs_get_inode(s, S_IFDIR | 0755);
4026 + root = d_make_root(root_inode);
4027 +- if (!root)
4028 ++ if (!root) {
4029 ++ ret = -ENOMEM;
4030 + goto fail_ino;
4031 ++ }
4032 + pipe = NULL;
4033 +
4034 + root->d_fsdata = ino;
4035 +diff --git a/fs/buffer.c b/fs/buffer.c
4036 +index 1286c2b95498..72e33ffa00ff 100644
4037 +--- a/fs/buffer.c
4038 ++++ b/fs/buffer.c
4039 +@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
4040 + struct buffer_head *head;
4041 + struct page *page;
4042 + int all_mapped = 1;
4043 ++ static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
4044 +
4045 + index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
4046 + page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
4047 +@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
4048 + * file io on the block device and getblk. It gets dealt with
4049 + * elsewhere, don't buffer_error if we had some unmapped buffers
4050 + */
4051 +- if (all_mapped) {
4052 +- printk("__find_get_block_slow() failed. "
4053 +- "block=%llu, b_blocknr=%llu\n",
4054 +- (unsigned long long)block,
4055 +- (unsigned long long)bh->b_blocknr);
4056 +- printk("b_state=0x%08lx, b_size=%zu\n",
4057 +- bh->b_state, bh->b_size);
4058 +- printk("device %pg blocksize: %d\n", bdev,
4059 +- 1 << bd_inode->i_blkbits);
4060 ++ ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
4061 ++ if (all_mapped && __ratelimit(&last_warned)) {
4062 ++ printk("__find_get_block_slow() failed. block=%llu, "
4063 ++ "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
4064 ++ "device %pg blocksize: %d\n",
4065 ++ (unsigned long long)block,
4066 ++ (unsigned long long)bh->b_blocknr,
4067 ++ bh->b_state, bh->b_size, bdev,
4068 ++ 1 << bd_inode->i_blkbits);
4069 + }
4070 + out_unlock:
4071 + spin_unlock(&bd_mapping->private_lock);
4072 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
4073 +index 5671d5ee7f58..eaf39fb71f8b 100644
4074 +--- a/fs/cifs/smb2pdu.h
4075 ++++ b/fs/cifs/smb2pdu.h
4076 +@@ -84,8 +84,8 @@
4077 +
4078 + #define NUMBER_OF_SMB2_COMMANDS 0x0013
4079 +
4080 +-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
4081 +-#define MAX_SMB2_HDR_SIZE 0x00b0
4082 ++/* 52 transform hdr + 64 hdr + 88 create rsp */
4083 ++#define MAX_SMB2_HDR_SIZE 204
4084 +
4085 + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
4086 + #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
4087 +diff --git a/fs/drop_caches.c b/fs/drop_caches.c
4088 +index 82377017130f..d31b6c72b476 100644
4089 +--- a/fs/drop_caches.c
4090 ++++ b/fs/drop_caches.c
4091 +@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4092 + spin_lock(&sb->s_inode_list_lock);
4093 + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
4094 + spin_lock(&inode->i_lock);
4095 ++ /*
4096 ++		 * We must skip inodes in an unusual state. We may also skip
4097 ++		 * inodes without pages, but we deliberately won't, in case
4098 ++		 * we need to reschedule to avoid softlockups.
4099 ++ */
4100 + if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
4101 +- (inode->i_mapping->nrpages == 0)) {
4102 ++ (inode->i_mapping->nrpages == 0 && !need_resched())) {
4103 + spin_unlock(&inode->i_lock);
4104 + continue;
4105 + }
4106 +@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4107 + spin_unlock(&inode->i_lock);
4108 + spin_unlock(&sb->s_inode_list_lock);
4109 +
4110 ++ cond_resched();
4111 + invalidate_mapping_pages(inode->i_mapping, 0, -1);
4112 + iput(toput_inode);
4113 + toput_inode = inode;
4114 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
4115 +index 05431324b262..57cdce53b64b 100644
4116 +--- a/fs/gfs2/glock.c
4117 ++++ b/fs/gfs2/glock.c
4118 +@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
4119 +
4120 + static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
4121 + {
4122 +- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
4123 ++ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
4124 +
4125 + return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
4126 + }
4127 +diff --git a/fs/iomap.c b/fs/iomap.c
4128 +index ce837d962d47..83d9a196fe3e 100644
4129 +--- a/fs/iomap.c
4130 ++++ b/fs/iomap.c
4131 +@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
4132 + atomic_set(&iop->read_count, 0);
4133 + atomic_set(&iop->write_count, 0);
4134 + bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
4135 ++
4136 ++ /*
4137 ++ * migrate_page_move_mapping() assumes that pages with private data have
4138 ++ * their count elevated by 1.
4139 ++ */
4140 ++ get_page(page);
4141 + set_page_private(page, (unsigned long)iop);
4142 + SetPagePrivate(page);
4143 + return iop;
4144 +@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
4145 + WARN_ON_ONCE(atomic_read(&iop->write_count));
4146 + ClearPagePrivate(page);
4147 + set_page_private(page, 0);
4148 ++ put_page(page);
4149 + kfree(iop);
4150 + }
4151 +
4152 +@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
4153 +
4154 + if (page_has_private(page)) {
4155 + ClearPagePrivate(page);
4156 ++ get_page(newpage);
4157 + set_page_private(newpage, page_private(page));
4158 + set_page_private(page, 0);
4159 ++ put_page(page);
4160 + SetPagePrivate(newpage);
4161 + }
4162 +
4163 +@@ -1797,6 +1806,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4164 + loff_t pos = iocb->ki_pos, start = pos;
4165 + loff_t end = iocb->ki_pos + count - 1, ret = 0;
4166 + unsigned int flags = IOMAP_DIRECT;
4167 ++ bool wait_for_completion = is_sync_kiocb(iocb);
4168 + struct blk_plug plug;
4169 + struct iomap_dio *dio;
4170 +
4171 +@@ -1816,7 +1826,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4172 + dio->end_io = end_io;
4173 + dio->error = 0;
4174 + dio->flags = 0;
4175 +- dio->wait_for_completion = is_sync_kiocb(iocb);
4176 +
4177 + dio->submit.iter = iter;
4178 + dio->submit.waiter = current;
4179 +@@ -1871,7 +1880,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4180 + dio_warn_stale_pagecache(iocb->ki_filp);
4181 + ret = 0;
4182 +
4183 +- if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
4184 ++ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
4185 + !inode->i_sb->s_dio_done_wq) {
4186 + ret = sb_init_dio_done_wq(inode->i_sb);
4187 + if (ret < 0)
4188 +@@ -1887,7 +1896,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4189 + if (ret <= 0) {
4190 + /* magic error code to fall back to buffered I/O */
4191 + if (ret == -ENOTBLK) {
4192 +- dio->wait_for_completion = true;
4193 ++ wait_for_completion = true;
4194 + ret = 0;
4195 + }
4196 + break;
4197 +@@ -1909,8 +1918,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4198 + if (dio->flags & IOMAP_DIO_WRITE_FUA)
4199 + dio->flags &= ~IOMAP_DIO_NEED_SYNC;
4200 +
4201 ++ /*
4202 ++ * We are about to drop our additional submission reference, which
4203 ++	 * might be the last reference to the dio. There are three
4204 ++ * different ways we can progress here:
4205 ++ *
4206 ++ * (a) If this is the last reference we will always complete and free
4207 ++ * the dio ourselves.
4208 ++ * (b) If this is not the last reference, and we serve an asynchronous
4209 ++	 *     iocb, we must never touch the dio after the decrement; the
4210 ++ * I/O completion handler will complete and free it.
4211 ++ * (c) If this is not the last reference, but we serve a synchronous
4212 ++ * iocb, the I/O completion handler will wake us up on the drop
4213 ++ * of the final reference, and we will complete and free it here
4214 ++ * after we got woken by the I/O completion handler.
4215 ++ */
4216 ++ dio->wait_for_completion = wait_for_completion;
4217 + if (!atomic_dec_and_test(&dio->ref)) {
4218 +- if (!dio->wait_for_completion)
4219 ++ if (!wait_for_completion)
4220 + return -EIOCBQUEUED;
4221 +
4222 + for (;;) {
4223 +@@ -1927,9 +1952,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4224 + __set_current_state(TASK_RUNNING);
4225 + }
4226 +
4227 +- ret = iomap_dio_complete(dio);
4228 +-
4229 +- return ret;
4230 ++ return iomap_dio_complete(dio);
4231 +
4232 + out_free_dio:
4233 + kfree(dio);
4234 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4235 +index 5ef2c71348bd..6b666d187907 100644
4236 +--- a/fs/nfs/super.c
4237 ++++ b/fs/nfs/super.c
4238 +@@ -1906,6 +1906,11 @@ static int nfs_parse_devname(const char *dev_name,
4239 + size_t len;
4240 + char *end;
4241 +
4242 ++ if (unlikely(!dev_name || !*dev_name)) {
4243 ++ dfprintk(MOUNT, "NFS: device name not specified\n");
4244 ++ return -EINVAL;
4245 ++ }
4246 ++
4247 + 	/* Is the host name protected with square brackets? */
4248 + if (*dev_name == '[') {
4249 + end = strchr(++dev_name, ']');
4250 +diff --git a/fs/proc/generic.c b/fs/proc/generic.c
4251 +index 8ae109429a88..e39bac94dead 100644
4252 +--- a/fs/proc/generic.c
4253 ++++ b/fs/proc/generic.c
4254 +@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
4255 + inode = proc_get_inode(dir->i_sb, de);
4256 + if (!inode)
4257 + return ERR_PTR(-ENOMEM);
4258 +- d_set_d_op(dentry, &proc_misc_dentry_ops);
4259 ++ d_set_d_op(dentry, de->proc_dops);
4260 + return d_splice_alias(inode, dentry);
4261 + }
4262 + read_unlock(&proc_subdir_lock);
4263 +@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
4264 + INIT_LIST_HEAD(&ent->pde_openers);
4265 + proc_set_user(ent, (*parent)->uid, (*parent)->gid);
4266 +
4267 ++ ent->proc_dops = &proc_misc_dentry_ops;
4268 ++
4269 + out:
4270 + return ent;
4271 + }
4272 +diff --git a/fs/proc/internal.h b/fs/proc/internal.h
4273 +index 5185d7f6a51e..95b14196f284 100644
4274 +--- a/fs/proc/internal.h
4275 ++++ b/fs/proc/internal.h
4276 +@@ -44,6 +44,7 @@ struct proc_dir_entry {
4277 + struct completion *pde_unload_completion;
4278 + const struct inode_operations *proc_iops;
4279 + const struct file_operations *proc_fops;
4280 ++ const struct dentry_operations *proc_dops;
4281 + union {
4282 + const struct seq_operations *seq_ops;
4283 + int (*single_show)(struct seq_file *, void *);
4284 +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
4285 +index d5e0fcb3439e..a7b12435519e 100644
4286 +--- a/fs/proc/proc_net.c
4287 ++++ b/fs/proc/proc_net.c
4288 +@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
4289 + return maybe_get_net(PDE_NET(PDE(inode)));
4290 + }
4291 +
4292 ++static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
4293 ++{
4294 ++ return 0;
4295 ++}
4296 ++
4297 ++static const struct dentry_operations proc_net_dentry_ops = {
4298 ++ .d_revalidate = proc_net_d_revalidate,
4299 ++ .d_delete = always_delete_dentry,
4300 ++};
4301 ++
4302 ++static void pde_force_lookup(struct proc_dir_entry *pde)
4303 ++{
4304 ++ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
4305 ++ pde->proc_dops = &proc_net_dentry_ops;
4306 ++}
4307 ++
4308 + static int seq_open_net(struct inode *inode, struct file *file)
4309 + {
4310 + unsigned int state_size = PDE(inode)->state_size;
4311 +@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
4312 + p = proc_create_reg(name, mode, &parent, data);
4313 + if (!p)
4314 + return NULL;
4315 ++ pde_force_lookup(p);
4316 + p->proc_fops = &proc_net_seq_fops;
4317 + p->seq_ops = ops;
4318 + p->state_size = state_size;
4319 +@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
4320 + p = proc_create_reg(name, mode, &parent, data);
4321 + if (!p)
4322 + return NULL;
4323 ++ pde_force_lookup(p);
4324 + p->proc_fops = &proc_net_seq_fops;
4325 + p->seq_ops = ops;
4326 + p->state_size = state_size;
4327 +@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
4328 + p = proc_create_reg(name, mode, &parent, data);
4329 + if (!p)
4330 + return NULL;
4331 ++ pde_force_lookup(p);
4332 + p->proc_fops = &proc_net_single_fops;
4333 + p->single_show = show;
4334 + return proc_register(parent, p);
4335 +@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
4336 + p = proc_create_reg(name, mode, &parent, data);
4337 + if (!p)
4338 + return NULL;
4339 ++ pde_force_lookup(p);
4340 + p->proc_fops = &proc_net_single_fops;
4341 + p->single_show = show;
4342 + p->write = write;
4343 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
4344 +index bfe1639df02d..97fc498dc767 100644
4345 +--- a/include/drm/drm_cache.h
4346 ++++ b/include/drm/drm_cache.h
4347 +@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
4348 + return false;
4349 + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
4350 + return false;
4351 ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4352 ++ /*
4353 ++ * The DRM driver stack is designed to work with cache coherent devices
4354 ++ * only, but permits an optimization to be enabled in some cases, where
4355 ++ * for some buffers, both the CPU and the GPU use uncached mappings,
4356 ++ * removing the need for DMA snooping and allocation in the CPU caches.
4357 ++ *
4358 ++ * The use of uncached GPU mappings relies on the correct implementation
4359 ++ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
4360 ++ * will use cached mappings nonetheless. On x86 platforms, this does not
4361 ++ * seem to matter, as uncached CPU mappings will snoop the caches in any
4362 ++ * case. However, on ARM and arm64, enabling this optimization on a
4363 ++ * platform where NoSnoop is ignored results in loss of coherency, which
4364 ++ * breaks correct operation of the device. Since we have no way of
4365 ++ * detecting whether NoSnoop works or not, just disable this
4366 ++ * optimization entirely for ARM and arm64.
4367 ++ */
4368 ++ return false;
4369 + #else
4370 + return true;
4371 + #endif
4372 +diff --git a/include/linux/filter.h b/include/linux/filter.h
4373 +index b776626aeb84..958eddbc44d3 100644
4374 +--- a/include/linux/filter.h
4375 ++++ b/include/linux/filter.h
4376 +@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
4377 + return qdisc_skb_cb(skb)->data;
4378 + }
4379 +
4380 +-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
4381 +- struct sk_buff *skb)
4382 ++static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
4383 ++ struct sk_buff *skb)
4384 + {
4385 + u8 *cb_data = bpf_skb_cb(skb);
4386 + u8 cb_saved[BPF_SKB_CB_LEN];
4387 +@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
4388 + return res;
4389 + }
4390 +
4391 ++static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
4392 ++ struct sk_buff *skb)
4393 ++{
4394 ++ u32 res;
4395 ++
4396 ++ preempt_disable();
4397 ++ res = __bpf_prog_run_save_cb(prog, skb);
4398 ++ preempt_enable();
4399 ++ return res;
4400 ++}
4401 ++
4402 + static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
4403 + struct sk_buff *skb)
4404 + {
4405 + u8 *cb_data = bpf_skb_cb(skb);
4406 ++ u32 res;
4407 +
4408 + if (unlikely(prog->cb_access))
4409 + memset(cb_data, 0, BPF_SKB_CB_LEN);
4410 +
4411 +- return BPF_PROG_RUN(prog, skb);
4412 ++ preempt_disable();
4413 ++ res = BPF_PROG_RUN(prog, skb);
4414 ++ preempt_enable();
4415 ++ return res;
4416 + }
4417 +
4418 + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
4419 +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
4420 +index 071b4cbdf010..c848a7cc502e 100644
4421 +--- a/include/linux/irqchip/arm-gic-v3.h
4422 ++++ b/include/linux/irqchip/arm-gic-v3.h
4423 +@@ -319,7 +319,7 @@
4424 + #define GITS_TYPER_PLPIS (1UL << 0)
4425 + #define GITS_TYPER_VLPIS (1UL << 1)
4426 + #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
4427 +-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
4428 ++#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
4429 + #define GITS_TYPER_IDBITS_SHIFT 8
4430 + #define GITS_TYPER_DEVBITS_SHIFT 13
4431 + #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
4432 +diff --git a/include/linux/signal.h b/include/linux/signal.h
4433 +index f428e86f4800..b5d99482d3fe 100644
4434 +--- a/include/linux/signal.h
4435 ++++ b/include/linux/signal.h
4436 +@@ -388,7 +388,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
4437 + #endif
4438 +
4439 + #define siginmask(sig, mask) \
4440 +- ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
4441 ++ ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
4442 +
4443 + #define SIG_KERNEL_ONLY_MASK (\
4444 + rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
4445 +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
4446 +index 7ddfc65586b0..4335bd771ce5 100644
4447 +--- a/include/linux/stmmac.h
4448 ++++ b/include/linux/stmmac.h
4449 +@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
4450 + struct clk *pclk;
4451 + struct clk *clk_ptp_ref;
4452 + unsigned int clk_ptp_rate;
4453 ++ unsigned int clk_ref_rate;
4454 + struct reset_control *stmmac_rst;
4455 + struct stmmac_axi *axi;
4456 + int has_gmac4;
4457 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
4458 +index 841835a387e1..b4984bbbe157 100644
4459 +--- a/include/net/netfilter/nf_tables.h
4460 ++++ b/include/net/netfilter/nf_tables.h
4461 +@@ -469,9 +469,7 @@ struct nft_set_binding {
4462 + int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
4463 + struct nft_set_binding *binding);
4464 + void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
4465 +- struct nft_set_binding *binding);
4466 +-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
4467 +- struct nft_set_binding *binding);
4468 ++ struct nft_set_binding *binding, bool commit);
4469 + void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
4470 +
4471 + /**
4472 +@@ -721,6 +719,13 @@ struct nft_expr_type {
4473 + #define NFT_EXPR_STATEFUL 0x1
4474 + #define NFT_EXPR_GC 0x2
4475 +
4476 ++enum nft_trans_phase {
4477 ++ NFT_TRANS_PREPARE,
4478 ++ NFT_TRANS_ABORT,
4479 ++ NFT_TRANS_COMMIT,
4480 ++ NFT_TRANS_RELEASE
4481 ++};
4482 ++
4483 + /**
4484 + * struct nft_expr_ops - nf_tables expression operations
4485 + *
4486 +@@ -750,7 +755,8 @@ struct nft_expr_ops {
4487 + void (*activate)(const struct nft_ctx *ctx,
4488 + const struct nft_expr *expr);
4489 + void (*deactivate)(const struct nft_ctx *ctx,
4490 +- const struct nft_expr *expr);
4491 ++ const struct nft_expr *expr,
4492 ++ enum nft_trans_phase phase);
4493 + void (*destroy)(const struct nft_ctx *ctx,
4494 + const struct nft_expr *expr);
4495 + void (*destroy_clone)(const struct nft_ctx *ctx,
4496 +@@ -1323,12 +1329,15 @@ struct nft_trans_rule {
4497 + struct nft_trans_set {
4498 + struct nft_set *set;
4499 + u32 set_id;
4500 ++ bool bound;
4501 + };
4502 +
4503 + #define nft_trans_set(trans) \
4504 + (((struct nft_trans_set *)trans->data)->set)
4505 + #define nft_trans_set_id(trans) \
4506 + (((struct nft_trans_set *)trans->data)->set_id)
4507 ++#define nft_trans_set_bound(trans) \
4508 ++ (((struct nft_trans_set *)trans->data)->bound)
4509 +
4510 + struct nft_trans_chain {
4511 + bool update;
4512 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
4513 +index 9425c2fb872f..6c24b1fb2db8 100644
4514 +--- a/kernel/bpf/cgroup.c
4515 ++++ b/kernel/bpf/cgroup.c
4516 +@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
4517 + bpf_compute_and_save_data_end(skb, &saved_data_end);
4518 +
4519 + ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
4520 +- bpf_prog_run_save_cb);
4521 ++ __bpf_prog_run_save_cb);
4522 + bpf_restore_data_end(skb, saved_data_end);
4523 + __skb_pull(skb, offset);
4524 + skb->sk = save_sk;
4525 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4526 +index 2c1790288138..3388c0b85a57 100644
4527 +--- a/kernel/bpf/hashtab.c
4528 ++++ b/kernel/bpf/hashtab.c
4529 +@@ -677,7 +677,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
4530 + }
4531 +
4532 + if (htab_is_prealloc(htab)) {
4533 +- pcpu_freelist_push(&htab->freelist, &l->fnode);
4534 ++ __pcpu_freelist_push(&htab->freelist, &l->fnode);
4535 + } else {
4536 + atomic_dec(&htab->count);
4537 + l->htab = htab;
4538 +@@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
4539 + } else {
4540 + struct pcpu_freelist_node *l;
4541 +
4542 +- l = pcpu_freelist_pop(&htab->freelist);
4543 ++ l = __pcpu_freelist_pop(&htab->freelist);
4544 + if (!l)
4545 + return ERR_PTR(-E2BIG);
4546 + l_new = container_of(l, struct htab_elem, fnode);
4547 +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
4548 +index 673fa6fe2d73..0c1b4ba9e90e 100644
4549 +--- a/kernel/bpf/percpu_freelist.c
4550 ++++ b/kernel/bpf/percpu_freelist.c
4551 +@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
4552 + free_percpu(s->freelist);
4553 + }
4554 +
4555 +-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4556 +- struct pcpu_freelist_node *node)
4557 ++static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
4558 ++ struct pcpu_freelist_node *node)
4559 + {
4560 + raw_spin_lock(&head->lock);
4561 + node->next = head->first;
4562 +@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4563 + raw_spin_unlock(&head->lock);
4564 + }
4565 +
4566 +-void pcpu_freelist_push(struct pcpu_freelist *s,
4567 ++void __pcpu_freelist_push(struct pcpu_freelist *s,
4568 + struct pcpu_freelist_node *node)
4569 + {
4570 + struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
4571 +
4572 +- __pcpu_freelist_push(head, node);
4573 ++ ___pcpu_freelist_push(head, node);
4574 ++}
4575 ++
4576 ++void pcpu_freelist_push(struct pcpu_freelist *s,
4577 ++ struct pcpu_freelist_node *node)
4578 ++{
4579 ++ unsigned long flags;
4580 ++
4581 ++ local_irq_save(flags);
4582 ++ __pcpu_freelist_push(s, node);
4583 ++ local_irq_restore(flags);
4584 + }
4585 +
4586 + void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4587 +@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4588 + for_each_possible_cpu(cpu) {
4589 + again:
4590 + head = per_cpu_ptr(s->freelist, cpu);
4591 +- __pcpu_freelist_push(head, buf);
4592 ++ ___pcpu_freelist_push(head, buf);
4593 + i++;
4594 + buf += elem_size;
4595 + if (i == nr_elems)
4596 +@@ -74,14 +84,12 @@ again:
4597 + local_irq_restore(flags);
4598 + }
4599 +
4600 +-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4601 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
4602 + {
4603 + struct pcpu_freelist_head *head;
4604 + struct pcpu_freelist_node *node;
4605 +- unsigned long flags;
4606 + int orig_cpu, cpu;
4607 +
4608 +- local_irq_save(flags);
4609 + orig_cpu = cpu = raw_smp_processor_id();
4610 + while (1) {
4611 + head = per_cpu_ptr(s->freelist, cpu);
4612 +@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4613 + node = head->first;
4614 + if (node) {
4615 + head->first = node->next;
4616 +- raw_spin_unlock_irqrestore(&head->lock, flags);
4617 ++ raw_spin_unlock(&head->lock);
4618 + return node;
4619 + }
4620 + raw_spin_unlock(&head->lock);
4621 + cpu = cpumask_next(cpu, cpu_possible_mask);
4622 + if (cpu >= nr_cpu_ids)
4623 + cpu = 0;
4624 +- if (cpu == orig_cpu) {
4625 +- local_irq_restore(flags);
4626 ++ if (cpu == orig_cpu)
4627 + return NULL;
4628 +- }
4629 + }
4630 + }
4631 ++
4632 ++struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4633 ++{
4634 ++ struct pcpu_freelist_node *ret;
4635 ++ unsigned long flags;
4636 ++
4637 ++ local_irq_save(flags);
4638 ++ ret = __pcpu_freelist_pop(s);
4639 ++ local_irq_restore(flags);
4640 ++ return ret;
4641 ++}
4642 +diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
4643 +index 3049aae8ea1e..c3960118e617 100644
4644 +--- a/kernel/bpf/percpu_freelist.h
4645 ++++ b/kernel/bpf/percpu_freelist.h
4646 +@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
4647 + struct pcpu_freelist_node *next;
4648 + };
4649 +
4650 ++/* pcpu_freelist_* do spin_lock_irqsave. */
4651 + void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4652 + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
4653 ++/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
4654 ++void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4655 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
4656 + void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4657 + u32 nr_elems);
4658 + int pcpu_freelist_init(struct pcpu_freelist *);
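The two freelist hunks above split each operation into a lock-only inner helper (double underscore) and a wrapper that also disables interrupts, so the htab update path, which already runs with IRQs off, can call the cheap variant directly. Below is a minimal userspace sketch of that layering, using pthread mutexes as the spinlock and signal masking as a stand-in for local_irq_save()/local_irq_restore(); compile with -pthread. All names are illustrative, not the kernel API.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

struct node { struct node *next; };

struct freelist {
	pthread_mutex_t lock;          /* stands in for raw_spin_lock */
	struct node *first;
};

/* lock-only variant: caller must already have "interrupts" masked */
static void __freelist_push(struct freelist *s, struct node *n)
{
	pthread_mutex_lock(&s->lock);
	n->next = s->first;
	s->first = n;
	pthread_mutex_unlock(&s->lock);
}

/* full variant: masks signals around the helper, the way
 * pcpu_freelist_push() now wraps __pcpu_freelist_push() */
static void freelist_push(struct freelist *s, struct node *n)
{
	sigset_t all, old;

	sigfillset(&all);
	pthread_sigmask(SIG_SETMASK, &all, &old);  /* ~ local_irq_save */
	__freelist_push(s, n);
	pthread_sigmask(SIG_SETMASK, &old, NULL);  /* ~ local_irq_restore */
}

int main(void)
{
	struct freelist fl = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node a, b;

	freelist_push(&fl, &a);
	freelist_push(&fl, &b);
	printf("top of list is b: %d\n", fl.first == &b);
	return 0;
}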
4659 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4660 +index cf5040fd5434..5f03ddf7b615 100644
4661 +--- a/kernel/bpf/syscall.c
4662 ++++ b/kernel/bpf/syscall.c
4663 +@@ -712,8 +712,13 @@ static int map_lookup_elem(union bpf_attr *attr)
4664 +
4665 + if (bpf_map_is_dev_bound(map)) {
4666 + err = bpf_map_offload_lookup_elem(map, key, value);
4667 +- } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4668 +- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4669 ++ goto done;
4670 ++ }
4671 ++
4672 ++ preempt_disable();
4673 ++ this_cpu_inc(bpf_prog_active);
4674 ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4675 ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4676 + err = bpf_percpu_hash_copy(map, key, value);
4677 + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
4678 + err = bpf_percpu_array_copy(map, key, value);
4679 +@@ -743,7 +748,10 @@ static int map_lookup_elem(union bpf_attr *attr)
4680 + }
4681 + rcu_read_unlock();
4682 + }
4683 ++ this_cpu_dec(bpf_prog_active);
4684 ++ preempt_enable();
4685 +
4686 ++done:
4687 + if (err)
4688 + goto free_value;
4689 +
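The map_lookup_elem() hunk above brackets the per-cpu copies with preempt_disable() and an increment of bpf_prog_active, so a tracing BPF program cannot fire on the same CPU and touch the map while user space copies it out. A rough userspace sketch of that recursion-guard pattern, with a per-thread counter standing in for the per-CPU variable (illustrative only):

#include <stdio.h>

static __thread int bpf_prog_active;   /* ~ per-CPU counter */

/* a tracing program would refuse to run while the guard is held */
static int run_prog(void)
{
	return bpf_prog_active ? -1 : 0;
}

int main(void)
{
	bpf_prog_active++;                  /* ~ this_cpu_inc() */
	printf("prog while guarded: %d\n", run_prog());
	bpf_prog_active--;                  /* ~ this_cpu_dec() */
	printf("prog after guard:   %d\n", run_prog());
	return 0;
}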
4690 +diff --git a/kernel/events/core.c b/kernel/events/core.c
4691 +index 699bc25d6204..18997730b665 100644
4692 +--- a/kernel/events/core.c
4693 ++++ b/kernel/events/core.c
4694 +@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
4695 + void __user *buffer, size_t *lenp,
4696 + loff_t *ppos)
4697 + {
4698 +- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4699 +-
4700 +- if (ret || !write)
4701 +- return ret;
4702 +-
4703 ++ int ret;
4704 ++ int perf_cpu = sysctl_perf_cpu_time_max_percent;
4705 + /*
4706 + * If throttling is disabled don't allow the write:
4707 + */
4708 +- if (sysctl_perf_cpu_time_max_percent == 100 ||
4709 +- sysctl_perf_cpu_time_max_percent == 0)
4710 ++ if (write && (perf_cpu == 100 || perf_cpu == 0))
4711 + return -EINVAL;
4712 +
4713 ++ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4714 ++ if (ret || !write)
4715 ++ return ret;
4716 ++
4717 + max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
4718 + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
4719 + update_perf_cpu_limits();
4720 +diff --git a/kernel/relay.c b/kernel/relay.c
4721 +index 04f248644e06..9e0f52375487 100644
4722 +--- a/kernel/relay.c
4723 ++++ b/kernel/relay.c
4724 +@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
4725 + dentry = chan->cb->create_buf_file(tmpname, chan->parent,
4726 + S_IRUSR, buf,
4727 + &chan->is_global);
4728 ++ if (IS_ERR(dentry))
4729 ++ dentry = NULL;
4730 +
4731 + kfree(tmpname);
4732 +
4733 +@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
4734 + dentry = chan->cb->create_buf_file(NULL, NULL,
4735 + S_IRUSR, buf,
4736 + &chan->is_global);
4737 +- if (WARN_ON(dentry))
4738 ++ if (IS_ERR_OR_NULL(dentry))
4739 + goto free_buf;
4740 + }
4741 +
4742 +diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
4743 +index fe24de3fbc93..c3484785b179 100644
4744 +--- a/kernel/sched/psi.c
4745 ++++ b/kernel/sched/psi.c
4746 +@@ -124,6 +124,7 @@
4747 + * sampling of the aggregate task states would be.
4748 + */
4749 +
4750 ++#include "../workqueue_internal.h"
4751 + #include <linux/sched/loadavg.h>
4752 + #include <linux/seq_file.h>
4753 + #include <linux/proc_fs.h>
4754 +@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
4755 + groupc->tasks[t]++;
4756 +
4757 + write_seqcount_end(&groupc->seq);
4758 +-
4759 +- if (!delayed_work_pending(&group->clock_work))
4760 +- schedule_delayed_work(&group->clock_work, PSI_FREQ);
4761 + }
4762 +
4763 + static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
4764 +@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
4765 + {
4766 + int cpu = task_cpu(task);
4767 + struct psi_group *group;
4768 ++ bool wake_clock = true;
4769 + void *iter = NULL;
4770 +
4771 + if (!task->pid)
4772 +@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
4773 + task->psi_flags &= ~clear;
4774 + task->psi_flags |= set;
4775 +
4776 +- while ((group = iterate_groups(task, &iter)))
4777 ++ /*
4778 ++ * Periodic aggregation shuts off if there is a period of no
4779 ++ * task changes, so we wake it back up if necessary. However,
4780 ++ * don't do this if the task change is the aggregation worker
4781 ++ * itself going to sleep, or we'll ping-pong forever.
4782 ++ */
4783 ++ if (unlikely((clear & TSK_RUNNING) &&
4784 ++ (task->flags & PF_WQ_WORKER) &&
4785 ++ wq_worker_last_func(task) == psi_update_work))
4786 ++ wake_clock = false;
4787 ++
4788 ++ while ((group = iterate_groups(task, &iter))) {
4789 + psi_group_change(group, cpu, clear, set);
4790 ++ if (wake_clock && !delayed_work_pending(&group->clock_work))
4791 ++ schedule_delayed_work(&group->clock_work, PSI_FREQ);
4792 ++ }
4793 + }
4794 +
4795 + void psi_memstall_tick(struct task_struct *task, int cpu)
4796 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
4797 +index 9864a35c8bb5..6c28d519447d 100644
4798 +--- a/kernel/trace/bpf_trace.c
4799 ++++ b/kernel/trace/bpf_trace.c
4800 +@@ -1158,22 +1158,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
4801 +
4802 + int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4803 + {
4804 +- int err;
4805 +-
4806 +- mutex_lock(&bpf_event_mutex);
4807 +- err = __bpf_probe_register(btp, prog);
4808 +- mutex_unlock(&bpf_event_mutex);
4809 +- return err;
4810 ++ return __bpf_probe_register(btp, prog);
4811 + }
4812 +
4813 + int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4814 + {
4815 +- int err;
4816 +-
4817 +- mutex_lock(&bpf_event_mutex);
4818 +- err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4819 +- mutex_unlock(&bpf_event_mutex);
4820 +- return err;
4821 ++ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4822 + }
4823 +
4824 + int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
4825 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4826 +index 0280deac392e..288b2105bbb1 100644
4827 +--- a/kernel/workqueue.c
4828 ++++ b/kernel/workqueue.c
4829 +@@ -909,6 +909,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
4830 + return to_wakeup ? to_wakeup->task : NULL;
4831 + }
4832 +
4833 ++/**
4834 ++ * wq_worker_last_func - retrieve worker's last work function
4835 ++ *
4836 ++ * Determine the last function a worker executed. This is called from
4837 ++ * the scheduler to get a worker's last known identity.
4838 ++ *
4839 ++ * CONTEXT:
4840 ++ * spin_lock_irq(rq->lock)
4841 ++ *
4842 ++ * Return:
4843 ++ * The last work function %current executed as a worker, NULL if it
4844 ++ * hasn't executed any work yet.
4845 ++ */
4846 ++work_func_t wq_worker_last_func(struct task_struct *task)
4847 ++{
4848 ++ struct worker *worker = kthread_data(task);
4849 ++
4850 ++ return worker->last_func;
4851 ++}
4852 ++
4853 + /**
4854 + * worker_set_flags - set worker flags and adjust nr_running accordingly
4855 + * @worker: self
4856 +@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
4857 + if (unlikely(cpu_intensive))
4858 + worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
4859 +
4860 ++ /* tag the worker for identification in schedule() */
4861 ++ worker->last_func = worker->current_func;
4862 ++
4863 + /* we're done with it, release */
4864 + hash_del(&worker->hentry);
4865 + worker->current_work = NULL;
4866 +diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
4867 +index 66fbb5a9e633..cb68b03ca89a 100644
4868 +--- a/kernel/workqueue_internal.h
4869 ++++ b/kernel/workqueue_internal.h
4870 +@@ -53,6 +53,9 @@ struct worker {
4871 +
4872 + /* used only by rescuers to point to the target workqueue */
4873 + struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
4874 ++
4875 ++ /* used by the scheduler to determine a worker's last known identity */
4876 ++ work_func_t last_func;
4877 + };
4878 +
4879 + /**
4880 +@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
4881 +
4882 + /*
4883 + * Scheduler hooks for concurrency managed workqueue. Only to be used from
4884 +- * sched/core.c and workqueue.c.
4885 ++ * sched/ and workqueue.c.
4886 + */
4887 + void wq_worker_waking_up(struct task_struct *task, int cpu);
4888 + struct task_struct *wq_worker_sleeping(struct task_struct *task);
4889 ++work_func_t wq_worker_last_func(struct task_struct *task);
4890 +
4891 + #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
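Together these workqueue hunks let the scheduler ask what a worker last ran: process_one_work() records the callback in worker->last_func, and wq_worker_last_func() exposes it so PSI can tell whether the task going to sleep is its own aggregation worker. A small userspace sketch of the same bookkeeping, assuming nothing beyond plain function pointers:

#include <stdio.h>

typedef void (*work_func_t)(void);

struct worker {
	work_func_t current_func;
	work_func_t last_func;             /* the new tag */
};

static void flush_logs(void)   { puts("flushing logs"); }
static void update_stats(void) { puts("updating stats"); }

static void process_one_work(struct worker *w, work_func_t fn)
{
	w->current_func = fn;
	fn();
	w->last_func = w->current_func;    /* tag before releasing */
	w->current_func = NULL;
}

int main(void)
{
	struct worker w = { NULL, NULL };

	process_one_work(&w, flush_logs);
	process_one_work(&w, update_stats);
	printf("last ran update_stats: %d\n", w.last_func == update_stats);
	return 0;
}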
4892 +diff --git a/lib/test_kmod.c b/lib/test_kmod.c
4893 +index d82d022111e0..9cf77628fc91 100644
4894 +--- a/lib/test_kmod.c
4895 ++++ b/lib/test_kmod.c
4896 +@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
4897 + config->test_driver = NULL;
4898 +
4899 + kfree_const(config->test_fs);
4900 +- config->test_driver = NULL;
4901 ++ config->test_fs = NULL;
4902 + }
4903 +
4904 + static void kmod_config_free(struct kmod_test_device *test_dev)
4905 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4906 +index 21d94b5677e8..cb201163666f 100644
4907 +--- a/mm/memory_hotplug.c
4908 ++++ b/mm/memory_hotplug.c
4909 +@@ -1189,11 +1189,13 @@ static inline int pageblock_free(struct page *page)
4910 + return PageBuddy(page) && page_order(page) >= pageblock_order;
4911 + }
4912 +
4913 +-/* Return the start of the next active pageblock after a given page */
4914 +-static struct page *next_active_pageblock(struct page *page)
4915 ++/* Return the pfn of the start of the next active pageblock after a given pfn */
4916 ++static unsigned long next_active_pageblock(unsigned long pfn)
4917 + {
4918 ++ struct page *page = pfn_to_page(pfn);
4919 ++
4920 + /* Ensure the starting page is pageblock-aligned */
4921 +- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
4922 ++ BUG_ON(pfn & (pageblock_nr_pages - 1));
4923 +
4924 + /* If the entire pageblock is free, move to the end of free page */
4925 + if (pageblock_free(page)) {
4926 +@@ -1201,16 +1203,16 @@ static struct page *next_active_pageblock(struct page *page)
4927 + /* be careful. we don't have locks, page_order can be changed.*/
4928 + order = page_order(page);
4929 + if ((order < MAX_ORDER) && (order >= pageblock_order))
4930 +- return page + (1 << order);
4931 ++ return pfn + (1 << order);
4932 + }
4933 +
4934 +- return page + pageblock_nr_pages;
4935 ++ return pfn + pageblock_nr_pages;
4936 + }
4937 +
4938 +-static bool is_pageblock_removable_nolock(struct page *page)
4939 ++static bool is_pageblock_removable_nolock(unsigned long pfn)
4940 + {
4941 ++ struct page *page = pfn_to_page(pfn);
4942 + struct zone *zone;
4943 +- unsigned long pfn;
4944 +
4945 + /*
4946 + * We have to be careful here because we are iterating over memory
4947 +@@ -1233,12 +1235,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
4948 + /* Checks if this range of memory is likely to be hot-removable. */
4949 + bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
4950 + {
4951 +- struct page *page = pfn_to_page(start_pfn);
4952 +- struct page *end_page = page + nr_pages;
4953 ++ unsigned long end_pfn, pfn;
4954 ++
4955 ++ end_pfn = min(start_pfn + nr_pages,
4956 ++ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
4957 +
4958 + /* Check the starting page of each pageblock within the range */
4959 +- for (; page < end_page; page = next_active_pageblock(page)) {
4960 +- if (!is_pageblock_removable_nolock(page))
4961 ++ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
4962 ++ if (!is_pageblock_removable_nolock(pfn))
4963 + return false;
4964 + cond_resched();
4965 + }
4966 +@@ -1274,6 +1278,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
4967 + i++;
4968 + if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
4969 + continue;
4970 ++ /* Check if we got outside of the zone */
4971 ++ if (zone && !zone_spans_pfn(zone, pfn + i))
4972 ++ return 0;
4973 + page = pfn_to_page(pfn + i);
4974 + if (zone && page_zone(page) != zone)
4975 + return 0;
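The memory_hotplug.c conversion replaces struct page pointer arithmetic with pfn arithmetic because, under sparse memory models, the page array is split into sections that need not be contiguous in memory: page + pageblock_nr_pages can step out of one section, while a pfn always round-trips safely through pfn_to_page(). A toy userspace model of the distinction; the section size and names are made up:

#include <stdio.h>
#include <stdlib.h>

#define PAGES_PER_SECTION 4

struct page { unsigned long pfn; };

/* two "sections" allocated independently -- not adjacent in memory */
static struct page *sections[2];

static struct page *pfn_to_page(unsigned long pfn)
{
	return &sections[pfn / PAGES_PER_SECTION][pfn % PAGES_PER_SECTION];
}

int main(void)
{
	unsigned long pfn;

	for (int s = 0; s < 2; s++) {
		sections[s] = malloc(sizeof(struct page) * PAGES_PER_SECTION);
		for (int i = 0; i < PAGES_PER_SECTION; i++)
			sections[s][i].pfn = s * PAGES_PER_SECTION + i;
	}

	/* pfn-based walk crosses the section boundary safely ... */
	for (pfn = 0; pfn < 2 * PAGES_PER_SECTION; pfn++)
		printf("pfn %lu -> page %p\n", pfn, (void *)pfn_to_page(pfn));

	/* ... whereas sections[0] + PAGES_PER_SECTION points past the
	 * first allocation, not at sections[1][0] */
	return 0;
}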
4976 +diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
4977 +index e8090f099eb8..ef0dec20c7d8 100644
4978 +--- a/net/batman-adv/bat_v_elp.c
4979 ++++ b/net/batman-adv/bat_v_elp.c
4980 +@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
4981 +
4982 + ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
4983 +
4984 ++ /* free the TID stats immediately */
4985 ++ cfg80211_sinfo_release_content(&sinfo);
4986 ++
4987 + dev_put(real_netdev);
4988 + if (ret == -ENOENT) {
4989 + /* Node is not associated anymore! It would be
4990 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4991 +index 5e55cef0cec3..6693e209efe8 100644
4992 +--- a/net/bridge/netfilter/ebtables.c
4993 ++++ b/net/bridge/netfilter/ebtables.c
4994 +@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
4995 +
4996 + xt_compat_lock(NFPROTO_BRIDGE);
4997 +
4998 +- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4999 +- if (ret < 0)
5000 +- goto out_unlock;
5001 ++ if (tmp.nentries) {
5002 ++ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
5003 ++ if (ret < 0)
5004 ++ goto out_unlock;
5005 ++ }
5006 ++
5007 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
5008 + if (ret < 0)
5009 + goto out_unlock;
5010 +diff --git a/net/core/filter.c b/net/core/filter.c
5011 +index 16350f8c8815..821050957aca 100644
5012 +--- a/net/core/filter.c
5013 ++++ b/net/core/filter.c
5014 +@@ -3927,10 +3927,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5015 + /* Only some socketops are supported */
5016 + switch (optname) {
5017 + case SO_RCVBUF:
5018 ++ val = min_t(u32, val, sysctl_rmem_max);
5019 + sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
5020 + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
5021 + break;
5022 + case SO_SNDBUF:
5023 ++ val = min_t(u32, val, sysctl_wmem_max);
5024 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
5025 + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
5026 + break;
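The bpf_setsockopt() hunk clamps the BPF-supplied value before it is doubled because sk_rcvbuf/sk_sndbuf are signed ints: an unchecked u32 close to UINT_MAX would wrap negative after val * 2 and defeat the minimum check. A minimal sketch of the clamp; the sysctl default and floor below are illustrative:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define SOCK_MIN_RCVBUF 2048               /* illustrative floor */
static uint32_t sysctl_rmem_max = 212992;  /* common default */

static int set_rcvbuf(uint32_t val)
{
	uint32_t v2;

	if (val > sysctl_rmem_max)         /* the new min_t() clamp */
		val = sysctl_rmem_max;
	v2 = val * 2;                      /* can no longer overflow int */
	return v2 > SOCK_MIN_RCVBUF ? (int)v2 : SOCK_MIN_RCVBUF;
}

int main(void)
{
	printf("rcvbuf for 4096:       %d\n", set_rcvbuf(4096));
	printf("rcvbuf for UINT32_MAX: %d\n", set_rcvbuf(UINT32_MAX));
	return 0;
}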
5027 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
5028 +index 54d854807630..97fc71d90159 100644
5029 +--- a/net/core/skmsg.c
5030 ++++ b/net/core/skmsg.c
5031 +@@ -545,8 +545,8 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
5032 + struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
5033 +
5034 + /* No sk_callback_lock since already detached. */
5035 +- if (psock->parser.enabled)
5036 +- strp_done(&psock->parser.strp);
5037 ++ strp_stop(&psock->parser.strp);
5038 ++ strp_done(&psock->parser.strp);
5039 +
5040 + cancel_work_sync(&psock->work);
5041 +
5042 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5043 +index d7b43e700023..68a21bf75dd0 100644
5044 +--- a/net/ipv4/ip_vti.c
5045 ++++ b/net/ipv4/ip_vti.c
5046 +@@ -74,6 +74,33 @@ drop:
5047 + return 0;
5048 + }
5049 +
5050 ++static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
5051 ++ int encap_type)
5052 ++{
5053 ++ struct ip_tunnel *tunnel;
5054 ++ const struct iphdr *iph = ip_hdr(skb);
5055 ++ struct net *net = dev_net(skb->dev);
5056 ++ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
5057 ++
5058 ++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
5059 ++ iph->saddr, iph->daddr, 0);
5060 ++ if (tunnel) {
5061 ++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
5062 ++ goto drop;
5063 ++
5064 ++ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
5065 ++
5066 ++ skb->dev = tunnel->dev;
5067 ++
5068 ++ return xfrm_input(skb, nexthdr, spi, encap_type);
5069 ++ }
5070 ++
5071 ++ return -EINVAL;
5072 ++drop:
5073 ++ kfree_skb(skb);
5074 ++ return 0;
5075 ++}
5076 ++
5077 + static int vti_rcv(struct sk_buff *skb)
5078 + {
5079 + XFRM_SPI_SKB_CB(skb)->family = AF_INET;
5080 +@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
5081 + return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
5082 + }
5083 +
5084 ++static int vti_rcv_ipip(struct sk_buff *skb)
5085 ++{
5086 ++ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
5087 ++ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
5088 ++
5089 ++ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
5090 ++}
5091 ++
5092 + static int vti_rcv_cb(struct sk_buff *skb, int err)
5093 + {
5094 + unsigned short family;
5095 +@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
5096 + .priority = 100,
5097 + };
5098 +
5099 ++static struct xfrm_tunnel ipip_handler __read_mostly = {
5100 ++ .handler = vti_rcv_ipip,
5101 ++ .err_handler = vti4_err,
5102 ++ .priority = 0,
5103 ++};
5104 ++
5105 + static int __net_init vti_init_net(struct net *net)
5106 + {
5107 + int err;
5108 +@@ -603,6 +644,13 @@ static int __init vti_init(void)
5109 + if (err < 0)
5110 + goto xfrm_proto_comp_failed;
5111 +
5112 ++ msg = "ipip tunnel";
5113 ++ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
5114 ++ if (err < 0) {
5115 ++ pr_info("%s: can't register tunnel\n", __func__);
5116 ++ goto xfrm_tunnel_failed;
5117 ++ }
5118 ++
5119 + msg = "netlink interface";
5120 + err = rtnl_link_register(&vti_link_ops);
5121 + if (err < 0)
5122 +@@ -612,6 +660,8 @@ static int __init vti_init(void)
5123 +
5124 + rtnl_link_failed:
5125 + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
5126 ++xfrm_tunnel_failed:
5127 ++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
5128 + xfrm_proto_comp_failed:
5129 + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
5130 + xfrm_proto_ah_failed:
5131 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
5132 +index 432141f04af3..7d6318664eb2 100644
5133 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
5134 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
5135 +@@ -2220,6 +2220,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
5136 + u->tcp_fin_timeout,
5137 + u->udp_timeout);
5138 +
5139 ++#ifdef CONFIG_IP_VS_PROTO_TCP
5140 ++ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
5141 ++ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
5142 ++ return -EINVAL;
5143 ++ }
5144 ++#endif
5145 ++
5146 ++#ifdef CONFIG_IP_VS_PROTO_UDP
5147 ++ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
5148 ++ return -EINVAL;
5149 ++#endif
5150 ++
5151 + #ifdef CONFIG_IP_VS_PROTO_TCP
5152 + if (u->tcp_timeout) {
5153 + pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
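The new ipvs checks reject any timeout outside [0, INT_MAX / HZ] because the seconds value is later converted to jiffies as timeout * HZ in a signed int, which would overflow for large inputs. A small sketch of the guard, assuming an HZ value for illustration:

#include <limits.h>
#include <stdio.h>

#define HZ 1000   /* assumed; kernels commonly use 100, 250 or 1000 */

static int set_timeout(int seconds, int *jiffies)
{
	if (seconds < 0 || seconds > INT_MAX / HZ)
		return -22;                /* -EINVAL, as in the patch */
	*jiffies = seconds * HZ;
	return 0;
}

int main(void)
{
	int j;

	printf("900s -> %s\n", set_timeout(900, &j) ? "rejected" : "ok");
	printf("huge -> %s\n",
	       set_timeout(INT_MAX / HZ + 1, &j) ? "rejected" : "ok");
	return 0;
}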
5154 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5155 +index e92e749aff53..830b1328fe97 100644
5156 +--- a/net/netfilter/nf_conntrack_core.c
5157 ++++ b/net/netfilter/nf_conntrack_core.c
5158 +@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
5159 + }
5160 +
5161 + if (nf_ct_key_equal(h, tuple, zone, net)) {
5162 ++ /* Tuple is taken already, so caller will need to find
5163 ++ * a new source port to use.
5164 ++ *
5165 ++ * Only exception:
5166 ++ * If the *original tuples* are identical, then both
5167 ++ * conntracks refer to the same flow.
5168 ++ * This is a rare situation, it can occur e.g. when
5169 ++ * more than one UDP packet is sent from the same socket
5170 ++ * in different threads.
5171 ++ *
5172 ++ * Let nf_ct_resolve_clash() deal with this later.
5173 ++ */
5174 ++ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
5175 ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
5176 ++ continue;
5177 ++
5178 + NF_CT_STAT_INC_ATOMIC(net, found);
5179 + rcu_read_unlock();
5180 + return 1;
5181 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5182 +index 5114a0d2a41e..36d4717fee3b 100644
5183 +--- a/net/netfilter/nf_tables_api.c
5184 ++++ b/net/netfilter/nf_tables_api.c
5185 +@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
5186 + kfree(trans);
5187 + }
5188 +
5189 ++static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
5190 ++{
5191 ++ struct net *net = ctx->net;
5192 ++ struct nft_trans *trans;
5193 ++
5194 ++ if (!nft_set_is_anonymous(set))
5195 ++ return;
5196 ++
5197 ++ list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
5198 ++ if (trans->msg_type == NFT_MSG_NEWSET &&
5199 ++ nft_trans_set(trans) == set) {
5200 ++ nft_trans_set_bound(trans) = true;
5201 ++ break;
5202 ++ }
5203 ++ }
5204 ++}
5205 ++
5206 + static int nf_tables_register_hook(struct net *net,
5207 + const struct nft_table *table,
5208 + struct nft_chain *chain)
5209 +@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
5210 + return err;
5211 + }
5212 +
5213 +-/* either expr ops provide both activate/deactivate, or neither */
5214 +-static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
5215 +-{
5216 +- if (!ops)
5217 +- return true;
5218 +-
5219 +- if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
5220 +- return false;
5221 +-
5222 +- return true;
5223 +-}
5224 +-
5225 + static void nft_rule_expr_activate(const struct nft_ctx *ctx,
5226 + struct nft_rule *rule)
5227 + {
5228 +@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
5229 + }
5230 +
5231 + static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
5232 +- struct nft_rule *rule)
5233 ++ struct nft_rule *rule,
5234 ++ enum nft_trans_phase phase)
5235 + {
5236 + struct nft_expr *expr;
5237 +
5238 + expr = nft_expr_first(rule);
5239 + while (expr != nft_expr_last(rule) && expr->ops) {
5240 + if (expr->ops->deactivate)
5241 +- expr->ops->deactivate(ctx, expr);
5242 ++ expr->ops->deactivate(ctx, expr, phase);
5243 +
5244 + expr = nft_expr_next(expr);
5245 + }
5246 +@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
5247 + nft_trans_destroy(trans);
5248 + return err;
5249 + }
5250 +- nft_rule_expr_deactivate(ctx, rule);
5251 ++ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
5252 +
5253 + return 0;
5254 + }
5255 +@@ -1932,9 +1938,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
5256 + */
5257 + int nft_register_expr(struct nft_expr_type *type)
5258 + {
5259 +- if (!nft_expr_check_ops(type->ops))
5260 +- return -EINVAL;
5261 +-
5262 + nfnl_lock(NFNL_SUBSYS_NFTABLES);
5263 + if (type->family == NFPROTO_UNSPEC)
5264 + list_add_tail_rcu(&type->list, &nf_tables_expressions);
5265 +@@ -2082,10 +2085,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
5266 + err = PTR_ERR(ops);
5267 + goto err1;
5268 + }
5269 +- if (!nft_expr_check_ops(ops)) {
5270 +- err = -EINVAL;
5271 +- goto err1;
5272 +- }
5273 + } else
5274 + ops = type->ops;
5275 +
5276 +@@ -2482,7 +2481,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
5277 + static void nf_tables_rule_release(const struct nft_ctx *ctx,
5278 + struct nft_rule *rule)
5279 + {
5280 +- nft_rule_expr_deactivate(ctx, rule);
5281 ++ nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
5282 + nf_tables_rule_destroy(ctx, rule);
5283 + }
5284 +
5285 +@@ -3679,39 +3678,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
5286 + bind:
5287 + binding->chain = ctx->chain;
5288 + list_add_tail_rcu(&binding->list, &set->bindings);
5289 ++ nft_set_trans_bind(ctx, set);
5290 ++
5291 + return 0;
5292 + }
5293 + EXPORT_SYMBOL_GPL(nf_tables_bind_set);
5294 +
5295 +-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
5296 +- struct nft_set_binding *binding)
5297 +-{
5298 +- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
5299 +- nft_is_active(ctx->net, set))
5300 +- list_add_tail_rcu(&set->list, &ctx->table->sets);
5301 +-
5302 +- list_add_tail_rcu(&binding->list, &set->bindings);
5303 +-}
5304 +-EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
5305 +-
5306 + void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
5307 +- struct nft_set_binding *binding)
5308 ++ struct nft_set_binding *binding, bool event)
5309 + {
5310 + list_del_rcu(&binding->list);
5311 +
5312 +- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
5313 +- nft_is_active(ctx->net, set))
5314 ++ if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
5315 + list_del_rcu(&set->list);
5316 ++ if (event)
5317 ++ nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
5318 ++ GFP_KERNEL);
5319 ++ }
5320 + }
5321 + EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
5322 +
5323 + void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
5324 + {
5325 +- if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
5326 +- nft_is_active(ctx->net, set)) {
5327 +- nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
5328 ++ if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
5329 + nft_set_destroy(set);
5330 +- }
5331 + }
5332 + EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
5333 +
5334 +@@ -6504,6 +6494,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5335 + nf_tables_rule_notify(&trans->ctx,
5336 + nft_trans_rule(trans),
5337 + NFT_MSG_DELRULE);
5338 ++ nft_rule_expr_deactivate(&trans->ctx,
5339 ++ nft_trans_rule(trans),
5340 ++ NFT_TRANS_COMMIT);
5341 + break;
5342 + case NFT_MSG_NEWSET:
5343 + nft_clear(net, nft_trans_set(trans));
5344 +@@ -6590,7 +6583,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
5345 + nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
5346 + break;
5347 + case NFT_MSG_NEWSET:
5348 +- nft_set_destroy(nft_trans_set(trans));
5349 ++ if (!nft_trans_set_bound(trans))
5350 ++ nft_set_destroy(nft_trans_set(trans));
5351 + break;
5352 + case NFT_MSG_NEWSETELEM:
5353 + nft_set_elem_destroy(nft_trans_elem_set(trans),
5354 +@@ -6651,7 +6645,9 @@ static int __nf_tables_abort(struct net *net)
5355 + case NFT_MSG_NEWRULE:
5356 + trans->ctx.chain->use--;
5357 + list_del_rcu(&nft_trans_rule(trans)->list);
5358 +- nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
5359 ++ nft_rule_expr_deactivate(&trans->ctx,
5360 ++ nft_trans_rule(trans),
5361 ++ NFT_TRANS_ABORT);
5362 + break;
5363 + case NFT_MSG_DELRULE:
5364 + trans->ctx.chain->use++;
5365 +@@ -6661,7 +6657,8 @@ static int __nf_tables_abort(struct net *net)
5366 + break;
5367 + case NFT_MSG_NEWSET:
5368 + trans->ctx.table->use--;
5369 +- list_del_rcu(&nft_trans_set(trans)->list);
5370 ++ if (!nft_trans_set_bound(trans))
5371 ++ list_del_rcu(&nft_trans_set(trans)->list);
5372 + break;
5373 + case NFT_MSG_DELSET:
5374 + trans->ctx.table->use++;
5375 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
5376 +index c90a4640723f..0a4bad55a8aa 100644
5377 +--- a/net/netfilter/nft_compat.c
5378 ++++ b/net/netfilter/nft_compat.c
5379 +@@ -22,11 +22,15 @@
5380 + #include <linux/netfilter_bridge/ebtables.h>
5381 + #include <linux/netfilter_arp/arp_tables.h>
5382 + #include <net/netfilter/nf_tables.h>
5383 ++#include <net/netns/generic.h>
5384 +
5385 + struct nft_xt {
5386 + struct list_head head;
5387 + struct nft_expr_ops ops;
5388 +- unsigned int refcnt;
5389 ++ refcount_t refcnt;
5390 ++
5391 ++ /* used only when transaction mutex is locked */
5392 ++ unsigned int listcnt;
5393 +
5394 + /* Unlike other expressions, ops doesn't have static storage duration.
5395 + * nft core assumes they do. We use kfree_rcu so that nft core can
5396 +@@ -43,10 +47,39 @@ struct nft_xt_match_priv {
5397 + void *info;
5398 + };
5399 +
5400 ++struct nft_compat_net {
5401 ++ struct list_head nft_target_list;
5402 ++ struct list_head nft_match_list;
5403 ++};
5404 ++
5405 ++static unsigned int nft_compat_net_id __read_mostly;
5406 ++static struct nft_expr_type nft_match_type;
5407 ++static struct nft_expr_type nft_target_type;
5408 ++
5409 ++static struct nft_compat_net *nft_compat_pernet(struct net *net)
5410 ++{
5411 ++ return net_generic(net, nft_compat_net_id);
5412 ++}
5413 ++
5414 ++static void nft_xt_get(struct nft_xt *xt)
5415 ++{
5416 ++ /* refcount_inc() warns on 0 -> 1 transition, but we can't
5417 ++ * init the reference count to 1 in .select_ops -- we can't
5418 ++ * undo such an increase when another expression inside the same
5419 ++ * rule fails afterwards.
5420 ++ */
5421 ++ if (xt->listcnt == 0)
5422 ++ refcount_set(&xt->refcnt, 1);
5423 ++ else
5424 ++ refcount_inc(&xt->refcnt);
5425 ++
5426 ++ xt->listcnt++;
5427 ++}
5428 ++
5429 + static bool nft_xt_put(struct nft_xt *xt)
5430 + {
5431 +- if (--xt->refcnt == 0) {
5432 +- list_del(&xt->head);
5433 ++ if (refcount_dec_and_test(&xt->refcnt)) {
5434 ++ WARN_ON_ONCE(!list_empty(&xt->head));
5435 + kfree_rcu(xt, rcu_head);
5436 + return true;
5437 + }
5438 +@@ -273,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
5439 + return -EINVAL;
5440 +
5441 + nft_xt = container_of(expr->ops, struct nft_xt, ops);
5442 +- nft_xt->refcnt++;
5443 ++ nft_xt_get(nft_xt);
5444 + return 0;
5445 + }
5446 +
5447 +@@ -487,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
5448 + return ret;
5449 +
5450 + nft_xt = container_of(expr->ops, struct nft_xt, ops);
5451 +- nft_xt->refcnt++;
5452 ++ nft_xt_get(nft_xt);
5453 + return 0;
5454 + }
5455 +
5456 +@@ -541,6 +574,18 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5457 + __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
5458 + }
5459 +
5460 ++static void nft_compat_deactivate(const struct nft_ctx *ctx,
5461 ++ const struct nft_expr *expr,
5462 ++ enum nft_trans_phase phase)
5463 ++{
5464 ++ struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
5465 ++
5466 ++ if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
5467 ++ if (--xt->listcnt == 0)
5468 ++ list_del_init(&xt->head);
5469 ++ }
5470 ++}
5471 ++
5472 + static void
5473 + nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
5474 + {
5475 +@@ -735,10 +780,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
5476 + .cb = nfnl_nft_compat_cb,
5477 + };
5478 +
5479 +-static LIST_HEAD(nft_match_list);
5480 +-
5481 +-static struct nft_expr_type nft_match_type;
5482 +-
5483 + static bool nft_match_cmp(const struct xt_match *match,
5484 + const char *name, u32 rev, u32 family)
5485 + {
5486 +@@ -750,6 +791,7 @@ static const struct nft_expr_ops *
5487 + nft_match_select_ops(const struct nft_ctx *ctx,
5488 + const struct nlattr * const tb[])
5489 + {
5490 ++ struct nft_compat_net *cn;
5491 + struct nft_xt *nft_match;
5492 + struct xt_match *match;
5493 + unsigned int matchsize;
5494 +@@ -766,8 +808,10 @@ nft_match_select_ops(const struct nft_ctx *ctx,
5495 + rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
5496 + family = ctx->family;
5497 +
5498 ++ cn = nft_compat_pernet(ctx->net);
5499 ++
5500 + /* Re-use the existing match if it's already loaded. */
5501 +- list_for_each_entry(nft_match, &nft_match_list, head) {
5502 ++ list_for_each_entry(nft_match, &cn->nft_match_list, head) {
5503 + struct xt_match *match = nft_match->ops.data;
5504 +
5505 + if (nft_match_cmp(match, mt_name, rev, family))
5506 +@@ -790,11 +834,12 @@ nft_match_select_ops(const struct nft_ctx *ctx,
5507 + goto err;
5508 + }
5509 +
5510 +- nft_match->refcnt = 0;
5511 ++ refcount_set(&nft_match->refcnt, 0);
5512 + nft_match->ops.type = &nft_match_type;
5513 + nft_match->ops.eval = nft_match_eval;
5514 + nft_match->ops.init = nft_match_init;
5515 + nft_match->ops.destroy = nft_match_destroy;
5516 ++ nft_match->ops.deactivate = nft_compat_deactivate;
5517 + nft_match->ops.dump = nft_match_dump;
5518 + nft_match->ops.validate = nft_match_validate;
5519 + nft_match->ops.data = match;
5520 +@@ -811,7 +856,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
5521 +
5522 + nft_match->ops.size = matchsize;
5523 +
5524 +- list_add(&nft_match->head, &nft_match_list);
5525 ++ nft_match->listcnt = 0;
5526 ++ list_add(&nft_match->head, &cn->nft_match_list);
5527 +
5528 + return &nft_match->ops;
5529 + err:
5530 +@@ -827,10 +873,6 @@ static struct nft_expr_type nft_match_type __read_mostly = {
5531 + .owner = THIS_MODULE,
5532 + };
5533 +
5534 +-static LIST_HEAD(nft_target_list);
5535 +-
5536 +-static struct nft_expr_type nft_target_type;
5537 +-
5538 + static bool nft_target_cmp(const struct xt_target *tg,
5539 + const char *name, u32 rev, u32 family)
5540 + {
5541 +@@ -842,6 +884,7 @@ static const struct nft_expr_ops *
5542 + nft_target_select_ops(const struct nft_ctx *ctx,
5543 + const struct nlattr * const tb[])
5544 + {
5545 ++ struct nft_compat_net *cn;
5546 + struct nft_xt *nft_target;
5547 + struct xt_target *target;
5548 + char *tg_name;
5549 +@@ -862,8 +905,9 @@ nft_target_select_ops(const struct nft_ctx *ctx,
5550 + strcmp(tg_name, "standard") == 0)
5551 + return ERR_PTR(-EINVAL);
5552 +
5553 ++ cn = nft_compat_pernet(ctx->net);
5554 + /* Re-use the existing target if it's already loaded. */
5555 +- list_for_each_entry(nft_target, &nft_target_list, head) {
5556 ++ list_for_each_entry(nft_target, &cn->nft_target_list, head) {
5557 + struct xt_target *target = nft_target->ops.data;
5558 +
5559 + if (!target->target)
5560 +@@ -894,11 +938,12 @@ nft_target_select_ops(const struct nft_ctx *ctx,
5561 + goto err;
5562 + }
5563 +
5564 +- nft_target->refcnt = 0;
5565 ++ refcount_set(&nft_target->refcnt, 0);
5566 + nft_target->ops.type = &nft_target_type;
5567 + nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
5568 + nft_target->ops.init = nft_target_init;
5569 + nft_target->ops.destroy = nft_target_destroy;
5570 ++ nft_target->ops.deactivate = nft_compat_deactivate;
5571 + nft_target->ops.dump = nft_target_dump;
5572 + nft_target->ops.validate = nft_target_validate;
5573 + nft_target->ops.data = target;
5574 +@@ -908,7 +953,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
5575 + else
5576 + nft_target->ops.eval = nft_target_eval_xt;
5577 +
5578 +- list_add(&nft_target->head, &nft_target_list);
5579 ++ nft_target->listcnt = 0;
5580 ++ list_add(&nft_target->head, &cn->nft_target_list);
5581 +
5582 + return &nft_target->ops;
5583 + err:
5584 +@@ -924,13 +970,74 @@ static struct nft_expr_type nft_target_type __read_mostly = {
5585 + .owner = THIS_MODULE,
5586 + };
5587 +
5588 ++static int __net_init nft_compat_init_net(struct net *net)
5589 ++{
5590 ++ struct nft_compat_net *cn = nft_compat_pernet(net);
5591 ++
5592 ++ INIT_LIST_HEAD(&cn->nft_target_list);
5593 ++ INIT_LIST_HEAD(&cn->nft_match_list);
5594 ++
5595 ++ return 0;
5596 ++}
5597 ++
5598 ++static void __net_exit nft_compat_exit_net(struct net *net)
5599 ++{
5600 ++ struct nft_compat_net *cn = nft_compat_pernet(net);
5601 ++ struct nft_xt *xt, *next;
5602 ++
5603 ++ if (list_empty(&cn->nft_match_list) &&
5604 ++ list_empty(&cn->nft_target_list))
5605 ++ return;
5606 ++
5607 ++ /* If there was an error that caused nft_xt expr to not be initialized
5608 ++ * fully and no one else requested the same expression later, the lists
5609 ++ * contain 0-refcount entries that still hold module reference.
5610 ++ *
5611 ++ * Clean them here.
5612 ++ */
5613 ++ mutex_lock(&net->nft.commit_mutex);
5614 ++ list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
5615 ++ struct xt_target *target = xt->ops.data;
5616 ++
5617 ++ list_del_init(&xt->head);
5618 ++
5619 ++ if (refcount_read(&xt->refcnt))
5620 ++ continue;
5621 ++ module_put(target->me);
5622 ++ kfree(xt);
5623 ++ }
5624 ++
5625 ++ list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
5626 ++ struct xt_match *match = xt->ops.data;
5627 ++
5628 ++ list_del_init(&xt->head);
5629 ++
5630 ++ if (refcount_read(&xt->refcnt))
5631 ++ continue;
5632 ++ module_put(match->me);
5633 ++ kfree(xt);
5634 ++ }
5635 ++ mutex_unlock(&net->nft.commit_mutex);
5636 ++}
5637 ++
5638 ++static struct pernet_operations nft_compat_net_ops = {
5639 ++ .init = nft_compat_init_net,
5640 ++ .exit = nft_compat_exit_net,
5641 ++ .id = &nft_compat_net_id,
5642 ++ .size = sizeof(struct nft_compat_net),
5643 ++};
5644 ++
5645 + static int __init nft_compat_module_init(void)
5646 + {
5647 + int ret;
5648 +
5649 ++ ret = register_pernet_subsys(&nft_compat_net_ops);
5650 ++ if (ret < 0)
5651 ++ goto err_target;
5652 ++
5653 + ret = nft_register_expr(&nft_match_type);
5654 + if (ret < 0)
5655 +- return ret;
5656 ++ goto err_pernet;
5657 +
5658 + ret = nft_register_expr(&nft_target_type);
5659 + if (ret < 0)
5660 +@@ -943,45 +1050,21 @@ static int __init nft_compat_module_init(void)
5661 + }
5662 +
5663 + return ret;
5664 +-
5665 + err_target:
5666 + nft_unregister_expr(&nft_target_type);
5667 + err_match:
5668 + nft_unregister_expr(&nft_match_type);
5669 ++err_pernet:
5670 ++ unregister_pernet_subsys(&nft_compat_net_ops);
5671 + return ret;
5672 + }
5673 +
5674 + static void __exit nft_compat_module_exit(void)
5675 + {
5676 +- struct nft_xt *xt, *next;
5677 +-
5678 +- /* list should be empty here, it can be non-empty only in case there
5679 +- * was an error that caused nft_xt expr to not be initialized fully
5680 +- * and noone else requested the same expression later.
5681 +- *
5682 +- * In this case, the lists contain 0-refcount entries that still
5683 +- * hold module reference.
5684 +- */
5685 +- list_for_each_entry_safe(xt, next, &nft_target_list, head) {
5686 +- struct xt_target *target = xt->ops.data;
5687 +-
5688 +- if (WARN_ON_ONCE(xt->refcnt))
5689 +- continue;
5690 +- module_put(target->me);
5691 +- kfree(xt);
5692 +- }
5693 +-
5694 +- list_for_each_entry_safe(xt, next, &nft_match_list, head) {
5695 +- struct xt_match *match = xt->ops.data;
5696 +-
5697 +- if (WARN_ON_ONCE(xt->refcnt))
5698 +- continue;
5699 +- module_put(match->me);
5700 +- kfree(xt);
5701 +- }
5702 + nfnetlink_subsys_unregister(&nfnl_compat_subsys);
5703 + nft_unregister_expr(&nft_target_type);
5704 + nft_unregister_expr(&nft_match_type);
5705 ++ unregister_pernet_subsys(&nft_compat_net_ops);
5706 + }
5707 +
5708 + MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
5709 +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
5710 +index 07d4efd3d851..f1172f99752b 100644
5711 +--- a/net/netfilter/nft_dynset.c
5712 ++++ b/net/netfilter/nft_dynset.c
5713 +@@ -235,20 +235,17 @@ err1:
5714 + return err;
5715 + }
5716 +
5717 +-static void nft_dynset_activate(const struct nft_ctx *ctx,
5718 +- const struct nft_expr *expr)
5719 +-{
5720 +- struct nft_dynset *priv = nft_expr_priv(expr);
5721 +-
5722 +- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
5723 +-}
5724 +-
5725 + static void nft_dynset_deactivate(const struct nft_ctx *ctx,
5726 +- const struct nft_expr *expr)
5727 ++ const struct nft_expr *expr,
5728 ++ enum nft_trans_phase phase)
5729 + {
5730 + struct nft_dynset *priv = nft_expr_priv(expr);
5731 +
5732 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
5733 ++ if (phase == NFT_TRANS_PREPARE)
5734 ++ return;
5735 ++
5736 ++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
5737 ++ phase == NFT_TRANS_COMMIT);
5738 + }
5739 +
5740 + static void nft_dynset_destroy(const struct nft_ctx *ctx,
5741 +@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
5742 + .eval = nft_dynset_eval,
5743 + .init = nft_dynset_init,
5744 + .destroy = nft_dynset_destroy,
5745 +- .activate = nft_dynset_activate,
5746 + .deactivate = nft_dynset_deactivate,
5747 + .dump = nft_dynset_dump,
5748 + };
5749 +diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
5750 +index 0777a93211e2..3f6d1d2a6281 100644
5751 +--- a/net/netfilter/nft_immediate.c
5752 ++++ b/net/netfilter/nft_immediate.c
5753 +@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
5754 + }
5755 +
5756 + static void nft_immediate_deactivate(const struct nft_ctx *ctx,
5757 +- const struct nft_expr *expr)
5758 ++ const struct nft_expr *expr,
5759 ++ enum nft_trans_phase phase)
5760 + {
5761 + const struct nft_immediate_expr *priv = nft_expr_priv(expr);
5762 +
5763 ++ if (phase == NFT_TRANS_COMMIT)
5764 ++ return;
5765 ++
5766 + return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
5767 + }
5768 +
5769 +diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
5770 +index 227b2b15a19c..14496da5141d 100644
5771 +--- a/net/netfilter/nft_lookup.c
5772 ++++ b/net/netfilter/nft_lookup.c
5773 +@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
5774 + return 0;
5775 + }
5776 +
5777 +-static void nft_lookup_activate(const struct nft_ctx *ctx,
5778 +- const struct nft_expr *expr)
5779 +-{
5780 +- struct nft_lookup *priv = nft_expr_priv(expr);
5781 +-
5782 +- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
5783 +-}
5784 +-
5785 + static void nft_lookup_deactivate(const struct nft_ctx *ctx,
5786 +- const struct nft_expr *expr)
5787 ++ const struct nft_expr *expr,
5788 ++ enum nft_trans_phase phase)
5789 + {
5790 + struct nft_lookup *priv = nft_expr_priv(expr);
5791 +
5792 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
5793 ++ if (phase == NFT_TRANS_PREPARE)
5794 ++ return;
5795 ++
5796 ++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
5797 ++ phase == NFT_TRANS_COMMIT);
5798 + }
5799 +
5800 + static void nft_lookup_destroy(const struct nft_ctx *ctx,
5801 +@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
5802 + .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
5803 + .eval = nft_lookup_eval,
5804 + .init = nft_lookup_init,
5805 +- .activate = nft_lookup_activate,
5806 + .deactivate = nft_lookup_deactivate,
5807 + .destroy = nft_lookup_destroy,
5808 + .dump = nft_lookup_dump,
5809 +diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
5810 +index a3185ca2a3a9..ae178e914486 100644
5811 +--- a/net/netfilter/nft_objref.c
5812 ++++ b/net/netfilter/nft_objref.c
5813 +@@ -155,20 +155,17 @@ nla_put_failure:
5814 + return -1;
5815 + }
5816 +
5817 +-static void nft_objref_map_activate(const struct nft_ctx *ctx,
5818 +- const struct nft_expr *expr)
5819 +-{
5820 +- struct nft_objref_map *priv = nft_expr_priv(expr);
5821 +-
5822 +- nf_tables_rebind_set(ctx, priv->set, &priv->binding);
5823 +-}
5824 +-
5825 + static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
5826 +- const struct nft_expr *expr)
5827 ++ const struct nft_expr *expr,
5828 ++ enum nft_trans_phase phase)
5829 + {
5830 + struct nft_objref_map *priv = nft_expr_priv(expr);
5831 +
5832 +- nf_tables_unbind_set(ctx, priv->set, &priv->binding);
5833 ++ if (phase == NFT_TRANS_PREPARE)
5834 ++ return;
5835 ++
5836 ++ nf_tables_unbind_set(ctx, priv->set, &priv->binding,
5837 ++ phase == NFT_TRANS_COMMIT);
5838 + }
5839 +
5840 + static void nft_objref_map_destroy(const struct nft_ctx *ctx,
5841 +@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
5842 + .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
5843 + .eval = nft_objref_map_eval,
5844 + .init = nft_objref_map_init,
5845 +- .activate = nft_objref_map_activate,
5846 + .deactivate = nft_objref_map_deactivate,
5847 + .destroy = nft_objref_map_destroy,
5848 + .dump = nft_objref_map_dump,
5849 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
5850 +index 5d3cce9e8744..15eb5d3d4750 100644
5851 +--- a/net/vmw_vsock/virtio_transport.c
5852 ++++ b/net/vmw_vsock/virtio_transport.c
5853 +@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
5854 + {
5855 + struct virtio_vsock *vsock = virtio_vsock_get();
5856 +
5857 ++ if (!vsock)
5858 ++ return VMADDR_CID_ANY;
5859 ++
5860 + return vsock->guest_cid;
5861 + }
5862 +
5863 +@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5864 +
5865 + virtio_vsock_update_guest_cid(vsock);
5866 +
5867 +- ret = vsock_core_init(&virtio_transport.transport);
5868 +- if (ret < 0)
5869 +- goto out_vqs;
5870 +-
5871 + vsock->rx_buf_nr = 0;
5872 + vsock->rx_buf_max_nr = 0;
5873 + atomic_set(&vsock->queued_replies, 0);
5874 +@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5875 + mutex_unlock(&the_virtio_vsock_mutex);
5876 + return 0;
5877 +
5878 +-out_vqs:
5879 +- vsock->vdev->config->del_vqs(vsock->vdev);
5880 + out:
5881 + kfree(vsock);
5882 + mutex_unlock(&the_virtio_vsock_mutex);
5883 +@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5884 + flush_work(&vsock->event_work);
5885 + flush_work(&vsock->send_pkt_work);
5886 +
5887 ++ /* Reset all connected sockets when the device disappears */
5888 ++ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
5889 ++
5890 + vdev->config->reset(vdev);
5891 +
5892 + mutex_lock(&vsock->rx_lock);
5893 +@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5894 +
5895 + mutex_lock(&the_virtio_vsock_mutex);
5896 + the_virtio_vsock = NULL;
5897 +- vsock_core_exit();
5898 + mutex_unlock(&the_virtio_vsock_mutex);
5899 +
5900 + vdev->config->del_vqs(vdev);
5901 +@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
5902 + virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
5903 + if (!virtio_vsock_workqueue)
5904 + return -ENOMEM;
5905 ++
5906 + ret = register_virtio_driver(&virtio_vsock_driver);
5907 + if (ret)
5908 +- destroy_workqueue(virtio_vsock_workqueue);
5909 ++ goto out_wq;
5910 ++
5911 ++ ret = vsock_core_init(&virtio_transport.transport);
5912 ++ if (ret)
5913 ++ goto out_vdr;
5914 ++
5915 ++ return 0;
5916 ++
5917 ++out_vdr:
5918 ++ unregister_virtio_driver(&virtio_vsock_driver);
5919 ++out_wq:
5920 ++ destroy_workqueue(virtio_vsock_workqueue);
5921 + return ret;
5922 ++
5923 + }
5924 +
5925 + static void __exit virtio_vsock_exit(void)
5926 + {
5927 ++ vsock_core_exit();
5928 + unregister_virtio_driver(&virtio_vsock_driver);
5929 + destroy_workqueue(virtio_vsock_workqueue);
5930 + }
5931 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
5932 +index 08c88de0ffda..11975ec8d566 100644
5933 +--- a/security/apparmor/domain.c
5934 ++++ b/security/apparmor/domain.c
5935 +@@ -1444,7 +1444,10 @@ check:
5936 + new = aa_label_merge(label, target, GFP_KERNEL);
5937 + if (IS_ERR_OR_NULL(new)) {
5938 + info = "failed to build target label";
5939 +- error = PTR_ERR(new);
5940 ++ if (!new)
5941 ++ error = -ENOMEM;
5942 ++ else
5943 ++ error = PTR_ERR(new);
5944 + new = NULL;
5945 + perms.allow = 0;
5946 + goto audit;
5947 +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
5948 +index 42446a216f3b..7d1eeb084968 100644
5949 +--- a/security/apparmor/lsm.c
5950 ++++ b/security/apparmor/lsm.c
5951 +@@ -1598,12 +1598,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
5952 + return apparmor_ip_postroute(priv, skb, state);
5953 + }
5954 +
5955 ++#if IS_ENABLED(CONFIG_IPV6)
5956 + static unsigned int apparmor_ipv6_postroute(void *priv,
5957 + struct sk_buff *skb,
5958 + const struct nf_hook_state *state)
5959 + {
5960 + return apparmor_ip_postroute(priv, skb, state);
5961 + }
5962 ++#endif
5963 +
5964 + static const struct nf_hook_ops apparmor_nf_ops[] = {
5965 + {
5966 +diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
5967 +index 9988d5c126b6..94094168c4a6 100644
5968 +--- a/tools/bpf/bpftool/map.c
5969 ++++ b/tools/bpf/bpftool/map.c
5970 +@@ -439,6 +439,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
5971 + return argv + i;
5972 + }
5973 +
5974 ++/* on per-cpu maps we must copy the provided value to all value instances */
5975 ++static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
5976 ++{
5977 ++ unsigned int i, n, step;
5978 ++
5979 ++ if (!map_is_per_cpu(info->type))
5980 ++ return;
5981 ++
5982 ++ n = get_possible_cpus();
5983 ++ step = round_up(info->value_size, 8);
5984 ++ for (i = 1; i < n; i++)
5985 ++ memcpy(value + i * step, value, info->value_size);
5986 ++}
5987 ++
5988 + static int parse_elem(char **argv, struct bpf_map_info *info,
5989 + void *key, void *value, __u32 key_size, __u32 value_size,
5990 + __u32 *flags, __u32 **value_fd)
5991 +@@ -518,6 +532,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
5992 + argv = parse_bytes(argv, "value", value, value_size);
5993 + if (!argv)
5994 + return -1;
5995 ++
5996 ++ fill_per_cpu_value(info, value);
5997 + }
5998 +
5999 + return parse_elem(argv, info, key, NULL, key_size, value_size,
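fill_per_cpu_value() relies on the buffer layout BPF uses for per-cpu map values: one slot per possible CPU, each slot rounded up to 8 bytes, and bpftool now replicates the single user-supplied value into every slot before the update call. A standalone sketch of that replication; the CPU count and value size are illustrative:

#include <stdio.h>
#include <string.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int n = 4;                /* pretend 4 possible CPUs */
	unsigned int value_size = 5;
	unsigned int step = ROUND_UP(value_size, 8);
	unsigned char value[4 * 8] = { 1, 2, 3, 4, 5 };

	/* same copy loop as the new bpftool helper */
	for (unsigned int i = 1; i < n; i++)
		memcpy(value + i * step, value, value_size);

	for (unsigned int i = 0; i < n; i++)
		printf("cpu%u slot starts with %u\n", i, value[i * step]);
	return 0;
}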
6000 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
6001 +index 69b01a6158bd..91b9de5f4e17 100644
6002 +--- a/tools/bpf/bpftool/prog.c
6003 ++++ b/tools/bpf/bpftool/prog.c
6004 +@@ -130,13 +130,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
6005 +
6006 + static int prog_fd_by_tag(unsigned char *tag)
6007 + {
6008 +- struct bpf_prog_info info = {};
6009 +- __u32 len = sizeof(info);
6010 + unsigned int id = 0;
6011 + int err;
6012 + int fd;
6013 +
6014 + while (true) {
6015 ++ struct bpf_prog_info info = {};
6016 ++ __u32 len = sizeof(info);
6017 ++
6018 + err = bpf_prog_get_next_id(id, &id);
6019 + if (err) {
6020 + p_err("%s", strerror(errno));
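Moving the bpf_prog_info declaration inside the loop matters because the = {} initializer only runs once: after the first get-info call fills in lengths and pointers, later iterations would pass stale fields back to the kernel. A tiny sketch of the difference; the struct and filler are invented:

#include <stdio.h>

struct info { int nr_map_ids; };

/* only the first call sets the field, like a kernel fill on success */
static void fill(struct info *i, int pass)
{
	if (pass == 0)
		i->nr_map_ids = 7;
}

int main(void)
{
	struct info outside = {0};         /* the old, buggy placement */

	for (int pass = 0; pass < 2; pass++) {
		struct info inside = {0};  /* the fix: re-zeroed each pass */

		fill(&outside, pass);
		fill(&inside, pass);
		printf("pass %d: outside=%d inside=%d\n",
		       pass, outside.nr_map_ids, inside.nr_map_ids);
	}
	return 0;
}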
6021 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
6022 +index a7b4d3f611c5..d8791e0e5f75 100644
6023 +--- a/tools/perf/builtin-script.c
6024 ++++ b/tools/perf/builtin-script.c
6025 +@@ -1633,13 +1633,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
6026 + .force_header = false,
6027 + };
6028 + struct perf_evsel *ev2;
6029 +- static bool init;
6030 + u64 val;
6031 +
6032 +- if (!init) {
6033 +- perf_stat__init_shadow_stats();
6034 +- init = true;
6035 +- }
6036 + if (!evsel->stats)
6037 + perf_evlist__alloc_stats(script->session->evlist, false);
6038 + if (evsel_script(evsel->leader)->gnum++ == 0)
6039 +@@ -1746,7 +1741,7 @@ static void process_event(struct perf_script *script,
6040 + return;
6041 + }
6042 +
6043 +- if (PRINT_FIELD(TRACE)) {
6044 ++ if (PRINT_FIELD(TRACE) && sample->raw_data) {
6045 + event_format__fprintf(evsel->tp_format, sample->cpu,
6046 + sample->raw_data, sample->raw_size, fp);
6047 + }
6048 +@@ -2305,6 +2300,8 @@ static int __cmd_script(struct perf_script *script)
6049 +
6050 + signal(SIGINT, sig_handler);
6051 +
6052 ++ perf_stat__init_shadow_stats();
6053 ++
6054 + /* override event processing functions */
6055 + if (script->show_task_events) {
6056 + script->tool.comm = process_comm_event;
6057 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
6058 +index 835619476370..c108519ddd61 100644
6059 +--- a/tools/perf/builtin-trace.c
6060 ++++ b/tools/perf/builtin-trace.c
6061 +@@ -2424,19 +2424,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
6062 +
6063 + static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
6064 + {
6065 +- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
6066 ++ bool found = false;
6067 ++ struct perf_evsel *evsel, *tmp;
6068 ++ struct parse_events_error err = { .idx = 0, };
6069 ++ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
6070 +
6071 +- if (IS_ERR(evsel))
6072 ++ if (ret)
6073 + return false;
6074 +
6075 +- if (perf_evsel__field(evsel, "pathname") == NULL) {
6076 ++ evlist__for_each_entry_safe(evlist, evsel, tmp) {
6077 ++ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
6078 ++ continue;
6079 ++
6080 ++ if (perf_evsel__field(evsel, "pathname")) {
6081 ++ evsel->handler = trace__vfs_getname;
6082 ++ found = true;
6083 ++ continue;
6084 ++ }
6085 ++
6086 ++ list_del_init(&evsel->node);
6087 ++ evsel->evlist = NULL;
6088 + perf_evsel__delete(evsel);
6089 +- return false;
6090 + }
6091 +
6092 +- evsel->handler = trace__vfs_getname;
6093 +- perf_evlist__add(evlist, evsel);
6094 +- return true;
6095 ++ return found;
6096 + }
6097 +
6098 + static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
6099 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
6100 +index 1ccbd3342069..383674f448fc 100644
6101 +--- a/tools/perf/util/cpumap.c
6102 ++++ b/tools/perf/util/cpumap.c
6103 +@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
6104 + if (!cpu_list)
6105 + return cpu_map__read_all_cpu_map();
6106 +
6107 +- if (!isdigit(*cpu_list))
6108 ++ /*
6109 ++ * must handle the case of empty cpumap to cover
6110 ++ * TOPOLOGY header for NUMA nodes with no CPU
6111 ++ * (e.g., because of CPU hotplug)
6112 ++ */
6113 ++ if (!isdigit(*cpu_list) && *cpu_list != '\0')
6114 + goto out;
6115 +
6116 + while (isdigit(*cpu_list)) {
6117 +@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
6118 +
6119 + if (nr_cpus > 0)
6120 + cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
6121 +- else
6122 ++ else if (*cpu_list != '\0')
6123 + cpus = cpu_map__default_new();
6124 ++ else
6125 ++ cpus = cpu_map__dummy_new();
6126 + invalid:
6127 + free(tmp_cpus);
6128 + out:
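The cpumap change distinguishes three outcomes: a parseable non-empty list yields a trimmed map, a non-empty string that parses to nothing falls back to the default map of online CPUs, and an empty string (a CPU-less NUMA node in the TOPOLOGY header) now yields an empty dummy map instead of that fallback. A compact sketch of just the new branch, with parsing elided:

#include <stdio.h>

static const char *map_kind(const char *cpu_list, int nr_cpus)
{
	if (nr_cpus > 0)
		return "trimmed map";
	else if (*cpu_list != '\0')
		return "default map";       /* old fallback */
	else
		return "dummy (empty) map"; /* new: no-CPU NUMA node */
}

int main(void)
{
	printf("\"0-3\" -> %s\n", map_kind("0-3", 4));
	printf("\"\"    -> %s\n", map_kind("", 0));
	return 0;
}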
6129 +diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
6130 +index 1904e7f6ec84..77126a5fa9b6 100644
6131 +--- a/tools/perf/util/ordered-events.c
6132 ++++ b/tools/perf/util/ordered-events.c
6133 +@@ -359,8 +359,10 @@ void ordered_events__free(struct ordered_events *oe)
6134 + * Current buffer might not have all the events allocated
6135 + * yet, we need to free only allocated ones ...
6136 + */
6137 +- list_del(&oe->buffer->list);
6138 +- ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
6139 ++ if (oe->buffer) {
6140 ++ list_del(&oe->buffer->list);
6141 ++ ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
6142 ++ }
6143 +
6144 + /* ... and continue with the rest */
6145 + list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
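The added guard matters when ordered_events__free() runs before any event was ever queued, i.e. while oe->buffer is still NULL and the unconditional list_del() would dereference it. A tiny sketch of the same teardown hazard, with illustrative names:

#include <stdlib.h>

struct events {
	struct buf {
		int *slots;
	} *buffer;	/* stays NULL until the first allocation */
};

static void events_free(struct events *ev)
{
	if (ev->buffer) {	/* the guard the patch adds */
		free(ev->buffer->slots);
		free(ev->buffer);
		ev->buffer = NULL;
	}
	/* ... then free any fully queued buffers ... */
}

int main(void)
{
	struct events ev = { .buffer = NULL };

	events_free(&ev);	/* safe even though nothing was queued */
	return 0;
}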
6146 +diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
6147 +index 63f758c655d5..64d1f36dee99 100644
6148 +--- a/tools/perf/util/setup.py
6149 ++++ b/tools/perf/util/setup.py
6150 +@@ -17,6 +17,8 @@ if cc == "clang":
6151 + vars[var] = sub("-mcet", "", vars[var])
6152 + if not clang_has_option("-fcf-protection"):
6153 + vars[var] = sub("-fcf-protection", "", vars[var])
6154 ++ if not clang_has_option("-fstack-clash-protection"):
6155 ++ vars[var] = sub("-fstack-clash-protection", "", vars[var])
6156 +
6157 + from distutils.core import setup, Extension
6158 +
6159 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
6160 +index 66a84d5846c8..03cb8c6d620a 100644
6161 +--- a/tools/perf/util/symbol-elf.c
6162 ++++ b/tools/perf/util/symbol-elf.c
6163 +@@ -87,6 +87,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
6164 + return GELF_ST_TYPE(sym->st_info);
6165 + }
6166 +
6167 ++static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
6168 ++{
6169 ++ return GELF_ST_VISIBILITY(sym->st_other);
6170 ++}
6171 ++
6172 + #ifndef STT_GNU_IFUNC
6173 + #define STT_GNU_IFUNC 10
6174 + #endif
6175 +@@ -111,7 +116,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
6176 + return elf_sym__type(sym) == STT_NOTYPE &&
6177 + sym->st_name != 0 &&
6178 + sym->st_shndx != SHN_UNDEF &&
6179 +- sym->st_shndx != SHN_ABS;
6180 ++ sym->st_shndx != SHN_ABS &&
6181 ++ elf_sym__visibility(sym) != STV_HIDDEN &&
6182 ++ elf_sym__visibility(sym) != STV_INTERNAL;
6183 + }
6184 +
6185 + static bool elf_sym__filter(GElf_Sym *sym)
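elf_sym__visibility() reads GELF_ST_VISIBILITY(st_other), and labels with hidden or internal visibility are now skipped. A standalone sketch of the same test using only the <elf.h> macros (perf itself uses the gelf wrappers, which have the same semantics):

#include <elf.h>
#include <stdio.h>

static int visibility_ok(unsigned char st_other)
{
	unsigned char vis = ELF64_ST_VISIBILITY(st_other);

	return vis != STV_HIDDEN && vis != STV_INTERNAL;
}

int main(void)
{
	printf("default:  %d\n", visibility_ok(STV_DEFAULT));	/* 1 */
	printf("internal: %d\n", visibility_ok(STV_INTERNAL));	/* 0 */
	printf("hidden:   %d\n", visibility_ok(STV_HIDDEN));	/* 0 */
	return 0;
}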
6186 +diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
6187 +index 315a44fa32af..84fd6f1bf33e 100644
6188 +--- a/tools/testing/selftests/bpf/bpf_util.h
6189 ++++ b/tools/testing/selftests/bpf/bpf_util.h
6190 +@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
6191 + unsigned int start, end, possible_cpus = 0;
6192 + char buff[128];
6193 + FILE *fp;
6194 +- int n;
6195 ++ int len, n, i, j = 0;
6196 +
6197 + fp = fopen(fcpu, "r");
6198 + if (!fp) {
6199 +@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
6200 + exit(1);
6201 + }
6202 +
6203 +- while (fgets(buff, sizeof(buff), fp)) {
6204 +- n = sscanf(buff, "%u-%u", &start, &end);
6205 +- if (n == 0) {
6206 +- printf("Failed to retrieve # possible CPUs!\n");
6207 +- exit(1);
6208 +- } else if (n == 1) {
6209 +- end = start;
6210 ++ if (!fgets(buff, sizeof(buff), fp)) {
6211 ++ printf("Failed to read %s!\n", fcpu);
6212 ++ exit(1);
6213 ++ }
6214 ++
6215 ++ len = strlen(buff);
6216 ++ for (i = 0; i <= len; i++) {
6217 ++ if (buff[i] == ',' || buff[i] == '\0') {
6218 ++ buff[i] = '\0';
6219 ++ n = sscanf(&buff[j], "%u-%u", &start, &end);
6220 ++ if (n <= 0) {
6221 ++ printf("Failed to retrieve # possible CPUs!\n");
6222 ++ exit(1);
6223 ++ } else if (n == 1) {
6224 ++ end = start;
6225 ++ }
6226 ++ possible_cpus += end - start + 1;
6227 ++ j = i + 1;
6228 + }
6229 +- possible_cpus = start == 0 ? end + 1 : 0;
6230 +- break;
6231 + }
6232 ++
6233 + fclose(fp);
6234 +
6235 + return possible_cpus;
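The rewritten helper sums every comma-separated range in the possible-CPU mask (e.g. "0-3,8-11") instead of assuming a single "0-N" range. A minimal, self-contained version of that parser, exercised on a made-up mask string rather than sysfs:

#include <stdio.h>
#include <string.h>

static unsigned int count_cpus(char *buff)
{
	unsigned int start, end, possible = 0;
	int n, i, j = 0, len = strlen(buff);

	for (i = 0; i <= len; i++) {
		if (buff[i] == ',' || buff[i] == '\0') {
			buff[i] = '\0';
			n = sscanf(&buff[j], "%u-%u", &start, &end);
			if (n <= 0)
				return 0;	/* malformed token */
			if (n == 1)
				end = start;	/* single CPU, not a range */
			possible += end - start + 1;
			j = i + 1;
		}
	}
	return possible;
}

int main(void)
{
	char mask[] = "0-3,8-11";	/* illustrative mask string */

	printf("%u possible CPUs\n", count_cpus(mask));	/* prints 8 */
	return 0;
}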
6236 +diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
6237 +index bab13dd025a6..0d26b5e3f966 100755
6238 +--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
6239 ++++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
6240 +@@ -37,6 +37,10 @@ prerequisite()
6241 + exit $ksft_skip
6242 + fi
6243 +
6244 ++ present_cpus=`cat $SYSFS/devices/system/cpu/present`
6245 ++ present_max=${present_cpus##*-}
6246 ++ echo "present_cpus = $present_cpus present_max = $present_max"
6247 ++
6248 + echo -e "\t Cpus in online state: $online_cpus"
6249 +
6250 + offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
6251 +@@ -151,6 +155,8 @@ online_cpus=0
6252 + online_max=0
6253 + offline_cpus=0
6254 + offline_max=0
6255 ++present_cpus=0
6256 ++present_max=0
6257 +
6258 + while getopts e:ahp: opt; do
6259 + case $opt in
6260 +@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
6261 + online_cpu_expect_success $online_max
6262 +
6263 + if [[ $offline_cpus -gt 0 ]]; then
6264 +- echo -e "\t offline to online to offline: cpu $offline_max"
6265 +- online_cpu_expect_success $offline_max
6266 +- offline_cpu_expect_success $offline_max
6267 ++ echo -e "\t offline to online to offline: cpu $present_max"
6268 ++ online_cpu_expect_success $present_max
6269 ++ offline_cpu_expect_success $present_max
6270 ++ online_cpu $present_max
6271 + fi
6272 + exit 0
6273 + else
6274 +diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
6275 +index 923570a9708a..68e2295e7589 100644
6276 +--- a/tools/testing/selftests/net/Makefile
6277 ++++ b/tools/testing/selftests/net/Makefile
6278 +@@ -19,6 +19,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
6279 + KSFT_KHDR_INSTALL := 1
6280 + include ../lib.mk
6281 +
6282 +-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
6283 ++$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
6284 + $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
6285 + $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
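This hunk, like the timers Makefile change near the end of the patch, moves a library from LDFLAGS to LDLIBS. GNU make's implicit link rule expands roughly to $(CC) $(LDFLAGS) prog.o $(LOADLIBES) $(LDLIBS), so a -l flag in LDFLAGS lands before the objects that need it, and its symbols can be discarded before anything requests them. A small demonstration, using -lm as a dependency-free stand-in for -lnuma:

/* why_ldlibs.c
 *
 * With link-order-sensitive setups (static archives, or shared
 * libraries under --as-needed) the first command may fail with an
 * undefined reference to sqrt, while the second always links:
 *
 *	cc -lm why_ldlibs.c -o demo	# library first: LDFLAGS position
 *	cc why_ldlibs.c -lm -o demo	# library last: LDLIBS position
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	printf("sqrt(2) = %f\n", sqrt(2.0));
	return 0;
}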
6286 +diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
6287 +index 47ed6cef93fb..c9ff2b47bd1c 100644
6288 +--- a/tools/testing/selftests/netfilter/Makefile
6289 ++++ b/tools/testing/selftests/netfilter/Makefile
6290 +@@ -1,6 +1,6 @@
6291 + # SPDX-License-Identifier: GPL-2.0
6292 + # Makefile for netfilter selftests
6293 +
6294 +-TEST_PROGS := nft_trans_stress.sh
6295 ++TEST_PROGS := nft_trans_stress.sh nft_nat.sh
6296 +
6297 + include ../lib.mk
6298 +diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
6299 +index 1017313e41a8..59caa8f71cd8 100644
6300 +--- a/tools/testing/selftests/netfilter/config
6301 ++++ b/tools/testing/selftests/netfilter/config
6302 +@@ -1,2 +1,2 @@
6303 + CONFIG_NET_NS=y
6304 +-NF_TABLES_INET=y
6305 ++CONFIG_NF_TABLES_INET=y
6306 +diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
6307 +new file mode 100755
6308 +index 000000000000..8ec76681605c
6309 +--- /dev/null
6310 ++++ b/tools/testing/selftests/netfilter/nft_nat.sh
6311 +@@ -0,0 +1,762 @@
6312 ++#!/bin/bash
6313 ++#
6314 ++# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
6315 ++#
6316 ++
6317 ++# Kselftest framework requirement - SKIP code is 4.
6318 ++ksft_skip=4
6319 ++ret=0
6320 ++
6321 ++nft --version > /dev/null 2>&1
6322 ++if [ $? -ne 0 ];then
6323 ++ echo "SKIP: Could not run test without nft tool"
6324 ++ exit $ksft_skip
6325 ++fi
6326 ++
6327 ++ip -Version > /dev/null 2>&1
6328 ++if [ $? -ne 0 ];then
6329 ++ echo "SKIP: Could not run test without ip tool"
6330 ++ exit $ksft_skip
6331 ++fi
6332 ++
6333 ++ip netns add ns0
6334 ++ip netns add ns1
6335 ++ip netns add ns2
6336 ++
6337 ++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
6338 ++ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
6339 ++
6340 ++ip -net ns0 link set lo up
6341 ++ip -net ns0 link set veth0 up
6342 ++ip -net ns0 addr add 10.0.1.1/24 dev veth0
6343 ++ip -net ns0 addr add dead:1::1/64 dev veth0
6344 ++
6345 ++ip -net ns0 link set veth1 up
6346 ++ip -net ns0 addr add 10.0.2.1/24 dev veth1
6347 ++ip -net ns0 addr add dead:2::1/64 dev veth1
6348 ++
6349 ++for i in 1 2; do
6350 ++ ip -net ns$i link set lo up
6351 ++ ip -net ns$i link set eth0 up
6352 ++ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
6353 ++ ip -net ns$i route add default via 10.0.$i.1
6354 ++ ip -net ns$i addr add dead:$i::99/64 dev eth0
6355 ++ ip -net ns$i route add default via dead:$i::1
6356 ++done
6357 ++
6358 ++bad_counter()
6359 ++{
6360 ++ local ns=$1
6361 ++ local counter=$2
6362 ++ local expect=$3
6363 ++
6364 ++ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
6365 ++ ip netns exec $ns nft list counter inet filter $counter 1>&2
6366 ++}
6367 ++
6368 ++check_counters()
6369 ++{
6370 ++ ns=$1
6371 ++ local lret=0
6372 ++
6373 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
6374 ++ if [ $? -ne 0 ]; then
6375 ++ bad_counter $ns ns0in "packets 1 bytes 84"
6376 ++ lret=1
6377 ++ fi
6378 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
6379 ++ if [ $? -ne 0 ]; then
6380 ++ bad_counter $ns ns0out "packets 1 bytes 84"
6381 ++ lret=1
6382 ++ fi
6383 ++
6384 ++ expect="packets 1 bytes 104"
6385 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
6386 ++ if [ $? -ne 0 ]; then
6387 ++ bad_counter $ns ns0in6 "$expect"
6388 ++ lret=1
6389 ++ fi
6390 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
6391 ++ if [ $? -ne 0 ]; then
6392 ++ bad_counter $ns ns0out6 "$expect"
6393 ++ lret=1
6394 ++ fi
6395 ++
6396 ++ return $lret
6397 ++}
6398 ++
6399 ++check_ns0_counters()
6400 ++{
6401 ++ local ns=$1
6402 ++ local lret=0
6403 ++
6404 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
6405 ++ if [ $? -ne 0 ]; then
6406 ++ bad_counter ns0 ns0in "packets 0 bytes 0"
6407 ++ lret=1
6408 ++ fi
6409 ++
6410 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
6411 ++ if [ $? -ne 0 ]; then
6412 ++ bad_counter ns0 ns0in6 "packets 0 bytes 0"
6413 ++ lret=1
6414 ++ fi
6415 ++
6416 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
6417 ++ if [ $? -ne 0 ]; then
6418 ++ bad_counter ns0 ns0out "packets 0 bytes 0"
6419 ++ lret=1
6420 ++ fi
6421 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
6422 ++ if [ $? -ne 0 ]; then
6423 ++ bad_counter ns0 ns0out6 "packets 0 bytes 0"
6424 ++ lret=1
6425 ++ fi
6426 ++
6427 ++ for dir in "in" "out" ; do
6428 ++ expect="packets 1 bytes 84"
6429 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
6430 ++ if [ $? -ne 0 ]; then
6431 ++ bad_counter ns0 $ns$dir "$expect"
6432 ++ lret=1
6433 ++ fi
6434 ++
6435 ++ expect="packets 1 bytes 104"
6436 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
6437 ++ if [ $? -ne 0 ]; then
6438 ++			bad_counter ns0 ${ns}${dir}6 "$expect"
6439 ++ lret=1
6440 ++ fi
6441 ++ done
6442 ++
6443 ++ return $lret
6444 ++}
6445 ++
6446 ++reset_counters()
6447 ++{
6448 ++ for i in 0 1 2;do
6449 ++ ip netns exec ns$i nft reset counters inet > /dev/null
6450 ++ done
6451 ++}
6452 ++
6453 ++test_local_dnat6()
6454 ++{
6455 ++ local lret=0
6456 ++ip netns exec ns0 nft -f - <<EOF
6457 ++table ip6 nat {
6458 ++ chain output {
6459 ++ type nat hook output priority 0; policy accept;
6460 ++ ip6 daddr dead:1::99 dnat to dead:2::99
6461 ++ }
6462 ++}
6463 ++EOF
6464 ++ if [ $? -ne 0 ]; then
6465 ++		echo "SKIP: Could not add ip6 dnat hook"
6466 ++ return $ksft_skip
6467 ++ fi
6468 ++
6469 ++ # ping netns1, expect rewrite to netns2
6470 ++ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
6471 ++ if [ $? -ne 0 ]; then
6472 ++ lret=1
6473 ++ echo "ERROR: ping6 failed"
6474 ++ return $lret
6475 ++ fi
6476 ++
6477 ++ expect="packets 0 bytes 0"
6478 ++ for dir in "in6" "out6" ; do
6479 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
6480 ++ if [ $? -ne 0 ]; then
6481 ++ bad_counter ns0 ns1$dir "$expect"
6482 ++ lret=1
6483 ++ fi
6484 ++ done
6485 ++
6486 ++ expect="packets 1 bytes 104"
6487 ++ for dir in "in6" "out6" ; do
6488 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6489 ++ if [ $? -ne 0 ]; then
6490 ++ bad_counter ns0 ns2$dir "$expect"
6491 ++ lret=1
6492 ++ fi
6493 ++ done
6494 ++
6495 ++ # expect 0 count in ns1
6496 ++ expect="packets 0 bytes 0"
6497 ++ for dir in "in6" "out6" ; do
6498 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
6499 ++ if [ $? -ne 0 ]; then
6500 ++ bad_counter ns1 ns0$dir "$expect"
6501 ++ lret=1
6502 ++ fi
6503 ++ done
6504 ++
6505 ++ # expect 1 packet in ns2
6506 ++ expect="packets 1 bytes 104"
6507 ++ for dir in "in6" "out6" ; do
6508 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
6509 ++ if [ $? -ne 0 ]; then
6510 ++ bad_counter ns2 ns0$dir "$expect"
6511 ++ lret=1
6512 ++ fi
6513 ++ done
6514 ++
6515 ++ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
6516 ++ ip netns exec ns0 nft flush chain ip6 nat output
6517 ++
6518 ++ return $lret
6519 ++}
6520 ++
6521 ++test_local_dnat()
6522 ++{
6523 ++ local lret=0
6524 ++ip netns exec ns0 nft -f - <<EOF
6525 ++table ip nat {
6526 ++ chain output {
6527 ++ type nat hook output priority 0; policy accept;
6528 ++ ip daddr 10.0.1.99 dnat to 10.0.2.99
6529 ++ }
6530 ++}
6531 ++EOF
6532 ++ # ping netns1, expect rewrite to netns2
6533 ++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
6534 ++ if [ $? -ne 0 ]; then
6535 ++ lret=1
6536 ++ echo "ERROR: ping failed"
6537 ++ return $lret
6538 ++ fi
6539 ++
6540 ++ expect="packets 0 bytes 0"
6541 ++ for dir in "in" "out" ; do
6542 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
6543 ++ if [ $? -ne 0 ]; then
6544 ++ bad_counter ns0 ns1$dir "$expect"
6545 ++ lret=1
6546 ++ fi
6547 ++ done
6548 ++
6549 ++ expect="packets 1 bytes 84"
6550 ++ for dir in "in" "out" ; do
6551 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6552 ++ if [ $? -ne 0 ]; then
6553 ++ bad_counter ns0 ns2$dir "$expect"
6554 ++ lret=1
6555 ++ fi
6556 ++ done
6557 ++
6558 ++ # expect 0 count in ns1
6559 ++ expect="packets 0 bytes 0"
6560 ++ for dir in "in" "out" ; do
6561 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
6562 ++ if [ $? -ne 0 ]; then
6563 ++ bad_counter ns1 ns0$dir "$expect"
6564 ++ lret=1
6565 ++ fi
6566 ++ done
6567 ++
6568 ++ # expect 1 packet in ns2
6569 ++ expect="packets 1 bytes 84"
6570 ++ for dir in "in" "out" ; do
6571 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
6572 ++ if [ $? -ne 0 ]; then
6573 ++ bad_counter ns2 ns0$dir "$expect"
6574 ++ lret=1
6575 ++ fi
6576 ++ done
6577 ++
6578 ++ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
6579 ++
6580 ++ ip netns exec ns0 nft flush chain ip nat output
6581 ++
6582 ++ reset_counters
6583 ++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
6584 ++ if [ $? -ne 0 ]; then
6585 ++ lret=1
6586 ++ echo "ERROR: ping failed"
6587 ++ return $lret
6588 ++ fi
6589 ++
6590 ++ expect="packets 1 bytes 84"
6591 ++ for dir in "in" "out" ; do
6592 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
6593 ++ if [ $? -ne 0 ]; then
6594 ++			bad_counter ns0 ns1$dir "$expect"
6595 ++ lret=1
6596 ++ fi
6597 ++ done
6598 ++ expect="packets 0 bytes 0"
6599 ++ for dir in "in" "out" ; do
6600 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6601 ++ if [ $? -ne 0 ]; then
6602 ++ bad_counter ns0 ns2$dir "$expect"
6603 ++ lret=1
6604 ++ fi
6605 ++ done
6606 ++
6607 ++ # expect 1 count in ns1
6608 ++ expect="packets 1 bytes 84"
6609 ++ for dir in "in" "out" ; do
6610 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
6611 ++ if [ $? -ne 0 ]; then
6612 ++			bad_counter ns1 ns0$dir "$expect"
6613 ++ lret=1
6614 ++ fi
6615 ++ done
6616 ++
6617 ++	# expect 0 packets in ns2
6618 ++ expect="packets 0 bytes 0"
6619 ++ for dir in "in" "out" ; do
6620 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
6621 ++ if [ $? -ne 0 ]; then
6622 ++			bad_counter ns2 ns0$dir "$expect"
6623 ++ lret=1
6624 ++ fi
6625 ++ done
6626 ++
6627 ++ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
6628 ++
6629 ++ return $lret
6630 ++}
6631 ++
6632 ++
6633 ++test_masquerade6()
6634 ++{
6635 ++ local lret=0
6636 ++
6637 ++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
6638 ++
6639 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6640 ++ if [ $? -ne 0 ] ; then
6641 ++ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
6642 ++		lret=1
6643 ++		return $lret
6644 ++ fi
6645 ++
6646 ++ expect="packets 1 bytes 104"
6647 ++ for dir in "in6" "out6" ; do
6648 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6649 ++ if [ $? -ne 0 ]; then
6650 ++ bad_counter ns1 ns2$dir "$expect"
6651 ++ lret=1
6652 ++ fi
6653 ++
6654 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6655 ++ if [ $? -ne 0 ]; then
6656 ++ bad_counter ns2 ns1$dir "$expect"
6657 ++ lret=1
6658 ++ fi
6659 ++ done
6660 ++
6661 ++ reset_counters
6662 ++
6663 ++# add masquerading rule
6664 ++ip netns exec ns0 nft -f - <<EOF
6665 ++table ip6 nat {
6666 ++ chain postrouting {
6667 ++ type nat hook postrouting priority 0; policy accept;
6668 ++ meta oif veth0 masquerade
6669 ++ }
6670 ++}
6671 ++EOF
6672 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6673 ++ if [ $? -ne 0 ] ; then
6674 ++ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
6675 ++ lret=1
6676 ++ fi
6677 ++
6678 ++ # ns1 should have seen packets from ns0, due to masquerade
6679 ++ expect="packets 1 bytes 104"
6680 ++ for dir in "in6" "out6" ; do
6681 ++
6682 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
6683 ++ if [ $? -ne 0 ]; then
6684 ++ bad_counter ns1 ns0$dir "$expect"
6685 ++ lret=1
6686 ++ fi
6687 ++
6688 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6689 ++ if [ $? -ne 0 ]; then
6690 ++ bad_counter ns2 ns1$dir "$expect"
6691 ++ lret=1
6692 ++ fi
6693 ++ done
6694 ++
6695 ++ # ns1 should not have seen packets from ns2, due to masquerade
6696 ++ expect="packets 0 bytes 0"
6697 ++ for dir in "in6" "out6" ; do
6698 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6699 ++ if [ $? -ne 0 ]; then
6700 ++			bad_counter ns1 ns2$dir "$expect"
6701 ++ lret=1
6702 ++ fi
6703 ++
6704 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6705 ++ if [ $? -ne 0 ]; then
6706 ++			bad_counter ns1 ns2$dir "$expect"
6707 ++ lret=1
6708 ++ fi
6709 ++ done
6710 ++
6711 ++ ip netns exec ns0 nft flush chain ip6 nat postrouting
6712 ++ if [ $? -ne 0 ]; then
6713 ++ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
6714 ++ lret=1
6715 ++ fi
6716 ++
6717 ++ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
6718 ++
6719 ++ return $lret
6720 ++}
6721 ++
6722 ++test_masquerade()
6723 ++{
6724 ++ local lret=0
6725 ++
6726 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
6727 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
6728 ++
6729 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6730 ++ if [ $? -ne 0 ] ; then
6731 ++		echo "ERROR: cannot ping ns1 from ns2"
6732 ++ lret=1
6733 ++ fi
6734 ++
6735 ++ expect="packets 1 bytes 84"
6736 ++ for dir in "in" "out" ; do
6737 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6738 ++ if [ $? -ne 0 ]; then
6739 ++ bad_counter ns1 ns2$dir "$expect"
6740 ++ lret=1
6741 ++ fi
6742 ++
6743 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6744 ++ if [ $? -ne 0 ]; then
6745 ++ bad_counter ns2 ns1$dir "$expect"
6746 ++ lret=1
6747 ++ fi
6748 ++ done
6749 ++
6750 ++ reset_counters
6751 ++
6752 ++# add masquerading rule
6753 ++ip netns exec ns0 nft -f - <<EOF
6754 ++table ip nat {
6755 ++ chain postrouting {
6756 ++ type nat hook postrouting priority 0; policy accept;
6757 ++ meta oif veth0 masquerade
6758 ++ }
6759 ++}
6760 ++EOF
6761 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6762 ++ if [ $? -ne 0 ] ; then
6763 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
6764 ++ lret=1
6765 ++ fi
6766 ++
6767 ++ # ns1 should have seen packets from ns0, due to masquerade
6768 ++ expect="packets 1 bytes 84"
6769 ++ for dir in "in" "out" ; do
6770 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
6771 ++ if [ $? -ne 0 ]; then
6772 ++ bad_counter ns1 ns0$dir "$expect"
6773 ++ lret=1
6774 ++ fi
6775 ++
6776 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6777 ++ if [ $? -ne 0 ]; then
6778 ++ bad_counter ns2 ns1$dir "$expect"
6779 ++ lret=1
6780 ++ fi
6781 ++ done
6782 ++
6783 ++ # ns1 should not have seen packets from ns2, due to masquerade
6784 ++ expect="packets 0 bytes 0"
6785 ++ for dir in "in" "out" ; do
6786 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6787 ++			bad_counter ns1 ns2$dir "$expect"
6788 ++ bad_counter ns1 ns0$dir "$expect"
6789 ++ lret=1
6790 ++ fi
6791 ++
6792 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6793 ++ if [ $? -ne 0 ]; then
6794 ++			bad_counter ns1 ns2$dir "$expect"
6795 ++ lret=1
6796 ++ fi
6797 ++ done
6798 ++
6799 ++ ip netns exec ns0 nft flush chain ip nat postrouting
6800 ++ if [ $? -ne 0 ]; then
6801 ++ echo "ERROR: Could not flush nat postrouting" 1>&2
6802 ++ lret=1
6803 ++ fi
6804 ++
6805 ++ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
6806 ++
6807 ++ return $lret
6808 ++}
6809 ++
6810 ++test_redirect6()
6811 ++{
6812 ++ local lret=0
6813 ++
6814 ++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
6815 ++
6816 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6817 ++ if [ $? -ne 0 ] ; then
6818 ++		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
6819 ++ lret=1
6820 ++ fi
6821 ++
6822 ++ expect="packets 1 bytes 104"
6823 ++ for dir in "in6" "out6" ; do
6824 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6825 ++ if [ $? -ne 0 ]; then
6826 ++ bad_counter ns1 ns2$dir "$expect"
6827 ++ lret=1
6828 ++ fi
6829 ++
6830 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6831 ++ if [ $? -ne 0 ]; then
6832 ++ bad_counter ns2 ns1$dir "$expect"
6833 ++ lret=1
6834 ++ fi
6835 ++ done
6836 ++
6837 ++ reset_counters
6838 ++
6839 ++# add redirect rule
6840 ++ip netns exec ns0 nft -f - <<EOF
6841 ++table ip6 nat {
6842 ++ chain prerouting {
6843 ++ type nat hook prerouting priority 0; policy accept;
6844 ++ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
6845 ++ }
6846 ++}
6847 ++EOF
6848 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6849 ++ if [ $? -ne 0 ] ; then
6850 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
6851 ++ lret=1
6852 ++ fi
6853 ++
6854 ++ # ns1 should have seen no packets from ns2, due to redirection
6855 ++ expect="packets 0 bytes 0"
6856 ++ for dir in "in6" "out6" ; do
6857 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6858 ++ if [ $? -ne 0 ]; then
6859 ++			bad_counter ns1 ns2$dir "$expect"
6860 ++ lret=1
6861 ++ fi
6862 ++ done
6863 ++
6864 ++	# ns0 should have seen packets from ns2, due to the redirect rule
6865 ++ expect="packets 1 bytes 104"
6866 ++ for dir in "in6" "out6" ; do
6867 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6868 ++ if [ $? -ne 0 ]; then
6869 ++			bad_counter ns0 ns2$dir "$expect"
6870 ++ lret=1
6871 ++ fi
6872 ++ done
6873 ++
6874 ++ ip netns exec ns0 nft delete table ip6 nat
6875 ++ if [ $? -ne 0 ]; then
6876 ++ echo "ERROR: Could not delete ip6 nat table" 1>&2
6877 ++ lret=1
6878 ++ fi
6879 ++
6880 ++ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
6881 ++
6882 ++ return $lret
6883 ++}
6884 ++
6885 ++test_redirect()
6886 ++{
6887 ++ local lret=0
6888 ++
6889 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
6890 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
6891 ++
6892 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6893 ++ if [ $? -ne 0 ] ; then
6894 ++ echo "ERROR: cannot ping ns1 from ns2"
6895 ++ lret=1
6896 ++ fi
6897 ++
6898 ++ expect="packets 1 bytes 84"
6899 ++ for dir in "in" "out" ; do
6900 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6901 ++ if [ $? -ne 0 ]; then
6902 ++ bad_counter ns1 ns2$dir "$expect"
6903 ++ lret=1
6904 ++ fi
6905 ++
6906 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6907 ++ if [ $? -ne 0 ]; then
6908 ++ bad_counter ns2 ns1$dir "$expect"
6909 ++ lret=1
6910 ++ fi
6911 ++ done
6912 ++
6913 ++ reset_counters
6914 ++
6915 ++# add redirect rule
6916 ++ip netns exec ns0 nft -f - <<EOF
6917 ++table ip nat {
6918 ++ chain prerouting {
6919 ++ type nat hook prerouting priority 0; policy accept;
6920 ++ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
6921 ++ }
6922 ++}
6923 ++EOF
6924 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6925 ++ if [ $? -ne 0 ] ; then
6926 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
6927 ++ lret=1
6928 ++ fi
6929 ++
6930 ++ # ns1 should have seen no packets from ns2, due to redirection
6931 ++ expect="packets 0 bytes 0"
6932 ++ for dir in "in" "out" ; do
6933 ++
6934 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6935 ++ if [ $? -ne 0 ]; then
6936 ++			bad_counter ns1 ns2$dir "$expect"
6937 ++ lret=1
6938 ++ fi
6939 ++ done
6940 ++
6941 ++	# ns0 should have seen packets from ns2, due to the redirect rule
6942 ++ expect="packets 1 bytes 84"
6943 ++ for dir in "in" "out" ; do
6944 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6945 ++ if [ $? -ne 0 ]; then
6946 ++			bad_counter ns0 ns2$dir "$expect"
6947 ++ lret=1
6948 ++ fi
6949 ++ done
6950 ++
6951 ++ ip netns exec ns0 nft delete table ip nat
6952 ++ if [ $? -ne 0 ]; then
6953 ++ echo "ERROR: Could not delete nat table" 1>&2
6954 ++ lret=1
6955 ++ fi
6956 ++
6957 ++ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
6958 ++
6959 ++ return $lret
6960 ++}
6961 ++
6962 ++
6963 ++# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
6964 ++for i in 0 1 2; do
6965 ++ip netns exec ns$i nft -f - <<EOF
6966 ++table inet filter {
6967 ++ counter ns0in {}
6968 ++ counter ns1in {}
6969 ++ counter ns2in {}
6970 ++
6971 ++ counter ns0out {}
6972 ++ counter ns1out {}
6973 ++ counter ns2out {}
6974 ++
6975 ++ counter ns0in6 {}
6976 ++ counter ns1in6 {}
6977 ++ counter ns2in6 {}
6978 ++
6979 ++ counter ns0out6 {}
6980 ++ counter ns1out6 {}
6981 ++ counter ns2out6 {}
6982 ++
6983 ++ map nsincounter {
6984 ++ type ipv4_addr : counter
6985 ++ elements = { 10.0.1.1 : "ns0in",
6986 ++ 10.0.2.1 : "ns0in",
6987 ++ 10.0.1.99 : "ns1in",
6988 ++ 10.0.2.99 : "ns2in" }
6989 ++ }
6990 ++
6991 ++ map nsincounter6 {
6992 ++ type ipv6_addr : counter
6993 ++ elements = { dead:1::1 : "ns0in6",
6994 ++ dead:2::1 : "ns0in6",
6995 ++ dead:1::99 : "ns1in6",
6996 ++ dead:2::99 : "ns2in6" }
6997 ++ }
6998 ++
6999 ++ map nsoutcounter {
7000 ++ type ipv4_addr : counter
7001 ++ elements = { 10.0.1.1 : "ns0out",
7002 ++ 10.0.2.1 : "ns0out",
7003 ++ 10.0.1.99: "ns1out",
7004 ++ 10.0.2.99: "ns2out" }
7005 ++ }
7006 ++
7007 ++ map nsoutcounter6 {
7008 ++ type ipv6_addr : counter
7009 ++ elements = { dead:1::1 : "ns0out6",
7010 ++ dead:2::1 : "ns0out6",
7011 ++ dead:1::99 : "ns1out6",
7012 ++ dead:2::99 : "ns2out6" }
7013 ++ }
7014 ++
7015 ++ chain input {
7016 ++ type filter hook input priority 0; policy accept;
7017 ++ counter name ip saddr map @nsincounter
7018 ++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
7019 ++ }
7020 ++ chain output {
7021 ++ type filter hook output priority 0; policy accept;
7022 ++ counter name ip daddr map @nsoutcounter
7023 ++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
7024 ++ }
7025 ++}
7026 ++EOF
7027 ++done
7028 ++
7029 ++sleep 3
7030 ++# test basic connectivity
7031 ++for i in 1 2; do
7032 ++ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
7033 ++ if [ $? -ne 0 ];then
7034 ++ echo "ERROR: Could not reach other namespace(s)" 1>&2
7035 ++ ret=1
7036 ++ fi
7037 ++
7038 ++ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
7039 ++ if [ $? -ne 0 ];then
7040 ++ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
7041 ++ ret=1
7042 ++ fi
7043 ++ check_counters ns$i
7044 ++ if [ $? -ne 0 ]; then
7045 ++ ret=1
7046 ++ fi
7047 ++
7048 ++ check_ns0_counters ns$i
7049 ++ if [ $? -ne 0 ]; then
7050 ++ ret=1
7051 ++ fi
7052 ++ reset_counters
7053 ++done
7054 ++
7055 ++if [ $ret -eq 0 ];then
7056 ++ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
7057 ++fi
7058 ++
7059 ++reset_counters
7060 ++test_local_dnat
7061 ++test_local_dnat6
7062 ++
7063 ++reset_counters
7064 ++test_masquerade
7065 ++test_masquerade6
7066 ++
7067 ++reset_counters
7068 ++test_redirect
7069 ++test_redirect6
7070 ++
7071 ++for i in 0 1 2; do ip netns del ns$i;done
7072 ++
7073 ++exit $ret
7074 +diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
7075 +index 82121a81681f..29bac5ef9a93 100644
7076 +--- a/tools/testing/selftests/proc/.gitignore
7077 ++++ b/tools/testing/selftests/proc/.gitignore
7078 +@@ -10,4 +10,5 @@
7079 + /proc-uptime-002
7080 + /read
7081 + /self
7082 ++/setns-dcache
7083 + /thread-self
7084 +diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
7085 +index 1c12c34cf85d..434d033ee067 100644
7086 +--- a/tools/testing/selftests/proc/Makefile
7087 ++++ b/tools/testing/selftests/proc/Makefile
7088 +@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
7089 + TEST_GEN_PROGS += proc-uptime-002
7090 + TEST_GEN_PROGS += read
7091 + TEST_GEN_PROGS += self
7092 ++TEST_GEN_PROGS += setns-dcache
7093 + TEST_GEN_PROGS += thread-self
7094 +
7095 + include ../lib.mk
7096 +diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
7097 +new file mode 100644
7098 +index 000000000000..60ab197a73fc
7099 +--- /dev/null
7100 ++++ b/tools/testing/selftests/proc/setns-dcache.c
7101 +@@ -0,0 +1,129 @@
7102 ++/*
7103 ++ * Copyright © 2019 Alexey Dobriyan <adobriyan@×××××.com>
7104 ++ *
7105 ++ * Permission to use, copy, modify, and distribute this software for any
7106 ++ * purpose with or without fee is hereby granted, provided that the above
7107 ++ * copyright notice and this permission notice appear in all copies.
7108 ++ *
7109 ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
7110 ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
7111 ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
7112 ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
7113 ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
7114 ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
7115 ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
7116 ++ */
7117 ++/*
7118 ++ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
7119 ++ * if old one is in dcache.
7120 ++ *
7121 ++ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
7122 ++ */
7123 ++#undef NDEBUG
7124 ++#include <assert.h>
7125 ++#include <errno.h>
7126 ++#include <sched.h>
7127 ++#include <signal.h>
7128 ++#include <stdio.h>
7129 ++#include <stdlib.h>
7130 ++#include <string.h>
7131 ++#include <unistd.h>
7132 ++#include <sys/types.h>
7133 ++#include <sys/stat.h>
7134 ++#include <fcntl.h>
7135 ++#include <sys/socket.h>
7136 ++
7137 ++static pid_t pid = -1;
7138 ++
7139 ++static void f(void)
7140 ++{
7141 ++ if (pid > 0) {
7142 ++ kill(pid, SIGTERM);
7143 ++ }
7144 ++}
7145 ++
7146 ++int main(void)
7147 ++{
7148 ++ int fd[2];
7149 ++ char _ = 0;
7150 ++ int nsfd;
7151 ++
7152 ++ atexit(f);
7153 ++
7154 ++	/* Check for privileges and syscall availability straight away. */
7155 ++ if (unshare(CLONE_NEWNET) == -1) {
7156 ++ if (errno == ENOSYS || errno == EPERM) {
7157 ++ return 4;
7158 ++ }
7159 ++ return 1;
7160 ++ }
7161 ++	/* Create a distinguisher between two otherwise empty net namespaces. */
7162 ++ if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
7163 ++ return 1;
7164 ++ }
7165 ++
7166 ++ if (pipe(fd) == -1) {
7167 ++ return 1;
7168 ++ }
7169 ++
7170 ++ pid = fork();
7171 ++ if (pid == -1) {
7172 ++ return 1;
7173 ++ }
7174 ++
7175 ++ if (pid == 0) {
7176 ++ if (unshare(CLONE_NEWNET) == -1) {
7177 ++ return 1;
7178 ++ }
7179 ++
7180 ++ if (write(fd[1], &_, 1) != 1) {
7181 ++ return 1;
7182 ++ }
7183 ++
7184 ++ pause();
7185 ++
7186 ++ return 0;
7187 ++ }
7188 ++
7189 ++ if (read(fd[0], &_, 1) != 1) {
7190 ++ return 1;
7191 ++ }
7192 ++
7193 ++ {
7194 ++ char buf[64];
7195 ++ snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
7196 ++ nsfd = open(buf, O_RDONLY);
7197 ++ if (nsfd == -1) {
7198 ++ return 1;
7199 ++ }
7200 ++ }
7201 ++
7202 ++ /* Reliably pin dentry into dcache. */
7203 ++ (void)open("/proc/net/unix", O_RDONLY);
7204 ++
7205 ++ if (setns(nsfd, CLONE_NEWNET) == -1) {
7206 ++ return 1;
7207 ++ }
7208 ++
7209 ++ kill(pid, SIGTERM);
7210 ++ pid = 0;
7211 ++
7212 ++ {
7213 ++ char buf[4096];
7214 ++ ssize_t rv;
7215 ++ int fd;
7216 ++
7217 ++ fd = open("/proc/net/unix", O_RDONLY);
7218 ++ if (fd == -1) {
7219 ++ return 1;
7220 ++ }
7221 ++
7222 ++#define S "Num RefCount Protocol Flags Type St Inode Path\n"
7223 ++ rv = read(fd, buf, sizeof(buf));
7224 ++
7225 ++ assert(rv == strlen(S));
7226 ++ assert(memcmp(buf, S, strlen(S)) == 0);
7227 ++ }
7228 ++
7229 ++ return 0;
7230 ++}
7231 +diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
7232 +index c02683cfb6c9..7656c7ce79d9 100644
7233 +--- a/tools/testing/selftests/timers/Makefile
7234 ++++ b/tools/testing/selftests/timers/Makefile
7235 +@@ -1,6 +1,6 @@
7236 + # SPDX-License-Identifier: GPL-2.0
7237 + CFLAGS += -O3 -Wl,-no-as-needed -Wall
7238 +-LDFLAGS += -lrt -lpthread -lm
7239 ++LDLIBS += -lrt -lpthread -lm
7240 +
7241 + # these are all "safe" tests that don't modify
7242 + # system time or require escalated privileges