From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 13 Mar 2019 22:08:14
Message-Id: 1552514869.1f44abc4f0483b4c40349d848e8bbcd3ebb200ed.mpagano@gentoo
commit:     1f44abc4f0483b4c40349d848e8bbcd3ebb200ed
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 13 22:07:49 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 13 22:07:49 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f44abc4

proj/linux-patches: Linux patch 4.19.29

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1028_linux-4.19.29.patch | 6412 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6416 insertions(+)
diff --git a/0000_README b/0000_README
index 292278e..9c98a66 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch: 1027_linux-4.19.28.patch
From: http://www.kernel.org
Desc: Linux 4.19.28

+Patch: 1028_linux-4.19.29.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.29
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-4.19.29.patch b/1028_linux-4.19.29.patch
new file mode 100644
index 0000000..0eb9cf9
--- /dev/null
+++ b/1028_linux-4.19.29.patch
@@ -0,0 +1,6412 @@
+diff --git a/Makefile b/Makefile
+index c6ac023ba33a..6e526583291c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
+index 27a1ee28c3bb..94efca78c42f 100644
+--- a/arch/arm/boot/dts/exynos3250.dtsi
++++ b/arch/arm/boot/dts/exynos3250.dtsi
+@@ -168,6 +168,9 @@
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
++ clock-names = "clkout8";
++ clocks = <&cmu CLK_FIN_PLL>;
++ #clock-cells = <1>;
+ };
+
+ mipi_phy: video-phy {
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index a09e46c9dbc0..00820d239753 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -49,7 +49,7 @@
+ };
+
+ emmc_pwrseq: pwrseq {
+- pinctrl-0 = <&sd1_cd>;
++ pinctrl-0 = <&emmc_rstn>;
+ pinctrl-names = "default";
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
+@@ -161,12 +161,6 @@
+ cpu0-supply = <&buck2_reg>;
+ };
+
+-/* RSTN signal for eMMC */
+-&sd1_cd {
+- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+-};
+-
+ &pinctrl_1 {
+ gpio_power_key: power_key {
+ samsung,pins = "gpx1-3";
+@@ -184,6 +178,11 @@
+ samsung,pins = "gpx3-7";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ };
++
++ emmc_rstn: emmc-rstn {
++ samsung,pins = "gpk1-2";
++ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
++ };
+ };
+
+ &ehci {
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index 2f4f40882dab..27214e6ebe4f 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -334,7 +334,7 @@
+ buck8_reg: BUCK8 {
+ regulator-name = "vdd_1.8v_ldo";
+ regulator-min-microvolt = <800000>;
+- regulator-max-microvolt = <1500000>;
++ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 844caa39364f..50083cecc6c9 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -462,7 +462,7 @@
+ };
+
+ gpt: gpt@2098000 {
+- compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
++ compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
+ reg = <0x02098000 0x4000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_GPT_BUS>,
+diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
+index 0d9faf1a51ea..a86b89086334 100644
+--- a/arch/arm/boot/dts/meson.dtsi
++++ b/arch/arm/boot/dts/meson.dtsi
+@@ -263,7 +263,7 @@
+ compatible = "amlogic,meson6-dwmac", "snps,dwmac";
+ reg = <0xc9410000 0x10000
+ 0xc1108108 0x4>;
+- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
++ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
+index ef3177d3da3d..8fdeeffecbdb 100644
+--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
+@@ -125,7 +125,6 @@
+ /* Realtek RTL8211F (0x001cc916) */
+ eth_phy: ethernet-phy@0 {
+ reg = <0>;
+- eee-broken-1000t;
+ interrupt-parent = <&gpio_intc>;
+ /* GPIOH_3 */
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+@@ -172,8 +171,7 @@
+ cap-sd-highspeed;
+ disable-wp;
+
+- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+- cd-inverted;
++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&tflash_vdd>;
+ vqmmc-supply = <&tf_io>;
+diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+index f5853610b20b..6ac02beb5fa7 100644
+--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
++++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+@@ -206,8 +206,7 @@
+ cap-sd-highspeed;
+ disable-wp;
+
+- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+- cd-inverted;
++ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
+
+ vmmc-supply = <&vcc_3v3>;
+ };
+diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+index ddc7a7bb33c0..f57acf8f66b9 100644
+--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
++++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+@@ -105,7 +105,7 @@
+ interrupts-extended = <
+ &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
+ &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
+- &cpcap 48 1
++ &cpcap 48 0
+ >;
+ interrupt-names =
+ "id_ground", "id_float", "se0conn", "vbusvld",
+diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+index 0d9b85317529..e142e6c70a59 100644
+--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
++++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
+@@ -370,6 +370,19 @@
+ compatible = "ti,omap2-onenand";
+ reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
+
++ /*
++ * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
++ * bootloader set values when booted with v4.19 using both N950
++ * and N9 devices (OneNAND Manufacturer: Samsung):
++ *
++ * gpmc cs0 before gpmc_cs_program_settings:
++ * cs0 GPMC_CS_CONFIG1: 0xfd001202
++ * cs0 GPMC_CS_CONFIG2: 0x00181800
++ * cs0 GPMC_CS_CONFIG3: 0x00030300
++ * cs0 GPMC_CS_CONFIG4: 0x18001804
++ * cs0 GPMC_CS_CONFIG5: 0x03171d1d
++ * cs0 GPMC_CS_CONFIG6: 0x97080000
++ */
+ gpmc,sync-read;
+ gpmc,sync-write;
+ gpmc,burst-length = <16>;
+@@ -379,26 +392,27 @@
+ gpmc,device-width = <2>;
+ gpmc,mux-add-data = <2>;
+ gpmc,cs-on-ns = <0>;
+- gpmc,cs-rd-off-ns = <87>;
+- gpmc,cs-wr-off-ns = <87>;
++ gpmc,cs-rd-off-ns = <122>;
++ gpmc,cs-wr-off-ns = <122>;
+ gpmc,adv-on-ns = <0>;
+- gpmc,adv-rd-off-ns = <10>;
+- gpmc,adv-wr-off-ns = <10>;
+- gpmc,oe-on-ns = <15>;
+- gpmc,oe-off-ns = <87>;
++ gpmc,adv-rd-off-ns = <15>;
++ gpmc,adv-wr-off-ns = <15>;
++ gpmc,oe-on-ns = <20>;
++ gpmc,oe-off-ns = <122>;
+ gpmc,we-on-ns = <0>;
+- gpmc,we-off-ns = <87>;
+- gpmc,rd-cycle-ns = <112>;
+- gpmc,wr-cycle-ns = <112>;
+- gpmc,access-ns = <81>;
++ gpmc,we-off-ns = <122>;
++ gpmc,rd-cycle-ns = <148>;
++ gpmc,wr-cycle-ns = <148>;
++ gpmc,access-ns = <117>;
+ gpmc,page-burst-access-ns = <15>;
+ gpmc,bus-turnaround-ns = <0>;
+ gpmc,cycle2cycle-delay-ns = <0>;
+ gpmc,wait-monitoring-ns = <0>;
+- gpmc,clk-activation-ns = <5>;
+- gpmc,wr-data-mux-bus-ns = <30>;
+- gpmc,wr-access-ns = <81>;
+- gpmc,sync-clk-ps = <15000>;
++ gpmc,clk-activation-ns = <10>;
++ gpmc,wr-data-mux-bus-ns = <40>;
++ gpmc,wr-access-ns = <117>;
++
++ gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
+
+ /*
+ * MTD partition table corresponding to Nokia's MeeGo 1.2
+diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+index 5d23667dc2d2..25540b7694d5 100644
+--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+@@ -53,7 +53,7 @@
+
+ aliases {
+ serial0 = &uart0;
+- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
++ ethernet0 = &emac;
+ ethernet1 = &sdiowifi;
+ };
+
+diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
+index ed36dcab80f1..f51919974183 100644
+--- a/arch/arm/plat-pxa/ssp.c
++++ b/arch/arm/plat-pxa/ssp.c
+@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+ if (ssp == NULL)
+ return -ENODEV;
+
+- iounmap(ssp->mmio_base);
+-
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+
+- kfree(ssp);
+ return 0;
+ }
+
+diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+index f4964bee6a1a..e80a792827ed 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+@@ -118,6 +118,7 @@
+ reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ clocks = <&pmic>;
+ clock-names = "ext_clock";
++ post-power-on-delay-ms = <10>;
+ power-off-delay-us = <10>;
+ };
+
+@@ -300,7 +301,6 @@
+
+ dwmmc_0: dwmmc0@f723d000 {
+ cap-mmc-highspeed;
+- mmc-hs200-1_8v;
+ non-removable;
+ bus-width = <0x8>;
+ vmmc-supply = <&ldo19>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index cd3865e7a270..8c86c41a0d25 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -399,7 +399,7 @@
+ };
+
+ intc: interrupt-controller@9bc0000 {
+- compatible = "arm,gic-v3";
++ compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+index cbd35c00b4af..33cb0281c39c 100644
+--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+@@ -1161,6 +1161,9 @@
+ <&cpg CPG_CORE R8A7796_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
++ <&dmac2 0x13>, <&dmac2 0x12>;
++ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+index 0cd44461a0bd..f60f08ba1a6f 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+@@ -951,6 +951,9 @@
+ <&cpg CPG_CORE R8A77965_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
++ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
++ <&dmac2 0x13>, <&dmac2 0x12>;
++ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+index eb5e8bddb610..8954c8c6f547 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+@@ -101,6 +101,7 @@
+ sdio_pwrseq: sdio_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
++ post-power-on-delay-ms = <10>;
+ };
+ };
+
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index b5a367d4bba6..30bb13797034 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
+ addr < (unsigned long)__entry_text_end) ||
+ (addr >= (unsigned long)__idmap_text_start &&
+ addr < (unsigned long)__idmap_text_end) ||
++ (addr >= (unsigned long)__hyp_text_start &&
++ addr < (unsigned long)__hyp_text_end) ||
+ !!search_exception_tables(addr))
+ return true;
+
+ if (!is_kernel_in_hyp_mode()) {
+- if ((addr >= (unsigned long)__hyp_text_start &&
+- addr < (unsigned long)__hyp_text_end) ||
+- (addr >= (unsigned long)__hyp_idmap_text_start &&
++ if ((addr >= (unsigned long)__hyp_idmap_text_start &&
+ addr < (unsigned long)__hyp_idmap_text_end))
+ return true;
+ }
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index 50cff3cbcc6d..4f7b1fa31cf5 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -76,7 +76,7 @@
+ status = "okay";
+
+ pinctrl-names = "default";
+- pinctrl-0 = <&pins_uart2>;
++ pinctrl-0 = <&pins_uart3>;
+ };
+
+ &uart4 {
+@@ -196,9 +196,9 @@
+ bias-disable;
+ };
+
+- pins_uart2: uart2 {
+- function = "uart2";
+- groups = "uart2-data", "uart2-hwflow";
++ pins_uart3: uart3 {
++ function = "uart3";
++ groups = "uart3-data", "uart3-hwflow";
+ bias-disable;
+ };
+
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index d4f7fd4550e1..85522c137f19 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
+ static int get_frame_info(struct mips_frame_info *info)
+ {
+ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+- union mips_instruction insn, *ip, *ip_end;
++ union mips_instruction insn, *ip;
+ const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
+ unsigned int i;
+@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
+ if (!ip)
+ goto err;
+
+- ip_end = (void *)ip + info->func_size;
+-
+- for (i = 0; i < max_insns && ip < ip_end; i++) {
++ for (i = 0; i < max_insns; i++) {
+ ip = (void *)ip + last_insn_size;
++
+ if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+ insn.word = ip->halfword[0] << 16;
+ last_insn_size = 2;
+diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
+index 3fe4af8147d2..c23578a37b44 100644
+--- a/arch/riscv/include/asm/processor.h
++++ b/arch/riscv/include/asm/processor.h
+@@ -22,7 +22,7 @@
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+ #define STACK_TOP TASK_SIZE
+ #define STACK_TOP_MAX STACK_TOP
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index b2d26d9d8489..9713d4e8c22b 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
+ BUG_ON(mem_size == 0);
+
+ set_max_mapnr(PFN_DOWN(mem_size));
+- max_low_pfn = memblock_end_of_DRAM();
++ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 58a522f9bcc3..200a4b315e15 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -29,7 +29,8 @@ static void __init zone_sizes_init(void)
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
+
+ #ifdef CONFIG_ZONE_DMA32
+- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
++ (unsigned long) PFN_PHYS(max_low_pfn)));
+ #endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 64037895b085..f105ae8651c9 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
+ leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
+ movl %eax, %cr3
+ 3:
++ /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
++ pushl %ecx
++ movl $MSR_EFER, %ecx
++ rdmsr
++ btsl $_EFER_LME, %eax
++ wrmsr
++ popl %ecx
++
+ /* Enable PAE and LA57 (if required) paging modes */
+ movl $X86_CR4_PAE, %eax
+ cmpl $0, %edx
+diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
+index 91f75638f6e6..6ff7e81b5628 100644
+--- a/arch/x86/boot/compressed/pgtable.h
++++ b/arch/x86/boot/compressed/pgtable.h
+@@ -6,7 +6,7 @@
+ #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
+
+ #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
+-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
++#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
+
+ #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index c04a8813cff9..a41554350893 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1970,7 +1970,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
+ */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+- kfree(cpuc->shared_regs);
++ intel_cpuc_finish(cpuc);
+ kfree(cpuc);
+ }
+
+@@ -1982,14 +1982,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
+ cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+ if (!cpuc)
+ return ERR_PTR(-ENOMEM);
+-
+- /* only needed, if we have extra_regs */
+- if (x86_pmu.extra_regs) {
+- cpuc->shared_regs = allocate_shared_regs(cpu);
+- if (!cpuc->shared_regs)
+- goto error;
+- }
+ cpuc->is_fake = 1;
++
++ if (intel_cpuc_prepare(cpuc, cpu))
++ goto error;
++
+ return cpuc;
+ error:
+ free_fake_cpuc(cpuc);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index fbd7551a8d44..220b40b75e6f 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int added)
+ intel_pmu_enable_all(added);
+ }
+
++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
++{
++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
++
++ if (cpuc->tfa_shadow != val) {
++ cpuc->tfa_shadow = val;
++ wrmsrl(MSR_TSX_FORCE_ABORT, val);
++ }
++}
++
++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
++{
++ /*
++ * We're going to use PMC3, make sure TFA is set before we touch it.
++ */
++ if (cntr == 3 && !cpuc->is_fake)
++ intel_set_tfa(cpuc, true);
++}
++
++static void intel_tfa_pmu_enable_all(int added)
++{
++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++
++ /*
++ * If we find PMC3 is no longer used when we enable the PMU, we can
++ * clear TFA.
++ */
++ if (!test_bit(3, cpuc->active_mask))
++ intel_set_tfa(cpuc, false);
++
++ intel_pmu_enable_all(added);
++}
++
+ static inline u64 intel_pmu_get_status(void)
+ {
+ u64 status;
+@@ -2652,6 +2685,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+ raw_spin_unlock(&excl_cntrs->lock);
+ }
+
++static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++ WARN_ON_ONCE(!cpuc->constraint_list);
++
++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++ struct event_constraint *cx;
++
++ /*
++ * grab pre-allocated constraint entry
++ */
++ cx = &cpuc->constraint_list[idx];
++
++ /*
++ * initialize dynamic constraint
++ * with static constraint
++ */
++ *cx = *c;
++
++ /*
++ * mark constraint as dynamic
++ */
++ cx->flags |= PERF_X86_EVENT_DYNAMIC;
++ c = cx;
++ }
++
++ return c;
++}
++
+ static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ int idx, struct event_constraint *c)
+@@ -2682,27 +2744,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+ * only needed when constraint has not yet
+ * been cloned (marked dynamic)
+ */
+- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+- struct event_constraint *cx;
+-
+- /*
+- * grab pre-allocated constraint entry
+- */
+- cx = &cpuc->constraint_list[idx];
+-
+- /*
+- * initialize dynamic constraint
+- * with static constraint
+- */
+- *cx = *c;
+-
+- /*
+- * mark constraint as dynamic, so we
+- * can free it later on
+- */
+- cx->flags |= PERF_X86_EVENT_DYNAMIC;
+- c = cx;
+- }
++ c = dyn_constraint(cpuc, c, idx);
+
+ /*
+ * From here on, the constraint is dynamic.
+@@ -3229,6 +3271,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ return c;
+ }
+
++static bool allow_tsx_force_abort = true;
++
++static struct event_constraint *
++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
++ struct perf_event *event)
++{
++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
++
++ /*
++ * Without TFA we must not use PMC3.
++ */
++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
++ c = dyn_constraint(cpuc, c, idx);
++ c->idxmsk64 &= ~(1ULL << 3);
++ c->weight--;
++ }
++
++ return c;
++}
++
+ /*
+ * Broadwell:
+ *
+@@ -3282,7 +3344,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
+ return x86_event_sysfs_show(page, config, event);
+ }
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+ struct intel_shared_regs *regs;
+ int i;
+@@ -3314,23 +3376,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+ return c;
+ }
+
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ cpuc->shared_regs = allocate_shared_regs(cpu);
+ if (!cpuc->shared_regs)
+ goto err;
+ }
+
+- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
+ size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+
+- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!cpuc->constraint_list)
+ goto err_shared_regs;
++ }
+
++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+@@ -3352,6 +3415,11 @@ err:
+ return -ENOMEM;
+ }
+
++static int intel_pmu_cpu_prepare(int cpu)
++{
++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void flip_smm_bit(void *data)
+ {
+ unsigned long set = *(unsigned long *)data;
+@@ -3423,9 +3491,8 @@ static void intel_pmu_cpu_starting(int cpu)
+ }
+ }
+
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_excl_cntrs *c;
+
+ c = cpuc->excl_cntrs;
+@@ -3433,9 +3500,10 @@ static void free_excl_cntrs(int cpu)
+ if (c->core_id == -1 || --c->refcnt == 0)
+ kfree(c);
+ cpuc->excl_cntrs = NULL;
+- kfree(cpuc->constraint_list);
+- cpuc->constraint_list = NULL;
+ }
++
++ kfree(cpuc->constraint_list);
++ cpuc->constraint_list = NULL;
+ }
+
+ static void intel_pmu_cpu_dying(int cpu)
+@@ -3443,9 +3511,8 @@ static void intel_pmu_cpu_dying(int cpu)
+ fini_debug_store_on_cpu(cpu);
+ }
+
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct intel_shared_regs *pc;
+
+ pc = cpuc->shared_regs;
+@@ -3455,7 +3522,12 @@ static void intel_pmu_cpu_dead(int cpu)
+ cpuc->shared_regs = NULL;
+ }
+
+- free_excl_cntrs(cpu);
++ free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -3917,8 +3989,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
+ NULL
+ };
+
++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
++
+ static struct attribute *intel_pmu_attrs[] = {
+ &dev_attr_freeze_on_smi.attr,
++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
+ NULL,
+ };
+
+@@ -4374,6 +4449,15 @@ __init int intel_pmu_init(void)
+ x86_pmu.cpu_events = get_hsw_events_attrs();
+ intel_pmu_pebs_data_source_skl(
+ boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
++
++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
++ x86_pmu.flags |= PMU_FL_TFA;
++ x86_pmu.get_event_constraints = tfa_get_event_constraints;
++ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
++ }
++
+ pr_cont("Skylake events, ");
+ name = "skylake";
+ break;
+@@ -4515,7 +4599,7 @@ static __init int fixup_ht_bug(void)
+ hardlockup_detector_perf_restart();
+
+ for_each_online_cpu(c)
+- free_excl_cntrs(c);
++ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+
+ cpus_read_unlock();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 0ee3a441ad79..5c424009b71f 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -242,6 +242,11 @@ struct cpu_hw_events {
+ struct intel_excl_cntrs *excl_cntrs;
+ int excl_thread_id; /* 0 or 1 */
+
++ /*
++ * SKL TSX_FORCE_ABORT shadow
++ */
++ u64 tfa_shadow;
++
+ /*
+ * AMD specific bits
+ */
+@@ -679,6 +684,7 @@ do { \
+ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
+ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
+ #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
+
+ #define EVENT_VAR(_id) event_attr_##_id
+ #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+@@ -887,7 +893,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event);
+
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+
+ int intel_pmu_init(void);
+
+@@ -1023,9 +1030,13 @@ static inline int intel_pmu_init(void)
+ return 0;
+ }
+
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++{
++ return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+ {
+- return NULL;
+ }
+
+ static inline int is_ht_workaround_enabled(void)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 89a048c2faec..7b31ee5223fc 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -340,6 +340,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 1f9de7635bcb..f14ca0be1e3f 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -629,6 +629,12 @@
+
+ #define MSR_IA32_TSC_DEADLINE 0x000006E0
+
++
++#define MSR_TSX_FORCE_ABORT 0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX 0x00000180
+ #define MSR_IA32_MCG_EBX 0x00000181
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index b99d497e342d..0b6352aabbd3 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -7,7 +7,11 @@
+ #endif
+
+ #ifdef CONFIG_KASAN
++#ifdef CONFIG_KASAN_EXTRA
++#define KASAN_STACK_ORDER 2
++#else
+ #define KASAN_STACK_ORDER 1
++#endif
+ #else
+ #define KASAN_STACK_ORDER 0
+ #endif
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 07b5fc00b188..a4e7e100ed26 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ if (!p) {
+ return ret;
+ } else {
+- if (boot_cpu_data.microcode == p->patch_id)
++ if (boot_cpu_data.microcode >= p->patch_id)
+ return ret;
+
+ ret = UCODE_NEW;
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index 278cd07228dd..9490a2845f14 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
+ struct efi_info *current_ei = &boot_params.efi_info;
+ struct efi_info *ei = &params->efi_info;
+
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
++ return 0;
++
+ if (!current_ei->efi_memmap_size)
+ return 0;
+
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 13f4485ca388..bd372e896557 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -641,6 +641,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
+
++static void quirk_intel_th_dnv(struct pci_dev *dev)
++{
++ struct resource *r = &dev->resource[4];
++
++ /*
++ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
++ * appears to be 4 MB in reality.
++ */
++ if (r->end == r->start + 0x7ff) {
++ r->start = 0;
++ r->end = 0x3fffff;
++ r->flags |= IORESOURCE_UNSET;
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
++
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+
+ #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
+diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
+index 11fed6c06a7c..b5938160fb3d 100644
+--- a/arch/xtensa/configs/smp_lx200_defconfig
++++ b/arch/xtensa/configs/smp_lx200_defconfig
+@@ -33,6 +33,7 @@ CONFIG_SMP=y
+ CONFIG_HOTPLUG_CPU=y
+ # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+ # CONFIG_PCI is not set
++CONFIG_VECTORS_OFFSET=0x00002000
+ CONFIG_XTENSA_PLATFORM_XTFPGA=y
+ CONFIG_CMDLINE_BOOL=y
+ CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 9053a5622d2c..5bd38ea2da38 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -280,12 +280,13 @@ should_never_return:
+
+ movi a2, cpu_start_ccount
+ 1:
++ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ movi a3, 0
+ s32i a3, a2, 0
+- memw
+ 1:
++ memw
+ l32i a3, a2, 0
+ beqi a3, 0, 1b
+ wsr a3, ccount
+@@ -321,11 +322,13 @@ ENTRY(cpu_restart)
+ rsr a0, prid
+ neg a2, a0
+ movi a3, cpu_start_id
++ memw
+ s32i a2, a3, 0
+ #if XCHAL_DCACHE_IS_WRITEBACK
+ dhwbi a3, 0
+ #endif
+ 1:
++ memw
+ l32i a2, a3, 0
+ dhi a3, 0
+ bne a2, a0, 1b
+diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
+index 932d64689bac..be1f280c322c 100644
+--- a/arch/xtensa/kernel/smp.c
++++ b/arch/xtensa/kernel/smp.c
+@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ {
+ unsigned i;
+
+- for (i = 0; i < max_cpus; ++i)
++ for_each_possible_cpu(i)
+ set_cpu_present(i, true);
+ }
+
+@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
+ pr_info("%s: Core Count = %d\n", __func__, ncpus);
+ pr_info("%s: Core Id = %d\n", __func__, core_id);
+
++ if (ncpus > NR_CPUS) {
++ ncpus = NR_CPUS;
++ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
++ }
++
+ for (i = 0; i < ncpus; ++i)
+ set_cpu_possible(i, true);
+ }
+@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+ int i;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+- cpu_start_id = cpu;
+- system_flush_invalidate_dcache_range(
+- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
++ WRITE_ONCE(cpu_start_id, cpu);
++ /* Pairs with the third memw in the cpu_restart */
++ mb();
++ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
++ sizeof(cpu_start_id));
+ #endif
+ smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
+
+@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
+ ccount = get_ccount();
+ while (!ccount);
+
+- cpu_start_ccount = ccount;
++ WRITE_ONCE(cpu_start_ccount, ccount);
+
+- while (time_before(jiffies, timeout)) {
++ do {
++ /*
++ * Pairs with the first two memws in the
++ * .Lboot_secondary.
++ */
+ mb();
+- if (!cpu_start_ccount)
+- break;
+- }
++ ccount = READ_ONCE(cpu_start_ccount);
++ } while (ccount && time_before(jiffies, timeout));
+
+- if (cpu_start_ccount) {
++ if (ccount) {
+ smp_call_function_single(0, mx_cpu_stop,
+- (void *)cpu, 1);
+- cpu_start_ccount = 0;
++ (void *)cpu, 1);
++ WRITE_ONCE(cpu_start_ccount, 0);
+ return -EIO;
+ }
+ }
+@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
+ __func__, cpu, idle, start_info.stack);
+
++ init_completion(&cpu_running);
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ wait_for_completion_timeout(&cpu_running,
+@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ while (time_before(jiffies, timeout)) {
+ system_invalidate_dcache_range((unsigned long)&cpu_start_id,
+- sizeof(cpu_start_id));
+- if (cpu_start_id == -cpu) {
++ sizeof(cpu_start_id));
++ /* Pairs with the second memw in the cpu_restart */
++ mb();
++ if (READ_ONCE(cpu_start_id) == -cpu) {
+ platform_cpu_kill(cpu);
+ return;
+ }
+diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
+index fd524a54d2ab..378186b5eb40 100644
+--- a/arch/xtensa/kernel/time.c
++++ b/arch/xtensa/kernel/time.c
+@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
+ container_of(evt, struct ccount_timer, evt);
+
+ if (timer->irq_enabled) {
+- disable_irq(evt->irq);
++ disable_irq_nosync(evt->irq);
+ timer->irq_enabled = 0;
+ }
+ return 0;
+diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
+index 19923f8a029d..b154e057ca67 100644
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -72,6 +72,7 @@
+ #include <linux/sched/loadavg.h>
+ #include <linux/sched/signal.h>
+ #include <trace/events/block.h>
++#include <linux/blk-mq.h>
+ #include "blk-rq-qos.h"
+ #include "blk-stat.h"
+
+@@ -568,6 +569,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+ return;
+
+ enabled = blk_iolatency_enabled(iolat->blkiolat);
++ if (!enabled)
++ return;
++
+ while (blkg && blkg->parent) {
+ iolat = blkg_to_lat(blkg);
+ if (!iolat) {
+@@ -577,7 +581,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+ rqw = &iolat->rq_wait;
+
+ atomic_dec(&rqw->inflight);
+- if (!enabled || iolat->min_lat_nsec == 0)
++ if (iolat->min_lat_nsec == 0)
+ goto next;
+ iolatency_record_time(iolat, &bio->bi_issue, now,
+ issue_as_root);
+@@ -721,10 +725,13 @@ int blk_iolatency_init(struct request_queue *q)
+ return 0;
+ }
+
+-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
++/*
++ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
++ * return 0.
++ */
++static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+ {
+ struct iolatency_grp *iolat = blkg_to_lat(blkg);
+- struct blk_iolatency *blkiolat = iolat->blkiolat;
+ u64 oldval = iolat->min_lat_nsec;
+
+ iolat->min_lat_nsec = val;
+@@ -733,9 +740,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+ BLKIOLATENCY_MAX_WIN_SIZE);
+
+ if (!oldval && val)
+- atomic_inc(&blkiolat->enabled);
++ return 1;
+ if (oldval && !val)
+- atomic_dec(&blkiolat->enabled);
++ return -1;
++ return 0;
+ }
+
+ static void iolatency_clear_scaling(struct blkcg_gq *blkg)
+@@ -768,6 +776,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ u64 lat_val = 0;
+ u64 oldval;
+ int ret;
++ int enable = 0;
+
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
+ if (ret)
+@@ -803,7 +812,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ blkg = ctx.blkg;
+ oldval = iolat->min_lat_nsec;
+
+- iolatency_set_min_lat_nsec(blkg, lat_val);
++ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
++ if (enable) {
++ WARN_ON_ONCE(!blk_get_queue(blkg->q));
++ blkg_get(blkg);
++ }
++
+ if (oldval != iolat->min_lat_nsec) {
+ iolatency_clear_scaling(blkg);
+ }
+@@ -811,6 +825,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
+ ret = 0;
+ out:
+ blkg_conf_finish(&ctx);
++ if (ret == 0 && enable) {
++ struct iolatency_grp *tmp = blkg_to_lat(blkg);
++ struct blk_iolatency *blkiolat = tmp->blkiolat;
++
++ blk_mq_freeze_queue(blkg->q);
++
++ if (enable == 1)
++ atomic_inc(&blkiolat->enabled);
++ else if (enable == -1)
++ atomic_dec(&blkiolat->enabled);
++ else
++ WARN_ON_ONCE(1);
++
++ blk_mq_unfreeze_queue(blkg->q);
++
++ blkg_put(blkg);
++ blk_put_queue(blkg->q);
++ }
+ return ret ?: nbytes;
+ }
+
+@@ -910,8 +942,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
+ {
+ struct iolatency_grp *iolat = pd_to_lat(pd);
+ struct blkcg_gq *blkg = lat_to_blkg(iolat);
++ struct blk_iolatency *blkiolat = iolat->blkiolat;
++ int ret;
+
+- iolatency_set_min_lat_nsec(blkg, 0);
++ ret = iolatency_set_min_lat_nsec(blkg, 0);
++ if (ret == 1)
++ atomic_inc(&blkiolat->enabled);
++ if (ret == -1)
++ atomic_dec(&blkiolat->enabled);
+ iolatency_clear_scaling(blkg);
+ }
+
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 7caa1adaf62a..f5b74856784a 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -963,9 +963,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
+ drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
+- dma_deconfigure(dev);
+
+ devres_release_all(dev);
++ dma_deconfigure(dev);
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index fa1a196350f1..3bf11a620094 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -131,8 +131,8 @@ static const char * const gcc_parent_names_6[] = {
+ "core_bi_pll_test_se",
+ };
+
+-static const char * const gcc_parent_names_7[] = {
+- "bi_tcxo",
++static const char * const gcc_parent_names_7_ao[] = {
++ "bi_tcxo_ao",
+ "gpll0",
+ "gpll0_out_even",
+ "core_bi_pll_test_se",
+ };
+
+@@ -144,6 +144,12 @@ static const char * const gcc_parent_names_8[] = {
+ "core_bi_pll_test_se",
+ };
+
++static const char * const gcc_parent_names_8_ao[] = {
++ "bi_tcxo_ao",
++ "gpll0",
++ "core_bi_pll_test_se",
++};
++
+ static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+@@ -226,7 +232,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+- .parent_names = gcc_parent_names_7,
++ .parent_names = gcc_parent_names_7_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+@@ -245,7 +251,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+- .parent_names = gcc_parent_names_8,
++ .parent_names = gcc_parent_names_8_ao,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
+index ccfb4d9a152a..079f0beda8b6 100644
+--- a/drivers/clk/ti/divider.c
++++ b/drivers/clk/ti/divider.c
+@@ -367,8 +367,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
+ num_dividers = i;
+
+ tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
+- if (!tmp)
++ if (!tmp) {
++ *table = ERR_PTR(-ENOMEM);
+ return -ENOMEM;
++ }
+
+ valid_div = 0;
+ *width = 0;
+@@ -403,6 +405,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+ {
+ struct clk_omap_divider *div;
+ struct clk_omap_reg *reg;
++ int ret;
+
+ if (!setup)
+ return NULL;
+@@ -422,6 +425,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+ div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+ div->table = _get_div_table_from_setup(setup, &div->width);
++ if (IS_ERR(div->table)) {
++ ret = PTR_ERR(div->table);
++ kfree(div);
++ return ERR_PTR(ret);
++ }
++
+
+ div->shift = setup->bit_shift;
+ div->latch = -EINVAL;
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 4bf72561667c..a75b95fac3bd 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -203,6 +203,7 @@ struct at_xdmac_chan {
+ u32 save_cim;
+ u32 save_cnda;
+ u32 save_cndc;
++ u32 irq_status;
+ unsigned long status;
+ struct tasklet_struct tasklet;
+ struct dma_slave_config sconfig;
+@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
+ struct at_xdmac_desc *desc;
+ u32 error_mask;
+
+- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
+- __func__, atchan->status);
++ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
++ __func__, atchan->irq_status);
+
+ error_mask = AT_XDMAC_CIS_RBEIS
+ | AT_XDMAC_CIS_WBEIS
+@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
+
+ if (at_xdmac_chan_is_cyclic(atchan)) {
+ at_xdmac_handle_cyclic(atchan);
+- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
+- || (atchan->status & error_mask)) {
++ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
++ || (atchan->irq_status & error_mask)) {
+ struct dma_async_tx_descriptor *txd;
+
+- if (atchan->status & AT_XDMAC_CIS_RBEIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+- if (atchan->status & AT_XDMAC_CIS_WBEIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+- if (atchan->status & AT_XDMAC_CIS_ROIS)
++ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock_bh(&atchan->lock);
+@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+ atchan = &atxdmac->chan[i];
+ chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
+ chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
+- atchan->status = chan_status & chan_imr;
++ atchan->irq_status = chan_status & chan_imr;
+ dev_vdbg(atxdmac->dma.dev,
+ "%s: chan%d: imr=0x%x, status=0x%x\n",
+ __func__, i, chan_imr, chan_status);
+@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
+ at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
+ at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
+
+- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
++ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+
+ tasklet_schedule(&atchan->tasklet);
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index aa1712beb0cc..7b7fba0c9253 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -642,11 +642,9 @@ static int dmatest_func(void *data)
+ srcs[i] = um->addr[i] + src_off;
+ ret = dma_mapping_error(dev->dev, um->addr[i]);
+ if (ret) {
+- dmaengine_unmap_put(um);
+ result("src mapping error", total_tests,
+ src_off, dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ um->to_cnt++;
+ }
+@@ -661,11 +659,9 @@ static int dmatest_func(void *data)
+ DMA_BIDIRECTIONAL);
+ ret = dma_mapping_error(dev->dev, dsts[i]);
+ if (ret) {
+- dmaengine_unmap_put(um);
+ result("dst mapping error", total_tests,
+ src_off, dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ um->bidi_cnt++;
+ }
+@@ -693,12 +689,10 @@ static int dmatest_func(void *data)
+ }
+
+ if (!tx) {
+- dmaengine_unmap_put(um);
+ result("prep error", total_tests, src_off,
+ dst_off, len, ret);
+ msleep(100);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+
+ done->done = false;
+@@ -707,12 +701,10 @@ static int dmatest_func(void *data)
+ cookie = tx->tx_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+- dmaengine_unmap_put(um);
+ result("submit error", total_tests, src_off,
+ dst_off, len, ret);
+ msleep(100);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+ dma_async_issue_pending(chan);
+
+@@ -725,16 +717,14 @@ static int dmatest_func(void *data)
+ dmaengine_unmap_put(um);
+ result("test timed out", total_tests, src_off, dst_off,
+ len, 0);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ } else if (status != DMA_COMPLETE) {
+ dmaengine_unmap_put(um);
+ result(status == DMA_ERROR ?
+ "completion error status" :
+ "completion busy status", total_tests, src_off,
+ dst_off, len, ret);
+- failed_tests++;
+- continue;
++ goto error_unmap_continue;
+ }
+
+ dmaengine_unmap_put(um);
+@@ -779,6 +769,12 @@ static int dmatest_func(void *data)
+ verbose_result("test passed", total_tests, src_off,
+ dst_off, len, 0);
+ }
++
++ continue;
++
++error_unmap_continue:
++ dmaengine_unmap_put(um);
++ failed_tests++;
+ }
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 6bc8e6640d71..c51462f5aa1e 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
+ case ISCSI_BOOT_TGT_NIC_ASSOC:
+ case ISCSI_BOOT_TGT_CHAP_TYPE:
+ rc = S_IRUGO;
++ break;
+ case ISCSI_BOOT_TGT_NAME:
+ if (tgt->tgt_name_len)
+ rc = S_IRUGO;
1497 +diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
1498 +index d4ad6d0e02a2..7e09ce75ffb2 100644
1499 +--- a/drivers/gpio/gpio-vf610.c
1500 ++++ b/drivers/gpio/gpio-vf610.c
1501 +@@ -259,6 +259,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1502 + struct vf610_gpio_port *port;
1503 + struct resource *iores;
1504 + struct gpio_chip *gc;
1505 ++ int i;
1506 + int ret;
1507 +
1508 + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
1509 +@@ -298,6 +299,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1510 + if (ret < 0)
1511 + return ret;
1512 +
1513 ++ /* Mask all GPIO interrupts */
1514 ++ for (i = 0; i < gc->ngpio; i++)
1515 ++ vf610_gpio_writel(0, port->base + PORT_PCR(i));
1516 ++
1517 + /* Clear the interrupt status register for all GPIO's */
1518 + vf610_gpio_writel(~0, port->base + PORT_ISFR);
1519 +
1520 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1521 +index 7b4e657a95c7..c3df75a9f65d 100644
1522 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1523 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1524 +@@ -1443,7 +1443,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1525 + effective_mode &= ~S_IWUSR;
1526 +
1527 + if ((adev->flags & AMD_IS_APU) &&
1528 +- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1529 ++ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1530 ++ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1531 + attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1532 + attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1533 + return 0;
1534 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1535 +index 1c5d97f4b4dd..8dcf6227ab99 100644
1536 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1537 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1538 +@@ -37,6 +37,7 @@
1539 + #include "amdgpu_display.h"
1540 + #include <drm/amdgpu_drm.h>
1541 + #include <linux/dma-buf.h>
1542 ++#include <linux/dma-fence-array.h>
1543 +
1544 + static const struct dma_buf_ops amdgpu_dmabuf_ops;
1545 +
1546 +@@ -188,6 +189,48 @@ error:
1547 + return ERR_PTR(ret);
1548 + }
1549 +
1550 ++static int
1551 ++__reservation_object_make_exclusive(struct reservation_object *obj)
1552 ++{
1553 ++ struct dma_fence **fences;
1554 ++ unsigned int count;
1555 ++ int r;
1556 ++
1557 ++ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
1558 ++ return 0;
1559 ++
1560 ++ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
1561 ++ if (r)
1562 ++ return r;
1563 ++
1564 ++ if (count == 0) {
1565 ++ /* Now that was unexpected. */
1566 ++ } else if (count == 1) {
1567 ++ reservation_object_add_excl_fence(obj, fences[0]);
1568 ++ dma_fence_put(fences[0]);
1569 ++ kfree(fences);
1570 ++ } else {
1571 ++ struct dma_fence_array *array;
1572 ++
1573 ++ array = dma_fence_array_create(count, fences,
1574 ++ dma_fence_context_alloc(1), 0,
1575 ++ false);
1576 ++ if (!array)
1577 ++ goto err_fences_put;
1578 ++
1579 ++ reservation_object_add_excl_fence(obj, &array->base);
1580 ++ dma_fence_put(&array->base);
1581 ++ }
1582 ++
1583 ++ return 0;
1584 ++
1585 ++err_fences_put:
1586 ++ while (count--)
1587 ++ dma_fence_put(fences[count]);
1588 ++ kfree(fences);
1589 ++ return -ENOMEM;
1590 ++}
1591 ++
1592 + /**
1593 + * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
1594 + * @dma_buf: shared DMA buffer
1595 +@@ -219,16 +262,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
1596 +
1597 + if (attach->dev->driver != adev->dev->driver) {
1598 + /*
1599 +- * Wait for all shared fences to complete before we switch to future
1600 +- * use of exclusive fence on this prime shared bo.
1601 ++ * We only create shared fences for internal use, but importers
1602 ++ * of the dmabuf rely on exclusive fences for implicitly
1603 ++ * tracking write hazards. As any of the current fences may
1604 ++ * correspond to a write, we need to convert all existing
1605 ++ * fences on the reservation object into a single exclusive
1606 ++ * fence.
1607 + */
1608 +- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
1609 +- true, false,
1610 +- MAX_SCHEDULE_TIMEOUT);
1611 +- if (unlikely(r < 0)) {
1612 +- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
1613 ++ r = __reservation_object_make_exclusive(bo->tbo.resv);
1614 ++ if (r)
1615 + goto error_unreserve;
1616 +- }
1617 + }
1618 +
1619 + /* pin buffer into GTT */
1620 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1621 +index 6a84526e20e0..49fe5084c53d 100644
1622 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1623 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1624 +@@ -3011,14 +3011,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
1625 + struct amdgpu_task_info *task_info)
1626 + {
1627 + struct amdgpu_vm *vm;
1628 ++ unsigned long flags;
1629 +
1630 +- spin_lock(&adev->vm_manager.pasid_lock);
1631 ++ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
1632 +
1633 + vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
1634 + if (vm)
1635 + *task_info = vm->task_info;
1636 +
1637 +- spin_unlock(&adev->vm_manager.pasid_lock);
1638 ++ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
1639 + }
1640 +
1641 + /**
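
The amdgpu_vm hunk above converts the pasid_lock accessors to
spin_lock_irqsave(), the usual fix when a lock can also be taken in (or
race with) interrupt context. A minimal sketch of the pattern, with foo_*
as purely illustrative names:

#include <linux/idr.h>
#include <linux/spinlock.h>

struct foo_manager {
	spinlock_t lock;	/* may also be taken from an IRQ handler */
	struct idr objects;
};

/* Disable local interrupts while holding the lock; without irqsave, an
 * interrupt taking the same lock on this CPU would self-deadlock. */
static void *foo_lookup(struct foo_manager *mgr, u32 id)
{
	unsigned long flags;
	void *obj;

	spin_lock_irqsave(&mgr->lock, flags);
	obj = idr_find(&mgr->objects, id);
	spin_unlock_irqrestore(&mgr->lock, flags);

	return obj;
}
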
1642 +diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
1643 +index d587779a80b4..a97294ac96d5 100644
1644 +--- a/drivers/gpu/drm/radeon/ci_dpm.c
1645 ++++ b/drivers/gpu/drm/radeon/ci_dpm.c
1646 +@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
1647 + u16 data_offset, size;
1648 + u8 frev, crev;
1649 + struct ci_power_info *pi;
1650 +- enum pci_bus_speed speed_cap;
1651 ++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1652 + struct pci_dev *root = rdev->pdev->bus->self;
1653 + int ret;
1654 +
1655 +@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
1656 + return -ENOMEM;
1657 + rdev->pm.dpm.priv = pi;
1658 +
1659 +- speed_cap = pcie_get_speed_cap(root);
1660 ++ if (!pci_is_root_bus(rdev->pdev->bus))
1661 ++ speed_cap = pcie_get_speed_cap(root);
1662 + if (speed_cap == PCI_SPEED_UNKNOWN) {
1663 + pi->sys_pcie_mask = 0;
1664 + } else {
1665 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1666 +index 8fb60b3af015..0a785ef0ab66 100644
1667 +--- a/drivers/gpu/drm/radeon/si_dpm.c
1668 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
1669 +@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
1670 + struct ni_power_info *ni_pi;
1671 + struct si_power_info *si_pi;
1672 + struct atom_clock_dividers dividers;
1673 +- enum pci_bus_speed speed_cap;
1674 ++ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1675 + struct pci_dev *root = rdev->pdev->bus->self;
1676 + int ret;
1677 +
1678 +@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
1679 + eg_pi = &ni_pi->eg;
1680 + pi = &eg_pi->rv7xx;
1681 +
1682 +- speed_cap = pcie_get_speed_cap(root);
1683 ++ if (!pci_is_root_bus(rdev->pdev->bus))
1684 ++ speed_cap = pcie_get_speed_cap(root);
1685 + if (speed_cap == PCI_SPEED_UNKNOWN) {
1686 + si_pi->sys_pcie_mask = 0;
1687 + } else {
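
Both radeon hunks fix the same latent bug twice: speed_cap was left
uninitialized when the query could not run, and bus->self (the parent
bridge) is NULL for a device sitting directly on a root bus. A hedged
sketch of the resulting defensive pattern, foo_* being an illustrative
name:

#include <linux/pci.h>

/* Initialize to a sentinel so every skipped path falls back to the
 * "unknown" handling, and never dereference bus->self on a root bus. */
static enum pci_bus_speed foo_speed_cap(struct pci_dev *pdev)
{
	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;

	if (!pci_is_root_bus(pdev->bus))
		speed_cap = pcie_get_speed_cap(pdev->bus->self);

	return speed_cap;
}
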
1688 +diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1689 +index 3fb084f802e2..8c31c9ab06f8 100644
1690 +--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
1691 ++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1692 +@@ -672,6 +672,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1693 + return PTR_ERR(tcon->sclk0);
1694 + }
1695 + }
1696 ++ clk_prepare_enable(tcon->sclk0);
1697 +
1698 + if (tcon->quirks->has_channel_1) {
1699 + tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
1700 +@@ -686,6 +687,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1701 +
1702 + static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
1703 + {
1704 ++ clk_disable_unprepare(tcon->sclk0);
1705 + clk_disable_unprepare(tcon->clk);
1706 + }
1707 +
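
The sun4i_tcon hunk enables sclk0 during clock setup and disables it again
in the teardown helper. A condensed sketch of the balance rule it restores
(names hypothetical; unlike the hunk, the sketch also checks the
clk_prepare_enable() return values):

#include <linux/clk.h>

struct foo_clocks {
	struct clk *bus;
	struct clk *mod;
};

/* Every clock enabled in init must be disabled in teardown, in reverse
 * order; a failing enable unwinds the earlier ones. */
static int foo_clocks_init(struct foo_clocks *c)
{
	int ret;

	ret = clk_prepare_enable(c->bus);
	if (ret)
		return ret;

	ret = clk_prepare_enable(c->mod);
	if (ret) {
		clk_disable_unprepare(c->bus);
		return ret;
	}

	return 0;
}

static void foo_clocks_free(struct foo_clocks *c)
{
	clk_disable_unprepare(c->mod);
	clk_disable_unprepare(c->bus);
}
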
1708 +diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1709 +index 65d06a819307..2ac86096ddd9 100644
1710 +--- a/drivers/i2c/busses/i2c-omap.c
1711 ++++ b/drivers/i2c/busses/i2c-omap.c
1712 +@@ -1498,8 +1498,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1713 + return 0;
1714 + }
1715 +
1716 +-#ifdef CONFIG_PM
1717 +-static int omap_i2c_runtime_suspend(struct device *dev)
1718 ++static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
1719 + {
1720 + struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1721 +
1722 +@@ -1525,7 +1524,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1723 + return 0;
1724 + }
1725 +
1726 +-static int omap_i2c_runtime_resume(struct device *dev)
1727 ++static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
1728 + {
1729 + struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1730 +
1731 +@@ -1540,20 +1539,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
1732 + }
1733 +
1734 + static const struct dev_pm_ops omap_i2c_pm_ops = {
1735 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1736 ++ pm_runtime_force_resume)
1737 + SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
1738 + omap_i2c_runtime_resume, NULL)
1739 + };
1740 +-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1741 +-#else
1742 +-#define OMAP_I2C_PM_OPS NULL
1743 +-#endif /* CONFIG_PM */
1744 +
1745 + static struct platform_driver omap_i2c_driver = {
1746 + .probe = omap_i2c_probe,
1747 + .remove = omap_i2c_remove,
1748 + .driver = {
1749 + .name = "omap_i2c",
1750 +- .pm = OMAP_I2C_PM_OPS,
1751 ++ .pm = &omap_i2c_pm_ops,
1752 + .of_match_table = of_match_ptr(omap_i2c_of_match),
1753 + },
1754 + };
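
The i2c-omap conversion drops the #ifdef CONFIG_PM scaffolding: callbacks
tagged __maybe_unused compile in every configuration, and the SET_*_PM_OPS
macros expand to nothing when the matching PM support is off. A condensed
sketch with illustrative foo_* names:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* No #ifdef needed: __maybe_unused silences "defined but not used"
 * warnings when CONFIG_PM is disabled, and the macros below then expand
 * to empty initializers. */
static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
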
1755 +diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
1756 +index 70d39fc450a1..54eb69564264 100644
1757 +--- a/drivers/infiniband/hw/hfi1/ud.c
1758 ++++ b/drivers/infiniband/hw/hfi1/ud.c
1759 +@@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
1760 + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
1761 + wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
1762 + wc.wc_flags = IB_WC_WITH_IMM;
1763 +- tlen -= sizeof(u32);
1764 + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
1765 + wc.ex.imm_data = 0;
1766 + wc.wc_flags = 0;
1767 +diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
1768 +index f8d029a2390f..bce2b5cd3c7b 100644
1769 +--- a/drivers/infiniband/hw/qib/qib_ud.c
1770 ++++ b/drivers/infiniband/hw/qib/qib_ud.c
1771 +@@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
1772 + opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
1773 + wc.ex.imm_data = ohdr->u.ud.imm_data;
1774 + wc.wc_flags = IB_WC_WITH_IMM;
1775 +- tlen -= sizeof(u32);
1776 + } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
1777 + wc.ex.imm_data = 0;
1778 + wc.wc_flags = 0;
1779 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
1780 +index 1abe3c62f106..b22d02c9de90 100644
1781 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h
1782 ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
1783 +@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
1784 + struct list_head list;
1785 + struct net_device *dev;
1786 + struct ipoib_neigh *neigh;
1787 +- struct ipoib_path *path;
1788 + struct ipoib_tx_buf *tx_ring;
1789 + unsigned int tx_head;
1790 + unsigned int tx_tail;
1791 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1792 +index 0428e01e8f69..aa9dcfc36cd3 100644
1793 +--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1794 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1795 +@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
1796 +
1797 + neigh->cm = tx;
1798 + tx->neigh = neigh;
1799 +- tx->path = path;
1800 + tx->dev = dev;
1801 + list_add(&tx->list, &priv->cm.start_list);
1802 + set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1803 +@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1804 + neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1805 + goto free_neigh;
1806 + }
1807 +- memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
1808 ++ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
1809 +
1810 + spin_unlock_irqrestore(&priv->lock, flags);
1811 + netif_tx_unlock_bh(dev);
1812 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1813 +index 225ae6980182..628ef617bb2f 100644
1814 +--- a/drivers/input/mouse/elan_i2c_core.c
1815 ++++ b/drivers/input/mouse/elan_i2c_core.c
1816 +@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1817 + { "ELAN0000", 0 },
1818 + { "ELAN0100", 0 },
1819 + { "ELAN0600", 0 },
1820 ++ { "ELAN0601", 0 },
1821 + { "ELAN0602", 0 },
1822 + { "ELAN0605", 0 },
1823 + { "ELAN0608", 0 },
1824 +diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
1825 +index 38bfaca48eab..150f9eecaca7 100644
1826 +--- a/drivers/input/tablet/wacom_serial4.c
1827 ++++ b/drivers/input/tablet/wacom_serial4.c
1828 +@@ -187,6 +187,7 @@ enum {
1829 + MODEL_DIGITIZER_II = 0x5544, /* UD */
1830 + MODEL_GRAPHIRE = 0x4554, /* ET */
1831 + MODEL_PENPARTNER = 0x4354, /* CT */
1832 ++ MODEL_ARTPAD_II = 0x4B54, /* KT */
1833 + };
1834 +
1835 + static void wacom_handle_model_response(struct wacom *wacom)
1836 +@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
1837 + wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
1838 + break;
1839 +
1840 ++ case MODEL_ARTPAD_II:
1841 + case MODEL_DIGITIZER_II:
1842 + wacom->dev->name = "Wacom Digitizer II";
1843 + wacom->dev->id.version = MODEL_DIGITIZER_II;
1844 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1845 +index 34c9aa76a7bd..27500abe8ca7 100644
1846 +--- a/drivers/iommu/amd_iommu.c
1847 ++++ b/drivers/iommu/amd_iommu.c
1848 +@@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
1849 +
1850 + static void do_detach(struct iommu_dev_data *dev_data)
1851 + {
1852 ++ struct protection_domain *domain = dev_data->domain;
1853 + struct amd_iommu *iommu;
1854 + u16 alias;
1855 +
1856 + iommu = amd_iommu_rlookup_table[dev_data->devid];
1857 + alias = dev_data->alias;
1858 +
1859 +- /* decrease reference counters */
1860 +- dev_data->domain->dev_iommu[iommu->index] -= 1;
1861 +- dev_data->domain->dev_cnt -= 1;
1862 +-
1863 + /* Update data structures */
1864 + dev_data->domain = NULL;
1865 + list_del(&dev_data->list);
1866 +@@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
1867 +
1868 + /* Flush the DTE entry */
1869 + device_flush_dte(dev_data);
1870 ++
1871 ++ /* Flush IOTLB */
1872 ++ domain_flush_tlb_pde(domain);
1873 ++
1874 ++ /* Wait for the flushes to finish */
1875 ++ domain_flush_complete(domain);
1876 ++
1877 ++ /* decrease reference counters - needs to happen after the flushes */
1878 ++ domain->dev_iommu[iommu->index] -= 1;
1879 ++ domain->dev_cnt -= 1;
1880 + }
1881 +
1882 + /*
1883 +@@ -2555,13 +2562,13 @@ out_unmap:
1884 + bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
1885 + iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
1886 +
1887 +- if (--mapped_pages)
1888 ++ if (--mapped_pages == 0)
1889 + goto out_free_iova;
1890 + }
1891 + }
1892 +
1893 + out_free_iova:
1894 +- free_iova_fast(&dma_dom->iovad, address, npages);
1895 ++ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
1896 +
1897 + out_err:
1898 + return 0;
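
In the do_detach() hunk the reference-count decrements move below the
IOTLB flush; as the new comment says, they need to happen after the
flushes, because the flush helpers use those counters to decide which
IOMMUs to target. The ordering, reduced to a sketch with stand-in foo_*
names:

struct foo_domain {
	int dev_cnt;
};

static void foo_flush_tlb(struct foo_domain *d) { /* ... */ }
static void foo_flush_complete(struct foo_domain *d) { /* ... */ }

static void foo_detach(struct foo_domain *domain)
{
	foo_flush_tlb(domain);		/* still sees dev_cnt > 0 */
	foo_flush_complete(domain);	/* wait for it to land */
	domain->dev_cnt -= 1;		/* drop only after the flush */
}
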
1899 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1900 +index 4c2246fe5dbe..15579cba1a88 100644
1901 +--- a/drivers/irqchip/irq-gic-v3-its.c
1902 ++++ b/drivers/irqchip/irq-gic-v3-its.c
1903 +@@ -1581,6 +1581,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1904 + nr_irqs /= 2;
1905 + } while (nr_irqs > 0);
1906 +
1907 ++ if (!nr_irqs)
1908 ++ err = -ENOSPC;
1909 ++
1910 + if (err)
1911 + goto out;
1912 +
1913 +@@ -1951,6 +1954,29 @@ static void its_free_pending_table(struct page *pt)
1914 + get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1915 + }
1916 +
1917 ++static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
1918 ++{
1919 ++ u32 count = 1000000; /* 1s! */
1920 ++ bool clean;
1921 ++ u64 val;
1922 ++
1923 ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1924 ++ val &= ~GICR_VPENDBASER_Valid;
1925 ++ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
1926 ++
1927 ++ do {
1928 ++ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1929 ++ clean = !(val & GICR_VPENDBASER_Dirty);
1930 ++ if (!clean) {
1931 ++ count--;
1932 ++ cpu_relax();
1933 ++ udelay(1);
1934 ++ }
1935 ++ } while (!clean && count);
1936 ++
1937 ++ return val;
1938 ++}
1939 ++
1940 + static void its_cpu_init_lpis(void)
1941 + {
1942 + void __iomem *rbase = gic_data_rdist_rd_base();
1943 +@@ -2024,6 +2050,30 @@ static void its_cpu_init_lpis(void)
1944 + val |= GICR_CTLR_ENABLE_LPIS;
1945 + writel_relaxed(val, rbase + GICR_CTLR);
1946 +
1947 ++ if (gic_rdists->has_vlpis) {
1948 ++ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
1949 ++
1950 ++ /*
1951 ++ * It's possible for a CPU to receive VLPIs before it is
1952 ++ * scheduled as a vPE, especially for the first CPU, and a
1953 ++ * VLPI with an INTID larger than 2^(IDbits+1) will be
1954 ++ * considered out of range and dropped by the GIC.
1955 ++ * So we initialize IDbits to a known value to avoid VLPI drop.
1956 ++ */
1957 ++ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
1958 ++ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
1959 ++ smp_processor_id(), val);
1960 ++ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
1961 ++
1962 ++ /*
1963 ++ * Also clear Valid bit of GICR_VPENDBASER, in case some
1964 ++ * ancient programming gets left in and has possibility of
1965 ++ * corrupting memory.
1966 ++ */
1967 ++ val = its_clear_vpend_valid(vlpi_base);
1968 ++ WARN_ON(val & GICR_VPENDBASER_Dirty);
1969 ++ }
1970 ++
1971 + /* Make sure the GIC has seen the above */
1972 + dsb(sy);
1973 + }
1974 +@@ -2644,26 +2694,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
1975 + static void its_vpe_deschedule(struct its_vpe *vpe)
1976 + {
1977 + void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
1978 +- u32 count = 1000000; /* 1s! */
1979 +- bool clean;
1980 + u64 val;
1981 +
1982 +- /* We're being scheduled out */
1983 +- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1984 +- val &= ~GICR_VPENDBASER_Valid;
1985 +- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
1986 +-
1987 +- do {
1988 +- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1989 +- clean = !(val & GICR_VPENDBASER_Dirty);
1990 +- if (!clean) {
1991 +- count--;
1992 +- cpu_relax();
1993 +- udelay(1);
1994 +- }
1995 +- } while (!clean && count);
1996 ++ val = its_clear_vpend_valid(vlpi_base);
1997 +
1998 +- if (unlikely(!clean && !count)) {
1999 ++ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2000 + pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2001 + vpe->idai = false;
2002 + vpe->pending_last = true;
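
its_clear_vpend_valid() factors the clear-and-poll sequence out of
its_vpe_deschedule() so the new LPI-init path can reuse it. The generic
shape of such a bounded register poll, assuming a 64-bit build and with
FOO_* as placeholder register bits:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>

#define FOO_VALID	BIT_ULL(63)
#define FOO_DIRTY	BIT_ULL(60)

/* Clear the Valid bit, then poll until the hardware drops Dirty or a
 * fixed budget (~1s at 1us per step) runs out; the caller detects a
 * timeout from the returned value rather than a separate flag. */
static u64 foo_clear_valid(void __iomem *reg)
{
	u32 count = 1000000;
	u64 val;

	val = readq_relaxed(reg);
	val &= ~FOO_VALID;
	writeq_relaxed(val, reg);

	do {
		val = readq_relaxed(reg);
		if (!(val & FOO_DIRTY))
			break;
		cpu_relax();
		udelay(1);
	} while (--count);

	return val;
}
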
2003 +diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
2004 +index 25f32e1d7764..3496b61a312a 100644
2005 +--- a/drivers/irqchip/irq-mmp.c
2006 ++++ b/drivers/irqchip/irq-mmp.c
2007 +@@ -34,6 +34,9 @@
2008 + #define SEL_INT_PENDING (1 << 6)
2009 + #define SEL_INT_NUM_MASK 0x3f
2010 +
2011 ++#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
2012 ++#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
2013 ++
2014 + struct icu_chip_data {
2015 + int nr_irqs;
2016 + unsigned int virq_base;
2017 +@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
2018 + static const struct mmp_intc_conf mmp2_conf = {
2019 + .conf_enable = 0x20,
2020 + .conf_disable = 0x0,
2021 +- .conf_mask = 0x7f,
2022 ++ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
2023 ++ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
2024 + };
2025 +
2026 + static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
2027 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
2028 +index 361abbc00486..6f1fd40fce10 100644
2029 +--- a/drivers/media/usb/uvc/uvc_driver.c
2030 ++++ b/drivers/media/usb/uvc/uvc_driver.c
2031 +@@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
2032 + return -EINVAL;
2033 + }
2034 +
2035 +- /* Make sure the terminal type MSB is not null, otherwise it
2036 +- * could be confused with a unit.
2037 ++ /*
2038 ++ * Reject invalid terminal types that would cause issues:
2039 ++ *
2040 ++ * - The high byte must be non-zero, otherwise it would be
2041 ++ * confused with a unit.
2042 ++ *
2043 ++ * - Bit 15 must be 0, as we use it internally as a terminal
2044 ++ * direction flag.
2045 ++ *
2046 ++ * Other unknown types are accepted.
2047 + */
2048 + type = get_unaligned_le16(&buffer[4]);
2049 +- if ((type & 0xff00) == 0) {
2050 ++ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
2051 + uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
2052 + "interface %d INPUT_TERMINAL %d has invalid "
2053 + "type 0x%04x, skipping\n", udev->devnum,
2054 +diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
2055 +index 0fb986ba3290..0ae723f75341 100644
2056 +--- a/drivers/net/ethernet/altera/altera_msgdma.c
2057 ++++ b/drivers/net/ethernet/altera/altera_msgdma.c
2058 +@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
2059 + & 0xffff;
2060 +
2061 + if (inuse) { /* Tx FIFO is not empty */
2062 +- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
2063 ++ ready = max_t(int,
2064 ++ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
2065 + } else {
2066 + /* Check for buffered last packet */
2067 + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
2068 +diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
2069 +index 3d45f4c92cf6..9bbaad9f3d63 100644
2070 +--- a/drivers/net/ethernet/cadence/macb.h
2071 ++++ b/drivers/net/ethernet/cadence/macb.h
2072 +@@ -643,6 +643,7 @@
2073 + #define MACB_CAPS_JUMBO 0x00000020
2074 + #define MACB_CAPS_GEM_HAS_PTP 0x00000040
2075 + #define MACB_CAPS_BD_RD_PREFETCH 0x00000080
2076 ++#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
2077 + #define MACB_CAPS_FIFO_MODE 0x10000000
2078 + #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
2079 + #define MACB_CAPS_SG_DISABLED 0x40000000
2080 +@@ -1214,6 +1215,8 @@ struct macb {
2081 +
2082 + int rx_bd_rd_prefetch;
2083 + int tx_bd_rd_prefetch;
2084 ++
2085 ++ u32 rx_intr_mask;
2086 + };
2087 +
2088 + #ifdef CONFIG_MACB_USE_HWSTAMP
2089 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2090 +index 8f4b2f9a8e07..8abea1c3844f 100644
2091 +--- a/drivers/net/ethernet/cadence/macb_main.c
2092 ++++ b/drivers/net/ethernet/cadence/macb_main.c
2093 +@@ -56,8 +56,7 @@
2094 + /* level of occupied TX descriptors under which we wake up TX process */
2095 + #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
2096 +
2097 +-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
2098 +- | MACB_BIT(ISR_ROVR))
2099 ++#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
2100 + #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
2101 + | MACB_BIT(ISR_RLE) \
2102 + | MACB_BIT(TXERR))
2103 +@@ -1271,7 +1270,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
2104 + queue_writel(queue, ISR, MACB_BIT(RCOMP));
2105 + napi_reschedule(napi);
2106 + } else {
2107 +- queue_writel(queue, IER, MACB_RX_INT_FLAGS);
2108 ++ queue_writel(queue, IER, bp->rx_intr_mask);
2109 + }
2110 + }
2111 +
2112 +@@ -1289,7 +1288,7 @@ static void macb_hresp_error_task(unsigned long data)
2113 + u32 ctrl;
2114 +
2115 + for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2116 +- queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
2117 ++ queue_writel(queue, IDR, bp->rx_intr_mask |
2118 + MACB_TX_INT_FLAGS |
2119 + MACB_BIT(HRESP));
2120 + }
2121 +@@ -1319,7 +1318,7 @@ static void macb_hresp_error_task(unsigned long data)
2122 +
2123 + /* Enable interrupts */
2124 + queue_writel(queue, IER,
2125 +- MACB_RX_INT_FLAGS |
2126 ++ bp->rx_intr_mask |
2127 + MACB_TX_INT_FLAGS |
2128 + MACB_BIT(HRESP));
2129 + }
2130 +@@ -1373,14 +1372,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2131 + (unsigned int)(queue - bp->queues),
2132 + (unsigned long)status);
2133 +
2134 +- if (status & MACB_RX_INT_FLAGS) {
2135 ++ if (status & bp->rx_intr_mask) {
2136 + /* There's no point taking any more interrupts
2137 + * until we have processed the buffers. The
2138 + * scheduling call may fail if the poll routine
2139 + * is already scheduled, so disable interrupts
2140 + * now.
2141 + */
2142 +- queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
2143 ++ queue_writel(queue, IDR, bp->rx_intr_mask);
2144 + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2145 + queue_writel(queue, ISR, MACB_BIT(RCOMP));
2146 +
2147 +@@ -1413,8 +1412,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2148 + /* There is a hardware issue under heavy load where DMA can
2149 + * stop; this causes endless "used buffer descriptor read"
2150 + * interrupts but it can be cleared by re-enabling RX. See
2151 +- * the at91 manual, section 41.3.1 or the Zynq manual
2152 +- * section 16.7.4 for details.
2153 ++ * the at91rm9200 manual, section 41.3.1 or the Zynq manual
2154 ++ * section 16.7.4 for details. RXUBR is only enabled for
2155 ++ * these two versions.
2156 + */
2157 + if (status & MACB_BIT(RXUBR)) {
2158 + ctrl = macb_readl(bp, NCR);
2159 +@@ -2264,7 +2264,7 @@ static void macb_init_hw(struct macb *bp)
2160 +
2161 + /* Enable interrupts */
2162 + queue_writel(queue, IER,
2163 +- MACB_RX_INT_FLAGS |
2164 ++ bp->rx_intr_mask |
2165 + MACB_TX_INT_FLAGS |
2166 + MACB_BIT(HRESP));
2167 + }
2168 +@@ -3912,6 +3912,7 @@ static const struct macb_config sama5d4_config = {
2169 + };
2170 +
2171 + static const struct macb_config emac_config = {
2172 ++ .caps = MACB_CAPS_NEEDS_RSTONUBR,
2173 + .clk_init = at91ether_clk_init,
2174 + .init = at91ether_init,
2175 + };
2176 +@@ -3933,7 +3934,8 @@ static const struct macb_config zynqmp_config = {
2177 + };
2178 +
2179 + static const struct macb_config zynq_config = {
2180 +- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2181 ++ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
2182 ++ MACB_CAPS_NEEDS_RSTONUBR,
2183 + .dma_burst_length = 16,
2184 + .clk_init = macb_clk_init,
2185 + .init = macb_init,
2186 +@@ -4088,6 +4090,10 @@ static int macb_probe(struct platform_device *pdev)
2187 + macb_dma_desc_get_size(bp);
2188 + }
2189 +
2190 ++ bp->rx_intr_mask = MACB_RX_INT_FLAGS;
2191 ++ if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
2192 ++ bp->rx_intr_mask |= MACB_BIT(RXUBR);
2193 ++
2194 + mac = of_get_mac_address(np);
2195 + if (mac) {
2196 + ether_addr_copy(bp->dev->dev_addr, mac);
2197 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2198 +index 6242249c9f4c..b043370c2685 100644
2199 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2200 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2201 +@@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2202 + out_notify_fail:
2203 + (void)cancel_work_sync(&priv->service_task);
2204 + out_read_prop_fail:
2205 ++ /* safe for ACPI FW */
2206 ++ of_node_put(to_of_node(priv->fwnode));
2207 + free_netdev(ndev);
2208 + return ret;
2209 + }
2210 +@@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
2211 + set_bit(NIC_STATE_REMOVING, &priv->state);
2212 + (void)cancel_work_sync(&priv->service_task);
2213 +
2214 ++ /* safe for ACPI FW */
2215 ++ of_node_put(to_of_node(priv->fwnode));
2216 ++
2217 + free_netdev(ndev);
2218 + return 0;
2219 + }
2220 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2221 +index 774beda040a1..e2710ff48fb0 100644
2222 +--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2223 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2224 +@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
2225 + */
2226 + static int hns_nic_nway_reset(struct net_device *netdev)
2227 + {
2228 +- int ret = 0;
2229 + struct phy_device *phy = netdev->phydev;
2230 +
2231 +- if (netif_running(netdev)) {
2232 +- /* if autoneg is disabled, don't restart auto-negotiation */
2233 +- if (phy && phy->autoneg == AUTONEG_ENABLE)
2234 +- ret = genphy_restart_aneg(phy);
2235 +- }
2236 ++ if (!netif_running(netdev))
2237 ++ return 0;
2238 +
2239 +- return ret;
2240 ++ if (!phy)
2241 ++ return -EOPNOTSUPP;
2242 ++
2243 ++ if (phy->autoneg != AUTONEG_ENABLE)
2244 ++ return -EINVAL;
2245 ++
2246 ++ return genphy_restart_aneg(phy);
2247 + }
2248 +
2249 + static u32
2250 +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
2251 +index 017e08452d8c..baf5cc251f32 100644
2252 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
2253 ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
2254 +@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
2255 + }
2256 +
2257 + hns_mdio_cmd_write(mdio_dev, is_c45,
2258 +- MDIO_C45_WRITE_ADDR, phy_id, devad);
2259 ++ MDIO_C45_READ, phy_id, devad);
2260 + }
2261 +
2262 + /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0 */
2263 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2264 +index 2f69ee9221c6..4dd82a1612aa 100644
2265 +--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
2266 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2267 +@@ -473,19 +473,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
2268 +
2269 + /* get pq index according to PQ_FLAGS */
2270 + static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
2271 +- u32 pq_flags)
2272 ++ unsigned long pq_flags)
2273 + {
2274 + struct qed_qm_info *qm_info = &p_hwfn->qm_info;
2275 +
2276 + /* Can't have multiple flags set here */
2277 +- if (bitmap_weight((unsigned long *)&pq_flags,
2278 ++ if (bitmap_weight(&pq_flags,
2279 + sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
2280 +- DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
2281 ++ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
2282 + goto err;
2283 + }
2284 +
2285 + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
2286 +- DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
2287 ++ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
2288 + goto err;
2289 + }
2290 +
2291 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2292 +index 67c02ea93906..64ac95ca4df2 100644
2293 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2294 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2295 +@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
2296 + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
2297 + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
2298 +
2299 ++ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
2300 ++ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
2301 ++ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
2302 ++
2303 + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
2304 + !!(accept_filter & QED_ACCEPT_BCAST));
2305 +
2306 +@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2307 + return rc;
2308 + }
2309 +
2310 ++ if (p_params->update_ctl_frame_check) {
2311 ++ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
2312 ++ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
2313 ++ }
2314 ++
2315 + /* Update mcast bins for VFs, PF doesn't use this functionality */
2316 + qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
2317 +
2318 +@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2319 + u16 num_queues = 0;
2320 +
2321 + /* Since the feature controls only queue-zones,
2322 +- * make sure we have the contexts [rx, tx, xdp] to
2323 ++ * make sure we have the contexts [rx, xdp, tcs] to
2324 + * match.
2325 + */
2326 + for_each_hwfn(cdev, i) {
2327 +@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2328 + u16 cids;
2329 +
2330 + cids = hwfn->pf_params.eth_pf_params.num_cons;
2331 +- num_queues += min_t(u16, l2_queues, cids / 3);
2332 ++ cids /= (2 + info->num_tc);
2333 ++ num_queues += min_t(u16, l2_queues, cids);
2334 + }
2335 +
2336 + /* queues might theoretically be >256, but interrupts'
2337 +@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2338 + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2339 + accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2340 + QED_ACCEPT_MCAST_UNMATCHED;
2341 +- accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2342 ++ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2343 ++ QED_ACCEPT_MCAST_UNMATCHED;
2344 + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2345 + accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2346 + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2347 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2348 +index 8d80f1095d17..7127d5aaac42 100644
2349 +--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
2350 ++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2351 +@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
2352 + struct qed_rss_params *rss_params;
2353 + struct qed_filter_accept_flags accept_flags;
2354 + struct qed_sge_tpa_params *sge_tpa_params;
2355 ++ u8 update_ctl_frame_check;
2356 ++ u8 mac_chk_en;
2357 ++ u8 ethtype_chk_en;
2358 + };
2359 +
2360 + int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
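
The new qed_l2.h fields follow the struct's existing convention: each
optional setting travels with an update_* flag, so callers that leave the
flag clear never clobber current state. A tiny sketch of that convention
with hypothetical names:

#include <linux/types.h>

struct foo_update_params {
	u8 update_ctl_frame_check;	/* 1 = the fields below are valid */
	u8 mac_chk_en;
	u8 ethtype_chk_en;
};

struct foo_hw_state {
	u8 mac_check_en;
	u8 ethtype_check_en;
};

/* Apply only what the caller explicitly marked as updated; an all-zero
 * params struct is always a harmless no-op. */
static void foo_apply(struct foo_hw_state *hw,
		      const struct foo_update_params *p)
{
	if (p->update_ctl_frame_check) {
		hw->mac_check_en = p->mac_chk_en;
		hw->ethtype_check_en = p->ethtype_chk_en;
	}
}
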
2361 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2362 +index 92cd8abeb41d..015de1e0addd 100644
2363 +--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2364 ++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2365 +@@ -2430,19 +2430,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2366 + {
2367 + struct qed_ll2_tx_pkt_info pkt;
2368 + const skb_frag_t *frag;
2369 ++ u8 flags = 0, nr_frags;
2370 + int rc = -EINVAL, i;
2371 + dma_addr_t mapping;
2372 + u16 vlan = 0;
2373 +- u8 flags = 0;
2374 +
2375 + if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2376 + DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2377 + return -EINVAL;
2378 + }
2379 +
2380 +- if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2381 ++ /* Cache the number of fragments, since the SKB may be freed by
2382 ++ * the completion routine after qed_ll2_prepare_tx_packet() is called
2383 ++ */
2384 ++ nr_frags = skb_shinfo(skb)->nr_frags;
2385 ++
2386 ++ if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2387 + DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2388 +- 1 + skb_shinfo(skb)->nr_frags);
2389 ++ 1 + nr_frags);
2390 + return -EINVAL;
2391 + }
2392 +
2393 +@@ -2464,7 +2469,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2394 + }
2395 +
2396 + memset(&pkt, 0, sizeof(pkt));
2397 +- pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2398 ++ pkt.num_of_bds = 1 + nr_frags;
2399 + pkt.vlan = vlan;
2400 + pkt.bd_flags = flags;
2401 + pkt.tx_dest = QED_LL2_TX_DEST_NW;
2402 +@@ -2475,12 +2480,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2403 + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2404 + pkt.remove_stag = true;
2405 +
2406 ++ /* qed_ll2_prepare_tx_packet() may actually send the packet if
2407 ++ * there are no fragments in the skb, in which case the completion
2408 ++ * routine may run and free the SKB. Do not dereference the SKB
2409 ++ * beyond this point unless it has fragments.
2410 ++ */
2411 + rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2412 + &pkt, 1);
2413 + if (rc)
2414 + goto err;
2415 +
2416 +- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2417 ++ for (i = 0; i < nr_frags; i++) {
2418 + frag = &skb_shinfo(skb)->frags[i];
2419 +
2420 + mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
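
The qed_ll2_start_xmit() changes are a use-after-free fix: once the packet
is handed over, the completion routine may free the skb, so any field
needed afterwards must be cached first. Reduced to a sketch, with
foo_tx_submit() as a stand-in for qed_ll2_prepare_tx_packet():

#include <linux/skbuff.h>

#define FOO_TX_MAX_BDS	18

static int foo_tx_submit(void *dev, struct sk_buff *skb) { return 0; }

/* Read nr_frags before submitting: for a fragment-less skb the
 * completion can run inside foo_tx_submit() and free the skb, making
 * any later skb_shinfo() access a use-after-free. */
static int foo_xmit(void *dev, struct sk_buff *skb)
{
	u8 nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > FOO_TX_MAX_BDS)
		return -EINVAL;

	return foo_tx_submit(dev, skb);
}
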
2421 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2422 +index 3157c0d99441..dae2896e1d8e 100644
2423 +--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
2424 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2425 +@@ -380,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
2426 + * @param p_hwfn
2427 + */
2428 + void qed_consq_free(struct qed_hwfn *p_hwfn);
2429 ++int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
2430 +
2431 + /**
2432 + * @file
2433 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2434 +index 7106ad17afe2..a0ee847f379b 100644
2435 +--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
2436 ++++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2437 +@@ -402,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
2438 +
2439 + qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
2440 +
2441 ++ /* Attempt to post pending requests */
2442 ++ spin_lock_bh(&p_hwfn->p_spq->lock);
2443 ++ rc = qed_spq_pend_post(p_hwfn);
2444 ++ spin_unlock_bh(&p_hwfn->p_spq->lock);
2445 ++
2446 + return rc;
2447 + }
2448 +
2449 +@@ -745,7 +750,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
2450 + return 0;
2451 + }
2452 +
2453 +-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2454 ++int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2455 + {
2456 + struct qed_spq *p_spq = p_hwfn->p_spq;
2457 + struct qed_spq_entry *p_ent = NULL;
2458 +@@ -883,7 +888,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2459 + struct qed_spq_entry *p_ent = NULL;
2460 + struct qed_spq_entry *tmp;
2461 + struct qed_spq_entry *found = NULL;
2462 +- int rc;
2463 +
2464 + if (!p_hwfn)
2465 + return -EINVAL;
2466 +@@ -941,12 +945,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2467 + */
2468 + qed_spq_return_entry(p_hwfn, found);
2469 +
2470 +- /* Attempt to post pending requests */
2471 +- spin_lock_bh(&p_spq->lock);
2472 +- rc = qed_spq_pend_post(p_hwfn);
2473 +- spin_unlock_bh(&p_spq->lock);
2474 +-
2475 +- return rc;
2476 ++ return 0;
2477 + }
2478 +
2479 + int qed_consq_alloc(struct qed_hwfn *p_hwfn)
2480 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2481 +index ca6290fa0f30..71a7af134dd8 100644
2482 +--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2483 ++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2484 +@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
2485 + params.vport_id = vf->vport_id;
2486 + params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2487 + params.mtu = vf->mtu;
2488 +- params.check_mac = true;
2489 ++
2490 ++ /* Non-trusted VFs should enable control frame filtering */
2491 ++ params.check_mac = !vf->p_vf_info.is_trusted_configured;
2492 +
2493 + rc = qed_sp_eth_vport_start(p_hwfn, &params);
2494 + if (rc) {
2495 +@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2496 + params.opaque_fid = vf->opaque_fid;
2497 + params.vport_id = vf->vport_id;
2498 +
2499 ++ params.update_ctl_frame_check = 1;
2500 ++ params.mac_chk_en = !vf_info->is_trusted_configured;
2501 ++
2502 + if (vf_info->rx_accept_mode & mask) {
2503 + flags->update_rx_mode_config = 1;
2504 + flags->rx_accept_filter = vf_info->rx_accept_mode;
2505 +@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2506 + }
2507 +
2508 + if (flags->update_rx_mode_config ||
2509 +- flags->update_tx_mode_config)
2510 ++ flags->update_tx_mode_config ||
2511 ++ params.update_ctl_frame_check)
2512 + qed_sp_vport_update(hwfn, &params,
2513 + QED_SPQ_MODE_EBLOCK, NULL);
2514 + }
2515 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2516 +index be118d057b92..6ab3fb008139 100644
2517 +--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2518 ++++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2519 +@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2520 + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
2521 + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
2522 + struct vf_pf_resc_request *p_resc;
2523 ++ u8 retry_cnt = VF_ACQUIRE_THRESH;
2524 + bool resources_acquired = false;
2525 + struct vfpf_acquire_tlv *req;
2526 + int rc = 0, attempts = 0;
2527 +@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2528 +
2529 + /* send acquire request */
2530 + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
2531 ++
2532 ++ /* Re-try acquire in case of vf-pf hw channel timeout */
2533 ++ if (retry_cnt && rc == -EBUSY) {
2534 ++ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2535 ++ "VF retrying to acquire due to VPC timeout\n");
2536 ++ retry_cnt--;
2537 ++ continue;
2538 ++ }
2539 ++
2540 + if (rc)
2541 + goto exit;
2542 +
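
The VF acquire hunk adds a bounded retry: a vf-pf channel timeout surfaces
as -EBUSY and is worth retrying a few times before being treated as fatal.
The generic shape, with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/types.h>

#define FOO_ACQUIRE_RETRIES	3

static int foo_send_acquire(void *vf) { return 0; }

/* Retry only the transient failure (-EBUSY); any other error, or an
 * exhausted budget, is reported to the caller unchanged. */
static int foo_acquire(void *vf)
{
	u8 retries = FOO_ACQUIRE_RETRIES;
	int rc;

	do {
		rc = foo_send_acquire(vf);
	} while (rc == -EBUSY && retries--);

	return rc;
}
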
2543 +diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
2544 +index 6a4d266fb8e2..d242a5724069 100644
2545 +--- a/drivers/net/ethernet/qlogic/qede/qede.h
2546 ++++ b/drivers/net/ethernet/qlogic/qede/qede.h
2547 +@@ -489,6 +489,9 @@ struct qede_reload_args {
2548 +
2549 + /* Datapath functions definition */
2550 + netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
2551 ++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2552 ++ struct net_device *sb_dev,
2553 ++ select_queue_fallback_t fallback);
2554 + netdev_features_t qede_features_check(struct sk_buff *skb,
2555 + struct net_device *dev,
2556 + netdev_features_t features);
2557 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2558 +index 1a78027de071..a96da16f3404 100644
2559 +--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
2560 ++++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2561 +@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2562 + return NETDEV_TX_OK;
2563 + }
2564 +
2565 ++u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2566 ++ struct net_device *sb_dev,
2567 ++ select_queue_fallback_t fallback)
2568 ++{
2569 ++ struct qede_dev *edev = netdev_priv(dev);
2570 ++ int total_txq;
2571 ++
2572 ++ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
2573 ++
2574 ++ return QEDE_TSS_COUNT(edev) ?
2575 ++ fallback(dev, skb, NULL) % total_txq : 0;
2576 ++}
2577 ++
2578 + /* 8B udp header + 8B base tunnel header + 32B option length */
2579 + #define QEDE_MAX_TUN_HDR_LEN 48
2580 +
2581 +diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
2582 +index 46d0f2eaa0c0..f3d9c40c4115 100644
2583 +--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
2584 ++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
2585 +@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
2586 + .ndo_open = qede_open,
2587 + .ndo_stop = qede_close,
2588 + .ndo_start_xmit = qede_start_xmit,
2589 ++ .ndo_select_queue = qede_select_queue,
2590 + .ndo_set_rx_mode = qede_set_rx_mode,
2591 + .ndo_set_mac_address = qede_set_mac_addr,
2592 + .ndo_validate_addr = eth_validate_addr,
2593 +@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
2594 + .ndo_open = qede_open,
2595 + .ndo_stop = qede_close,
2596 + .ndo_start_xmit = qede_start_xmit,
2597 ++ .ndo_select_queue = qede_select_queue,
2598 + .ndo_set_rx_mode = qede_set_rx_mode,
2599 + .ndo_set_mac_address = qede_set_mac_addr,
2600 + .ndo_validate_addr = eth_validate_addr,
2601 +@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
2602 + .ndo_open = qede_open,
2603 + .ndo_stop = qede_close,
2604 + .ndo_start_xmit = qede_start_xmit,
2605 ++ .ndo_select_queue = qede_select_queue,
2606 + .ndo_set_rx_mode = qede_set_rx_mode,
2607 + .ndo_set_mac_address = qede_set_mac_addr,
2608 + .ndo_validate_addr = eth_validate_addr,
2609 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2610 +index 7b923362ee55..3b174eae77c1 100644
2611 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2612 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2613 +@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
2614 + }
2615 +
2616 + ret = phy_power_on(bsp_priv, true);
2617 +- if (ret)
2618 ++ if (ret) {
2619 ++ gmac_clk_enable(bsp_priv, false);
2620 + return ret;
2621 ++ }
2622 +
2623 + pm_runtime_enable(dev);
2624 + pm_runtime_get_sync(dev);
2625 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2626 +index 9caf79ba5ef1..4d5fb4b51cc4 100644
2627 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2628 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2629 +@@ -719,8 +719,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
2630 + {
2631 + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2632 +
2633 +- if (!clk)
2634 +- return 0;
2635 ++ if (!clk) {
2636 ++ clk = priv->plat->clk_ref_rate;
2637 ++ if (!clk)
2638 ++ return 0;
2639 ++ }
2640 +
2641 + return (usec * (clk / 1000000)) / 256;
2642 + }
2643 +@@ -729,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
2644 + {
2645 + unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2646 +
2647 +- if (!clk)
2648 +- return 0;
2649 ++ if (!clk) {
2650 ++ clk = priv->plat->clk_ref_rate;
2651 ++ if (!clk)
2652 ++ return 0;
2653 ++ }
2654 +
2655 + return (riwt * 256) / (clk / 1000000);
2656 + }
2657 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2658 +index 123b74e25ed8..43ab9e905bed 100644
2659 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2660 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2661 +@@ -3028,10 +3028,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2662 +
2663 + tx_q = &priv->tx_queue[queue];
2664 +
2665 ++ if (priv->tx_path_in_lpi_mode)
2666 ++ stmmac_disable_eee_mode(priv);
2667 ++
2668 + /* Manage oversized TCP frames for GMAC4 device */
2669 + if (skb_is_gso(skb) && priv->tso) {
2670 +- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2671 ++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
2672 ++ /*
2673 ++ * There is no way to determine the number of
2674 ++ * TSO-capable queues. Let's always use queue 0,
2675 ++ * because if TSO is supported then at least this
2676 ++ * one will be capable.
2677 ++ */
2678 ++ skb_set_queue_mapping(skb, 0);
2679 ++
2680 + return stmmac_tso_xmit(skb, dev);
2681 ++ }
2682 + }
2683 +
2684 + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2685 +@@ -3046,9 +3058,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2686 + return NETDEV_TX_BUSY;
2687 + }
2688 +
2689 +- if (priv->tx_path_in_lpi_mode)
2690 +- stmmac_disable_eee_mode(priv);
2691 +-
2692 + entry = tx_q->cur_tx;
2693 + first_entry = entry;
2694 + WARN_ON(tx_q->tx_skbuff[first_entry]);
2695 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2696 +index c070a9e51ebf..fae572b38416 100644
2697 +--- a/drivers/net/wireless/ath/ath9k/init.c
2698 ++++ b/drivers/net/wireless/ath/ath9k/init.c
2699 +@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
2700 + ret = ath9k_eeprom_request(sc, eeprom_name);
2701 + if (ret)
2702 + return ret;
2703 ++
2704 ++ ah->ah_flags &= ~AH_USE_EEPROM;
2705 ++ ah->ah_flags |= AH_NO_EEP_SWAP;
2706 + }
2707 +
2708 + mac = of_get_mac_address(np);
2709 + if (mac)
2710 + ether_addr_copy(common->macaddr, mac);
2711 +
2712 +- ah->ah_flags &= ~AH_USE_EEPROM;
2713 +- ah->ah_flags |= AH_NO_EEP_SWAP;
2714 +-
2715 + return 0;
2716 + }
2717 +
2718 +diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
2719 +index 750bea3574ee..627df164b7b6 100644
2720 +--- a/drivers/net/wireless/ti/wlcore/sdio.c
2721 ++++ b/drivers/net/wireless/ti/wlcore/sdio.c
2722 +@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
2723 + }
2724 +
2725 + sdio_claim_host(func);
2726 ++ /*
2727 ++ * To guarantee that the SDIO card is power cycled, as required for
2728 ++ * FW programming to succeed, let's do a brute-force HW reset.
2729 ++ */
2730 ++ mmc_hw_reset(card->host);
2731 ++
2732 + sdio_enable_func(func);
2733 + sdio_release_host(func);
2734 +
2735 +@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
2736 + {
2737 + struct sdio_func *func = dev_to_sdio_func(glue->dev);
2738 + struct mmc_card *card = func->card;
2739 +- int error;
2740 +
2741 + sdio_claim_host(func);
2742 + sdio_disable_func(func);
2743 + sdio_release_host(func);
2744 +
2745 + /* Let runtime PM know the card is powered off */
2746 +- error = pm_runtime_put(&card->dev);
2747 +- if (error < 0 && error != -EBUSY) {
2748 +- dev_err(&card->dev, "%s failed: %i\n", __func__, error);
2749 +-
2750 +- return error;
2751 +- }
2752 +-
2753 ++ pm_runtime_put(&card->dev);
2754 + return 0;
2755 + }
2756 +
2757 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2758 +index e0d2b7473901..2cdb3032ca0f 100644
2759 +--- a/drivers/nvme/host/core.c
2760 ++++ b/drivers/nvme/host/core.c
2761 +@@ -1182,6 +1182,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
2762 + * effects say only one namespace is affected.
2763 + */
2764 + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
2765 ++ mutex_lock(&ctrl->scan_lock);
2766 + nvme_start_freeze(ctrl);
2767 + nvme_wait_freeze(ctrl);
2768 + }
2769 +@@ -1210,8 +1211,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
2770 + */
2771 + if (effects & NVME_CMD_EFFECTS_LBCC)
2772 + nvme_update_formats(ctrl);
2773 +- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
2774 ++ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
2775 + nvme_unfreeze(ctrl);
2776 ++ mutex_unlock(&ctrl->scan_lock);
2777 ++ }
2778 + if (effects & NVME_CMD_EFFECTS_CCC)
2779 + nvme_init_identify(ctrl);
2780 + if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
2781 +@@ -3292,6 +3295,7 @@ static void nvme_scan_work(struct work_struct *work)
2782 + if (nvme_identify_ctrl(ctrl, &id))
2783 + return;
2784 +
2785 ++ mutex_lock(&ctrl->scan_lock);
2786 + nn = le32_to_cpu(id->nn);
2787 + if (ctrl->vs >= NVME_VS(1, 1, 0) &&
2788 + !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
2789 +@@ -3300,6 +3304,7 @@ static void nvme_scan_work(struct work_struct *work)
2790 + }
2791 + nvme_scan_ns_sequential(ctrl, nn);
2792 + out_free_id:
2793 ++ mutex_unlock(&ctrl->scan_lock);
2794 + kfree(id);
2795 + down_write(&ctrl->namespaces_rwsem);
2796 + list_sort(NULL, &ctrl->namespaces, ns_cmp);
2797 +@@ -3535,6 +3540,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
2798 +
2799 + ctrl->state = NVME_CTRL_NEW;
2800 + spin_lock_init(&ctrl->lock);
2801 ++ mutex_init(&ctrl->scan_lock);
2802 + INIT_LIST_HEAD(&ctrl->namespaces);
2803 + init_rwsem(&ctrl->namespaces_rwsem);
2804 + ctrl->dev = dev;
2805 +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2806 +index 60220de2db52..e82cdaec81c9 100644
2807 +--- a/drivers/nvme/host/nvme.h
2808 ++++ b/drivers/nvme/host/nvme.h
2809 +@@ -148,6 +148,7 @@ struct nvme_ctrl {
2810 + enum nvme_ctrl_state state;
2811 + bool identified;
2812 + spinlock_t lock;
2813 ++ struct mutex scan_lock;
2814 + const struct nvme_ctrl_ops *ops;
2815 + struct request_queue *admin_q;
2816 + struct request_queue *connect_q;
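
The nvme changes introduce scan_lock so a passthrough command with
controller-wide effects (which freezes I/O and may reformat namespaces)
cannot race the namespace scan worker. The locking skeleton, with foo_*
as stand-in names:

#include <linux/mutex.h>

struct foo_ctrl {
	struct mutex scan_lock;
};

static void foo_scan_namespaces(struct foo_ctrl *c) { /* ... */ }
static void foo_freeze_and_issue(struct foo_ctrl *c) { /* ... */ }

/* Both paths serialize on scan_lock, so the scanner can never observe
 * namespaces while a format-changing command is in flight. */
static void foo_scan_work(struct foo_ctrl *ctrl)
{
	mutex_lock(&ctrl->scan_lock);
	foo_scan_namespaces(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}

static void foo_passthru(struct foo_ctrl *ctrl)
{
	mutex_lock(&ctrl->scan_lock);
	foo_freeze_and_issue(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}
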
2817 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2818 +index f46313f441ec..7b9ef8e734e7 100644
2819 +--- a/drivers/nvme/host/pci.c
2820 ++++ b/drivers/nvme/host/pci.c
2821 +@@ -2260,27 +2260,18 @@ static void nvme_reset_work(struct work_struct *work)
2822 + if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2823 + nvme_dev_disable(dev, false);
2824 +
2825 +- /*
2826 +- * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2827 +- * initializing procedure here.
2828 +- */
2829 +- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2830 +- dev_warn(dev->ctrl.device,
2831 +- "failed to mark controller CONNECTING\n");
2832 +- goto out;
2833 +- }
2834 +-
2835 ++ mutex_lock(&dev->shutdown_lock);
2836 + result = nvme_pci_enable(dev);
2837 + if (result)
2838 +- goto out;
2839 ++ goto out_unlock;
2840 +
2841 + result = nvme_pci_configure_admin_queue(dev);
2842 + if (result)
2843 +- goto out;
2844 ++ goto out_unlock;
2845 +
2846 + result = nvme_alloc_admin_tags(dev);
2847 + if (result)
2848 +- goto out;
2849 ++ goto out_unlock;
2850 +
2851 + /*
2852 + * Limit the max command size to prevent iod->sg allocations going
2853 +@@ -2288,6 +2279,17 @@ static void nvme_reset_work(struct work_struct *work)
2854 + */
2855 + dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2856 + dev->ctrl.max_segments = NVME_MAX_SEGS;
2857 ++ mutex_unlock(&dev->shutdown_lock);
2858 ++
2859 ++ /*
2860 ++ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2861 ++ * initializing procedure here.
2862 ++ */
2863 ++ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2864 ++ dev_warn(dev->ctrl.device,
2865 ++ "failed to mark controller CONNECTING\n");
2866 ++ goto out;
2867 ++ }
2868 +
2869 + result = nvme_init_identify(&dev->ctrl);
2870 + if (result)
2871 +@@ -2352,6 +2354,8 @@ static void nvme_reset_work(struct work_struct *work)
2872 + nvme_start_ctrl(&dev->ctrl);
2873 + return;
2874 +
2875 ++ out_unlock:
2876 ++ mutex_unlock(&dev->shutdown_lock);
2877 + out:
2878 + nvme_remove_dead_ctrl(dev, result);
2879 + }
2880 +diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
2881 +index cf73a403d22d..cecbce21d01f 100644
2882 +--- a/drivers/pinctrl/pinctrl-mcp23s08.c
2883 ++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
2884 +@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
2885 + break;
2886 +
2887 + case MCP_TYPE_S18:
2888 ++ one_regmap_config =
2889 ++ devm_kmemdup(dev, &mcp23x17_regmap,
2890 ++ sizeof(struct regmap_config), GFP_KERNEL);
2891 ++ if (!one_regmap_config)
2892 ++ return -ENOMEM;
2893 + mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
2894 +- &mcp23x17_regmap);
2895 ++ one_regmap_config);
2896 + mcp->reg_shift = 1;
2897 + mcp->chip.ngpio = 16;
2898 + mcp->chip.label = "mcp23s18";
2899 +diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
2900 +index 0c1aa6c314f5..7563c07e14e4 100644
2901 +--- a/drivers/platform/x86/Kconfig
2902 ++++ b/drivers/platform/x86/Kconfig
2903 +@@ -856,6 +856,7 @@ config TOSHIBA_WMI
2904 + config ACPI_CMPC
2905 + tristate "CMPC Laptop Extras"
2906 + depends on ACPI && INPUT
2907 ++ depends on BACKLIGHT_LCD_SUPPORT
2908 + depends on RFKILL || RFKILL=n
2909 + select BACKLIGHT_CLASS_DEVICE
2910 + help
2911 +@@ -1077,6 +1078,7 @@ config INTEL_OAKTRAIL
2912 + config SAMSUNG_Q10
2913 + tristate "Samsung Q10 Extras"
2914 + depends on ACPI
2915 ++ depends on BACKLIGHT_LCD_SUPPORT
2916 + select BACKLIGHT_CLASS_DEVICE
2917 + ---help---
2918 + This driver provides support for backlight control on Samsung Q10
2919 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2920 +index 970654fcc48d..2d1f6a583641 100644
2921 +--- a/drivers/s390/net/qeth_core.h
2922 ++++ b/drivers/s390/net/qeth_core.h
2923 +@@ -22,6 +22,7 @@
2924 + #include <linux/hashtable.h>
2925 + #include <linux/ip.h>
2926 + #include <linux/refcount.h>
2927 ++#include <linux/workqueue.h>
2928 +
2929 + #include <net/ipv6.h>
2930 + #include <net/if_inet6.h>
2931 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2932 +index b03515d43745..56aacf32f71b 100644
2933 +--- a/drivers/s390/net/qeth_core_main.c
2934 ++++ b/drivers/s390/net/qeth_core_main.c
2935 +@@ -565,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
2936 + QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
2937 + "rc=%i\n", dev_name(&card->gdev->dev), rc);
2938 + atomic_set(&channel->irq_pending, 0);
2939 ++ qeth_release_buffer(channel, iob);
2940 + card->read_or_write_problem = 1;
2941 + qeth_schedule_recovery(card);
2942 + wake_up(&card->wait_q);
2943 +@@ -1187,6 +1188,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2944 + rc = qeth_get_problem(cdev, irb);
2945 + if (rc) {
2946 + card->read_or_write_problem = 1;
2947 ++ if (iob)
2948 ++ qeth_release_buffer(iob->channel, iob);
2949 + qeth_clear_ipacmd_list(card);
2950 + qeth_schedule_recovery(card);
2951 + goto out;
2952 +@@ -1852,6 +1855,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
2953 + QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
2954 + QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2955 + atomic_set(&channel->irq_pending, 0);
2956 ++ qeth_release_buffer(channel, iob);
2957 + wake_up(&card->wait_q);
2958 + return rc;
2959 + }
2960 +@@ -1923,6 +1927,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
2961 + rc);
2962 + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2963 + atomic_set(&channel->irq_pending, 0);
2964 ++ qeth_release_buffer(channel, iob);
2965 + wake_up(&card->wait_q);
2966 + return rc;
2967 + }
2968 +@@ -2110,6 +2115,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2969 + }
2970 + reply = qeth_alloc_reply(card);
2971 + if (!reply) {
2972 ++ qeth_release_buffer(channel, iob);
2973 + return -ENOMEM;
2974 + }
2975 + reply->callback = reply_cb;
2976 +@@ -2448,11 +2454,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2977 + return 0;
2978 + }
2979 +
2980 +-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
2981 ++static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2982 + {
2983 + if (!q)
2984 + return;
2985 +
2986 ++ qeth_clear_outq_buffers(q, 1);
2987 + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2988 + kfree(q);
2989 + }
2990 +@@ -2526,10 +2533,8 @@ out_freeoutqbufs:
2991 + card->qdio.out_qs[i]->bufs[j] = NULL;
2992 + }
2993 + out_freeoutq:
2994 +- while (i > 0) {
2995 +- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
2996 +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2997 +- }
2998 ++ while (i > 0)
2999 ++ qeth_free_output_queue(card->qdio.out_qs[--i]);
3000 + kfree(card->qdio.out_qs);
3001 + card->qdio.out_qs = NULL;
3002 + out_freepool:
3003 +@@ -2562,10 +2567,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
3004 + qeth_free_buffer_pool(card);
3005 + /* free outbound qdio_qs */
3006 + if (card->qdio.out_qs) {
3007 +- for (i = 0; i < card->qdio.no_out_queues; ++i) {
3008 +- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
3009 +- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
3010 +- }
3011 ++ for (i = 0; i < card->qdio.no_out_queues; i++)
3012 ++ qeth_free_output_queue(card->qdio.out_qs[i]);
3013 + kfree(card->qdio.out_qs);
3014 + card->qdio.out_qs = NULL;
3015 + }
3016 +diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
3017 +index 76b2fba5fba2..b7513c5848cf 100644
3018 +--- a/drivers/s390/net/qeth_l2_main.c
3019 ++++ b/drivers/s390/net/qeth_l2_main.c
3020 +@@ -854,6 +854,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
3021 +
3022 + if (cgdev->state == CCWGROUP_ONLINE)
3023 + qeth_l2_set_offline(cgdev);
3024 ++
3025 ++ cancel_work_sync(&card->close_dev_work);
3026 + if (qeth_netdev_is_registered(card->dev))
3027 + unregister_netdev(card->dev);
3028 + }
3029 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
3030 +index b7f6a8384543..7f71ca0d08e7 100644
3031 +--- a/drivers/s390/net/qeth_l3_main.c
3032 ++++ b/drivers/s390/net/qeth_l3_main.c
3033 +@@ -2611,6 +2611,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3034 + if (cgdev->state == CCWGROUP_ONLINE)
3035 + qeth_l3_set_offline(cgdev);
3036 +
3037 ++ cancel_work_sync(&card->close_dev_work);
3038 + if (qeth_netdev_is_registered(card->dev))
3039 + unregister_netdev(card->dev);
3040 + qeth_l3_clear_ip_htable(card, 0);
3041 +diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
3042 +index 6be77b3aa8a5..ac79f2088b31 100644
3043 +--- a/drivers/scsi/53c700.c
3044 ++++ b/drivers/scsi/53c700.c
3045 +@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
3046 + if(tpnt->sdev_attrs == NULL)
3047 + tpnt->sdev_attrs = NCR_700_dev_attrs;
3048 +
3049 +- memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
3050 ++ memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
3051 + GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
3052 + if(memory == NULL) {
3053 + printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
3054 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
3055 +index 6e1b022a823d..3236240a4edd 100644
3056 +--- a/drivers/scsi/aacraid/commsup.c
3057 ++++ b/drivers/scsi/aacraid/commsup.c
3058 +@@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
3059 + ADD : DELETE;
3060 + break;
3061 + }
3062 +- case AifBuManagerEvent:
3063 +- aac_handle_aif_bu(dev, aifcmd);
3064 ++ break;
3065 ++ case AifBuManagerEvent:
3066 ++ aac_handle_aif_bu(dev, aifcmd);
3067 + break;
3068 + }
3069 +
3070 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
3071 +index 350257c13a5b..bc9f2a2365f4 100644
3072 +--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
3073 ++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
3074 +@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3075 + return NULL;
3076 + }
3077 +
3078 ++ cmgr->hba = hba;
3079 + cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
3080 + GFP_KERNEL);
3081 + if (!cmgr->free_list) {
3082 +@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3083 + goto mem_err;
3084 + }
3085 +
3086 +- cmgr->hba = hba;
3087 + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
3088 +
3089 + for (i = 0; i < arr_sz; i++) {
3090 +@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3091 +
3092 + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
3093 + mem_size = num_ios * sizeof(struct io_bdt *);
3094 +- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
3095 ++ cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
3096 + if (!cmgr->io_bdt_pool) {
3097 + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
3098 + goto mem_err;
3099 +diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
3100 +index be83590ed955..ff943f477d6f 100644
3101 +--- a/drivers/scsi/libfc/fc_lport.c
3102 ++++ b/drivers/scsi/libfc/fc_lport.c
3103 +@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3104 + fc_frame_payload_op(fp) != ELS_LS_ACC) {
3105 + FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
3106 + fc_lport_error(lport, fp);
3107 +- goto err;
3108 ++ goto out;
3109 + }
3110 +
3111 + flp = fc_frame_payload_get(fp, sizeof(*flp));
3112 + if (!flp) {
3113 + FC_LPORT_DBG(lport, "FLOGI bad response\n");
3114 + fc_lport_error(lport, fp);
3115 +- goto err;
3116 ++ goto out;
3117 + }
3118 +
3119 + mfs = ntohs(flp->fl_csp.sp_bb_data) &
3120 +@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3121 + FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
3122 + "lport->mfs:%hu\n", mfs, lport->mfs);
3123 + fc_lport_error(lport, fp);
3124 +- goto err;
3125 ++ goto out;
3126 + }
3127 +
3128 + if (mfs <= lport->mfs) {
3129 +diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
3130 +index 372387a450df..1797e47fab38 100644
3131 +--- a/drivers/scsi/libfc/fc_rport.c
3132 ++++ b/drivers/scsi/libfc/fc_rport.c
3133 +@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
3134 + struct fc_rport_priv *rdata;
3135 +
3136 + rdata = container_of(kref, struct fc_rport_priv, kref);
3137 +- WARN_ON(!list_empty(&rdata->peers));
3138 + kfree_rcu(rdata, rcu);
3139 + }
3140 + EXPORT_SYMBOL(fc_rport_destroy);
3141 +diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
3142 +index 60bcc6df97a9..65305b3848bc 100644
3143 +--- a/drivers/scsi/scsi_debug.c
3144 ++++ b/drivers/scsi/scsi_debug.c
3145 +@@ -62,7 +62,7 @@
3146 +
3147 + /* make sure inq_product_rev string corresponds to this version */
3148 + #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
3149 +-static const char *sdebug_version_date = "20180128";
3150 ++static const char *sdebug_version_date = "20190125";
3151 +
3152 + #define MY_NAME "scsi_debug"
3153 +
3154 +@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
3155 + (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
3156 + }
3157 +
3158 +-static void *fake_store(unsigned long long lba)
3159 ++static void *lba2fake_store(unsigned long long lba)
3160 + {
3161 + lba = do_div(lba, sdebug_store_sectors);
3162 +
3163 +@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
3164 + return ret;
3165 + }
3166 +
3167 +-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
3168 +- * arr into fake_store(lba,num) and return true. If comparison fails then
3169 ++/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
3170 ++ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
3171 + * return false. */
3172 + static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
3173 + {
3174 +@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
3175 + if (sdt->app_tag == cpu_to_be16(0xffff))
3176 + continue;
3177 +
3178 +- ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
3179 ++ ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
3180 + if (ret) {
3181 + dif_errors++;
3182 + return ret;
3183 +@@ -3261,10 +3261,12 @@ err_out:
3184 + static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3185 + u32 ei_lba, bool unmap, bool ndob)
3186 + {
3187 ++ int ret;
3188 + unsigned long iflags;
3189 + unsigned long long i;
3190 +- int ret;
3191 +- u64 lba_off;
3192 ++ u32 lb_size = sdebug_sector_size;
3193 ++ u64 block, lbaa;
3194 ++ u8 *fs1p;
3195 +
3196 + ret = check_device_access_params(scp, lba, num);
3197 + if (ret)
3198 +@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3199 + unmap_region(lba, num);
3200 + goto out;
3201 + }
3202 +-
3203 +- lba_off = lba * sdebug_sector_size;
3204 ++ lbaa = lba;
3205 ++ block = do_div(lbaa, sdebug_store_sectors);
3206 + /* if ndob then zero 1 logical block, else fetch 1 logical block */
3207 ++ fs1p = fake_storep + (block * lb_size);
3208 + if (ndob) {
3209 +- memset(fake_storep + lba_off, 0, sdebug_sector_size);
3210 ++ memset(fs1p, 0, lb_size);
3211 + ret = 0;
3212 + } else
3213 +- ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3214 +- sdebug_sector_size);
3215 ++ ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3216 +
3217 + if (-1 == ret) {
3218 + write_unlock_irqrestore(&atomic_rw, iflags);
3219 + return DID_ERROR << 16;
3220 +- } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3221 ++ } else if (sdebug_verbose && !ndob && (ret < lb_size))
3222 + sdev_printk(KERN_INFO, scp->device,
3223 + "%s: %s: lb size=%u, IO sent=%d bytes\n",
3224 +- my_name, "write same",
3225 +- sdebug_sector_size, ret);
3226 ++ my_name, "write same", lb_size, ret);
3227 +
3228 + /* Copy first sector to remaining blocks */
3229 +- for (i = 1 ; i < num ; i++)
3230 +- memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3231 +- fake_storep + lba_off,
3232 +- sdebug_sector_size);
3233 +-
3234 ++ for (i = 1 ; i < num ; i++) {
3235 ++ lbaa = lba + i;
3236 ++ block = do_div(lbaa, sdebug_store_sectors);
3237 ++ memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3238 ++ }
3239 + if (scsi_debug_lbp())
3240 + map_region(lba, num);
3241 + out:
3242 +diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
3243 +index 8cc015183043..a4ac6073c555 100644
3244 +--- a/drivers/soc/fsl/qbman/qman.c
3245 ++++ b/drivers/soc/fsl/qbman/qman.c
3246 +@@ -1081,18 +1081,19 @@ static void qm_mr_process_task(struct work_struct *work);
3247 + static irqreturn_t portal_isr(int irq, void *ptr)
3248 + {
3249 + struct qman_portal *p = ptr;
3250 +-
3251 +- u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
3252 + u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
3253 ++ u32 clear = 0;
3254 +
3255 + if (unlikely(!is))
3256 + return IRQ_NONE;
3257 +
3258 + /* DQRR-handling if it's interrupt-driven */
3259 +- if (is & QM_PIRQ_DQRI)
3260 ++ if (is & QM_PIRQ_DQRI) {
3261 + __poll_portal_fast(p, QMAN_POLL_LIMIT);
3262 ++ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
3263 ++ }
3264 + /* Handling of anything else that's interrupt-driven */
3265 +- clear |= __poll_portal_slow(p, is);
3266 ++ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
3267 + qm_out(&p->p, QM_REG_ISR, clear);
3268 + return IRQ_HANDLED;
3269 + }
3270 +diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
3271 +index 9e7815f55a17..7448744cc515 100644
3272 +--- a/drivers/staging/erofs/inode.c
3273 ++++ b/drivers/staging/erofs/inode.c
3274 +@@ -184,16 +184,16 @@ static int fill_inode(struct inode *inode, int isdir)
3275 + /* setup the new inode */
3276 + if (S_ISREG(inode->i_mode)) {
3277 + #ifdef CONFIG_EROFS_FS_XATTR
3278 +- if (vi->xattr_isize)
3279 +- inode->i_op = &erofs_generic_xattr_iops;
3280 ++ inode->i_op = &erofs_generic_xattr_iops;
3281 + #endif
3282 + inode->i_fop = &generic_ro_fops;
3283 + } else if (S_ISDIR(inode->i_mode)) {
3284 + inode->i_op =
3285 + #ifdef CONFIG_EROFS_FS_XATTR
3286 +- vi->xattr_isize ? &erofs_dir_xattr_iops :
3287 +-#endif
3288 ++ &erofs_dir_xattr_iops;
3289 ++#else
3290 + &erofs_dir_iops;
3291 ++#endif
3292 + inode->i_fop = &erofs_dir_fops;
3293 + } else if (S_ISLNK(inode->i_mode)) {
3294 + /* by default, page_get_link is used for symlink */
3295 +diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
3296 +index 9f44ed8f0023..c70f0c5237ea 100644
3297 +--- a/drivers/staging/erofs/internal.h
3298 ++++ b/drivers/staging/erofs/internal.h
3299 +@@ -327,12 +327,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
3300 + return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
3301 + }
3302 +
3303 +-#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
3304 +-#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
3305 ++/* atomic flag definitions */
3306 ++#define EROFS_V_EA_INITED_BIT 0
3307 ++
3308 ++/* bitlock definitions (arranged in reverse order) */
3309 ++#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
3310 +
3311 + struct erofs_vnode {
3312 + erofs_nid_t nid;
3313 +- unsigned int flags;
3314 ++
3315 ++ /* atomic flags (including bitlocks) */
3316 ++ unsigned long flags;
3317 +
3318 + unsigned char data_mapping_mode;
3319 + /* inline size in bytes */
3320 +@@ -485,8 +490,9 @@ struct erofs_map_blocks_iter {
3321 + };
3322 +
3323 +
3324 +-static inline struct page *erofs_get_inline_page(struct inode *inode,
3325 +- erofs_blk_t blkaddr)
3326 ++static inline struct page *
3327 ++erofs_get_inline_page(struct inode *inode,
3328 ++ erofs_blk_t blkaddr)
3329 + {
3330 + return erofs_get_meta_page(inode->i_sb,
3331 + blkaddr, S_ISDIR(inode->i_mode));
3332 +diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
3333 +index 546a47156101..023f64fa2c87 100644
3334 +--- a/drivers/staging/erofs/namei.c
3335 ++++ b/drivers/staging/erofs/namei.c
3336 +@@ -15,74 +15,77 @@
3337 +
3338 + #include <trace/events/erofs.h>
3339 +
3340 +-/* based on the value of qn->len is accurate */
3341 +-static inline int dirnamecmp(struct qstr *qn,
3342 +- struct qstr *qd, unsigned *matched)
3343 ++struct erofs_qstr {
3344 ++ const unsigned char *name;
3345 ++ const unsigned char *end;
3346 ++};
3347 ++
3348 ++/* assumes the end of qn is accurate and that qn has a trailing '\0' */
3349 ++static inline int dirnamecmp(const struct erofs_qstr *qn,
3350 ++ const struct erofs_qstr *qd,
3351 ++ unsigned int *matched)
3352 + {
3353 +- unsigned i = *matched, len = min(qn->len, qd->len);
3354 +-loop:
3355 +- if (unlikely(i >= len)) {
3356 +- *matched = i;
3357 +- if (qn->len < qd->len) {
3358 +- /*
3359 +- * actually (qn->len == qd->len)
3360 +- * when qd->name[i] == '\0'
3361 +- */
3362 +- return qd->name[i] == '\0' ? 0 : -1;
3363 ++ unsigned int i = *matched;
3364 ++
3365 ++ /*
3366 ++ * on-disk error, let's only BUG_ON in the debugging mode.
3367 ++ * otherwise, it will return 1 to just skip the invalid name
3368 ++ * and go on (in consideration of the lookup performance).
3369 ++ */
3370 ++ DBG_BUGON(qd->name > qd->end);
3371 ++
3372 ++ /* qd may not have a trailing '\0' */
3373 ++ /* However it is absolutely safe if < qd->end */
3374 ++ while (qd->name + i < qd->end && qd->name[i] != '\0') {
3375 ++ if (qn->name[i] != qd->name[i]) {
3376 ++ *matched = i;
3377 ++ return qn->name[i] > qd->name[i] ? 1 : -1;
3378 + }
3379 +- return (qn->len > qd->len);
3380 ++ ++i;
3381 + }
3382 +-
3383 +- if (qn->name[i] != qd->name[i]) {
3384 +- *matched = i;
3385 +- return qn->name[i] > qd->name[i] ? 1 : -1;
3386 +- }
3387 +-
3388 +- ++i;
3389 +- goto loop;
3390 ++ *matched = i;
3391 ++ /* See comments in __d_alloc on the terminating NUL character */
3392 ++ return qn->name[i] == '\0' ? 0 : 1;
3393 + }
3394 +
3395 +-static struct erofs_dirent *find_target_dirent(
3396 +- struct qstr *name,
3397 +- u8 *data, int maxsize)
3398 ++#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
3399 ++
3400 ++static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
3401 ++ u8 *data,
3402 ++ unsigned int dirblksize,
3403 ++ const int ndirents)
3404 + {
3405 +- unsigned ndirents, head, back;
3406 +- unsigned startprfx, endprfx;
3407 ++ int head, back;
3408 ++ unsigned int startprfx, endprfx;
3409 + struct erofs_dirent *const de = (struct erofs_dirent *)data;
3410 +
3411 +- /* make sure that maxsize is valid */
3412 +- BUG_ON(maxsize < sizeof(struct erofs_dirent));
3413 +-
3414 +- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
3415 +-
3416 +- /* corrupted dir (may be unnecessary...) */
3417 +- BUG_ON(!ndirents);
3418 +-
3419 +- head = 0;
3420 ++ /* since the 1st dirent has been evaluated previously */
3421 ++ head = 1;
3422 + back = ndirents - 1;
3423 + startprfx = endprfx = 0;
3424 +
3425 + while (head <= back) {
3426 +- unsigned mid = head + (back - head) / 2;
3427 +- unsigned nameoff = le16_to_cpu(de[mid].nameoff);
3428 +- unsigned matched = min(startprfx, endprfx);
3429 +-
3430 +- struct qstr dname = QSTR_INIT(data + nameoff,
3431 +- unlikely(mid >= ndirents - 1) ?
3432 +- maxsize - nameoff :
3433 +- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
3434 ++ const int mid = head + (back - head) / 2;
3435 ++ const int nameoff = nameoff_from_disk(de[mid].nameoff,
3436 ++ dirblksize);
3437 ++ unsigned int matched = min(startprfx, endprfx);
3438 ++ struct erofs_qstr dname = {
3439 ++ .name = data + nameoff,
3440 ++ .end = unlikely(mid >= ndirents - 1) ?
3441 ++ data + dirblksize :
3442 ++ data + nameoff_from_disk(de[mid + 1].nameoff,
3443 ++ dirblksize)
3444 ++ };
3445 +
3446 + /* string comparison without already matched prefix */
3447 + int ret = dirnamecmp(name, &dname, &matched);
3448 +
3449 +- if (unlikely(!ret))
3450 ++ if (unlikely(!ret)) {
3451 + return de + mid;
3452 +- else if (ret > 0) {
3453 ++ } else if (ret > 0) {
3454 + head = mid + 1;
3455 + startprfx = matched;
3456 +- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
3457 +- break;
3458 +- else {
3459 ++ } else {
3460 + back = mid - 1;
3461 + endprfx = matched;
3462 + }
3463 +@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
3464 + return ERR_PTR(-ENOENT);
3465 + }
3466 +
3467 +-static struct page *find_target_block_classic(
3468 +- struct inode *dir,
3469 +- struct qstr *name, int *_diff)
3470 ++static struct page *find_target_block_classic(struct inode *dir,
3471 ++ struct erofs_qstr *name,
3472 ++ int *_ndirents)
3473 + {
3474 +- unsigned startprfx, endprfx;
3475 +- unsigned head, back;
3476 ++ unsigned int startprfx, endprfx;
3477 ++ int head, back;
3478 + struct address_space *const mapping = dir->i_mapping;
3479 + struct page *candidate = ERR_PTR(-ENOENT);
3480 +
3481 +@@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
3482 + back = inode_datablocks(dir) - 1;
3483 +
3484 + while (head <= back) {
3485 +- unsigned mid = head + (back - head) / 2;
3486 ++ const int mid = head + (back - head) / 2;
3487 + struct page *page = read_mapping_page(mapping, mid, NULL);
3488 +
3489 +- if (IS_ERR(page)) {
3490 +-exact_out:
3491 +- if (!IS_ERR(candidate)) /* valid candidate */
3492 +- put_page(candidate);
3493 +- return page;
3494 +- } else {
3495 +- int diff;
3496 +- unsigned ndirents, matched;
3497 +- struct qstr dname;
3498 ++ if (!IS_ERR(page)) {
3499 + struct erofs_dirent *de = kmap_atomic(page);
3500 +- unsigned nameoff = le16_to_cpu(de->nameoff);
3501 +-
3502 +- ndirents = nameoff / sizeof(*de);
3503 ++ const int nameoff = nameoff_from_disk(de->nameoff,
3504 ++ EROFS_BLKSIZ);
3505 ++ const int ndirents = nameoff / sizeof(*de);
3506 ++ int diff;
3507 ++ unsigned int matched;
3508 ++ struct erofs_qstr dname;
3509 +
3510 +- /* corrupted dir (should have one entry at least) */
3511 +- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
3512 ++ if (unlikely(!ndirents)) {
3513 ++ DBG_BUGON(1);
3514 ++ kunmap_atomic(de);
3515 ++ put_page(page);
3516 ++ page = ERR_PTR(-EIO);
3517 ++ goto out;
3518 ++ }
3519 +
3520 + matched = min(startprfx, endprfx);
3521 +
3522 + dname.name = (u8 *)de + nameoff;
3523 +- dname.len = ndirents == 1 ?
3524 +- /* since the rest of the last page is 0 */
3525 +- EROFS_BLKSIZ - nameoff
3526 +- : le16_to_cpu(de[1].nameoff) - nameoff;
3527 ++ if (ndirents == 1)
3528 ++ dname.end = (u8 *)de + EROFS_BLKSIZ;
3529 ++ else
3530 ++ dname.end = (u8 *)de +
3531 ++ nameoff_from_disk(de[1].nameoff,
3532 ++ EROFS_BLKSIZ);
3533 +
3534 + /* string comparison without already matched prefix */
3535 + diff = dirnamecmp(name, &dname, &matched);
3536 + kunmap_atomic(de);
3537 +
3538 + if (unlikely(!diff)) {
3539 +- *_diff = 0;
3540 +- goto exact_out;
3541 ++ *_ndirents = 0;
3542 ++ goto out;
3543 + } else if (diff > 0) {
3544 + head = mid + 1;
3545 + startprfx = matched;
3546 +@@ -147,45 +152,51 @@ exact_out:
3547 + if (likely(!IS_ERR(candidate)))
3548 + put_page(candidate);
3549 + candidate = page;
3550 ++ *_ndirents = ndirents;
3551 + } else {
3552 + put_page(page);
3553 +
3554 +- if (unlikely(mid < 1)) /* fix "mid" overflow */
3555 +- break;
3556 +-
3557 + back = mid - 1;
3558 + endprfx = matched;
3559 + }
3560 ++ continue;
3561 + }
3562 ++out: /* free if the candidate is valid */
3563 ++ if (!IS_ERR(candidate))
3564 ++ put_page(candidate);
3565 ++ return page;
3566 + }
3567 +- *_diff = 1;
3568 + return candidate;
3569 + }
3570 +
3571 + int erofs_namei(struct inode *dir,
3572 +- struct qstr *name,
3573 +- erofs_nid_t *nid, unsigned *d_type)
3574 ++ struct qstr *name,
3575 ++ erofs_nid_t *nid, unsigned int *d_type)
3576 + {
3577 +- int diff;
3578 ++ int ndirents;
3579 + struct page *page;
3580 +- u8 *data;
3581 ++ void *data;
3582 + struct erofs_dirent *de;
3583 ++ struct erofs_qstr qn;
3584 +
3585 + if (unlikely(!dir->i_size))
3586 + return -ENOENT;
3587 +
3588 +- diff = 1;
3589 +- page = find_target_block_classic(dir, name, &diff);
3590 ++ qn.name = name->name;
3591 ++ qn.end = name->name + name->len;
3592 ++
3593 ++ ndirents = 0;
3594 ++ page = find_target_block_classic(dir, &qn, &ndirents);
3595 +
3596 + if (unlikely(IS_ERR(page)))
3597 + return PTR_ERR(page);
3598 +
3599 + data = kmap_atomic(page);
3600 + /* the target page has been mapped */
3601 +- de = likely(diff) ?
3602 +- /* since the rest of the last page is 0 */
3603 +- find_target_dirent(name, data, EROFS_BLKSIZ) :
3604 +- (struct erofs_dirent *)data;
3605 ++ if (ndirents)
3606 ++ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
3607 ++ else
3608 ++ de = (struct erofs_dirent *)data;
3609 +
3610 + if (likely(!IS_ERR(de))) {
3611 + *nid = le64_to_cpu(de->nid);
3612 +diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
3613 +index 0e9cfeccdf99..2db99cff3c99 100644
3614 +--- a/drivers/staging/erofs/xattr.c
3615 ++++ b/drivers/staging/erofs/xattr.c
3616 +@@ -24,36 +24,77 @@ struct xattr_iter {
3617 +
3618 + static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
3619 + {
3620 +- /* only init_inode_xattrs use non-atomic once */
3621 ++ /* the only user of kunmap() is 'init_inode_xattrs' */
3622 + if (unlikely(!atomic))
3623 + kunmap(it->page);
3624 + else
3625 + kunmap_atomic(it->kaddr);
3626 ++
3627 + unlock_page(it->page);
3628 + put_page(it->page);
3629 + }
3630 +
3631 +-static void init_inode_xattrs(struct inode *inode)
3632 ++static inline void xattr_iter_end_final(struct xattr_iter *it)
3633 ++{
3634 ++ if (!it->page)
3635 ++ return;
3636 ++
3637 ++ xattr_iter_end(it, true);
3638 ++}
3639 ++
3640 ++static int init_inode_xattrs(struct inode *inode)
3641 + {
3642 ++ struct erofs_vnode *const vi = EROFS_V(inode);
3643 + struct xattr_iter it;
3644 + unsigned i;
3645 + struct erofs_xattr_ibody_header *ih;
3646 + struct erofs_sb_info *sbi;
3647 +- struct erofs_vnode *vi;
3648 + bool atomic_map;
3649 ++ int ret = 0;
3650 +
3651 +- if (likely(inode_has_inited_xattr(inode)))
3652 +- return;
3653 ++ /* in most cases, the xattrs of this inode have already been initialized. */
3654 ++ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
3655 ++ return 0;
3656 ++
3657 ++ if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
3658 ++ return -ERESTARTSYS;
3659 +
3660 +- vi = EROFS_V(inode);
3661 +- BUG_ON(!vi->xattr_isize);
3662 ++ /* someone has initialized xattrs for us? */
3663 ++ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
3664 ++ goto out_unlock;
3665 ++
3666 ++ /*
3667 ++ * bypass all xattr operations if ->xattr_isize is not greater than
3668 ++ * sizeof(struct erofs_xattr_ibody_header), in detail:
3669 ++ * 1) it is not enough to contain erofs_xattr_ibody_header then
3670 ++ * ->xattr_isize should be 0 (it means no xattr);
3671 ++ * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
3672 ++ * undefined right now (maybe use later with some new sb feature).
3673 ++ */
3674 ++ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
3675 ++ errln("xattr_isize %d of nid %llu is not supported yet",
3676 ++ vi->xattr_isize, vi->nid);
3677 ++ ret = -ENOTSUPP;
3678 ++ goto out_unlock;
3679 ++ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
3680 ++ if (unlikely(vi->xattr_isize)) {
3681 ++ DBG_BUGON(1);
3682 ++ ret = -EIO;
3683 ++ goto out_unlock; /* xattr ondisk layout error */
3684 ++ }
3685 ++ ret = -ENOATTR;
3686 ++ goto out_unlock;
3687 ++ }
3688 +
3689 + sbi = EROFS_I_SB(inode);
3690 + it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
3691 + it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
3692 +
3693 + it.page = erofs_get_inline_page(inode, it.blkaddr);
3694 +- BUG_ON(IS_ERR(it.page));
3695 ++ if (IS_ERR(it.page)) {
3696 ++ ret = PTR_ERR(it.page);
3697 ++ goto out_unlock;
3698 ++ }
3699 +
3700 + /* read in shared xattr array (non-atomic, see kmalloc below) */
3701 + it.kaddr = kmap(it.page);
3702 +@@ -62,9 +103,13 @@ static void init_inode_xattrs(struct inode *inode)
3703 + ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
3704 +
3705 + vi->xattr_shared_count = ih->h_shared_count;
3706 +- vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
3707 +- vi->xattr_shared_count, sizeof(unsigned),
3708 +- GFP_KERNEL | __GFP_NOFAIL);
3709 ++ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
3710 ++ sizeof(uint), GFP_KERNEL);
3711 ++ if (!vi->xattr_shared_xattrs) {
3712 ++ xattr_iter_end(&it, atomic_map);
3713 ++ ret = -ENOMEM;
3714 ++ goto out_unlock;
3715 ++ }
3716 +
3717 + /* let's skip ibody header */
3718 + it.ofs += sizeof(struct erofs_xattr_ibody_header);
3719 +@@ -77,7 +122,12 @@ static void init_inode_xattrs(struct inode *inode)
3720 +
3721 + it.page = erofs_get_meta_page(inode->i_sb,
3722 + ++it.blkaddr, S_ISDIR(inode->i_mode));
3723 +- BUG_ON(IS_ERR(it.page));
3724 ++ if (IS_ERR(it.page)) {
3725 ++ kfree(vi->xattr_shared_xattrs);
3726 ++ vi->xattr_shared_xattrs = NULL;
3727 ++ ret = PTR_ERR(it.page);
3728 ++ goto out_unlock;
3729 ++ }
3730 +
3731 + it.kaddr = kmap_atomic(it.page);
3732 + atomic_map = true;
3733 +@@ -89,7 +139,11 @@ static void init_inode_xattrs(struct inode *inode)
3734 + }
3735 + xattr_iter_end(&it, atomic_map);
3736 +
3737 +- inode_set_inited_xattr(inode);
3738 ++ set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
3739 ++
3740 ++out_unlock:
3741 ++ clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
3742 ++ return ret;
3743 + }
3744 +
3745 + struct xattr_iter_handlers {
3746 +@@ -99,18 +153,25 @@ struct xattr_iter_handlers {
3747 + void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
3748 + };
3749 +
3750 +-static void xattr_iter_fixup(struct xattr_iter *it)
3751 ++static inline int xattr_iter_fixup(struct xattr_iter *it)
3752 + {
3753 +- if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
3754 +- xattr_iter_end(it, true);
3755 ++ if (it->ofs < EROFS_BLKSIZ)
3756 ++ return 0;
3757 +
3758 +- it->blkaddr += erofs_blknr(it->ofs);
3759 +- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
3760 +- BUG_ON(IS_ERR(it->page));
3761 ++ xattr_iter_end(it, true);
3762 +
3763 +- it->kaddr = kmap_atomic(it->page);
3764 +- it->ofs = erofs_blkoff(it->ofs);
3765 ++ it->blkaddr += erofs_blknr(it->ofs);
3766 ++ it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
3767 ++ if (IS_ERR(it->page)) {
3768 ++ int err = PTR_ERR(it->page);
3769 ++
3770 ++ it->page = NULL;
3771 ++ return err;
3772 + }
3773 ++
3774 ++ it->kaddr = kmap_atomic(it->page);
3775 ++ it->ofs = erofs_blkoff(it->ofs);
3776 ++ return 0;
3777 + }
3778 +
3779 + static int inline_xattr_iter_begin(struct xattr_iter *it,
3780 +@@ -132,21 +193,24 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
3781 + it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
3782 +
3783 + it->page = erofs_get_inline_page(inode, it->blkaddr);
3784 +- BUG_ON(IS_ERR(it->page));
3785 +- it->kaddr = kmap_atomic(it->page);
3786 ++ if (IS_ERR(it->page))
3787 ++ return PTR_ERR(it->page);
3788 +
3789 ++ it->kaddr = kmap_atomic(it->page);
3790 + return vi->xattr_isize - xattr_header_sz;
3791 + }
3792 +
3793 + static int xattr_foreach(struct xattr_iter *it,
3794 +- struct xattr_iter_handlers *op, unsigned *tlimit)
3795 ++ const struct xattr_iter_handlers *op, unsigned int *tlimit)
3796 + {
3797 + struct erofs_xattr_entry entry;
3798 + unsigned value_sz, processed, slice;
3799 + int err;
3800 +
3801 + /* 0. fixup blkaddr, ofs, ipage */
3802 +- xattr_iter_fixup(it);
3803 ++ err = xattr_iter_fixup(it);
3804 ++ if (err)
3805 ++ return err;
3806 +
3807 + /*
3808 + * 1. read xattr entry to the memory,
3809 +@@ -178,7 +242,9 @@ static int xattr_foreach(struct xattr_iter *it,
3810 + if (it->ofs >= EROFS_BLKSIZ) {
3811 + BUG_ON(it->ofs > EROFS_BLKSIZ);
3812 +
3813 +- xattr_iter_fixup(it);
3814 ++ err = xattr_iter_fixup(it);
3815 ++ if (err)
3816 ++ goto out;
3817 + it->ofs = 0;
3818 + }
3819 +
3820 +@@ -210,7 +276,10 @@ static int xattr_foreach(struct xattr_iter *it,
3821 + while (processed < value_sz) {
3822 + if (it->ofs >= EROFS_BLKSIZ) {
3823 + BUG_ON(it->ofs > EROFS_BLKSIZ);
3824 +- xattr_iter_fixup(it);
3825 ++
3826 ++ err = xattr_iter_fixup(it);
3827 ++ if (err)
3828 ++ goto out;
3829 + it->ofs = 0;
3830 + }
3831 +
3832 +@@ -270,7 +339,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
3833 + memcpy(it->buffer + processed, buf, len);
3834 + }
3835 +
3836 +-static struct xattr_iter_handlers find_xattr_handlers = {
3837 ++static const struct xattr_iter_handlers find_xattr_handlers = {
3838 + .entry = xattr_entrymatch,
3839 + .name = xattr_namematch,
3840 + .alloc_buffer = xattr_checkbuffer,
3841 +@@ -291,8 +360,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
3842 + ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
3843 + if (ret >= 0)
3844 + break;
3845 ++
3846 ++ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
3847 ++ break;
3848 + }
3849 +- xattr_iter_end(&it->it, true);
3850 ++ xattr_iter_end_final(&it->it);
3851 +
3852 + return ret < 0 ? ret : it->buffer_size;
3853 + }
3854 +@@ -315,8 +387,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
3855 + xattr_iter_end(&it->it, true);
3856 +
3857 + it->it.page = erofs_get_meta_page(inode->i_sb,
3858 +- blkaddr, false);
3859 +- BUG_ON(IS_ERR(it->it.page));
3860 ++ blkaddr, false);
3861 ++ if (IS_ERR(it->it.page))
3862 ++ return PTR_ERR(it->it.page);
3863 ++
3864 + it->it.kaddr = kmap_atomic(it->it.page);
3865 + it->it.blkaddr = blkaddr;
3866 + }
3867 +@@ -324,9 +398,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
3868 + ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
3869 + if (ret >= 0)
3870 + break;
3871 ++
3872 ++ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
3873 ++ break;
3874 + }
3875 + if (vi->xattr_shared_count)
3876 +- xattr_iter_end(&it->it, true);
3877 ++ xattr_iter_end_final(&it->it);
3878 +
3879 + return ret < 0 ? ret : it->buffer_size;
3880 + }
3881 +@@ -351,7 +428,9 @@ int erofs_getxattr(struct inode *inode, int index,
3882 + if (unlikely(name == NULL))
3883 + return -EINVAL;
3884 +
3885 +- init_inode_xattrs(inode);
3886 ++ ret = init_inode_xattrs(inode);
3887 ++ if (ret)
3888 ++ return ret;
3889 +
3890 + it.index = index;
3891 +
3892 +@@ -374,7 +453,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
3893 + struct dentry *unused, struct inode *inode,
3894 + const char *name, void *buffer, size_t size)
3895 + {
3896 +- struct erofs_vnode *const vi = EROFS_V(inode);
3897 + struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
3898 +
3899 + switch (handler->flags) {
3900 +@@ -392,9 +470,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
3901 + return -EINVAL;
3902 + }
3903 +
3904 +- if (!vi->xattr_isize)
3905 +- return -ENOATTR;
3906 +-
3907 + return erofs_getxattr(inode, handler->flags, name, buffer, size);
3908 + }
3909 +
3910 +@@ -494,7 +569,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
3911 + return 1;
3912 + }
3913 +
3914 +-static struct xattr_iter_handlers list_xattr_handlers = {
3915 ++static const struct xattr_iter_handlers list_xattr_handlers = {
3916 + .entry = xattr_entrylist,
3917 + .name = xattr_namelist,
3918 + .alloc_buffer = xattr_skipvalue,
3919 +@@ -516,7 +591,7 @@ static int inline_listxattr(struct listxattr_iter *it)
3920 + if (ret < 0)
3921 + break;
3922 + }
3923 +- xattr_iter_end(&it->it, true);
3924 ++ xattr_iter_end_final(&it->it);
3925 + return ret < 0 ? ret : it->buffer_ofs;
3926 + }
3927 +
3928 +@@ -538,8 +613,10 @@ static int shared_listxattr(struct listxattr_iter *it)
3929 + xattr_iter_end(&it->it, true);
3930 +
3931 + it->it.page = erofs_get_meta_page(inode->i_sb,
3932 +- blkaddr, false);
3933 +- BUG_ON(IS_ERR(it->it.page));
3934 ++ blkaddr, false);
3935 ++ if (IS_ERR(it->it.page))
3936 ++ return PTR_ERR(it->it.page);
3937 ++
3938 + it->it.kaddr = kmap_atomic(it->it.page);
3939 + it->it.blkaddr = blkaddr;
3940 + }
3941 +@@ -549,7 +626,7 @@ static int shared_listxattr(struct listxattr_iter *it)
3942 + break;
3943 + }
3944 + if (vi->xattr_shared_count)
3945 +- xattr_iter_end(&it->it, true);
3946 ++ xattr_iter_end_final(&it->it);
3947 +
3948 + return ret < 0 ? ret : it->buffer_ofs;
3949 + }
3950 +@@ -560,7 +637,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
3951 + int ret;
3952 + struct listxattr_iter it;
3953 +
3954 +- init_inode_xattrs(d_inode(dentry));
3955 ++ ret = init_inode_xattrs(d_inode(dentry));
3956 ++ if (ret)
3957 ++ return ret;
3958 +
3959 + it.dentry = dentry;
3960 + it.buffer = buffer;
3961 +diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
3962 +index d7312eed6088..91ea3083e7ad 100644
3963 +--- a/drivers/usb/phy/Kconfig
3964 ++++ b/drivers/usb/phy/Kconfig
3965 +@@ -21,7 +21,7 @@ config AB8500_USB
3966 +
3967 + config FSL_USB2_OTG
3968 + bool "Freescale USB OTG Transceiver Driver"
3969 +- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
3970 ++ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
3971 + depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
3972 + select USB_PHY
3973 + help
3974 +diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
3975 +index d441244b79df..28d9c2b1b3bb 100644
3976 +--- a/fs/autofs/expire.c
3977 ++++ b/fs/autofs/expire.c
3978 +@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
3979 + pkt.len = dentry->d_name.len;
3980 + memcpy(pkt.name, dentry->d_name.name, pkt.len);
3981 + pkt.name[pkt.len] = '\0';
3982 +- dput(dentry);
3983 +
3984 + if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
3985 + ret = -EFAULT;
3986 +@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
3987 + complete_all(&ino->expire_complete);
3988 + spin_unlock(&sbi->fs_lock);
3989 +
3990 ++ dput(dentry);
3991 ++
3992 + return ret;
3993 + }
3994 +
3995 +diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
3996 +index 846c052569dd..3c14a8e45ffb 100644
3997 +--- a/fs/autofs/inode.c
3998 ++++ b/fs/autofs/inode.c
3999 +@@ -255,8 +255,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
4000 + }
4001 + root_inode = autofs_get_inode(s, S_IFDIR | 0755);
4002 + root = d_make_root(root_inode);
4003 +- if (!root)
4004 ++ if (!root) {
4005 ++ ret = -ENOMEM;
4006 + goto fail_ino;
4007 ++ }
4008 + pipe = NULL;
4009 +
4010 + root->d_fsdata = ino;
4011 +diff --git a/fs/buffer.c b/fs/buffer.c
4012 +index 6f1ae3ac9789..c083c4b3c1e7 100644
4013 +--- a/fs/buffer.c
4014 ++++ b/fs/buffer.c
4015 +@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
4016 + struct buffer_head *head;
4017 + struct page *page;
4018 + int all_mapped = 1;
4019 ++ static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
4020 +
4021 + index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
4022 + page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
4023 +@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
4024 + * file io on the block device and getblk. It gets dealt with
4025 + * elsewhere, don't buffer_error if we had some unmapped buffers
4026 + */
4027 +- if (all_mapped) {
4028 +- printk("__find_get_block_slow() failed. "
4029 +- "block=%llu, b_blocknr=%llu\n",
4030 +- (unsigned long long)block,
4031 +- (unsigned long long)bh->b_blocknr);
4032 +- printk("b_state=0x%08lx, b_size=%zu\n",
4033 +- bh->b_state, bh->b_size);
4034 +- printk("device %pg blocksize: %d\n", bdev,
4035 +- 1 << bd_inode->i_blkbits);
4036 ++ ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
4037 ++ if (all_mapped && __ratelimit(&last_warned)) {
4038 ++ printk("__find_get_block_slow() failed. block=%llu, "
4039 ++ "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
4040 ++ "device %pg blocksize: %d\n",
4041 ++ (unsigned long long)block,
4042 ++ (unsigned long long)bh->b_blocknr,
4043 ++ bh->b_state, bh->b_size, bdev,
4044 ++ 1 << bd_inode->i_blkbits);
4045 + }
4046 + out_unlock:
4047 + spin_unlock(&bd_mapping->private_lock);
4048 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4049 +index 1e5a1171212f..a2d701775c49 100644
4050 +--- a/fs/cifs/smb2pdu.c
4051 ++++ b/fs/cifs/smb2pdu.c
4052 +@@ -2243,10 +2243,12 @@ SMB2_open_free(struct smb_rqst *rqst)
4053 + {
4054 + int i;
4055 +
4056 +- cifs_small_buf_release(rqst->rq_iov[0].iov_base);
4057 +- for (i = 1; i < rqst->rq_nvec; i++)
4058 +- if (rqst->rq_iov[i].iov_base != smb2_padding)
4059 +- kfree(rqst->rq_iov[i].iov_base);
4060 ++ if (rqst && rqst->rq_iov) {
4061 ++ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
4062 ++ for (i = 1; i < rqst->rq_nvec; i++)
4063 ++ if (rqst->rq_iov[i].iov_base != smb2_padding)
4064 ++ kfree(rqst->rq_iov[i].iov_base);
4065 ++ }
4066 + }
4067 +
4068 + int
4069 +@@ -2535,7 +2537,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
4070 + void
4071 + SMB2_close_free(struct smb_rqst *rqst)
4072 + {
4073 +- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4074 ++ if (rqst && rqst->rq_iov)
4075 ++ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4076 + }
4077 +
4078 + int
4079 +@@ -2685,7 +2688,8 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
4080 + void
4081 + SMB2_query_info_free(struct smb_rqst *rqst)
4082 + {
4083 +- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4084 ++ if (rqst && rqst->rq_iov)
4085 ++ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4086 + }
4087 +
4088 + static int
4089 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
4090 +index 8fb7887f2b3d..437257d1116f 100644
4091 +--- a/fs/cifs/smb2pdu.h
4092 ++++ b/fs/cifs/smb2pdu.h
4093 +@@ -84,8 +84,8 @@
4094 +
4095 + #define NUMBER_OF_SMB2_COMMANDS 0x0013
4096 +
4097 +-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
4098 +-#define MAX_SMB2_HDR_SIZE 0x00b0
4099 ++/* 52 transform hdr + 64 hdr + 88 create rsp */
4100 ++#define MAX_SMB2_HDR_SIZE 204
4101 +
4102 + #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
4103 + #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
4104 +diff --git a/fs/drop_caches.c b/fs/drop_caches.c
4105 +index 82377017130f..d31b6c72b476 100644
4106 +--- a/fs/drop_caches.c
4107 ++++ b/fs/drop_caches.c
4108 +@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4109 + spin_lock(&sb->s_inode_list_lock);
4110 + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
4111 + spin_lock(&inode->i_lock);
4112 ++ /*
4113 ++ * We must skip inodes in unusual state. We may also skip
4114 ++ * inodes without pages but we deliberately won't in case
4115 ++ * we need to reschedule to avoid softlockups.
4116 ++ */
4117 + if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
4118 +- (inode->i_mapping->nrpages == 0)) {
4119 ++ (inode->i_mapping->nrpages == 0 && !need_resched())) {
4120 + spin_unlock(&inode->i_lock);
4121 + continue;
4122 + }
4123 +@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4124 + spin_unlock(&inode->i_lock);
4125 + spin_unlock(&sb->s_inode_list_lock);
4126 +
4127 ++ cond_resched();
4128 + invalidate_mapping_pages(inode->i_mapping, 0, -1);
4129 + iput(toput_inode);
4130 + toput_inode = inode;
4131 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
4132 +index 4614ee25f621..9d566e62684c 100644
4133 +--- a/fs/gfs2/glock.c
4134 ++++ b/fs/gfs2/glock.c
4135 +@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
4136 +
4137 + static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
4138 + {
4139 +- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
4140 ++ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
4141 +
4142 + return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
4143 + }
4144 +diff --git a/fs/iomap.c b/fs/iomap.c
4145 +index e57fb1e534c5..fac45206418a 100644
4146 +--- a/fs/iomap.c
4147 ++++ b/fs/iomap.c
4148 +@@ -117,6 +117,12 @@ iomap_page_create(struct inode *inode, struct page *page)
4149 + atomic_set(&iop->read_count, 0);
4150 + atomic_set(&iop->write_count, 0);
4151 + bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
4152 ++
4153 ++ /*
4154 ++ * migrate_page_move_mapping() assumes that pages with private data have
4155 ++ * their count elevated by 1.
4156 ++ */
4157 ++ get_page(page);
4158 + set_page_private(page, (unsigned long)iop);
4159 + SetPagePrivate(page);
4160 + return iop;
4161 +@@ -133,6 +139,7 @@ iomap_page_release(struct page *page)
4162 + WARN_ON_ONCE(atomic_read(&iop->write_count));
4163 + ClearPagePrivate(page);
4164 + set_page_private(page, 0);
4165 ++ put_page(page);
4166 + kfree(iop);
4167 + }
4168 +
4169 +@@ -565,8 +572,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
4170 +
4171 + if (page_has_private(page)) {
4172 + ClearPagePrivate(page);
4173 ++ get_page(newpage);
4174 + set_page_private(newpage, page_private(page));
4175 + set_page_private(page, 0);
4176 ++ put_page(page);
4177 + SetPagePrivate(newpage);
4178 + }
4179 +
4180 +@@ -1778,6 +1787,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4181 + loff_t pos = iocb->ki_pos, start = pos;
4182 + loff_t end = iocb->ki_pos + count - 1, ret = 0;
4183 + unsigned int flags = IOMAP_DIRECT;
4184 ++ bool wait_for_completion = is_sync_kiocb(iocb);
4185 + struct blk_plug plug;
4186 + struct iomap_dio *dio;
4187 +
4188 +@@ -1797,7 +1807,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4189 + dio->end_io = end_io;
4190 + dio->error = 0;
4191 + dio->flags = 0;
4192 +- dio->wait_for_completion = is_sync_kiocb(iocb);
4193 +
4194 + dio->submit.iter = iter;
4195 + dio->submit.waiter = current;
4196 +@@ -1852,7 +1861,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4197 + dio_warn_stale_pagecache(iocb->ki_filp);
4198 + ret = 0;
4199 +
4200 +- if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
4201 ++ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
4202 + !inode->i_sb->s_dio_done_wq) {
4203 + ret = sb_init_dio_done_wq(inode->i_sb);
4204 + if (ret < 0)
4205 +@@ -1868,7 +1877,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4206 + if (ret <= 0) {
4207 + /* magic error code to fall back to buffered I/O */
4208 + if (ret == -ENOTBLK) {
4209 +- dio->wait_for_completion = true;
4210 ++ wait_for_completion = true;
4211 + ret = 0;
4212 + }
4213 + break;
4214 +@@ -1890,8 +1899,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4215 + if (dio->flags & IOMAP_DIO_WRITE_FUA)
4216 + dio->flags &= ~IOMAP_DIO_NEED_SYNC;
4217 +
4218 ++ /*
4219 ++ * We are about to drop our additional submission reference, which
4220 ++ * might be the last reference to the dio. There are three
4221 ++ * different ways we can progress here:
4222 ++ *
4223 ++ * (a) If this is the last reference we will always complete and free
4224 ++ * the dio ourselves.
4225 ++ * (b) If this is not the last reference, and we serve an asynchronous
4226 ++ * iocb, we must never touch the dio after the decrement, the
4227 ++ * I/O completion handler will complete and free it.
4228 ++ * (c) If this is not the last reference, but we serve a synchronous
4229 ++ * iocb, the I/O completion handler will wake us up on the drop
4230 ++ * of the final reference, and we will complete and free it here
4231 ++ * after we got woken by the I/O completion handler.
4232 ++ */
4233 ++ dio->wait_for_completion = wait_for_completion;
4234 + if (!atomic_dec_and_test(&dio->ref)) {
4235 +- if (!dio->wait_for_completion)
4236 ++ if (!wait_for_completion)
4237 + return -EIOCBQUEUED;
4238 +
4239 + for (;;) {
4240 +@@ -1908,9 +1933,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4241 + __set_current_state(TASK_RUNNING);
4242 + }
4243 +
4244 +- ret = iomap_dio_complete(dio);
4245 +-
4246 +- return ret;
4247 ++ return iomap_dio_complete(dio);
4248 +
4249 + out_free_dio:
4250 + kfree(dio);
4251 +diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4252 +index 5ef2c71348bd..6b666d187907 100644
4253 +--- a/fs/nfs/super.c
4254 ++++ b/fs/nfs/super.c
4255 +@@ -1906,6 +1906,11 @@ static int nfs_parse_devname(const char *dev_name,
4256 + size_t len;
4257 + char *end;
4258 +
4259 ++ if (unlikely(!dev_name || !*dev_name)) {
4260 ++ dfprintk(MOUNT, "NFS: device name not specified\n");
4261 ++ return -EINVAL;
4262 ++ }
4263 ++
4264 + /* Is the host name protected with square brackets? */
4265 + if (*dev_name == '[') {
4266 + end = strchr(++dev_name, ']');
4267 +diff --git a/fs/proc/generic.c b/fs/proc/generic.c
4268 +index 8ae109429a88..e39bac94dead 100644
4269 +--- a/fs/proc/generic.c
4270 ++++ b/fs/proc/generic.c
4271 +@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
4272 + inode = proc_get_inode(dir->i_sb, de);
4273 + if (!inode)
4274 + return ERR_PTR(-ENOMEM);
4275 +- d_set_d_op(dentry, &proc_misc_dentry_ops);
4276 ++ d_set_d_op(dentry, de->proc_dops);
4277 + return d_splice_alias(inode, dentry);
4278 + }
4279 + read_unlock(&proc_subdir_lock);
4280 +@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
4281 + INIT_LIST_HEAD(&ent->pde_openers);
4282 + proc_set_user(ent, (*parent)->uid, (*parent)->gid);
4283 +
4284 ++ ent->proc_dops = &proc_misc_dentry_ops;
4285 ++
4286 + out:
4287 + return ent;
4288 + }
4289 +diff --git a/fs/proc/internal.h b/fs/proc/internal.h
4290 +index 5185d7f6a51e..95b14196f284 100644
4291 +--- a/fs/proc/internal.h
4292 ++++ b/fs/proc/internal.h
4293 +@@ -44,6 +44,7 @@ struct proc_dir_entry {
4294 + struct completion *pde_unload_completion;
4295 + const struct inode_operations *proc_iops;
4296 + const struct file_operations *proc_fops;
4297 ++ const struct dentry_operations *proc_dops;
4298 + union {
4299 + const struct seq_operations *seq_ops;
4300 + int (*single_show)(struct seq_file *, void *);
4301 +diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
4302 +index d5e0fcb3439e..a7b12435519e 100644
4303 +--- a/fs/proc/proc_net.c
4304 ++++ b/fs/proc/proc_net.c
4305 +@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
4306 + return maybe_get_net(PDE_NET(PDE(inode)));
4307 + }
4308 +
4309 ++static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
4310 ++{
4311 ++ return 0;
4312 ++}
4313 ++
4314 ++static const struct dentry_operations proc_net_dentry_ops = {
4315 ++ .d_revalidate = proc_net_d_revalidate,
4316 ++ .d_delete = always_delete_dentry,
4317 ++};
4318 ++
4319 ++static void pde_force_lookup(struct proc_dir_entry *pde)
4320 ++{
4321 ++ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
4322 ++ pde->proc_dops = &proc_net_dentry_ops;
4323 ++}
4324 ++
4325 + static int seq_open_net(struct inode *inode, struct file *file)
4326 + {
4327 + unsigned int state_size = PDE(inode)->state_size;
4328 +@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
4329 + p = proc_create_reg(name, mode, &parent, data);
4330 + if (!p)
4331 + return NULL;
4332 ++ pde_force_lookup(p);
4333 + p->proc_fops = &proc_net_seq_fops;
4334 + p->seq_ops = ops;
4335 + p->state_size = state_size;
4336 +@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
4337 + p = proc_create_reg(name, mode, &parent, data);
4338 + if (!p)
4339 + return NULL;
4340 ++ pde_force_lookup(p);
4341 + p->proc_fops = &proc_net_seq_fops;
4342 + p->seq_ops = ops;
4343 + p->state_size = state_size;
4344 +@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
4345 + p = proc_create_reg(name, mode, &parent, data);
4346 + if (!p)
4347 + return NULL;
4348 ++ pde_force_lookup(p);
4349 + p->proc_fops = &proc_net_single_fops;
4350 + p->single_show = show;
4351 + return proc_register(parent, p);
4352 +@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
4353 + p = proc_create_reg(name, mode, &parent, data);
4354 + if (!p)
4355 + return NULL;
4356 ++ pde_force_lookup(p);
4357 + p->proc_fops = &proc_net_single_fops;
4358 + p->single_show = show;
4359 + p->write = write;
4360 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
4361 +index bfe1639df02d..97fc498dc767 100644
4362 +--- a/include/drm/drm_cache.h
4363 ++++ b/include/drm/drm_cache.h
4364 +@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
4365 + return false;
4366 + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
4367 + return false;
4368 ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4369 ++ /*
4370 ++ * The DRM driver stack is designed to work with cache coherent devices
4371 ++ * only, but permits an optimization to be enabled in some cases, where
4372 ++ * for some buffers, both the CPU and the GPU use uncached mappings,
4373 ++ * removing the need for DMA snooping and allocation in the CPU caches.
4374 ++ *
4375 ++ * The use of uncached GPU mappings relies on the correct implementation
4376 ++ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
4377 ++ * will use cached mappings nonetheless. On x86 platforms, this does not
4378 ++ * seem to matter, as uncached CPU mappings will snoop the caches in any
4379 ++ * case. However, on ARM and arm64, enabling this optimization on a
4380 ++ * platform where NoSnoop is ignored results in loss of coherency, which
4381 ++ * breaks correct operation of the device. Since we have no way of
4382 ++ * detecting whether NoSnoop works or not, just disable this
4383 ++ * optimization entirely for ARM and arm64.
4384 ++ */
4385 ++ return false;
4386 + #else
4387 + return true;
4388 + #endif
4389 +diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
4390 +index 8bdbb5f29494..3188c0bef3e7 100644
4391 +--- a/include/linux/irqchip/arm-gic-v3.h
4392 ++++ b/include/linux/irqchip/arm-gic-v3.h
4393 +@@ -319,7 +319,7 @@
4394 + #define GITS_TYPER_PLPIS (1UL << 0)
4395 + #define GITS_TYPER_VLPIS (1UL << 1)
4396 + #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
4397 +-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
4398 ++#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
4399 + #define GITS_TYPER_IDBITS_SHIFT 8
4400 + #define GITS_TYPER_DEVBITS_SHIFT 13
4401 + #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
4402 +diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
4403 +index 7ddfc65586b0..4335bd771ce5 100644
4404 +--- a/include/linux/stmmac.h
4405 ++++ b/include/linux/stmmac.h
4406 +@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
4407 + struct clk *pclk;
4408 + struct clk *clk_ptp_ref;
4409 + unsigned int clk_ptp_rate;
4410 ++ unsigned int clk_ref_rate;
4411 + struct reset_control *stmmac_rst;
4412 + struct stmmac_axi *axi;
4413 + int has_gmac4;
4414 +diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4415 +index 03cc59ee9c95..cebadd6af4d9 100644
4416 +--- a/kernel/bpf/hashtab.c
4417 ++++ b/kernel/bpf/hashtab.c
4418 +@@ -677,7 +677,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
4419 + }
4420 +
4421 + if (htab_is_prealloc(htab)) {
4422 +- pcpu_freelist_push(&htab->freelist, &l->fnode);
4423 ++ __pcpu_freelist_push(&htab->freelist, &l->fnode);
4424 + } else {
4425 + atomic_dec(&htab->count);
4426 + l->htab = htab;
4427 +@@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
4428 + } else {
4429 + struct pcpu_freelist_node *l;
4430 +
4431 +- l = pcpu_freelist_pop(&htab->freelist);
4432 ++ l = __pcpu_freelist_pop(&htab->freelist);
4433 + if (!l)
4434 + return ERR_PTR(-E2BIG);
4435 + l_new = container_of(l, struct htab_elem, fnode);
4436 +diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
4437 +index 673fa6fe2d73..0c1b4ba9e90e 100644
4438 +--- a/kernel/bpf/percpu_freelist.c
4439 ++++ b/kernel/bpf/percpu_freelist.c
4440 +@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
4441 + free_percpu(s->freelist);
4442 + }
4443 +
4444 +-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4445 +- struct pcpu_freelist_node *node)
4446 ++static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
4447 ++ struct pcpu_freelist_node *node)
4448 + {
4449 + raw_spin_lock(&head->lock);
4450 + node->next = head->first;
4451 +@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4452 + raw_spin_unlock(&head->lock);
4453 + }
4454 +
4455 +-void pcpu_freelist_push(struct pcpu_freelist *s,
4456 ++void __pcpu_freelist_push(struct pcpu_freelist *s,
4457 + struct pcpu_freelist_node *node)
4458 + {
4459 + struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
4460 +
4461 +- __pcpu_freelist_push(head, node);
4462 ++ ___pcpu_freelist_push(head, node);
4463 ++}
4464 ++
4465 ++void pcpu_freelist_push(struct pcpu_freelist *s,
4466 ++ struct pcpu_freelist_node *node)
4467 ++{
4468 ++ unsigned long flags;
4469 ++
4470 ++ local_irq_save(flags);
4471 ++ __pcpu_freelist_push(s, node);
4472 ++ local_irq_restore(flags);
4473 + }
4474 +
4475 + void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4476 +@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4477 + for_each_possible_cpu(cpu) {
4478 + again:
4479 + head = per_cpu_ptr(s->freelist, cpu);
4480 +- __pcpu_freelist_push(head, buf);
4481 ++ ___pcpu_freelist_push(head, buf);
4482 + i++;
4483 + buf += elem_size;
4484 + if (i == nr_elems)
4485 +@@ -74,14 +84,12 @@ again:
4486 + local_irq_restore(flags);
4487 + }
4488 +
4489 +-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4490 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
4491 + {
4492 + struct pcpu_freelist_head *head;
4493 + struct pcpu_freelist_node *node;
4494 +- unsigned long flags;
4495 + int orig_cpu, cpu;
4496 +
4497 +- local_irq_save(flags);
4498 + orig_cpu = cpu = raw_smp_processor_id();
4499 + while (1) {
4500 + head = per_cpu_ptr(s->freelist, cpu);
4501 +@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4502 + node = head->first;
4503 + if (node) {
4504 + head->first = node->next;
4505 +- raw_spin_unlock_irqrestore(&head->lock, flags);
4506 ++ raw_spin_unlock(&head->lock);
4507 + return node;
4508 + }
4509 + raw_spin_unlock(&head->lock);
4510 + cpu = cpumask_next(cpu, cpu_possible_mask);
4511 + if (cpu >= nr_cpu_ids)
4512 + cpu = 0;
4513 +- if (cpu == orig_cpu) {
4514 +- local_irq_restore(flags);
4515 ++ if (cpu == orig_cpu)
4516 + return NULL;
4517 +- }
4518 + }
4519 + }
4520 ++
4521 ++struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4522 ++{
4523 ++ struct pcpu_freelist_node *ret;
4524 ++ unsigned long flags;
4525 ++
4526 ++ local_irq_save(flags);
4527 ++ ret = __pcpu_freelist_pop(s);
4528 ++ local_irq_restore(flags);
4529 ++ return ret;
4530 ++}
4531 +diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
4532 +index 3049aae8ea1e..c3960118e617 100644
4533 +--- a/kernel/bpf/percpu_freelist.h
4534 ++++ b/kernel/bpf/percpu_freelist.h
4535 +@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
4536 + struct pcpu_freelist_node *next;
4537 + };
4538 +
4539 ++/* pcpu_freelist_* do spin_lock_irqsave. */
4540 + void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4541 + struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
4542 ++/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
4543 ++void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4544 ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
4545 + void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4546 + u32 nr_elems);
4547 + int pcpu_freelist_init(struct pcpu_freelist *);
4548 +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4549 +index 382c09dddf93..cc40b8be1171 100644
4550 +--- a/kernel/bpf/syscall.c
4551 ++++ b/kernel/bpf/syscall.c
4552 +@@ -701,8 +701,13 @@ static int map_lookup_elem(union bpf_attr *attr)
4553 +
4554 + if (bpf_map_is_dev_bound(map)) {
4555 + err = bpf_map_offload_lookup_elem(map, key, value);
4556 +- } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4557 +- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4558 ++ goto done;
4559 ++ }
4560 ++
4561 ++ preempt_disable();
4562 ++ this_cpu_inc(bpf_prog_active);
4563 ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4564 ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4565 + err = bpf_percpu_hash_copy(map, key, value);
4566 + } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
4567 + err = bpf_percpu_array_copy(map, key, value);
4568 +@@ -722,7 +727,10 @@ static int map_lookup_elem(union bpf_attr *attr)
4569 + rcu_read_unlock();
4570 + err = ptr ? 0 : -ENOENT;
4571 + }
4572 ++ this_cpu_dec(bpf_prog_active);
4573 ++ preempt_enable();
4574 +
4575 ++done:
4576 + if (err)
4577 + goto free_value;
4578 +
4579 +diff --git a/kernel/events/core.c b/kernel/events/core.c
4580 +index 4fb9d5054618..aa996a0854b9 100644
4581 +--- a/kernel/events/core.c
4582 ++++ b/kernel/events/core.c
4583 +@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
4584 + void __user *buffer, size_t *lenp,
4585 + loff_t *ppos)
4586 + {
4587 +- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4588 +-
4589 +- if (ret || !write)
4590 +- return ret;
4591 +-
4592 ++ int ret;
4593 ++ int perf_cpu = sysctl_perf_cpu_time_max_percent;
4594 + /*
4595 + * If throttling is disabled don't allow the write:
4596 + */
4597 +- if (sysctl_perf_cpu_time_max_percent == 100 ||
4598 +- sysctl_perf_cpu_time_max_percent == 0)
4599 ++ if (write && (perf_cpu == 100 || perf_cpu == 0))
4600 + return -EINVAL;
4601 +
4602 ++ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4603 ++ if (ret || !write)
4604 ++ return ret;
4605 ++
4606 + max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
4607 + perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
4608 + update_perf_cpu_limits();
4609 +diff --git a/kernel/relay.c b/kernel/relay.c
4610 +index 04f248644e06..9e0f52375487 100644
4611 +--- a/kernel/relay.c
4612 ++++ b/kernel/relay.c
4613 +@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
4614 + dentry = chan->cb->create_buf_file(tmpname, chan->parent,
4615 + S_IRUSR, buf,
4616 + &chan->is_global);
4617 ++ if (IS_ERR(dentry))
4618 ++ dentry = NULL;
4619 +
4620 + kfree(tmpname);
4621 +
4622 +@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
4623 + dentry = chan->cb->create_buf_file(NULL, NULL,
4624 + S_IRUSR, buf,
4625 + &chan->is_global);
4626 +- if (WARN_ON(dentry))
4627 ++ if (IS_ERR_OR_NULL(dentry))
4628 + goto free_buf;
4629 + }
4630 +
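
Note: relay's create_buf_file() callbacks used to return a dentry or NULL, but debugfs-backed implementations can now return ERR_PTR() values, and treating an encoded errno as a live dentry corrupts later dereferences. The first hunk folds errors into the NULL case; the second checks with IS_ERR_OR_NULL(). The convention in miniature:

    struct dentry *d = cb->create_buf_file(...);  /* ptr, NULL, or ERR_PTR */

    if (IS_ERR(d))   /* encoded errno such as ERR_PTR(-ENODEV) */
        d = NULL;    /* treat like "no dentry was created"     */
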
4631 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
4632 +index 9864a35c8bb5..6c28d519447d 100644
4633 +--- a/kernel/trace/bpf_trace.c
4634 ++++ b/kernel/trace/bpf_trace.c
4635 +@@ -1158,22 +1158,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
4636 +
4637 + int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4638 + {
4639 +- int err;
4640 +-
4641 +- mutex_lock(&bpf_event_mutex);
4642 +- err = __bpf_probe_register(btp, prog);
4643 +- mutex_unlock(&bpf_event_mutex);
4644 +- return err;
4645 ++ return __bpf_probe_register(btp, prog);
4646 + }
4647 +
4648 + int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4649 + {
4650 +- int err;
4651 +-
4652 +- mutex_lock(&bpf_event_mutex);
4653 +- err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4654 +- mutex_unlock(&bpf_event_mutex);
4655 +- return err;
4656 ++ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4657 + }
4658 +
4659 + int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
4660 +diff --git a/lib/test_kmod.c b/lib/test_kmod.c
4661 +index d82d022111e0..9cf77628fc91 100644
4662 +--- a/lib/test_kmod.c
4663 ++++ b/lib/test_kmod.c
4664 +@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
4665 + config->test_driver = NULL;
4666 +
4667 + kfree_const(config->test_fs);
4668 +- config->test_driver = NULL;
4669 ++ config->test_fs = NULL;
4670 + }
4671 +
4672 + static void kmod_config_free(struct kmod_test_device *test_dev)
4673 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4674 +index c6119ad3561e..156991edec2a 100644
4675 +--- a/mm/memory_hotplug.c
4676 ++++ b/mm/memory_hotplug.c
4677 +@@ -1213,11 +1213,13 @@ static inline int pageblock_free(struct page *page)
4678 + return PageBuddy(page) && page_order(page) >= pageblock_order;
4679 + }
4680 +
4681 +-/* Return the start of the next active pageblock after a given page */
4682 +-static struct page *next_active_pageblock(struct page *page)
4683 ++/* Return the pfn of the start of the next active pageblock after a given pfn */
4684 ++static unsigned long next_active_pageblock(unsigned long pfn)
4685 + {
4686 ++ struct page *page = pfn_to_page(pfn);
4687 ++
4688 + /* Ensure the starting page is pageblock-aligned */
4689 +- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
4690 ++ BUG_ON(pfn & (pageblock_nr_pages - 1));
4691 +
4692 + /* If the entire pageblock is free, move to the end of free page */
4693 + if (pageblock_free(page)) {
4694 +@@ -1225,16 +1227,16 @@ static struct page *next_active_pageblock(struct page *page)
4695 + /* be careful. we don't have locks, page_order can be changed.*/
4696 + order = page_order(page);
4697 + if ((order < MAX_ORDER) && (order >= pageblock_order))
4698 +- return page + (1 << order);
4699 ++ return pfn + (1 << order);
4700 + }
4701 +
4702 +- return page + pageblock_nr_pages;
4703 ++ return pfn + pageblock_nr_pages;
4704 + }
4705 +
4706 +-static bool is_pageblock_removable_nolock(struct page *page)
4707 ++static bool is_pageblock_removable_nolock(unsigned long pfn)
4708 + {
4709 ++ struct page *page = pfn_to_page(pfn);
4710 + struct zone *zone;
4711 +- unsigned long pfn;
4712 +
4713 + /*
4714 + * We have to be careful here because we are iterating over memory
4715 +@@ -1257,12 +1259,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
4716 + /* Checks if this range of memory is likely to be hot-removable. */
4717 + bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
4718 + {
4719 +- struct page *page = pfn_to_page(start_pfn);
4720 +- struct page *end_page = page + nr_pages;
4721 ++ unsigned long end_pfn, pfn;
4722 ++
4723 ++ end_pfn = min(start_pfn + nr_pages,
4724 ++ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
4725 +
4726 + /* Check the starting page of each pageblock within the range */
4727 +- for (; page < end_page; page = next_active_pageblock(page)) {
4728 +- if (!is_pageblock_removable_nolock(page))
4729 ++ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
4730 ++ if (!is_pageblock_removable_nolock(pfn))
4731 + return false;
4732 + cond_resched();
4733 + }
4734 +@@ -1298,6 +1302,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
4735 + i++;
4736 + if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
4737 + continue;
4738 ++ /* Check if we got outside of the zone */
4739 ++ if (zone && !zone_spans_pfn(zone, pfn + i))
4740 ++ return 0;
4741 + page = pfn_to_page(pfn + i);
4742 + if (zone && page_zone(page) != zone)
4743 + return 0;
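
Note: the memory-hotplug hunk converts the removability walk from struct page pointer arithmetic to pfn arithmetic. With sparse memory layouts, consecutive pfns are not guaranteed to map to adjacent struct pages across section boundaries, so "page + pageblock_nr_pages" can step out of the memmap while "pfn + pageblock_nr_pages" is always well defined; the walk is also clamped to the zone end. The safe shape, as an illustrative fragment:

    unsigned long pfn;

    for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
        struct page *page = pfn_to_page(pfn); /* translate late, per block */
        /* inspect page here; never advance by pointer arithmetic */
    }
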
4744 +diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
4745 +index e8090f099eb8..ef0dec20c7d8 100644
4746 +--- a/net/batman-adv/bat_v_elp.c
4747 ++++ b/net/batman-adv/bat_v_elp.c
4748 +@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
4749 +
4750 + ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
4751 +
4752 ++ /* free the TID stats immediately */
4753 ++ cfg80211_sinfo_release_content(&sinfo);
4754 ++
4755 + dev_put(real_netdev);
4756 + if (ret == -ENOENT) {
4757 + /* Node is not associated anymore! It would be
4758 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4759 +index 5e55cef0cec3..6693e209efe8 100644
4760 +--- a/net/bridge/netfilter/ebtables.c
4761 ++++ b/net/bridge/netfilter/ebtables.c
4762 +@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
4763 +
4764 + xt_compat_lock(NFPROTO_BRIDGE);
4765 +
4766 +- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4767 +- if (ret < 0)
4768 +- goto out_unlock;
4769 ++ if (tmp.nentries) {
4770 ++ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4771 ++ if (ret < 0)
4772 ++ goto out_unlock;
4773 ++ }
4774 ++
4775 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
4776 + if (ret < 0)
4777 + goto out_unlock;
4778 +diff --git a/net/core/filter.c b/net/core/filter.c
4779 +index fb0080e84bd4..bed9061102f4 100644
4780 +--- a/net/core/filter.c
4781 ++++ b/net/core/filter.c
4782 +@@ -3909,10 +3909,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4783 + /* Only some socketops are supported */
4784 + switch (optname) {
4785 + case SO_RCVBUF:
4786 ++ val = min_t(u32, val, sysctl_rmem_max);
4787 + sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4788 + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4789 + break;
4790 + case SO_SNDBUF:
4791 ++ val = min_t(u32, val, sysctl_wmem_max);
4792 + sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4793 + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4794 + break;
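
Note: the bpf_setsockopt() hunk clamps the requested buffer size to sysctl_rmem_max / sysctl_wmem_max before doubling, mirroring the userspace setsockopt(SO_RCVBUF/SO_SNDBUF) path. Because the comparison is done as u32, a negative input wraps to a huge value and is then clamped rather than doubled. A small standalone demonstration of that property (min_t re-modelled here, values assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        int val = -1;                /* hostile input           */
        uint32_t rmem_max = 212992;  /* assumed sysctl_rmem_max */

        /* (uint32_t)-1 == 4294967295, so the clamp wins */
        printf("%u\n", min_t(uint32_t, val, rmem_max));  /* 212992 */
        return 0;
    }
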
4795 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
4796 +index 7f56944b020f..40a7cd56e008 100644
4797 +--- a/net/ipv4/ip_vti.c
4798 ++++ b/net/ipv4/ip_vti.c
4799 +@@ -74,6 +74,33 @@ drop:
4800 + return 0;
4801 + }
4802 +
4803 ++static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
4804 ++ int encap_type)
4805 ++{
4806 ++ struct ip_tunnel *tunnel;
4807 ++ const struct iphdr *iph = ip_hdr(skb);
4808 ++ struct net *net = dev_net(skb->dev);
4809 ++ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
4810 ++
4811 ++ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
4812 ++ iph->saddr, iph->daddr, 0);
4813 ++ if (tunnel) {
4814 ++ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
4815 ++ goto drop;
4816 ++
4817 ++ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
4818 ++
4819 ++ skb->dev = tunnel->dev;
4820 ++
4821 ++ return xfrm_input(skb, nexthdr, spi, encap_type);
4822 ++ }
4823 ++
4824 ++ return -EINVAL;
4825 ++drop:
4826 ++ kfree_skb(skb);
4827 ++ return 0;
4828 ++}
4829 ++
4830 + static int vti_rcv(struct sk_buff *skb)
4831 + {
4832 + XFRM_SPI_SKB_CB(skb)->family = AF_INET;
4833 +@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
4834 + return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
4835 + }
4836 +
4837 ++static int vti_rcv_ipip(struct sk_buff *skb)
4838 ++{
4839 ++ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
4840 ++ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
4841 ++
4842 ++ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
4843 ++}
4844 ++
4845 + static int vti_rcv_cb(struct sk_buff *skb, int err)
4846 + {
4847 + unsigned short family;
4848 +@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
4849 + .priority = 100,
4850 + };
4851 +
4852 ++static struct xfrm_tunnel ipip_handler __read_mostly = {
4853 ++ .handler = vti_rcv_ipip,
4854 ++ .err_handler = vti4_err,
4855 ++ .priority = 0,
4856 ++};
4857 ++
4858 + static int __net_init vti_init_net(struct net *net)
4859 + {
4860 + int err;
4861 +@@ -603,6 +644,13 @@ static int __init vti_init(void)
4862 + if (err < 0)
4863 + goto xfrm_proto_comp_failed;
4864 +
4865 ++ msg = "ipip tunnel";
4866 ++ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
4867 ++ if (err < 0) {
4868 ++ pr_info("%s: can't register tunnel\n", __func__);
4869 ++ goto xfrm_tunnel_failed;
4870 ++ }
4871 ++
4872 + msg = "netlink interface";
4873 + err = rtnl_link_register(&vti_link_ops);
4874 + if (err < 0)
4875 +@@ -612,6 +660,8 @@ static int __init vti_init(void)
4876 +
4877 + rtnl_link_failed:
4878 + xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
4879 ++xfrm_tunnel_failed:
4880 ++ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
4881 + xfrm_proto_comp_failed:
4882 + xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
4883 + xfrm_proto_ah_failed:
4884 +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
4885 +index 518364f4abcc..55a77314340a 100644
4886 +--- a/net/netfilter/ipvs/ip_vs_ctl.c
4887 ++++ b/net/netfilter/ipvs/ip_vs_ctl.c
4888 +@@ -2220,6 +2220,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
4889 + u->tcp_fin_timeout,
4890 + u->udp_timeout);
4891 +
4892 ++#ifdef CONFIG_IP_VS_PROTO_TCP
4893 ++ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
4894 ++ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
4895 ++ return -EINVAL;
4896 ++ }
4897 ++#endif
4898 ++
4899 ++#ifdef CONFIG_IP_VS_PROTO_UDP
4900 ++ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
4901 ++ return -EINVAL;
4902 ++#endif
4903 ++
4904 + #ifdef CONFIG_IP_VS_PROTO_TCP
4905 + if (u->tcp_timeout) {
4906 + pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
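
Note: the IPVS checks reject timeouts that would overflow when converted to jiffies. The values are later multiplied by HZ into a signed int, so anything above INT_MAX / HZ wraps negative; with HZ=1000 the ceiling is 2147483 seconds, roughly 24.8 days. A runnable illustration of the bound:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        const long hz = 1000;        /* assumed CONFIG_HZ         */
        long limit = INT_MAX / hz;   /* 2147483 s when HZ is 1000 */

        /* (limit + 1) * hz == 2147484000, which exceeds INT_MAX */
        printf("limit = %ld s (~%.1f days)\n", limit, limit / 86400.0);
        return 0;
    }
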
4907 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
4908 +index 277d02a8cac8..895171a2e1f1 100644
4909 +--- a/net/netfilter/nf_conntrack_core.c
4910 ++++ b/net/netfilter/nf_conntrack_core.c
4911 +@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
4912 + }
4913 +
4914 + if (nf_ct_key_equal(h, tuple, zone, net)) {
4915 ++ /* Tuple is taken already, so caller will need to find
4916 ++ * a new source port to use.
4917 ++ *
4918 ++ * Only exception:
4919 ++ * If the *original tuples* are identical, then both
4920 ++ * conntracks refer to the same flow.
4921 ++ * This is a rare situation, it can occur e.g. when
4922 ++ * more than one UDP packet is sent from the same socket
4923 ++ * in different threads.
4924 ++ *
4925 ++ * Let nf_ct_resolve_clash() deal with this later.
4926 ++ */
4927 ++ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
4928 ++ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
4929 ++ continue;
4930 ++
4931 + NF_CT_STAT_INC_ATOMIC(net, found);
4932 + rcu_read_unlock();
4933 + return 1;
4934 +diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
4935 +index 0d0d68c989df..1dae02a97ee3 100644
4936 +--- a/net/netfilter/xt_TEE.c
4937 ++++ b/net/netfilter/xt_TEE.c
4938 +@@ -14,6 +14,8 @@
4939 + #include <linux/skbuff.h>
4940 + #include <linux/route.h>
4941 + #include <linux/netfilter/x_tables.h>
4942 ++#include <net/net_namespace.h>
4943 ++#include <net/netns/generic.h>
4944 + #include <net/route.h>
4945 + #include <net/netfilter/ipv4/nf_dup_ipv4.h>
4946 + #include <net/netfilter/ipv6/nf_dup_ipv6.h>
4947 +@@ -25,8 +27,15 @@ struct xt_tee_priv {
4948 + int oif;
4949 + };
4950 +
4951 ++static unsigned int tee_net_id __read_mostly;
4952 + static const union nf_inet_addr tee_zero_address;
4953 +
4954 ++struct tee_net {
4955 ++ struct list_head priv_list;
4956 ++ /* lock protects the priv_list */
4957 ++ struct mutex lock;
4958 ++};
4959 ++
4960 + static unsigned int
4961 + tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
4962 + {
4963 +@@ -51,17 +60,16 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
4964 + }
4965 + #endif
4966 +
4967 +-static DEFINE_MUTEX(priv_list_mutex);
4968 +-static LIST_HEAD(priv_list);
4969 +-
4970 + static int tee_netdev_event(struct notifier_block *this, unsigned long event,
4971 + void *ptr)
4972 + {
4973 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4974 ++ struct net *net = dev_net(dev);
4975 ++ struct tee_net *tn = net_generic(net, tee_net_id);
4976 + struct xt_tee_priv *priv;
4977 +
4978 +- mutex_lock(&priv_list_mutex);
4979 +- list_for_each_entry(priv, &priv_list, list) {
4980 ++ mutex_lock(&tn->lock);
4981 ++ list_for_each_entry(priv, &tn->priv_list, list) {
4982 + switch (event) {
4983 + case NETDEV_REGISTER:
4984 + if (!strcmp(dev->name, priv->tginfo->oif))
4985 +@@ -79,13 +87,14 @@ static int tee_netdev_event(struct notifier_block *this, unsigned long event,
4986 + break;
4987 + }
4988 + }
4989 +- mutex_unlock(&priv_list_mutex);
4990 ++ mutex_unlock(&tn->lock);
4991 +
4992 + return NOTIFY_DONE;
4993 + }
4994 +
4995 + static int tee_tg_check(const struct xt_tgchk_param *par)
4996 + {
4997 ++ struct tee_net *tn = net_generic(par->net, tee_net_id);
4998 + struct xt_tee_tginfo *info = par->targinfo;
4999 + struct xt_tee_priv *priv;
5000 +
5001 +@@ -95,6 +104,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
5002 + return -EINVAL;
5003 +
5004 + if (info->oif[0]) {
5005 ++ struct net_device *dev;
5006 ++
5007 + if (info->oif[sizeof(info->oif)-1] != '\0')
5008 + return -EINVAL;
5009 +
5010 +@@ -106,9 +117,14 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
5011 + priv->oif = -1;
5012 + info->priv = priv;
5013 +
5014 +- mutex_lock(&priv_list_mutex);
5015 +- list_add(&priv->list, &priv_list);
5016 +- mutex_unlock(&priv_list_mutex);
5017 ++ dev = dev_get_by_name(par->net, info->oif);
5018 ++ if (dev) {
5019 ++ priv->oif = dev->ifindex;
5020 ++ dev_put(dev);
5021 ++ }
5022 ++ mutex_lock(&tn->lock);
5023 ++ list_add(&priv->list, &tn->priv_list);
5024 ++ mutex_unlock(&tn->lock);
5025 + } else
5026 + info->priv = NULL;
5027 +
5028 +@@ -118,12 +134,13 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
5029 +
5030 + static void tee_tg_destroy(const struct xt_tgdtor_param *par)
5031 + {
5032 ++ struct tee_net *tn = net_generic(par->net, tee_net_id);
5033 + struct xt_tee_tginfo *info = par->targinfo;
5034 +
5035 + if (info->priv) {
5036 +- mutex_lock(&priv_list_mutex);
5037 ++ mutex_lock(&tn->lock);
5038 + list_del(&info->priv->list);
5039 +- mutex_unlock(&priv_list_mutex);
5040 ++ mutex_unlock(&tn->lock);
5041 + kfree(info->priv);
5042 + }
5043 + static_key_slow_dec(&xt_tee_enabled);
5044 +@@ -156,6 +173,21 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
5045 + #endif
5046 + };
5047 +
5048 ++static int __net_init tee_net_init(struct net *net)
5049 ++{
5050 ++ struct tee_net *tn = net_generic(net, tee_net_id);
5051 ++
5052 ++ INIT_LIST_HEAD(&tn->priv_list);
5053 ++ mutex_init(&tn->lock);
5054 ++ return 0;
5055 ++}
5056 ++
5057 ++static struct pernet_operations tee_net_ops = {
5058 ++ .init = tee_net_init,
5059 ++ .id = &tee_net_id,
5060 ++ .size = sizeof(struct tee_net),
5061 ++};
5062 ++
5063 + static struct notifier_block tee_netdev_notifier = {
5064 + .notifier_call = tee_netdev_event,
5065 + };
5066 +@@ -164,22 +196,32 @@ static int __init tee_tg_init(void)
5067 + {
5068 + int ret;
5069 +
5070 +- ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5071 +- if (ret)
5072 ++ ret = register_pernet_subsys(&tee_net_ops);
5073 ++ if (ret < 0)
5074 + return ret;
5075 ++
5076 ++ ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5077 ++ if (ret < 0)
5078 ++ goto cleanup_subsys;
5079 ++
5080 + ret = register_netdevice_notifier(&tee_netdev_notifier);
5081 +- if (ret) {
5082 +- xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5083 +- return ret;
5084 +- }
5085 ++ if (ret < 0)
5086 ++ goto unregister_targets;
5087 +
5088 + return 0;
5089 ++
5090 ++unregister_targets:
5091 ++ xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5092 ++cleanup_subsys:
5093 ++ unregister_pernet_subsys(&tee_net_ops);
5094 ++ return ret;
5095 + }
5096 +
5097 + static void __exit tee_tg_exit(void)
5098 + {
5099 + unregister_netdevice_notifier(&tee_netdev_notifier);
5100 + xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5101 ++ unregister_pernet_subsys(&tee_net_ops);
5102 + }
5103 +
5104 + module_init(tee_tg_init);
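
Note: the xt_TEE conversion moves the single global priv list into per-network-namespace storage via net_generic(), so a netdevice event in one namespace can no longer match rules living in another. It also resolves the oif eagerly with dev_get_by_name() at check time, which takes a device reference that has to be released promptly. The reference discipline in miniature:

    struct net_device *dev = dev_get_by_name(net, name); /* +1 ref or NULL */

    if (dev) {
        priv->oif = dev->ifindex;  /* cache the index, not the pointer */
        dev_put(dev);              /* drop the reference right away    */
    }
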
5105 +diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
5106 +index 5d3cce9e8744..15eb5d3d4750 100644
5107 +--- a/net/vmw_vsock/virtio_transport.c
5108 ++++ b/net/vmw_vsock/virtio_transport.c
5109 +@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
5110 + {
5111 + struct virtio_vsock *vsock = virtio_vsock_get();
5112 +
5113 ++ if (!vsock)
5114 ++ return VMADDR_CID_ANY;
5115 ++
5116 + return vsock->guest_cid;
5117 + }
5118 +
5119 +@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5120 +
5121 + virtio_vsock_update_guest_cid(vsock);
5122 +
5123 +- ret = vsock_core_init(&virtio_transport.transport);
5124 +- if (ret < 0)
5125 +- goto out_vqs;
5126 +-
5127 + vsock->rx_buf_nr = 0;
5128 + vsock->rx_buf_max_nr = 0;
5129 + atomic_set(&vsock->queued_replies, 0);
5130 +@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5131 + mutex_unlock(&the_virtio_vsock_mutex);
5132 + return 0;
5133 +
5134 +-out_vqs:
5135 +- vsock->vdev->config->del_vqs(vsock->vdev);
5136 + out:
5137 + kfree(vsock);
5138 + mutex_unlock(&the_virtio_vsock_mutex);
5139 +@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5140 + flush_work(&vsock->event_work);
5141 + flush_work(&vsock->send_pkt_work);
5142 +
5143 ++ /* Reset all connected sockets when the device disappears */
5144 ++ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
5145 ++
5146 + vdev->config->reset(vdev);
5147 +
5148 + mutex_lock(&vsock->rx_lock);
5149 +@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5150 +
5151 + mutex_lock(&the_virtio_vsock_mutex);
5152 + the_virtio_vsock = NULL;
5153 +- vsock_core_exit();
5154 + mutex_unlock(&the_virtio_vsock_mutex);
5155 +
5156 + vdev->config->del_vqs(vdev);
5157 +@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
5158 + virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
5159 + if (!virtio_vsock_workqueue)
5160 + return -ENOMEM;
5161 ++
5162 + ret = register_virtio_driver(&virtio_vsock_driver);
5163 + if (ret)
5164 +- destroy_workqueue(virtio_vsock_workqueue);
5165 ++ goto out_wq;
5166 ++
5167 ++ ret = vsock_core_init(&virtio_transport.transport);
5168 ++ if (ret)
5169 ++ goto out_vdr;
5170 ++
5171 ++ return 0;
5172 ++
5173 ++out_vdr:
5174 ++ unregister_virtio_driver(&virtio_vsock_driver);
5175 ++out_wq:
5176 ++ destroy_workqueue(virtio_vsock_workqueue);
5177 + return ret;
5178 ++
5179 + }
5180 +
5181 + static void __exit virtio_vsock_exit(void)
5182 + {
5183 ++ vsock_core_exit();
5184 + unregister_virtio_driver(&virtio_vsock_driver);
5185 + destroy_workqueue(virtio_vsock_workqueue);
5186 + }
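
Note: the vsock rework moves vsock_core_init() out of device probe and into module init, with teardown in strictly reverse order at exit; the transport can now be registered while no device is bound, which is why virtio_transport_get_local_cid() learns to return VMADDR_CID_ANY instead of dereferencing a NULL device. The goto-unwind idiom used in virtio_vsock_init() (and in the vti and xt_TEE hunks above) generalizes as, with hypothetical names:

    static int __init demo_init(void)
    {
        int ret = register_a();
        if (ret)
            return ret;

        ret = register_b();
        if (ret)
            goto out_a;    /* undo completed steps in reverse order */
        return 0;

    out_a:
        unregister_a();
        return ret;
    }
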
5187 +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
5188 +index 08c88de0ffda..11975ec8d566 100644
5189 +--- a/security/apparmor/domain.c
5190 ++++ b/security/apparmor/domain.c
5191 +@@ -1444,7 +1444,10 @@ check:
5192 + new = aa_label_merge(label, target, GFP_KERNEL);
5193 + if (IS_ERR_OR_NULL(new)) {
5194 + info = "failed to build target label";
5195 +- error = PTR_ERR(new);
5196 ++ if (!new)
5197 ++ error = -ENOMEM;
5198 ++ else
5199 ++ error = PTR_ERR(new);
5200 + new = NULL;
5201 + perms.allow = 0;
5202 + goto audit;
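
Note: the apparmor fix exists because PTR_ERR(NULL) evaluates to 0. When aa_label_merge() fails by returning NULL rather than an ERR_PTR, taking PTR_ERR() unconditionally would report success to the caller, so an IS_ERR_OR_NULL() branch has to map the NULL case to a concrete errno:

    /* sketch of the general rule, not a new API */
    if (IS_ERR_OR_NULL(ptr))
        error = ptr ? PTR_ERR(ptr) : -ENOMEM;  /* PTR_ERR(NULL) == 0 */
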
5203 +diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
5204 +index b455930a3eaf..ec73d83d0d31 100644
5205 +--- a/tools/bpf/bpftool/map.c
5206 ++++ b/tools/bpf/bpftool/map.c
5207 +@@ -370,6 +370,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
5208 + return argv + i;
5209 + }
5210 +
5211 ++/* on per-cpu maps we must copy the provided value to all value instances */
5212 ++static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
5213 ++{
5214 ++ unsigned int i, n, step;
5215 ++
5216 ++ if (!map_is_per_cpu(info->type))
5217 ++ return;
5218 ++
5219 ++ n = get_possible_cpus();
5220 ++ step = round_up(info->value_size, 8);
5221 ++ for (i = 1; i < n; i++)
5222 ++ memcpy(value + i * step, value, info->value_size);
5223 ++}
5224 ++
5225 + static int parse_elem(char **argv, struct bpf_map_info *info,
5226 + void *key, void *value, __u32 key_size, __u32 value_size,
5227 + __u32 *flags, __u32 **value_fd)
5228 +@@ -449,6 +463,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
5229 + argv = parse_bytes(argv, "value", value, value_size);
5230 + if (!argv)
5231 + return -1;
5232 ++
5233 ++ fill_per_cpu_value(info, value);
5234 + }
5235 +
5236 + return parse_elem(argv, info, key, NULL, key_size, value_size,
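
Note: per-cpu map values are laid out as one slot per possible CPU, each slot padded to an 8-byte boundary, and the kernel expects the whole array on update; fill_per_cpu_value() replicates the single value bpftool parsed into every slot. Worked example: value_size = 12 rounds up to a 16-byte step, so 4 possible CPUs need a 64-byte buffer. A standalone model of the replication:

    #include <stdio.h>
    #include <string.h>

    #define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
        unsigned int value_size = 12, n_cpus = 4;
        unsigned int step = round_up(value_size, 8);      /* 16 */
        unsigned char value[64] = "hello-percpu";         /* slot 0 */

        for (unsigned int i = 1; i < n_cpus; i++)
            memcpy(value + i * step, value, value_size);  /* clone slot 0 */

        printf("step=%u total=%u\n", step, step * n_cpus); /* 16 64 */
        return 0;
    }
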
5237 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
5238 +index 0de024a6cc2b..bbba0d61570f 100644
5239 +--- a/tools/bpf/bpftool/prog.c
5240 ++++ b/tools/bpf/bpftool/prog.c
5241 +@@ -109,13 +109,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
5242 +
5243 + static int prog_fd_by_tag(unsigned char *tag)
5244 + {
5245 +- struct bpf_prog_info info = {};
5246 +- __u32 len = sizeof(info);
5247 + unsigned int id = 0;
5248 + int err;
5249 + int fd;
5250 +
5251 + while (true) {
5252 ++ struct bpf_prog_info info = {};
5253 ++ __u32 len = sizeof(info);
5254 ++
5255 + err = bpf_prog_get_next_id(id, &id);
5256 + if (err) {
5257 + p_err("%s", strerror(errno));
5258 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
5259 +index 6c1e7ceedcf3..53c11fc0855e 100644
5260 +--- a/tools/perf/builtin-script.c
5261 ++++ b/tools/perf/builtin-script.c
5262 +@@ -1589,13 +1589,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
5263 + .force_header = false,
5264 + };
5265 + struct perf_evsel *ev2;
5266 +- static bool init;
5267 + u64 val;
5268 +
5269 +- if (!init) {
5270 +- perf_stat__init_shadow_stats();
5271 +- init = true;
5272 +- }
5273 + if (!evsel->stats)
5274 + perf_evlist__alloc_stats(script->session->evlist, false);
5275 + if (evsel_script(evsel->leader)->gnum++ == 0)
5276 +@@ -1658,7 +1653,7 @@ static void process_event(struct perf_script *script,
5277 + return;
5278 + }
5279 +
5280 +- if (PRINT_FIELD(TRACE)) {
5281 ++ if (PRINT_FIELD(TRACE) && sample->raw_data) {
5282 + event_format__fprintf(evsel->tp_format, sample->cpu,
5283 + sample->raw_data, sample->raw_size, fp);
5284 + }
5285 +@@ -2214,6 +2209,8 @@ static int __cmd_script(struct perf_script *script)
5286 +
5287 + signal(SIGINT, sig_handler);
5288 +
5289 ++ perf_stat__init_shadow_stats();
5290 ++
5291 + /* override event processing functions */
5292 + if (script->show_task_events) {
5293 + script->tool.comm = process_comm_event;
5294 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
5295 +index 22ab8e67c760..3f43aedb384d 100644
5296 +--- a/tools/perf/builtin-trace.c
5297 ++++ b/tools/perf/builtin-trace.c
5298 +@@ -2263,19 +2263,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
5299 +
5300 + static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
5301 + {
5302 +- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
5303 ++ bool found = false;
5304 ++ struct perf_evsel *evsel, *tmp;
5305 ++ struct parse_events_error err = { .idx = 0, };
5306 ++ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
5307 +
5308 +- if (IS_ERR(evsel))
5309 ++ if (ret)
5310 + return false;
5311 +
5312 +- if (perf_evsel__field(evsel, "pathname") == NULL) {
5313 ++ evlist__for_each_entry_safe(evlist, evsel, tmp) {
5314 ++ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
5315 ++ continue;
5316 ++
5317 ++ if (perf_evsel__field(evsel, "pathname")) {
5318 ++ evsel->handler = trace__vfs_getname;
5319 ++ found = true;
5320 ++ continue;
5321 ++ }
5322 ++
5323 ++ list_del_init(&evsel->node);
5324 ++ evsel->evlist = NULL;
5325 + perf_evsel__delete(evsel);
5326 +- return false;
5327 + }
5328 +
5329 +- evsel->handler = trace__vfs_getname;
5330 +- perf_evlist__add(evlist, evsel);
5331 +- return true;
5332 ++ return found;
5333 + }
5334 +
5335 + static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
5336 +diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
5337 +index 1ccbd3342069..383674f448fc 100644
5338 +--- a/tools/perf/util/cpumap.c
5339 ++++ b/tools/perf/util/cpumap.c
5340 +@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
5341 + if (!cpu_list)
5342 + return cpu_map__read_all_cpu_map();
5343 +
5344 +- if (!isdigit(*cpu_list))
5345 ++ /*
5346 ++ * must handle the case of empty cpumap to cover
5347 ++ * TOPOLOGY header for NUMA nodes with no CPU
5348 ++ * (e.g., because of CPU hotplug)
5349 ++ */
5350 ++ if (!isdigit(*cpu_list) && *cpu_list != '\0')
5351 + goto out;
5352 +
5353 + while (isdigit(*cpu_list)) {
5354 +@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
5355 +
5356 + if (nr_cpus > 0)
5357 + cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
5358 +- else
5359 ++ else if (*cpu_list != '\0')
5360 + cpus = cpu_map__default_new();
5361 ++ else
5362 ++ cpus = cpu_map__dummy_new();
5363 + invalid:
5364 + free(tmp_cpus);
5365 + out:
5366 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
5367 +index 6e70cc00c161..a701a8a48f00 100644
5368 +--- a/tools/perf/util/symbol-elf.c
5369 ++++ b/tools/perf/util/symbol-elf.c
5370 +@@ -87,6 +87,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
5371 + return GELF_ST_TYPE(sym->st_info);
5372 + }
5373 +
5374 ++static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
5375 ++{
5376 ++ return GELF_ST_VISIBILITY(sym->st_other);
5377 ++}
5378 ++
5379 + #ifndef STT_GNU_IFUNC
5380 + #define STT_GNU_IFUNC 10
5381 + #endif
5382 +@@ -111,7 +116,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
5383 + return elf_sym__type(sym) == STT_NOTYPE &&
5384 + sym->st_name != 0 &&
5385 + sym->st_shndx != SHN_UNDEF &&
5386 +- sym->st_shndx != SHN_ABS;
5387 ++ sym->st_shndx != SHN_ABS &&
5388 ++ elf_sym__visibility(sym) != STV_HIDDEN &&
5389 ++ elf_sym__visibility(sym) != STV_INTERNAL;
5390 + }
5391 +
5392 + static bool elf_sym__filter(GElf_Sym *sym)
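
Note: ELF symbol visibility lives in the st_other field, separate from binding and type. STV_HIDDEN and STV_INTERNAL symbols are not part of the external ABI, and before this hunk they could surface as spurious labels in perf. Extracting the field with libelf looks like:

    #include <gelf.h>  /* elfutils; link with -lelf */

    static int elf_sym_is_exported(const GElf_Sym *sym)
    {
        unsigned char vis = GELF_ST_VISIBILITY(sym->st_other);

        return vis != STV_HIDDEN && vis != STV_INTERNAL;
    }
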
5393 +diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
5394 +index 315a44fa32af..84fd6f1bf33e 100644
5395 +--- a/tools/testing/selftests/bpf/bpf_util.h
5396 ++++ b/tools/testing/selftests/bpf/bpf_util.h
5397 +@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
5398 + unsigned int start, end, possible_cpus = 0;
5399 + char buff[128];
5400 + FILE *fp;
5401 +- int n;
5402 ++ int len, n, i, j = 0;
5403 +
5404 + fp = fopen(fcpu, "r");
5405 + if (!fp) {
5406 +@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
5407 + exit(1);
5408 + }
5409 +
5410 +- while (fgets(buff, sizeof(buff), fp)) {
5411 +- n = sscanf(buff, "%u-%u", &start, &end);
5412 +- if (n == 0) {
5413 +- printf("Failed to retrieve # possible CPUs!\n");
5414 +- exit(1);
5415 +- } else if (n == 1) {
5416 +- end = start;
5417 ++ if (!fgets(buff, sizeof(buff), fp)) {
5418 ++ printf("Failed to read %s!\n", fcpu);
5419 ++ exit(1);
5420 ++ }
5421 ++
5422 ++ len = strlen(buff);
5423 ++ for (i = 0; i <= len; i++) {
5424 ++ if (buff[i] == ',' || buff[i] == '\0') {
5425 ++ buff[i] = '\0';
5426 ++ n = sscanf(&buff[j], "%u-%u", &start, &end);
5427 ++ if (n <= 0) {
5428 ++ printf("Failed to retrieve # possible CPUs!\n");
5429 ++ exit(1);
5430 ++ } else if (n == 1) {
5431 ++ end = start;
5432 ++ }
5433 ++ possible_cpus += end - start + 1;
5434 ++ j = i + 1;
5435 + }
5436 +- possible_cpus = start == 0 ? end + 1 : 0;
5437 +- break;
5438 + }
5439 ++
5440 + fclose(fp);
5441 +
5442 + return possible_cpus;
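
Note: the rewritten bpf_num_possible_cpus() handles the fact that /sys/devices/system/cpu/possible is a comma-separated list of ranges rather than a single 0-N span; "0-3,8-11", for example, describes 8 possible CPUs. A self-contained version of the same split-and-sum logic:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buff[] = "0-3,8-11";  /* example sysfs mask */
        unsigned int start, end, total = 0;
        int i, j = 0, len = (int)strlen(buff);

        for (i = 0; i <= len; i++) {
            if (buff[i] == ',' || buff[i] == '\0') {
                buff[i] = '\0';
                if (sscanf(&buff[j], "%u-%u", &start, &end) == 1)
                    end = start;          /* single-CPU entry */
                total += end - start + 1;
                j = i + 1;
            }
        }
        printf("%u possible CPUs\n", total);  /* prints 8 */
        return 0;
    }
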
5443 +diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5444 +index bab13dd025a6..0d26b5e3f966 100755
5445 +--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5446 ++++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5447 +@@ -37,6 +37,10 @@ prerequisite()
5448 + exit $ksft_skip
5449 + fi
5450 +
5451 ++ present_cpus=`cat $SYSFS/devices/system/cpu/present`
5452 ++ present_max=${present_cpus##*-}
5453 ++ echo "present_cpus = $present_cpus present_max = $present_max"
5454 ++
5455 + echo -e "\t Cpus in online state: $online_cpus"
5456 +
5457 + offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
5458 +@@ -151,6 +155,8 @@ online_cpus=0
5459 + online_max=0
5460 + offline_cpus=0
5461 + offline_max=0
5462 ++present_cpus=0
5463 ++present_max=0
5464 +
5465 + while getopts e:ahp: opt; do
5466 + case $opt in
5467 +@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
5468 + online_cpu_expect_success $online_max
5469 +
5470 + if [[ $offline_cpus -gt 0 ]]; then
5471 +- echo -e "\t offline to online to offline: cpu $offline_max"
5472 +- online_cpu_expect_success $offline_max
5473 +- offline_cpu_expect_success $offline_max
5474 ++ echo -e "\t offline to online to offline: cpu $present_max"
5475 ++ online_cpu_expect_success $present_max
5476 ++ offline_cpu_expect_success $present_max
5477 ++ online_cpu $present_max
5478 + fi
5479 + exit 0
5480 + else
5481 +diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
5482 +index 919aa2ac00af..9a3764a1084e 100644
5483 +--- a/tools/testing/selftests/net/Makefile
5484 ++++ b/tools/testing/selftests/net/Makefile
5485 +@@ -18,6 +18,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
5486 + KSFT_KHDR_INSTALL := 1
5487 + include ../lib.mk
5488 +
5489 +-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
5490 ++$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
5491 + $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
5492 + $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
5493 +diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
5494 +index 47ed6cef93fb..c9ff2b47bd1c 100644
5495 +--- a/tools/testing/selftests/netfilter/Makefile
5496 ++++ b/tools/testing/selftests/netfilter/Makefile
5497 +@@ -1,6 +1,6 @@
5498 + # SPDX-License-Identifier: GPL-2.0
5499 + # Makefile for netfilter selftests
5500 +
5501 +-TEST_PROGS := nft_trans_stress.sh
5502 ++TEST_PROGS := nft_trans_stress.sh nft_nat.sh
5503 +
5504 + include ../lib.mk
5505 +diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
5506 +index 1017313e41a8..59caa8f71cd8 100644
5507 +--- a/tools/testing/selftests/netfilter/config
5508 ++++ b/tools/testing/selftests/netfilter/config
5509 +@@ -1,2 +1,2 @@
5510 + CONFIG_NET_NS=y
5511 +-NF_TABLES_INET=y
5512 ++CONFIG_NF_TABLES_INET=y
5513 +diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
5514 +new file mode 100755
5515 +index 000000000000..8ec76681605c
5516 +--- /dev/null
5517 ++++ b/tools/testing/selftests/netfilter/nft_nat.sh
5518 +@@ -0,0 +1,762 @@
5519 ++#!/bin/bash
5520 ++#
5521 ++# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
5522 ++#
5523 ++
5524 ++# Kselftest framework requirement - SKIP code is 4.
5525 ++ksft_skip=4
5526 ++ret=0
5527 ++
5528 ++nft --version > /dev/null 2>&1
5529 ++if [ $? -ne 0 ];then
5530 ++ echo "SKIP: Could not run test without nft tool"
5531 ++ exit $ksft_skip
5532 ++fi
5533 ++
5534 ++ip -Version > /dev/null 2>&1
5535 ++if [ $? -ne 0 ];then
5536 ++ echo "SKIP: Could not run test without ip tool"
5537 ++ exit $ksft_skip
5538 ++fi
5539 ++
5540 ++ip netns add ns0
5541 ++ip netns add ns1
5542 ++ip netns add ns2
5543 ++
5544 ++ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
5545 ++ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
5546 ++
5547 ++ip -net ns0 link set lo up
5548 ++ip -net ns0 link set veth0 up
5549 ++ip -net ns0 addr add 10.0.1.1/24 dev veth0
5550 ++ip -net ns0 addr add dead:1::1/64 dev veth0
5551 ++
5552 ++ip -net ns0 link set veth1 up
5553 ++ip -net ns0 addr add 10.0.2.1/24 dev veth1
5554 ++ip -net ns0 addr add dead:2::1/64 dev veth1
5555 ++
5556 ++for i in 1 2; do
5557 ++ ip -net ns$i link set lo up
5558 ++ ip -net ns$i link set eth0 up
5559 ++ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
5560 ++ ip -net ns$i route add default via 10.0.$i.1
5561 ++ ip -net ns$i addr add dead:$i::99/64 dev eth0
5562 ++ ip -net ns$i route add default via dead:$i::1
5563 ++done
5564 ++
5565 ++bad_counter()
5566 ++{
5567 ++ local ns=$1
5568 ++ local counter=$2
5569 ++ local expect=$3
5570 ++
5571 ++ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
5572 ++ ip netns exec $ns nft list counter inet filter $counter 1>&2
5573 ++}
5574 ++
5575 ++check_counters()
5576 ++{
5577 ++ ns=$1
5578 ++ local lret=0
5579 ++
5580 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
5581 ++ if [ $? -ne 0 ]; then
5582 ++ bad_counter $ns ns0in "packets 1 bytes 84"
5583 ++ lret=1
5584 ++ fi
5585 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
5586 ++ if [ $? -ne 0 ]; then
5587 ++ bad_counter $ns ns0out "packets 1 bytes 84"
5588 ++ lret=1
5589 ++ fi
5590 ++
5591 ++ expect="packets 1 bytes 104"
5592 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
5593 ++ if [ $? -ne 0 ]; then
5594 ++ bad_counter $ns ns0in6 "$expect"
5595 ++ lret=1
5596 ++ fi
5597 ++ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
5598 ++ if [ $? -ne 0 ]; then
5599 ++ bad_counter $ns ns0out6 "$expect"
5600 ++ lret=1
5601 ++ fi
5602 ++
5603 ++ return $lret
5604 ++}
5605 ++
5606 ++check_ns0_counters()
5607 ++{
5608 ++ local ns=$1
5609 ++ local lret=0
5610 ++
5611 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
5612 ++ if [ $? -ne 0 ]; then
5613 ++ bad_counter ns0 ns0in "packets 0 bytes 0"
5614 ++ lret=1
5615 ++ fi
5616 ++
5617 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
5618 ++ if [ $? -ne 0 ]; then
5619 ++ bad_counter ns0 ns0in6 "packets 0 bytes 0"
5620 ++ lret=1
5621 ++ fi
5622 ++
5623 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
5624 ++ if [ $? -ne 0 ]; then
5625 ++ bad_counter ns0 ns0out "packets 0 bytes 0"
5626 ++ lret=1
5627 ++ fi
5628 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
5629 ++ if [ $? -ne 0 ]; then
5630 ++ bad_counter ns0 ns0out6 "packets 0 bytes 0"
5631 ++ lret=1
5632 ++ fi
5633 ++
5634 ++ for dir in "in" "out" ; do
5635 ++ expect="packets 1 bytes 84"
5636 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
5637 ++ if [ $? -ne 0 ]; then
5638 ++ bad_counter ns0 $ns$dir "$expect"
5639 ++ lret=1
5640 ++ fi
5641 ++
5642 ++ expect="packets 1 bytes 104"
5643 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
5644 ++ if [ $? -ne 0 ]; then
5645 ++ bad_counter ns0 ${ns}${dir}6 "$expect"
5646 ++ lret=1
5647 ++ fi
5648 ++ done
5649 ++
5650 ++ return $lret
5651 ++}
5652 ++
5653 ++reset_counters()
5654 ++{
5655 ++ for i in 0 1 2;do
5656 ++ ip netns exec ns$i nft reset counters inet > /dev/null
5657 ++ done
5658 ++}
5659 ++
5660 ++test_local_dnat6()
5661 ++{
5662 ++ local lret=0
5663 ++ip netns exec ns0 nft -f - <<EOF
5664 ++table ip6 nat {
5665 ++ chain output {
5666 ++ type nat hook output priority 0; policy accept;
5667 ++ ip6 daddr dead:1::99 dnat to dead:2::99
5668 ++ }
5669 ++}
5670 ++EOF
5671 ++ if [ $? -ne 0 ]; then
5672 ++ echo "SKIP: Could not add add ip6 dnat hook"
5673 ++ return $ksft_skip
5674 ++ fi
5675 ++
5676 ++ # ping netns1, expect rewrite to netns2
5677 ++ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
5678 ++ if [ $? -ne 0 ]; then
5679 ++ lret=1
5680 ++ echo "ERROR: ping6 failed"
5681 ++ return $lret
5682 ++ fi
5683 ++
5684 ++ expect="packets 0 bytes 0"
5685 ++ for dir in "in6" "out6" ; do
5686 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5687 ++ if [ $? -ne 0 ]; then
5688 ++ bad_counter ns0 ns1$dir "$expect"
5689 ++ lret=1
5690 ++ fi
5691 ++ done
5692 ++
5693 ++ expect="packets 1 bytes 104"
5694 ++ for dir in "in6" "out6" ; do
5695 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5696 ++ if [ $? -ne 0 ]; then
5697 ++ bad_counter ns0 ns2$dir "$expect"
5698 ++ lret=1
5699 ++ fi
5700 ++ done
5701 ++
5702 ++ # expect 0 count in ns1
5703 ++ expect="packets 0 bytes 0"
5704 ++ for dir in "in6" "out6" ; do
5705 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5706 ++ if [ $? -ne 0 ]; then
5707 ++ bad_counter ns1 ns0$dir "$expect"
5708 ++ lret=1
5709 ++ fi
5710 ++ done
5711 ++
5712 ++ # expect 1 packet in ns2
5713 ++ expect="packets 1 bytes 104"
5714 ++ for dir in "in6" "out6" ; do
5715 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5716 ++ if [ $? -ne 0 ]; then
5717 ++ bad_counter ns2 ns0$dir "$expect"
5718 ++ lret=1
5719 ++ fi
5720 ++ done
5721 ++
5722 ++ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
5723 ++ ip netns exec ns0 nft flush chain ip6 nat output
5724 ++
5725 ++ return $lret
5726 ++}
5727 ++
5728 ++test_local_dnat()
5729 ++{
5730 ++ local lret=0
5731 ++ip netns exec ns0 nft -f - <<EOF
5732 ++table ip nat {
5733 ++ chain output {
5734 ++ type nat hook output priority 0; policy accept;
5735 ++ ip daddr 10.0.1.99 dnat to 10.0.2.99
5736 ++ }
5737 ++}
5738 ++EOF
5739 ++ # ping netns1, expect rewrite to netns2
5740 ++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
5741 ++ if [ $? -ne 0 ]; then
5742 ++ lret=1
5743 ++ echo "ERROR: ping failed"
5744 ++ return $lret
5745 ++ fi
5746 ++
5747 ++ expect="packets 0 bytes 0"
5748 ++ for dir in "in" "out" ; do
5749 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5750 ++ if [ $? -ne 0 ]; then
5751 ++ bad_counter ns0 ns1$dir "$expect"
5752 ++ lret=1
5753 ++ fi
5754 ++ done
5755 ++
5756 ++ expect="packets 1 bytes 84"
5757 ++ for dir in "in" "out" ; do
5758 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5759 ++ if [ $? -ne 0 ]; then
5760 ++ bad_counter ns0 ns2$dir "$expect"
5761 ++ lret=1
5762 ++ fi
5763 ++ done
5764 ++
5765 ++ # expect 0 count in ns1
5766 ++ expect="packets 0 bytes 0"
5767 ++ for dir in "in" "out" ; do
5768 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5769 ++ if [ $? -ne 0 ]; then
5770 ++ bad_counter ns1 ns0$dir "$expect"
5771 ++ lret=1
5772 ++ fi
5773 ++ done
5774 ++
5775 ++ # expect 1 packet in ns2
5776 ++ expect="packets 1 bytes 84"
5777 ++ for dir in "in" "out" ; do
5778 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5779 ++ if [ $? -ne 0 ]; then
5780 ++ bad_counter ns2 ns0$dir "$expect"
5781 ++ lret=1
5782 ++ fi
5783 ++ done
5784 ++
5785 ++ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
5786 ++
5787 ++ ip netns exec ns0 nft flush chain ip nat output
5788 ++
5789 ++ reset_counters
5790 ++ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
5791 ++ if [ $? -ne 0 ]; then
5792 ++ lret=1
5793 ++ echo "ERROR: ping failed"
5794 ++ return $lret
5795 ++ fi
5796 ++
5797 ++ expect="packets 1 bytes 84"
5798 ++ for dir in "in" "out" ; do
5799 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5800 ++ if [ $? -ne 0 ]; then
5801 ++ bad_counter ns0 ns1$dir "$expect"
5802 ++ lret=1
5803 ++ fi
5804 ++ done
5805 ++ expect="packets 0 bytes 0"
5806 ++ for dir in "in" "out" ; do
5807 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5808 ++ if [ $? -ne 0 ]; then
5809 ++ bad_counter ns0 ns2$dir "$expect"
5810 ++ lret=1
5811 ++ fi
5812 ++ done
5813 ++
5814 ++ # expect 1 count in ns1
5815 ++ expect="packets 1 bytes 84"
5816 ++ for dir in "in" "out" ; do
5817 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5818 ++ if [ $? -ne 0 ]; then
5819 ++ bad_counter ns1 ns0$dir "$expect"
5820 ++ lret=1
5821 ++ fi
5822 ++ done
5823 ++
5824 ++ # expect 0 packet in ns2
5825 ++ expect="packets 0 bytes 0"
5826 ++ for dir in "in" "out" ; do
5827 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5828 ++ if [ $? -ne 0 ]; then
5829 ++ bad_counter ns2 ns0$dir "$expect"
5830 ++ lret=1
5831 ++ fi
5832 ++ done
5833 ++
5834 ++ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
5835 ++
5836 ++ return $lret
5837 ++}
5838 ++
5839 ++
5840 ++test_masquerade6()
5841 ++{
5842 ++ local lret=0
5843 ++
5844 ++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
5845 ++
5846 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
5847 ++ if [ $? -ne 0 ] ; then
5848 ++ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
5849 ++ lret=1
5850 ++ return 1
5851 ++ fi
5852 ++
5853 ++ expect="packets 1 bytes 104"
5854 ++ for dir in "in6" "out6" ; do
5855 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5856 ++ if [ $? -ne 0 ]; then
5857 ++ bad_counter ns1 ns2$dir "$expect"
5858 ++ lret=1
5859 ++ fi
5860 ++
5861 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5862 ++ if [ $? -ne 0 ]; then
5863 ++ bad_counter ns2 ns1$dir "$expect"
5864 ++ lret=1
5865 ++ fi
5866 ++ done
5867 ++
5868 ++ reset_counters
5869 ++
5870 ++# add masquerading rule
5871 ++ip netns exec ns0 nft -f - <<EOF
5872 ++table ip6 nat {
5873 ++ chain postrouting {
5874 ++ type nat hook postrouting priority 0; policy accept;
5875 ++ meta oif veth0 masquerade
5876 ++ }
5877 ++}
5878 ++EOF
5879 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
5880 ++ if [ $? -ne 0 ] ; then
5881 ++ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
5882 ++ lret=1
5883 ++ fi
5884 ++
5885 ++ # ns1 should have seen packets from ns0, due to masquerade
5886 ++ expect="packets 1 bytes 104"
5887 ++ for dir in "in6" "out6" ; do
5888 ++
5889 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5890 ++ if [ $? -ne 0 ]; then
5891 ++ bad_counter ns1 ns0$dir "$expect"
5892 ++ lret=1
5893 ++ fi
5894 ++
5895 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5896 ++ if [ $? -ne 0 ]; then
5897 ++ bad_counter ns2 ns1$dir "$expect"
5898 ++ lret=1
5899 ++ fi
5900 ++ done
5901 ++
5902 ++ # ns1 should not have seen packets from ns2, due to masquerade
5903 ++ expect="packets 0 bytes 0"
5904 ++ for dir in "in6" "out6" ; do
5905 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5906 ++ if [ $? -ne 0 ]; then
5907 ++ bad_counter ns1 ns2$dir "$expect"
5908 ++ lret=1
5909 ++ fi
5910 ++
5911 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5912 ++ if [ $? -ne 0 ]; then
5913 ++ bad_counter ns2 ns1$dir "$expect"
5914 ++ lret=1
5915 ++ fi
5916 ++ done
5917 ++
5918 ++ ip netns exec ns0 nft flush chain ip6 nat postrouting
5919 ++ if [ $? -ne 0 ]; then
5920 ++ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
5921 ++ lret=1
5922 ++ fi
5923 ++
5924 ++ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
5925 ++
5926 ++ return $lret
5927 ++}
5928 ++
5929 ++test_masquerade()
5930 ++{
5931 ++ local lret=0
5932 ++
5933 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
5934 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
5935 ++
5936 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
5937 ++ if [ $? -ne 0 ] ; then
5938 ++ echo "ERROR: canot ping ns1 from ns2"
5939 ++ lret=1
5940 ++ fi
5941 ++
5942 ++ expect="packets 1 bytes 84"
5943 ++ for dir in "in" "out" ; do
5944 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5945 ++ if [ $? -ne 0 ]; then
5946 ++ bad_counter ns1 ns2$dir "$expect"
5947 ++ lret=1
5948 ++ fi
5949 ++
5950 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5951 ++ if [ $? -ne 0 ]; then
5952 ++ bad_counter ns2 ns1$dir "$expect"
5953 ++ lret=1
5954 ++ fi
5955 ++ done
5956 ++
5957 ++ reset_counters
5958 ++
5959 ++# add masquerading rule
5960 ++ip netns exec ns0 nft -f - <<EOF
5961 ++table ip nat {
5962 ++ chain postrouting {
5963 ++ type nat hook postrouting priority 0; policy accept;
5964 ++ meta oif veth0 masquerade
5965 ++ }
5966 ++}
5967 ++EOF
5968 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
5969 ++ if [ $? -ne 0 ] ; then
5970 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
5971 ++ lret=1
5972 ++ fi
5973 ++
5974 ++ # ns1 should have seen packets from ns0, due to masquerade
5975 ++ expect="packets 1 bytes 84"
5976 ++ for dir in "in" "out" ; do
5977 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5978 ++ if [ $? -ne 0 ]; then
5979 ++ bad_counter ns1 ns0$dir "$expect"
5980 ++ lret=1
5981 ++ fi
5982 ++
5983 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5984 ++ if [ $? -ne 0 ]; then
5985 ++ bad_counter ns2 ns1$dir "$expect"
5986 ++ lret=1
5987 ++ fi
5988 ++ done
5989 ++
5990 ++ # ns1 should not have seen packets from ns2, due to masquerade
5991 ++ expect="packets 0 bytes 0"
5992 ++ for dir in "in" "out" ; do
5993 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5994 ++ if [ $? -ne 0 ]; then
5995 ++ bad_counter ns1 ns2$dir "$expect"
5996 ++ lret=1
5997 ++ fi
5998 ++
5999 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6000 ++ if [ $? -ne 0 ]; then
6001 ++ bad_counter ns2 ns1$dir "$expect"
6002 ++ lret=1
6003 ++ fi
6004 ++ done
6005 ++
6006 ++ ip netns exec ns0 nft flush chain ip nat postrouting
6007 ++ if [ $? -ne 0 ]; then
6008 ++ echo "ERROR: Could not flush nat postrouting" 1>&2
6009 ++ lret=1
6010 ++ fi
6011 ++
6012 ++ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
6013 ++
6014 ++ return $lret
6015 ++}
6016 ++
6017 ++test_redirect6()
6018 ++{
6019 ++ local lret=0
6020 ++
6021 ++ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
6022 ++
6023 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6024 ++ if [ $? -ne 0 ] ; then
6025 ++ echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
6026 ++ lret=1
6027 ++ fi
6028 ++
6029 ++ expect="packets 1 bytes 104"
6030 ++ for dir in "in6" "out6" ; do
6031 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6032 ++ if [ $? -ne 0 ]; then
6033 ++ bad_counter ns1 ns2$dir "$expect"
6034 ++ lret=1
6035 ++ fi
6036 ++
6037 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6038 ++ if [ $? -ne 0 ]; then
6039 ++ bad_counter ns2 ns1$dir "$expect"
6040 ++ lret=1
6041 ++ fi
6042 ++ done
6043 ++
6044 ++ reset_counters
6045 ++
6046 ++# add redirect rule
6047 ++ip netns exec ns0 nft -f - <<EOF
6048 ++table ip6 nat {
6049 ++ chain prerouting {
6050 ++ type nat hook prerouting priority 0; policy accept;
6051 ++ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
6052 ++ }
6053 ++}
6054 ++EOF
6055 ++ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6056 ++ if [ $? -ne 0 ] ; then
6057 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
6058 ++ lret=1
6059 ++ fi
6060 ++
6061 ++ # ns1 should have seen no packets from ns2, due to redirection
6062 ++ expect="packets 0 bytes 0"
6063 ++ for dir in "in6" "out6" ; do
6064 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6065 ++ if [ $? -ne 0 ]; then
6066 ++ bad_counter ns1 ns2$dir "$expect"
6067 ++ lret=1
6068 ++ fi
6069 ++ done
6070 ++
6071 ++ # ns0 should have seen packets from ns2, due to redirection
6072 ++ expect="packets 1 bytes 104"
6073 ++ for dir in "in6" "out6" ; do
6074 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6075 ++ if [ $? -ne 0 ]; then
6076 ++ bad_counter ns0 ns2$dir "$expect"
6077 ++ lret=1
6078 ++ fi
6079 ++ done
6080 ++
6081 ++ ip netns exec ns0 nft delete table ip6 nat
6082 ++ if [ $? -ne 0 ]; then
6083 ++ echo "ERROR: Could not delete ip6 nat table" 1>&2
6084 ++ lret=1
6085 ++ fi
6086 ++
6087 ++ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
6088 ++
6089 ++ return $lret
6090 ++}
6091 ++
6092 ++test_redirect()
6093 ++{
6094 ++ local lret=0
6095 ++
6096 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
6097 ++ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
6098 ++
6099 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6100 ++ if [ $? -ne 0 ] ; then
6101 ++ echo "ERROR: cannot ping ns1 from ns2"
6102 ++ lret=1
6103 ++ fi
6104 ++
6105 ++ expect="packets 1 bytes 84"
6106 ++ for dir in "in" "out" ; do
6107 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6108 ++ if [ $? -ne 0 ]; then
6109 ++ bad_counter ns1 ns2$dir "$expect"
6110 ++ lret=1
6111 ++ fi
6112 ++
6113 ++ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6114 ++ if [ $? -ne 0 ]; then
6115 ++ bad_counter ns2 ns1$dir "$expect"
6116 ++ lret=1
6117 ++ fi
6118 ++ done
6119 ++
6120 ++ reset_counters
6121 ++
6122 ++# add redirect rule
6123 ++ip netns exec ns0 nft -f - <<EOF
6124 ++table ip nat {
6125 ++ chain prerouting {
6126 ++ type nat hook prerouting priority 0; policy accept;
6127 ++ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
6128 ++ }
6129 ++}
6130 ++EOF
6131 ++ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6132 ++ if [ $? -ne 0 ] ; then
6133 ++ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
6134 ++ lret=1
6135 ++ fi
6136 ++
6137 ++ # ns1 should have seen no packets from ns2, due to redirection
6138 ++ expect="packets 0 bytes 0"
6139 ++ for dir in "in" "out" ; do
6140 ++
6141 ++ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6142 ++ if [ $? -ne 0 ]; then
6143 ++ bad_counter ns1 ns2$dir "$expect"
6144 ++ lret=1
6145 ++ fi
6146 ++ done
6147 ++
6148 ++ # ns0 should have seen packets from ns2, due to redirection
6149 ++ expect="packets 1 bytes 84"
6150 ++ for dir in "in" "out" ; do
6151 ++ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6152 ++ if [ $? -ne 0 ]; then
6153 ++ bad_counter ns0 ns2$dir "$expect"
6154 ++ lret=1
6155 ++ fi
6156 ++ done
6157 ++
6158 ++ ip netns exec ns0 nft delete table ip nat
6159 ++ if [ $? -ne 0 ]; then
6160 ++ echo "ERROR: Could not delete nat table" 1>&2
6161 ++ lret=1
6162 ++ fi
6163 ++
6164 ++ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
6165 ++
6166 ++ return $lret
6167 ++}
6168 ++
6169 ++
6170 ++# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
6171 ++for i in 0 1 2; do
6172 ++ip netns exec ns$i nft -f - <<EOF
6173 ++table inet filter {
6174 ++ counter ns0in {}
6175 ++ counter ns1in {}
6176 ++ counter ns2in {}
6177 ++
6178 ++ counter ns0out {}
6179 ++ counter ns1out {}
6180 ++ counter ns2out {}
6181 ++
6182 ++ counter ns0in6 {}
6183 ++ counter ns1in6 {}
6184 ++ counter ns2in6 {}
6185 ++
6186 ++ counter ns0out6 {}
6187 ++ counter ns1out6 {}
6188 ++ counter ns2out6 {}
6189 ++
6190 ++ map nsincounter {
6191 ++ type ipv4_addr : counter
6192 ++ elements = { 10.0.1.1 : "ns0in",
6193 ++ 10.0.2.1 : "ns0in",
6194 ++ 10.0.1.99 : "ns1in",
6195 ++ 10.0.2.99 : "ns2in" }
6196 ++ }
6197 ++
6198 ++ map nsincounter6 {
6199 ++ type ipv6_addr : counter
6200 ++ elements = { dead:1::1 : "ns0in6",
6201 ++ dead:2::1 : "ns0in6",
6202 ++ dead:1::99 : "ns1in6",
6203 ++ dead:2::99 : "ns2in6" }
6204 ++ }
6205 ++
6206 ++ map nsoutcounter {
6207 ++ type ipv4_addr : counter
6208 ++ elements = { 10.0.1.1 : "ns0out",
6209 ++ 10.0.2.1 : "ns0out",
6210 ++ 10.0.1.99: "ns1out",
6211 ++ 10.0.2.99: "ns2out" }
6212 ++ }
6213 ++
6214 ++ map nsoutcounter6 {
6215 ++ type ipv6_addr : counter
6216 ++ elements = { dead:1::1 : "ns0out6",
6217 ++ dead:2::1 : "ns0out6",
6218 ++ dead:1::99 : "ns1out6",
6219 ++ dead:2::99 : "ns2out6" }
6220 ++ }
6221 ++
6222 ++ chain input {
6223 ++ type filter hook input priority 0; policy accept;
6224 ++ counter name ip saddr map @nsincounter
6225 ++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
6226 ++ }
6227 ++ chain output {
6228 ++ type filter hook output priority 0; policy accept;
6229 ++ counter name ip daddr map @nsoutcounter
6230 ++ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
6231 ++ }
6232 ++}
6233 ++EOF
6234 ++done
6235 ++
6236 ++sleep 3
6237 ++# test basic connectivity
6238 ++for i in 1 2; do
6239 ++ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
6240 ++ if [ $? -ne 0 ];then
6241 ++ echo "ERROR: Could not reach other namespace(s)" 1>&2
6242 ++ ret=1
6243 ++ fi
6244 ++
6245 ++ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
6246 ++ if [ $? -ne 0 ];then
6247 ++ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
6248 ++ ret=1
6249 ++ fi
6250 ++ check_counters ns$i
6251 ++ if [ $? -ne 0 ]; then
6252 ++ ret=1
6253 ++ fi
6254 ++
6255 ++ check_ns0_counters ns$i
6256 ++ if [ $? -ne 0 ]; then
6257 ++ ret=1
6258 ++ fi
6259 ++ reset_counters
6260 ++done
6261 ++
6262 ++if [ $ret -eq 0 ];then
6263 ++ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
6264 ++fi
6265 ++
6266 ++reset_counters
6267 ++test_local_dnat
6268 ++test_local_dnat6
6269 ++
6270 ++reset_counters
6271 ++test_masquerade
6272 ++test_masquerade6
6273 ++
6274 ++reset_counters
6275 ++test_redirect
6276 ++test_redirect6
6277 ++
6278 ++for i in 0 1 2; do ip netns del ns$i;done
6279 ++
6280 ++exit $ret
6281 +diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
6282 +index 82121a81681f..29bac5ef9a93 100644
6283 +--- a/tools/testing/selftests/proc/.gitignore
6284 ++++ b/tools/testing/selftests/proc/.gitignore
6285 +@@ -10,4 +10,5 @@
6286 + /proc-uptime-002
6287 + /read
6288 + /self
6289 ++/setns-dcache
6290 + /thread-self
6291 +diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
6292 +index 1c12c34cf85d..434d033ee067 100644
6293 +--- a/tools/testing/selftests/proc/Makefile
6294 ++++ b/tools/testing/selftests/proc/Makefile
6295 +@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
6296 + TEST_GEN_PROGS += proc-uptime-002
6297 + TEST_GEN_PROGS += read
6298 + TEST_GEN_PROGS += self
6299 ++TEST_GEN_PROGS += setns-dcache
6300 + TEST_GEN_PROGS += thread-self
6301 +
6302 + include ../lib.mk
6303 +diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
6304 +new file mode 100644
6305 +index 000000000000..60ab197a73fc
6306 +--- /dev/null
6307 ++++ b/tools/testing/selftests/proc/setns-dcache.c
6308 +@@ -0,0 +1,129 @@
6309 ++/*
6310 ++ * Copyright © 2019 Alexey Dobriyan <adobriyan@×××××.com>
6311 ++ *
6312 ++ * Permission to use, copy, modify, and distribute this software for any
6313 ++ * purpose with or without fee is hereby granted, provided that the above
6314 ++ * copyright notice and this permission notice appear in all copies.
6315 ++ *
6316 ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
6317 ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
6318 ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
6319 ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
6320 ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
6321 ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
6322 ++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
6323 ++ */
6324 ++/*
6325 ++ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
6326 ++ * if the old one is in the dcache.
6327 ++ *
6328 ++ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
6329 ++ */
6330 ++#undef NDEBUG
6331 ++#include <assert.h>
6332 ++#include <errno.h>
6333 ++#include <sched.h>
6334 ++#include <signal.h>
6335 ++#include <stdio.h>
6336 ++#include <stdlib.h>
6337 ++#include <string.h>
6338 ++#include <unistd.h>
6339 ++#include <sys/types.h>
6340 ++#include <sys/stat.h>
6341 ++#include <fcntl.h>
6342 ++#include <sys/socket.h>
6343 ++
6344 ++static pid_t pid = -1;
6345 ++
6346 ++static void f(void)
6347 ++{
6348 ++ if (pid > 0) {
6349 ++ kill(pid, SIGTERM);
6350 ++ }
6351 ++}
6352 ++
6353 ++int main(void)
6354 ++{
6355 ++ int fd[2];
6356 ++ char _ = 0;
6357 ++ int nsfd;
6358 ++
6359 ++ atexit(f);
6360 ++
6361 ++ /* Check for privileges and syscall availability straight away. */
6362 ++ if (unshare(CLONE_NEWNET) == -1) {
6363 ++ if (errno == ENOSYS || errno == EPERM) {
6364 ++ return 4;
6365 ++ }
6366 ++ return 1;
6367 ++ }
6368 ++ /* Distinguisher between two otherwise empty net namespaces. */
6369 ++ if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
6370 ++ return 1;
6371 ++ }
6372 ++
6373 ++ if (pipe(fd) == -1) {
6374 ++ return 1;
6375 ++ }
6376 ++
6377 ++ pid = fork();
6378 ++ if (pid == -1) {
6379 ++ return 1;
6380 ++ }
6381 ++
6382 ++ if (pid == 0) {
6383 ++ if (unshare(CLONE_NEWNET) == -1) {
6384 ++ return 1;
6385 ++ }
6386 ++
6387 ++ if (write(fd[1], &_, 1) != 1) {
6388 ++ return 1;
6389 ++ }
6390 ++
6391 ++ pause();
6392 ++
6393 ++ return 0;
6394 ++ }
6395 ++
6396 ++ if (read(fd[0], &_, 1) != 1) {
6397 ++ return 1;
6398 ++ }
6399 ++
6400 ++ {
6401 ++ char buf[64];
6402 ++ snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
6403 ++ nsfd = open(buf, O_RDONLY);
6404 ++ if (nsfd == -1) {
6405 ++ return 1;
6406 ++ }
6407 ++ }
6408 ++
6409 ++ /* Reliably pin dentry into dcache. */
6410 ++ (void)open("/proc/net/unix", O_RDONLY);
6411 ++
6412 ++ if (setns(nsfd, CLONE_NEWNET) == -1) {
6413 ++ return 1;
6414 ++ }
6415 ++
6416 ++ kill(pid, SIGTERM);
6417 ++ pid = 0;
6418 ++
6419 ++ {
6420 ++ char buf[4096];
6421 ++ ssize_t rv;
6422 ++ int fd;
6423 ++
6424 ++ fd = open("/proc/net/unix", O_RDONLY);
6425 ++ if (fd == -1) {
6426 ++ return 1;
6427 ++ }
6428 ++
6429 ++#define S "Num RefCount Protocol Flags Type St Inode Path\n"
6430 ++ rv = read(fd, buf, sizeof(buf));
6431 ++
6432 ++ assert(rv == strlen(S));
6433 ++ assert(memcmp(buf, S, strlen(S)) == 0);
6434 ++ }
6435 ++
6436 ++ return 0;
6437 ++}
6438 +diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
6439 +index c02683cfb6c9..7656c7ce79d9 100644
6440 +--- a/tools/testing/selftests/timers/Makefile
6441 ++++ b/tools/testing/selftests/timers/Makefile
6442 +@@ -1,6 +1,6 @@
6443 + # SPDX-License-Identifier: GPL-2.0
6444 + CFLAGS += -O3 -Wl,-no-as-needed -Wall
6445 +-LDFLAGS += -lrt -lpthread -lm
6446 ++LDLIBS += -lrt -lpthread -lm
6447 +
6448 + # these are all "safe" tests that don't modify
6449 + # system time or require escalated privileges