
From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 10 Feb 2021 09:51:40
Message-Id: 1612950675.6c84e9a9d87af7d00d731b3a4f131091c7393002.alicef@gentoo
commit: 6c84e9a9d87af7d00d731b3a4f131091c7393002
Author: Alice Ferrazzi <alicef@gentoo.org>
AuthorDate: Wed Feb 10 09:51:09 2021 +0000
Commit: Alice Ferrazzi <alicef@gentoo.org>
CommitDate: Wed Feb 10 09:51:15 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6c84e9a9

Linux patch 5.10.15

Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>

0000_README | 4 +
1014_linux-5.10.15.patch | 4352 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4356 insertions(+)

diff --git a/0000_README b/0000_README
index 7375e82..7d03d9d 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch: 1013_linux-5.10.14.patch
From: http://www.kernel.org
Desc: Linux 5.10.14

+Patch: 1014_linux-5.10.15.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.15
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-5.10.15.patch b/1014_linux-5.10.15.patch
new file mode 100644
index 0000000..28991a6
--- /dev/null
+++ b/1014_linux-5.10.15.patch
@@ -0,0 +1,4352 @@
+diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
+index 580ab9a0fe319..137afeb3f581c 100644
+--- a/Documentation/filesystems/overlayfs.rst
++++ b/Documentation/filesystems/overlayfs.rst
+@@ -575,6 +575,14 @@ without significant effort.
+ The advantage of mounting with the "volatile" option is that all forms of
+ sync calls to the upper filesystem are omitted.
+
++In order to avoid giving a false sense of safety, the syncfs (and fsync)
++semantics of volatile mounts are slightly different from those of the rest of
++the VFS. If any writeback error occurs on the upperdir's filesystem after a
++volatile mount takes place, all sync functions will return an error. Once this
++condition is reached, the filesystem will not recover, and every subsequent sync
++call will return an error, even if the upperdir has not experienced a new error
++since the last sync call.
++
+ When overlay is mounted with "volatile" option, the directory
+ "$workdir/work/incompat/volatile" is created. During next mount, overlay
+ checks for this directory and refuses to mount if present. This is a strong
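For context, the "volatile" behaviour documented in the hunk above is selected at mount time through the overlay mount options. A minimal C sketch using mount(2) — the paths are hypothetical and this is not part of the patch:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* lowerdir/upperdir/workdir must already exist. "volatile" elides
         * all syncfs/fsync on the upper filesystem, with the error
         * semantics described in the documentation hunk above. */
        if (mount("overlay", "/mnt/merged", "overlay", 0,
                  "lowerdir=/mnt/lower,upperdir=/mnt/upper,workdir=/mnt/work,volatile"))
                perror("mount");
        return 0;
}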
+diff --git a/Makefile b/Makefile
+index bb3770be9779d..b62d2d4ea7b02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+@@ -812,10 +812,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
+ KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
+ endif
+
++DEBUG_CFLAGS :=
++
+ # Workaround for GCC versions < 5.0
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
+ ifdef CONFIG_CC_IS_GCC
+-DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
++DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
+ endif
+
+ ifdef CONFIG_DEBUG_INFO
+@@ -948,12 +950,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+ # change __FILE__ to the relative path from the srctree
+ KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+
+-# ensure -fcf-protection is disabled when using retpoline as it is
+-# incompatible with -mindirect-branch=thunk-extern
+-ifdef CONFIG_RETPOLINE
+-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+-endif
+-
+ # include additional Makefiles when needed
+ include-y := scripts/Makefile.extrawarn
+ include-$(CONFIG_KASAN) += scripts/Makefile.kasan
+diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
+index c8745bc800f71..7b8c18e6605e4 100644
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -114,7 +114,7 @@
+ gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+ gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
+ gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
+- cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
++ cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+
+ /* lcd panel */
+@@ -124,7 +124,6 @@
+ spi-max-frequency = <100000>;
+ spi-cpol;
+ spi-cpha;
+- spi-cs-high;
+
+ backlight= <&backlight>;
+ label = "lcd";
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
+index 62ab23824a3e7..e4d287d994214 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
+@@ -35,7 +35,7 @@
+ */
+ rs485-rx-en {
+ gpio-hog;
+- gpios = <8 GPIO_ACTIVE_HIGH>;
++ gpios = <8 0>;
+ output-low;
+ line-name = "rs485-rx-en";
+ };
+@@ -63,7 +63,7 @@
+ */
+ usb-hub {
+ gpio-hog;
+- gpios = <2 GPIO_ACTIVE_HIGH>;
++ gpios = <2 0>;
+ output-high;
+ line-name = "usb-hub-reset";
+ };
+@@ -87,6 +87,12 @@
+ };
+ };
+
++&i2c4 {
++ touchscreen@49 {
++ status = "disabled";
++ };
++};
++
+ &i2c5 { /* TP7/TP8 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins_a>;
+@@ -104,7 +110,7 @@
+ * are used for on-board microSD slot instead.
+ */
+ /delete-property/broken-cd;
+- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
++ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
+ disable-wp;
+ };
+
+diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+index f796a6150313e..2d027dafb7bce 100644
+--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+@@ -353,7 +353,8 @@
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
+- broken-cd;
++ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
++ disable-wp;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+diff --git a/arch/arm/boot/dts/sun7i-a20-bananapro.dts b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+index 01ccff756996d..5740f9442705c 100644
+--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
++++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+@@ -110,7 +110,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac_rgmii_pins>;
+ phy-handle = <&phy1>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-supply = <&reg_gmac_3v3>;
+ status = "okay";
+ };
+diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
+index 98daa7f483148..7454480d084b2 100644
+--- a/arch/arm/include/debug/tegra.S
++++ b/arch/arm/include/debug/tegra.S
+@@ -149,7 +149,34 @@
+
+ .align
+ 99: .word .
++#if defined(ZIMAGE)
++ .word . + 4
++/*
++ * Storage for the state maintained by the macro.
++ *
++ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
++ * That's because this header is included from multiple files, and we only
++ * want a single copy of the data. In particular, the UART probing code above
++ * assumes it's running using physical addresses. This is true when this file
++ * is included from head.o, but not when included from debug.o. So we need
++ * to share the probe results between the two copies, rather than having
++ * to re-run the probing again later.
++ *
++ * In the decompressor, we put the storage right here, since common.c
++ * isn't included in the decompressor build. This storage data gets put in
++ * .text even though it's really data, since .data is discarded from the
++ * decompressor. Luckily, .text is writeable in the decompressor, unless
++ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
++ */
++ /* Debug UART initialization required */
++ .word 1
++ /* Debug UART physical address */
++ .word 0
++ /* Debug UART virtual address */
++ .word 0
++#else
+ .word tegra_uart_config
++#endif
+ .ltorg
+
+ /* Load previously selected UART address */
+@@ -189,30 +216,3 @@
+
+ .macro waituarttxrdy,rd,rx
+ .endm
+-
+-/*
+- * Storage for the state maintained by the macros above.
+- *
+- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+- * That's because this header is included from multiple files, and we only
+- * want a single copy of the data. In particular, the UART probing code above
+- * assumes it's running using physical addresses. This is true when this file
+- * is included from head.o, but not when included from debug.o. So we need
+- * to share the probe results between the two copies, rather than having
+- * to re-run the probing again later.
+- *
+- * In the decompressor, we put the symbol/storage right here, since common.c
+- * isn't included in the decompressor build. This symbol gets put in .text
+- * even though it's really data, since .data is discarded from the
+- * decompressor. Luckily, .text is writeable in the decompressor, unless
+- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+- */
+-#if defined(ZIMAGE)
+-tegra_uart_config:
+- /* Debug UART initialization required */
+- .word 1
+- /* Debug UART physical address */
+- .word 0
+- /* Debug UART virtual address */
+- .word 0
+-#endif
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 416462e3f5d63..f9713dc561cf7 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("ldrb %0, [%1, %2]"
++ asm volatile("ldrb %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 2:
+- asm("ldrh %0, [%1, %2]"
++ asm volatile("ldrh %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ case 4:
+- asm("ldr %0, [%1, %2]"
++ asm volatile("ldr %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where) : "cc");
+ break;
+ }
+@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ if (addr)
+ switch (size) {
+ case 1:
+- asm("strb %0, [%1, %2]"
++ asm volatile("strb %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 2:
+- asm("strh %0, [%1, %2]"
++ asm volatile("strh %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
+ case 4:
+- asm("str %0, [%1, %2]"
++ asm volatile("str %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where)
+ : "cc");
+ break;
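The asm -> asm volatile change above matters because GCC treats an asm with outputs as pure: if the result appears unused, or two reads have identical operands, the compiler may delete or merge them. That is unsafe for reads with side effects such as these PCI configuration cycles. A generic sketch of the rule — illustrative only, not kernel code:

static inline unsigned int cfg_read_byte(const void *addr)
{
        unsigned int v;

        /* "volatile" forces the load to be emitted exactly as written,
         * even if v is never consumed or the read is repeated. */
        asm volatile("ldrb %0, [%1]" : "=r" (v) : "r" (addr));
        return v;
}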
+diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
+index a720259099edf..0a4c9b0b13b0c 100644
+--- a/arch/arm/mach-omap1/board-osk.c
++++ b/arch/arm/mach-omap1/board-osk.c
+@@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
+ */
+ gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
+ gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
++ /* Free the GPIO again as the driver will request it */
++ gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
+
+ /* Set GPIO 2 high so LED D3 is off by default */
+ tps65010_set_gpio_out_value(GPIO2, HIGH);
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 8514fe6a275a3..a6127002573bd 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -2384,7 +2384,7 @@
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+- snps,quirk-frame-length-adjustment;
++ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,parkmode-disable-ss-quirk;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+index cf5a98f0e47c8..a712273c905af 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+@@ -52,7 +52,7 @@
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+- gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
++ gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ regulator-always-on;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+index 1fa39bacff4b3..0b4545012d43e 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+@@ -385,7 +385,7 @@
+
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1046a-dcfg", "syscon";
+- reg = <0x0 0x1ee0000 0x0 0x10000>;
++ reg = <0x0 0x1ee0000 0x0 0x1000>;
+ big-endian;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+index 76a8c996d497f..d70aae77a6e84 100644
+--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+@@ -263,6 +263,8 @@
+ &i2c3 {
+ status = "okay";
+ clock-frequency = <400000>;
++ /* Overwrite pinctrl-0 from sdm845.dtsi */
++ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
+
+ tsel: hid@15 {
+ compatible = "hid-over-i2c";
+@@ -270,9 +272,6 @@
+ hid-descr-addr = <0x1>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+ };
+
+ tsc2: hid@2c {
+@@ -281,11 +280,6 @@
+ hid-descr-addr = <0x20>;
+
+ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&i2c3_hid_active>;
+-
+- status = "disabled";
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
+index 2695ea8cda142..64193292d26c3 100644
+--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
++++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
+@@ -1097,7 +1097,7 @@
+ vopl_mmu: iommu@ff470f00 {
+ compatible = "rockchip,iommu";
+ reg = <0x0 0xff470f00 0x0 0x100>;
+- interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vopl_mmu";
+ clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
+ clock-names = "aclk", "iface";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+index 06d48338c8362..219b7507a10fb 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+@@ -790,7 +790,6 @@
+ &pcie0 {
+ bus-scan-delay-ms = <1000>;
+ ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
+- max-link-speed = <2>;
+ num-lanes = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_clkreqn_cpm>;
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 234a21d26f674..3474286e59db7 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -252,8 +252,10 @@ choice
+ default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+
+ config MAXPHYSMEM_1GB
++ depends on 32BIT
+ bool "1GiB"
+ config MAXPHYSMEM_2GB
++ depends on 64BIT && CMODEL_MEDLOW
+ bool "2GiB"
+ config MAXPHYSMEM_128GB
+ depends on 64BIT && CMODEL_MEDANY
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index a6c4bb6c2c012..c17b8e5ec1869 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1083,6 +1083,7 @@ static void virtio_uml_release_dev(struct device *d)
+ }
+
+ os_close_file(vu_dev->sock);
++ kfree(vu_dev);
+ }
+
+ /* Platform device */
+@@ -1096,7 +1097,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ if (!pdata)
+ return -EINVAL;
+
+- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
++ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
+ if (!vu_dev)
+ return -ENOMEM;
+
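The devm_kzalloc -> kzalloc switch above (paired with the new kfree() in virtio_uml_release_dev) follows a standard lifetime rule: memory backing a refcounted struct device must live until the device's release callback runs, and the last reference can be dropped after the driver unbinds — which is exactly when devm-managed memory is freed. A generic sketch of the pattern; the names here are hypothetical:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_device {
        struct device dev;
        /* ... driver state ... */
};

static void foo_release(struct device *d)
{
        struct foo_device *foo = container_of(d, struct foo_device, dev);

        kfree(foo);     /* safe: runs only when the last reference is gone */
}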
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 1bf21746f4cea..6a7efa78eba22 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -127,6 +127,9 @@ else
+
+ KBUILD_CFLAGS += -mno-red-zone
+ KBUILD_CFLAGS += -mcmodel=kernel
++
++ # Intel CET isn't enabled in the kernel
++ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+ endif
+
+ ifdef CONFIG_X86_X32
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 57af25cb44f63..51abd44ab8c2d 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
+ #endif /* !CONFIG_X86_LOCAL_APIC */
+
+ #ifdef CONFIG_X86_X2APIC
+-/*
+- * Make previous memory operations globally visible before
+- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+- * mfence for this.
+- */
+-static inline void x2apic_wrmsr_fence(void)
+-{
+- asm volatile("mfence" : : : "memory");
+-}
+-
+ static inline void native_apic_msr_write(u32 reg, u32 v)
+ {
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 7f828fe497978..4819d5e5a3353 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -84,4 +84,22 @@ do { \
+
+ #include <asm-generic/barrier.h>
+
++/*
++ * Make previous memory operations globally visible before
++ * a WRMSR.
++ *
++ * MFENCE makes writes visible, but only affects load/store
++ * instructions. WRMSR is unfortunately not a load/store
++ * instruction and is unaffected by MFENCE. The LFENCE ensures
++ * that the WRMSR is not reordered.
++ *
++ * Most WRMSRs are full serializing instructions themselves and
++ * do not require this barrier. This is only required for the
++ * IA32_TSC_DEADLINE and X2APIC MSRs.
++ */
++static inline void weak_wrmsr_fence(void)
++{
++ asm volatile("mfence; lfence" : : : "memory");
++}
++
+ #endif /* _ASM_X86_BARRIER_H */
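The helper added above is intended to be called immediately before writing a non-serializing MSR, so that earlier stores become globally visible first. A minimal sketch of the calling pattern (the wrapper name is hypothetical; the real call sites appear in the apic.c and x2apic hunks below):

static inline void wrmsrl_weakly_ordered(u32 msr, u64 val)
{
        weak_wrmsr_fence();     /* order prior stores before the WRMSR */
        wrmsrl(msr, val);       /* e.g. MSR_IA32_TSC_DEADLINE or the x2APIC ICR */
}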
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 113f6ca7b8284..f4c0514fc5108 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -41,6 +41,7 @@
+ #include <asm/perf_event.h>
+ #include <asm/x86_init.h>
+ #include <linux/atomic.h>
++#include <asm/barrier.h>
+ #include <asm/mpspec.h>
+ #include <asm/i8259.h>
+ #include <asm/proto.h>
+@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta,
+ {
+ u64 tsc;
+
++ /* This MSR is special and needs a special fence: */
++ weak_wrmsr_fence();
++
+ tsc = rdtsc();
+ wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+ return 0;
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index b0889c48a2ac5..7eec3c154fa24 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
+ }
+
+@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long flags;
+ u32 dest;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ local_irq_save(flags);
+
+ tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index e14eae6d6ea71..032a00e5d9fa6 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
+ {
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
+ }
+
+@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
+ unsigned long this_cpu;
+ unsigned long flags;
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+
+ local_irq_save(flags);
+
+@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
+ {
+ unsigned long cfg = __prepare_ICR(which, vector, 0);
+
+- x2apic_wrmsr_fence();
++ /* x2apic MSRs are special and need a special fence: */
++ weak_wrmsr_fence();
+ native_x2apic_icr_write(cfg, 0);
+ }
+
+diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
+index 03aa33b581658..668a4a6533d92 100644
+--- a/arch/x86/kernel/hw_breakpoint.c
++++ b/arch/x86/kernel/hw_breakpoint.c
+@@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+ CPU_ENTRY_AREA_TOTAL_SIZE))
+ return true;
+
++ /*
++ * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
++ * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
++ */
++#ifdef CONFIG_SMP
++ if (within_area(addr, end, (unsigned long)__per_cpu_offset,
++ sizeof(unsigned long) * nr_cpu_ids))
++ return true;
++#else
++ if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
++ sizeof(pcpu_unit_offsets)))
++ return true;
++#endif
++
+ for_each_possible_cpu(cpu) {
+ /* The original rw GDT is being used after load_direct_gdt() */
+ if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
+@@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+ (unsigned long)&per_cpu(cpu_tlbstate, cpu),
+ sizeof(struct tlb_state)))
+ return true;
++
++ /*
++ * When running as a guest (X86_FEATURE_HYPERVISOR), local_db_save()
++ * will read the per-cpu cpu_dr7 before clearing the DR7 register.
++ */
++ if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
++ sizeof(cpu_dr7)))
++ return true;
+ }
+
+ return false;
+@@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args)
+ struct perf_event *bp;
+ unsigned long *dr6_p;
+ unsigned long dr6;
++ bool bpx;
+
+ /* The DR6 value is pointed by args->err */
+ dr6_p = (unsigned long *)ERR_PTR(args->err);
+ dr6 = *dr6_p;
+
+- /* If it's a single step, TRAP bits are random */
+- if (dr6 & DR_STEP)
+- return NOTIFY_DONE;
+-
+ /* Do an early return if no trap bits are set in DR6 */
+ if ((dr6 & DR_TRAP_BITS) == 0)
+ return NOTIFY_DONE;
+@@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args)
+ if (likely(!(dr6 & (DR_TRAP0 << i))))
+ continue;
+
++ bp = this_cpu_read(bp_per_reg[i]);
++ if (!bp)
++ continue;
++
++ bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
++
+ /*
+- * The counter may be concurrently released but that can only
+- * occur from a call_rcu() path. We can then safely fetch
+- * the breakpoint, use its callback, touch its counter
+- * while we are in an rcu_read_lock() path.
++ * TF and data breakpoints are traps and can be merged; however,
++ * instruction breakpoints are faults and will be raised
++ * separately.
++ *
++ * However, DR6 can indicate both TF and instruction
++ * breakpoints. In that case, take TF as it has precedence and
++ * delay the instruction breakpoint for the next exception.
+ */
+- rcu_read_lock();
++ if (bpx && (dr6 & DR_STEP))
++ continue;
+
+- bp = this_cpu_read(bp_per_reg[i]);
+ /*
+ * Reset the 'i'th TRAP bit in dr6 to denote completion of
+ * exception handling
+ */
+ (*dr6_p) &= ~(DR_TRAP0 << i);
+- /*
+- * bp can be NULL due to lazy debug register switching
+- * or due to concurrent perf counter removing.
+- */
+- if (!bp) {
+- rcu_read_unlock();
+- break;
+- }
+
+ perf_bp_event(bp, args->regs);
+
+@@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args)
+ * Set up resume flag to avoid breakpoint recursion when
+ * returning back to origin.
+ */
+- if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
++ if (bpx)
+ args->regs->flags |= X86_EFLAGS_RF;
+-
+- rcu_read_unlock();
+ }
++
+ /*
+ * Further processing in do_debug() is needed for a) user-space
+ * breakpoints (to generate signals) and b) when the system has
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 83637a2ff6052..62157b1000f08 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -320,7 +320,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+ if (cpuid->nent < vcpu->arch.cpuid_nent)
+ goto out;
+ r = -EFAULT;
+- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
++ if (copy_to_user(entries, vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+ goto out;
+ return 0;
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 56cae1ff9e3fe..66a08322988f2 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
+ ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
++ if (efer & EFER_LMA)
++ ctxt->mode = X86EMUL_MODE_PROT64;
+
+ return X86EMUL_CONTINUE;
+ }
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index b9265a585ea3c..c842d17240ccb 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -1037,8 +1037,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
+ }
+
+ /*
+- * Clear non-leaf entries (and free associated page tables) which could
+- * be replaced by large mappings, for GFNs within the slot.
++ * Clear leaf entries which could be replaced by large mappings, for
++ * GFNs within the slot.
+ */
+ static void zap_collapsible_spte_range(struct kvm *kvm,
+ struct kvm_mmu_page *root,
+@@ -1050,7 +1050,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
+
+ tdp_root_for_each_pte(iter, root, start, end) {
+ if (!is_shadow_present_pte(iter.old_spte) ||
+- is_last_spte(iter.old_spte, iter.level))
++ !is_last_spte(iter.old_spte, iter.level))
+ continue;
+
+ pfn = spte_to_pfn(iter.old_spte);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 5c9630c3f6ba1..e3e04988fdabe 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -320,6 +320,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ unsigned long first, last;
+ int ret;
+
++ lockdep_assert_held(&kvm->lock);
++
+ if (ulen == 0 || uaddr + ulen < uaddr)
+ return ERR_PTR(-EINVAL);
+
+@@ -1001,12 +1003,20 @@ int svm_register_enc_region(struct kvm *kvm,
+ if (!region)
+ return -ENOMEM;
+
++ mutex_lock(&kvm->lock);
+ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+ if (IS_ERR(region->pages)) {
+ ret = PTR_ERR(region->pages);
++ mutex_unlock(&kvm->lock);
+ goto e_free;
+ }
+
++ region->uaddr = range->addr;
++ region->size = range->size;
++
++ list_add_tail(&region->list, &sev->regions_list);
++ mutex_unlock(&kvm->lock);
++
+ /*
+ * The guest may change the memory encryption attribute from C=0 -> C=1
+ * or vice versa for this memory range. Lets make sure caches are
+@@ -1015,13 +1025,6 @@ int svm_register_enc_region(struct kvm *kvm,
+ */
+ sev_clflush_pages(region->pages, region->npages);
+
+- region->uaddr = range->addr;
+- region->size = range->size;
+-
+- mutex_lock(&kvm->lock);
+- list_add_tail(&region->list, &sev->regions_list);
+- mutex_unlock(&kvm->lock);
+-
+ return ret;
+
+ e_free:
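The reordering above makes the pin-and-publish sequence atomic with respect to kvm->lock: the region is added to sev->regions_list before the lock is dropped, so a concurrent ioctl can no longer observe a pinned-but-unregistered region, and the new lockdep_assert_held() documents that sev_pin_memory() now expects the lock. The general shape, as a sketch with simplified types:

static void publish_region(struct kvm *kvm, struct enc_region *region,
                           struct list_head *regions_list)
{
        lockdep_assert_held(&kvm->lock);        /* caller took kvm->lock */
        list_add_tail(&region->list, regions_list);     /* visible only under the lock */
}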
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 94b0cb8330451..f4ae3871e412a 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -438,6 +438,11 @@ static int has_svm(void)
+ return 0;
+ }
+
++ if (sev_active()) {
++ pr_info("KVM is unsupported when running as an SEV guest\n");
++ return 0;
++ }
++
+ return 1;
+ }
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index c01aac2bac37c..82af43e14b09c 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6874,11 +6874,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
+ switch (index) {
+ case MSR_IA32_TSX_CTRL:
+ /*
+- * No need to pass TSX_CTRL_CPUID_CLEAR through, so
+- * let's avoid changing CPUID bits under the host
+- * kernel's feet.
++ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
++ * interception. Keep the host value unchanged to avoid
++ * changing CPUID bits under the host kernel's feet.
++ *
++ * hle=0, rtm=0, tsx_ctrl=1 can be found with some
++ * combinations of new kernel and old userspace. If
++ * those guests run on a tsx=off host, do allow guests
++ * to use TSX_CTRL, but do not change the value on the
++ * host so that TSX remains always disabled.
+ */
+- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
++ if (boot_cpu_has(X86_FEATURE_RTM))
++ vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
++ else
++ vmx->guest_uret_msrs[j].mask = 0;
+ break;
+ default:
+ vmx->guest_uret_msrs[j].mask = -1ull;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0a302685e4d62..18a315bbcb79e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1376,16 +1376,24 @@ static u64 kvm_get_arch_capabilities(void)
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ data |= ARCH_CAP_MDS_NO;
+
+- /*
+- * On TAA affected systems:
+- * - nothing to do if TSX is disabled on the host.
+- * - we emulate TSX_CTRL if present on the host.
+- * This lets the guest use VERW to clear CPU buffers.
+- */
+- if (!boot_cpu_has(X86_FEATURE_RTM))
+- data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
+- else if (!boot_cpu_has_bug(X86_BUG_TAA))
++ if (!boot_cpu_has(X86_FEATURE_RTM)) {
++ /*
++ * If RTM=0 because the kernel has disabled TSX, the host might
++ * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
++ * and therefore knows that there cannot be TAA) but keep
++ * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
++ * and we want to allow migrating those guests to tsx=off hosts.
++ */
++ data &= ~ARCH_CAP_TAA_NO;
++ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
+ data |= ARCH_CAP_TAA_NO;
++ } else {
++ /*
++ * Nothing to do here; we emulate TSX_CTRL if present on the
++ * host so the guest can choose between disabling TSX or
++ * using VERW to clear CPU buffers.
++ */
++ }
+
+ return data;
+ }
+@@ -9907,6 +9915,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ fx_init(vcpu);
+
+ vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
++ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
+
+ vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
+
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index bc0833713be95..f80d10d39cf6d 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -351,6 +351,7 @@ bool sev_active(void)
+ {
+ return sev_status & MSR_AMD64_SEV_ENABLED;
+ }
++EXPORT_SYMBOL_GPL(sev_active);
+
+ /* Needs to be called from non-instrumentable code */
+ bool noinstr sev_es_active(void)
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 4ad3c4b276dcf..7e17d4edccb12 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -602,7 +602,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ ret = gdev->id;
+ goto err_free_gdev;
+ }
+- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++
++ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
++ if (ret)
++ goto err_free_ida;
++
+ device_initialize(&gdev->dev);
+ dev_set_drvdata(&gdev->dev, gdev);
+ if (gc->parent && gc->parent->driver)
+@@ -616,7 +620,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ if (!gdev->descs) {
+ ret = -ENOMEM;
+- goto err_free_ida;
++ goto err_free_dev_name;
+ }
+
+ if (gc->ngpio == 0) {
+@@ -767,6 +771,8 @@ err_free_label:
+ kfree_const(gdev->label);
+ err_free_descs:
+ kfree(gdev->descs);
++err_free_dev_name:
++ kfree(dev_name(&gdev->dev));
+ err_free_ida:
+ ida_free(&gpio_ida, gdev->id);
+ err_free_gdev:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 0f7749e9424d4..580880212e551 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2278,8 +2278,6 @@ void amdgpu_dm_update_connector_after_detect(
+
+ drm_connector_update_edid_property(connector,
+ aconnector->edid);
+- drm_add_edid_modes(connector, aconnector->edid);
+-
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index e875425336406..7749b0ceabba9 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ return 0;
+ }
+
+-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
++/**
++ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
++ * @link_rate: link rate in 10kbits/s units
++ * @link_lane_count: lane count
++ *
++ * Calculate the total bandwidth of a MultiStream Transport link. The returned
++ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
++ * convert the number of PBNs required for a given stream to the number of
++ * timeslots this stream requires in each MTP.
++ */
++int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
+ {
+- if (dp_link_bw == 0 || dp_link_count == 0)
+- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
+- dp_link_bw, dp_link_count);
++ if (link_rate == 0 || link_lane_count == 0)
++ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
++ link_rate, link_lane_count);
+
+- return dp_link_bw * dp_link_count / 2;
++ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
++ return link_rate * link_lane_count / 54000;
+ }
++EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
+
+ /**
+ * drm_dp_read_mst_cap() - check whether or not a sink supports MST
+@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ goto out_unlock;
+ }
+
+- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
++ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ if (mgr->pbn_div == 0) {
+ ret = -EINVAL;
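A worked example of the new formula (arithmetic only, not part of the patch): an HBR2 link runs at 5.4 Gb/s per lane, i.e. link_rate = 540000 in the function's 10 kb/s units, so a 4-lane link gives

        drm_dp_get_vc_payload_bw(540000, 4)
                = 540000 * 4 / 54000
                = 40 PBN per timeslot

which matches what the old code computed from the raw DPCD value (bandwidth code 0x14 = 20 for HBR2: 20 * 4 / 2 = 40). Only the units at the call sites change, as the drm_dp_mst_topology_mgr_set_mst() hunk above shows.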
+diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
+index 3f2bbd9370a86..40dfb4d0ffbec 100644
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -3274,6 +3274,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
+ }
+
++static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
++ const struct intel_crtc_state *crtc_state)
++{
++ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
++ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
++ enum phy phy = intel_port_to_phy(i915, encoder->port);
++
++ if (intel_phy_is_combo(i915, phy)) {
++ bool lane_reversal =
++ dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
++
++ intel_combo_phy_power_up_lanes(i915, phy, false,
++ crtc_state->lane_count,
++ lane_reversal);
++ }
++}
++
+ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+@@ -3367,14 +3384,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
+ * the used lanes of the DDI.
+ */
+- if (intel_phy_is_combo(dev_priv, phy)) {
+- bool lane_reversal =
+- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+-
+- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+- crtc_state->lane_count,
+- lane_reversal);
+- }
++ intel_ddi_power_up_lanes(encoder, crtc_state);
+
+ /*
+ * 7.g Configure and enable DDI_BUF_CTL
+@@ -3458,14 +3468,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ else
+ intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+
+- if (intel_phy_is_combo(dev_priv, phy)) {
+- bool lane_reversal =
+- dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+-
+- intel_combo_phy_power_up_lanes(dev_priv, phy, false,
+- crtc_state->lane_count,
+- lane_reversal);
+- }
++ intel_ddi_power_up_lanes(encoder, crtc_state);
+
+ intel_ddi_init_dp_buf_reg(encoder);
+ if (!is_mst)
+@@ -3933,6 +3936,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
+ intel_de_write(dev_priv, reg, val);
+ }
+
++ intel_ddi_power_up_lanes(encoder, crtc_state);
++
+ /* In HDMI/DVI mode, the port width, and swing/emphasis values
+ * are ignored so nothing special needs to be done besides
+ * enabling the port.
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index aabf09f89cada..45c2556d63955 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -2294,7 +2294,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ */
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
+- i915_gem_object_unpin_from_display_plane(vma);
++ i915_vma_unpin(vma);
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+@@ -2312,12 +2312,9 @@ err:
+
+ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+ {
+- i915_gem_object_lock(vma->obj, NULL);
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
+- i915_gem_object_unpin_from_display_plane(vma);
+- i915_gem_object_unlock(vma->obj);
+-
++ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ }
+
+@@ -4883,6 +4880,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
+ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
++ if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
++ plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ }
+
+ return plane_color_ctl;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 5d745d9b99b2a..ecaa538b2d357 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -68,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+
+ slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
+ connector->port,
+- crtc_state->pbn, 0);
++ crtc_state->pbn,
++ drm_dp_get_vc_payload_bw(crtc_state->port_clock,
++ crtc_state->lane_count));
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
+index 52b4f6193b4ce..0095c8cac9b40 100644
+--- a/drivers/gpu/drm/i915/display/intel_overlay.c
++++ b/drivers/gpu/drm/i915/display/intel_overlay.c
+@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
+ intel_frontbuffer_flip_complete(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
+- i915_gem_object_unpin_from_display_plane(vma);
++ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ }
+
+@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
+ return 0;
+
+ out_unpin:
+- i915_gem_object_unpin_from_display_plane(vma);
++ i915_vma_unpin(vma);
+ out_pin_section:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
+index 63040cb0d4e10..12f7128b777f6 100644
+--- a/drivers/gpu/drm/i915/display/intel_sprite.c
++++ b/drivers/gpu/drm/i915/display/intel_sprite.c
+@@ -469,13 +469,19 @@ skl_program_scaler(struct intel_plane *plane,
+
+ /* Preoffset values for YUV to RGB Conversion */
+ #define PREOFF_YUV_TO_RGB_HI 0x1800
+-#define PREOFF_YUV_TO_RGB_ME 0x1F00
++#define PREOFF_YUV_TO_RGB_ME 0x0000
+ #define PREOFF_YUV_TO_RGB_LO 0x1800
+
+ #define ROFF(x) (((x) & 0xffff) << 16)
+ #define GOFF(x) (((x) & 0xffff) << 0)
+ #define BOFF(x) (((x) & 0xffff) << 16)
+
++/*
++ * Programs the input color space conversion stage for ICL HDR planes.
++ * Note that it is assumed that this stage always happens after YUV
++ * range correction. Thus, the input to this stage is assumed to be
++ * in full-range YCbCr.
++ */
+ static void
+ icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+@@ -523,52 +529,7 @@ icl_program_input_csc(struct intel_plane *plane,
+ 0x0, 0x7800, 0x7F10,
+ },
+ };
+-
+- /* Matrix for Limited Range to Full Range Conversion */
+- static const u16 input_csc_matrix_lr[][9] = {
+- /*
+- * BT.601 Limted range YCbCr -> full range RGB
+- * The matrix required is :
+- * [1.164384, 0.000, 1.596027,
+- * 1.164384, -0.39175, -0.812813,
+- * 1.164384, 2.017232, 0.0000]
+- */
+- [DRM_COLOR_YCBCR_BT601] = {
+- 0x7CC8, 0x7950, 0x0,
+- 0x8D00, 0x7950, 0x9C88,
+- 0x0, 0x7950, 0x6810,
+- },
+- /*
+- * BT.709 Limited range YCbCr -> full range RGB
+- * The matrix required is :
+- * [1.164384, 0.000, 1.792741,
+- * 1.164384, -0.213249, -0.532909,
+- * 1.164384, 2.112402, 0.0000]
+- */
+- [DRM_COLOR_YCBCR_BT709] = {
+- 0x7E58, 0x7950, 0x0,
+- 0x8888, 0x7950, 0xADA8,
+- 0x0, 0x7950, 0x6870,
+- },
+- /*
+- * BT.2020 Limited range YCbCr -> full range RGB
+- * The matrix required is :
+- * [1.164, 0.000, 1.678,
+- * 1.164, -0.1873, -0.6504,
+- * 1.164, 2.1417, 0.0000]
+- */
+- [DRM_COLOR_YCBCR_BT2020] = {
+- 0x7D70, 0x7950, 0x0,
+- 0x8A68, 0x7950, 0xAC00,
+- 0x0, 0x7950, 0x6890,
+- },
+- };
+- const u16 *csc;
+-
+- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+- csc = input_csc_matrix[plane_state->hw.color_encoding];
+- else
+- csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
++ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+@@ -585,14 +546,8 @@ icl_program_input_csc(struct intel_plane *plane,
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+- intel_de_write_fw(dev_priv,
+- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+- 0);
+- else
+- intel_de_write_fw(dev_priv,
+- PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+- PREOFF_YUV_TO_RGB_ME);
++ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
++ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+index fcce6909f2017..3d435bfff7649 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+@@ -387,48 +387,6 @@ err:
+ return vma;
+ }
+
+-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+-{
+- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+- struct i915_vma *vma;
+-
+- if (list_empty(&obj->vma.list))
+- return;
+-
+- mutex_lock(&i915->ggtt.vm.mutex);
+- spin_lock(&obj->vma.lock);
+- for_each_ggtt_vma(vma, obj) {
+- if (!drm_mm_node_allocated(&vma->node))
+- continue;
+-
+- GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
+- list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+- }
+- spin_unlock(&obj->vma.lock);
+- mutex_unlock(&i915->ggtt.vm.mutex);
+-
+- if (i915_gem_object_is_shrinkable(obj)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+-
+- if (obj->mm.madv == I915_MADV_WILLNEED &&
+- !atomic_read(&obj->mm.shrink_pin))
+- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+-
+- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+- }
+-}
+-
+-void
+-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
+-{
+- /* Bump the LRU to try and avoid premature eviction whilst flipping */
+- i915_gem_object_bump_inactive_ggtt(vma->obj);
+-
+- i915_vma_unpin(vma);
+-}
+-
+ /**
+ * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ else
+ err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
+
+- /* And bump the LRU for this access */
+- i915_gem_object_bump_inactive_ggtt(obj);
+-
+ i915_gem_object_unlock(obj);
+
+ if (write_domain)
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index d46db8d8f38e4..bc48717971204 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -471,7 +471,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
+ const struct i915_ggtt_view *view,
+ unsigned int flags);
+-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
+
+ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+ void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+index 0625cbb3b4312..0040b4765a54d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
++++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+@@ -451,10 +451,12 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
+ struct intel_context *ce = rq->context;
+ bool release;
+
+- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
++ spin_lock(&ce->signal_lock);
++ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
++ spin_unlock(&ce->signal_lock);
+ return;
++ }
+
+- spin_lock(&ce->signal_lock);
+ list_del_rcu(&rq->signal_link);
+ release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+ spin_unlock(&ce->signal_lock);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 8c73377ac82ca..3d004ca76b6ed 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -215,9 +215,17 @@ static const struct xpad_device {
+ { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+@@ -296,6 +304,9 @@ static const struct xpad_device {
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
++ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
++ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
++ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
++ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */
++ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ { }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 3a2dcf0805f12..c74b020796a94 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 6612f9e2d7e83..45113767db964 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
+ { .id = "5663", .data = &gt1x_chip_data },
+ { .id = "5688", .data = &gt1x_chip_data },
+ { .id = "917S", .data = &gt1x_chip_data },
++ { .id = "9286", .data = &gt1x_chip_data },
+
+ { .id = "911", .data = &gt911_chip_data },
+ { .id = "9271", .data = &gt911_chip_data },
+@@ -1445,6 +1446,7 @@ static const struct of_device_id goodix_of_match[] = {
+ { .compatible = "goodix,gt927" },
+ { .compatible = "goodix,gt9271" },
+ { .compatible = "goodix,gt928" },
++ { .compatible = "goodix,gt9286" },
+ { .compatible = "goodix,gt967" },
+ { }
+ };
+diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
+index 199cf3daec106..d8fccf048bf44 100644
+--- a/drivers/input/touchscreen/ili210x.c
++++ b/drivers/input/touchscreen/ili210x.c
+@@ -29,11 +29,13 @@ struct ili2xxx_chip {
+ void *buf, size_t len);
+ int (*get_touch_data)(struct i2c_client *client, u8 *data);
+ bool (*parse_touch_data)(const u8 *data, unsigned int finger,
+- unsigned int *x, unsigned int *y);
++ unsigned int *x, unsigned int *y,
++ unsigned int *z);
+ bool (*continue_polling)(const u8 *data, bool touch);
+ unsigned int max_touches;
+ unsigned int resolution;
+ bool has_calibrate_reg;
++ bool has_pressure_reg;
+ };
+
+ struct ili210x {
+@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
+
+ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+- unsigned int *x, unsigned int *y)
++ unsigned int *x, unsigned int *y,
++ unsigned int *z)
+ {
+ if (touchdata[0] & BIT(finger))
+ return false;
+@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
+
+ static bool ili211x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+- unsigned int *x, unsigned int *y)
++ unsigned int *x, unsigned int *y,
++ unsigned int *z)
+ {
+ u32 data;
+
+@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
+
+ static bool ili212x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+- unsigned int *x, unsigned int *y)
++ unsigned int *x, unsigned int *y,
++ unsigned int *z)
+ {
+ u16 val;
+
+@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
+
+ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+- unsigned int *x, unsigned int *y)
++ unsigned int *x, unsigned int *y,
++ unsigned int *z)
+ {
+ u16 val;
+
+@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
+
+ *x = val & 0x3fff;
+ *y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
++ *z = touchdata[1 + (finger * 5) + 4];
+
+ return true;
+ }
+@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
+ .continue_polling = ili251x_check_continue_polling,
+ .max_touches = 10,
+ .has_calibrate_reg = true,
++ .has_pressure_reg = true,
+ };
+
+ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
+@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
+ struct input_dev *input = priv->input;
+ int i;
+ bool contact = false, touch;
+- unsigned int x = 0, y = 0;
++ unsigned int x = 0, y = 0, z = 0;
+
+ for (i = 0; i < priv->chip->max_touches; i++) {
+- touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
++ touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
+
+ input_mt_slot(input, i);
+ if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
+ touchscreen_report_pos(input, &priv->prop, x, y, true);
++ if (priv->chip->has_pressure_reg)
++ input_report_abs(input, ABS_MT_PRESSURE, z);
+ contact = true;
+ }
+ }
+@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
+ max_xy = (chip->resolution ?: SZ_64K) - 1;
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
++ if (priv->chip->has_pressure_reg)
++ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
+ touchscreen_parse_properties(input, true, &priv->prop);
+
+ error = input_mt_init_slots(input, priv->chip->max_touches,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 3be74cf3635fe..7a0a228d64bbe 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
+ * could wait for this and below md_handle_request could wait for those
+ * bios because of suspend check
+ */
++ spin_lock_irq(&mddev->lock);
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
1527 ++ spin_unlock_irq(&mddev->lock);
1528 + wake_up(&mddev->sb_wait);
1529 +
1530 + if (bio->bi_iter.bi_size == 0) {
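
The md hunk above takes mddev->lock around the update of last_flush and flush_bio, so waiters that re-check those fields under the same lock can never observe a half-written pair before the wake_up(). A minimal sketch of the publish-then-wake pattern, with hypothetical names:

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Publish shared state under the lock, then wake the waiters that
 * re-check it; updating outside the lock races with those checks.
 */
struct flush_state {
        spinlock_t lock;
        struct bio *flush_bio;
        wait_queue_head_t sb_wait;
};

static void flush_complete(struct flush_state *st)
{
        spin_lock_irq(&st->lock);
        st->flush_bio = NULL;   /* published atomically w.r.t. readers */
        spin_unlock_irq(&st->lock);
        wake_up(&st->sb_wait);
}
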
1531 +diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
1532 +index 44bea5e4aeda1..b23773583179d 100644
1533 +--- a/drivers/mmc/core/sdio_cis.c
1534 ++++ b/drivers/mmc/core/sdio_cis.c
1535 +@@ -20,6 +20,8 @@
1536 + #include "sdio_cis.h"
1537 + #include "sdio_ops.h"
1538 +
1539 ++#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
1540 ++
1541 + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
1542 + const unsigned char *buf, unsigned size)
1543 + {
1544 +@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
1545 +
1546 + do {
1547 + unsigned char tpl_code, tpl_link;
1548 ++ unsigned long timeout = jiffies +
1549 ++ msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
1550 +
1551 + ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
1552 + if (ret)
1553 +@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
1554 + prev = &this->next;
1555 +
1556 + if (ret == -ENOENT) {
1557 ++ if (time_after(jiffies, timeout))
1558 ++ break;
1559 + /* warn about unknown tuples */
1560 + pr_warn_ratelimited("%s: queuing unknown"
1561 + " CIS tuple 0x%02x (%u bytes)\n",
1562 +diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
1563 +index 6301b81cf5731..9bd717ff784be 100644
1564 +--- a/drivers/mmc/host/sdhci-pltfm.h
1565 ++++ b/drivers/mmc/host/sdhci-pltfm.h
1566 +@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
1567 + return host->private;
1568 + }
1569 +
1570 ++extern const struct dev_pm_ops sdhci_pltfm_pmops;
1571 ++#ifdef CONFIG_PM_SLEEP
1572 + int sdhci_pltfm_suspend(struct device *dev);
1573 + int sdhci_pltfm_resume(struct device *dev);
1574 +-extern const struct dev_pm_ops sdhci_pltfm_pmops;
1575 ++#else
1576 ++static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
1577 ++static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
1578 ++#endif
1579 +
1580 + #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
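
The sdhci-pltfm hunk only declares the real suspend/resume helpers when CONFIG_PM_SLEEP is set and supplies inline no-op stubs otherwise, so callers need no #ifdef of their own. The general shape of the pattern, with a hypothetical driver name:

#include <linux/device.h>

#ifdef CONFIG_PM_SLEEP
int my_driver_suspend(struct device *dev);      /* real implementation */
int my_driver_resume(struct device *dev);
#else
static inline int my_driver_suspend(struct device *dev) { return 0; }
static inline int my_driver_resume(struct device *dev) { return 0; }
#endif
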
1581 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1582 +index 34cca0a4b31c7..87160e723dfcf 100644
1583 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
1584 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
1585 +@@ -1669,7 +1669,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
1586 + if (!entry.portvec)
1587 + entry.state = 0;
1588 + } else {
1589 +- entry.portvec |= BIT(port);
1590 ++ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
1591 ++ entry.portvec = BIT(port);
1592 ++ else
1593 ++ entry.portvec |= BIT(port);
1594 ++
1595 + entry.state = state;
1596 + }
1597 +
1598 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1599 +index 627ce1a20473a..2f281d0f98070 100644
1600 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
1601 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
1602 +@@ -5339,11 +5339,6 @@ static int ibmvnic_remove(struct vio_dev *dev)
1603 + unsigned long flags;
1604 +
1605 + spin_lock_irqsave(&adapter->state_lock, flags);
1606 +- if (test_bit(0, &adapter->resetting)) {
1607 +- spin_unlock_irqrestore(&adapter->state_lock, flags);
1608 +- return -EBUSY;
1609 +- }
1610 +-
1611 + adapter->state = VNIC_REMOVING;
1612 + spin_unlock_irqrestore(&adapter->state_lock, flags);
1613 +
1614 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1615 +index 2872c4dc77f07..3b269c70dcfe1 100644
1616 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1617 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1618 +@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
1619 +
1620 + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
1621 + pfe.severity = PF_EVENT_SEVERITY_INFO;
1622 +-
1623 +- /* Always report link is down if the VF queues aren't enabled */
1624 +- if (!vf->queues_enabled) {
1625 +- pfe.event_data.link_event.link_status = false;
1626 +- pfe.event_data.link_event.link_speed = 0;
1627 +- } else if (vf->link_forced) {
1628 ++ if (vf->link_forced) {
1629 + pfe.event_data.link_event.link_status = vf->link_up;
1630 + pfe.event_data.link_event.link_speed =
1631 + (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
1632 +@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
1633 + pfe.event_data.link_event.link_speed =
1634 + i40e_virtchnl_link_speed(ls->link_speed);
1635 + }
1636 +-
1637 + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
1638 + 0, (u8 *)&pfe, sizeof(pfe), NULL);
1639 + }
1640 +@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
1641 + }
1642 + }
1643 +
1644 +- vf->queues_enabled = true;
1645 +-
1646 + error_param:
1647 + /* send the response to the VF */
1648 + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
1649 +@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
1650 + struct i40e_pf *pf = vf->pf;
1651 + i40e_status aq_ret = 0;
1652 +
1653 +- /* Immediately mark queues as disabled */
1654 +- vf->queues_enabled = false;
1655 +-
1656 + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1657 + aq_ret = I40E_ERR_PARAM;
1658 + goto error_param;
1659 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
1660 +index 5491215d81deb..091e32c1bb46f 100644
1661 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
1662 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
1663 +@@ -98,7 +98,6 @@ struct i40e_vf {
1664 + unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
1665 + bool link_forced;
1666 + bool link_up; /* only valid if VF link is forced */
1667 +- bool queues_enabled; /* true if the VF queues are enabled */
1668 + bool spoofchk;
1669 + u16 num_vlan;
1670 +
1671 +diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
1672 +index 831f2f09de5fb..ec8cd69d49928 100644
1673 +--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
1674 ++++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
1675 +@@ -1714,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
1676 + Asym_Pause);
1677 + }
1678 +
1679 +- status = rd32(IGC_STATUS);
1680 ++ status = pm_runtime_suspended(&adapter->pdev->dev) ?
1681 ++ 0 : rd32(IGC_STATUS);
1682 +
1683 + if (status & IGC_STATUS_LU) {
1684 + if (status & IGC_STATUS_SPEED_1000) {
1685 +diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
1686 +index 8b67d9b49a83a..7ec04e48860c6 100644
1687 +--- a/drivers/net/ethernet/intel/igc/igc_i225.c
1688 ++++ b/drivers/net/ethernet/intel/igc/igc_i225.c
1689 +@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
1690 + u16 *data)
1691 + {
1692 + struct igc_nvm_info *nvm = &hw->nvm;
1693 ++ s32 ret_val = -IGC_ERR_NVM;
1694 + u32 attempts = 100000;
1695 + u32 i, k, eewr = 0;
1696 +- s32 ret_val = 0;
1697 +
1698 + /* A check for invalid values: offset too large, too many words,
1699 + * too many words for the offset, and not enough words.
1700 +@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
1701 + if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
1702 + words == 0) {
1703 + hw_dbg("nvm parameter(s) out of bounds\n");
1704 +- ret_val = -IGC_ERR_NVM;
1705 + goto out;
1706 + }
1707 +
1708 +diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
1709 +index 09cd0ec7ee87d..67b8ffd21d8af 100644
1710 +--- a/drivers/net/ethernet/intel/igc/igc_mac.c
1711 ++++ b/drivers/net/ethernet/intel/igc/igc_mac.c
1712 +@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
1713 + }
1714 +
1715 + out:
1716 +- return 0;
1717 ++ return ret_val;
1718 + }
1719 +
1720 + /**
1721 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
1722 +index a30eb90ba3d28..dd590086fe6a5 100644
1723 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
1724 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
1725 +@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1726 + /* Clear entry invalidation bit */
1727 + pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1728 +
1729 +- /* Write tcam index - indirect access */
1730 +- mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1731 +- for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1732 +- mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
1733 +-
1734 + /* Write sram index - indirect access */
1735 + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1736 + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1737 + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
1738 +
1739 ++ /* Write tcam index - indirect access */
1740 ++ mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1741 ++ for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1742 ++ mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
1743 ++
1744 + return 0;
1745 + }
1746 +
1747 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1748 +index c9b5d7f29911e..42848db8f8dd6 100644
1749 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1750 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1751 +@@ -3593,12 +3593,10 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
1752 +
1753 + err = mlx5e_safe_switch_channels(priv, &new_channels,
1754 + mlx5e_num_channels_changed_ctx, NULL);
1755 +- if (err)
1756 +- goto out;
1757 +
1758 +- priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
1759 +- new_channels.params.num_tc);
1760 + out:
1761 ++ priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
1762 ++ priv->channels.params.num_tc);
1763 + mutex_unlock(&priv->state_lock);
1764 + return err;
1765 + }
1766 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1767 +index 6628a0197b4e0..6d2ba8b84187c 100644
1768 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1769 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1770 +@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1771 + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1772 +
1773 + if (mlx5e_cqe_regb_chain(cqe))
1774 +- if (!mlx5e_tc_update_skb(cqe, skb))
1775 ++ if (!mlx5e_tc_update_skb(cqe, skb)) {
1776 ++ dev_kfree_skb_any(skb);
1777 + goto free_wqe;
1778 ++ }
1779 +
1780 + napi_gro_receive(rq->cq.napi, skb);
1781 +
1782 +@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1783 + if (rep->vlan && skb_vlan_tag_present(skb))
1784 + skb_vlan_pop(skb);
1785 +
1786 +- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
1787 ++ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
1788 ++ dev_kfree_skb_any(skb);
1789 + goto free_wqe;
1790 ++ }
1791 +
1792 + napi_gro_receive(rq->cq.napi, skb);
1793 +
1794 +@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
1795 +
1796 + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1797 +
1798 +- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
1799 ++ if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
1800 ++ dev_kfree_skb_any(skb);
1801 + goto mpwrq_cqe_out;
1802 ++ }
1803 +
1804 + napi_gro_receive(rq->cq.napi, skb);
1805 +
1806 +@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
1807 + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1808 +
1809 + if (mlx5e_cqe_regb_chain(cqe))
1810 +- if (!mlx5e_tc_update_skb(cqe, skb))
1811 ++ if (!mlx5e_tc_update_skb(cqe, skb)) {
1812 ++ dev_kfree_skb_any(skb);
1813 + goto mpwrq_cqe_out;
1814 ++ }
1815 +
1816 + napi_gro_receive(rq->cq.napi, skb);
1817 +
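
Each mlx5e rx hunk above adds a dev_kfree_skb_any() on the path where the TC-update helper rejects the skb: once the handler decides not to hand the buffer to the stack it still owns it, and before this fix every rejected packet leaked one skb. A sketch of the ownership rule with a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* A handler that declines to deliver the skb must free it itself;
 * jumping straight to the cleanup label leaks one skb per drop.
 */
static void deliver_or_drop(struct napi_struct *napi, struct sk_buff *skb,
                            bool deliver)
{
        if (!deliver) {
                dev_kfree_skb_any(skb);
                return;
        }
        napi_gro_receive(napi, skb);
}
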
1818 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1819 +index 634c2bfd25be1..79fc5755735fa 100644
1820 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1821 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1822 +@@ -1764,6 +1764,7 @@ search_again_locked:
1823 + if (!fte_tmp)
1824 + continue;
1825 + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1826 ++ /* No error check needed here, because insert_fte() is not called */
1827 + up_write_ref_node(&fte_tmp->node, false);
1828 + tree_put_node(&fte_tmp->node, false);
1829 + kmem_cache_free(steering->ftes_cache, fte);
1830 +@@ -1816,6 +1817,8 @@ skip_search:
1831 + up_write_ref_node(&g->node, false);
1832 + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1833 + up_write_ref_node(&fte->node, false);
1834 ++ if (IS_ERR(rule))
1835 ++ tree_put_node(&fte->node, false);
1836 + return rule;
1837 + }
1838 + rule = ERR_PTR(-ENOENT);
1839 +@@ -1914,6 +1917,8 @@ search_again_locked:
1840 + up_write_ref_node(&g->node, false);
1841 + rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1842 + up_write_ref_node(&fte->node, false);
1843 ++ if (IS_ERR(rule))
1844 ++ tree_put_node(&fte->node, false);
1845 + tree_put_node(&g->node, false);
1846 + return rule;
1847 +
1848 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
1849 +index a3e0c71831928..a44a2bad5bbb5 100644
1850 +--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
1851 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
1852 +@@ -76,7 +76,7 @@ enum {
1853 +
1854 + static u32 get_function(u16 func_id, bool ec_function)
1855 + {
1856 +- return func_id & (ec_function << 16);
1857 ++ return (u32)func_id | (ec_function << 16);
1858 + }
1859 +
1860 + static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
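
The pagealloc one-liner replaces '&' with '|': the intent is to pack func_id into the low 16 bits of the lookup key and the ec_function flag into bit 16, and with '&' the expression almost always evaluated to 0, collapsing every function onto the same page-tracking tree slot. An equivalent sketch:

#include <linux/types.h>

/* func_id occupies bits 0..15, the ec_function flag bit 16; '|'
 * keeps both fields, whereas the old '&' masked them both away.
 */
static u32 make_function_key(u16 func_id, bool ec_function)
{
        return (u32)func_id | ((u32)ec_function << 16);
}
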
1861 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
1862 +index 762cabf16157b..75f774347f6d1 100644
1863 +--- a/drivers/net/ethernet/realtek/r8169_main.c
1864 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
1865 +@@ -4082,17 +4082,72 @@ err_out:
1866 + return -EIO;
1867 + }
1868 +
1869 +-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
1870 ++static bool rtl_skb_is_udp(struct sk_buff *skb)
1871 ++{
1872 ++ int no = skb_network_offset(skb);
1873 ++ struct ipv6hdr *i6h, _i6h;
1874 ++ struct iphdr *ih, _ih;
1875 ++
1876 ++ switch (vlan_get_protocol(skb)) {
1877 ++ case htons(ETH_P_IP):
1878 ++ ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
1879 ++ return ih && ih->protocol == IPPROTO_UDP;
1880 ++ case htons(ETH_P_IPV6):
1881 ++ i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
1882 ++ return i6h && i6h->nexthdr == IPPROTO_UDP;
1883 ++ default:
1884 ++ return false;
1885 ++ }
1886 ++}
1887 ++
1888 ++#define RTL_MIN_PATCH_LEN 47
1889 ++
1890 ++/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
1891 ++static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
1892 ++ struct sk_buff *skb)
1893 + {
1894 ++ unsigned int padto = 0, len = skb->len;
1895 ++
1896 ++ if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
1897 ++ rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
1898 ++ unsigned int trans_data_len = skb_tail_pointer(skb) -
1899 ++ skb_transport_header(skb);
1900 ++
1901 ++ if (trans_data_len >= offsetof(struct udphdr, len) &&
1902 ++ trans_data_len < RTL_MIN_PATCH_LEN) {
1903 ++ u16 dest = ntohs(udp_hdr(skb)->dest);
1904 ++
1905 ++ /* dest is a standard PTP port */
1906 ++ if (dest == 319 || dest == 320)
1907 ++ padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
1908 ++ }
1909 ++
1910 ++ if (trans_data_len < sizeof(struct udphdr))
1911 ++ padto = max_t(unsigned int, padto,
1912 ++ len + sizeof(struct udphdr) - trans_data_len);
1913 ++ }
1914 ++
1915 ++ return padto;
1916 ++}
1917 ++
1918 ++static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
1919 ++ struct sk_buff *skb)
1920 ++{
1921 ++ unsigned int padto;
1922 ++
1923 ++ padto = rtl8125_quirk_udp_padto(tp, skb);
1924 ++
1925 + switch (tp->mac_version) {
1926 + case RTL_GIGA_MAC_VER_34:
1927 + case RTL_GIGA_MAC_VER_60:
1928 + case RTL_GIGA_MAC_VER_61:
1929 + case RTL_GIGA_MAC_VER_63:
1930 +- return true;
1931 ++ padto = max_t(unsigned int, padto, ETH_ZLEN);
1932 + default:
1933 +- return false;
1934 ++ break;
1935 + }
1936 ++
1937 ++ return padto;
1938 + }
1939 +
1940 + static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
1941 +@@ -4164,9 +4219,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
1942 +
1943 + opts[1] |= transport_offset << TCPHO_SHIFT;
1944 + } else {
1945 +- if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
1946 +- /* eth_skb_pad would free the skb on error */
1947 +- return !__skb_put_padto(skb, ETH_ZLEN, false);
1948 ++ unsigned int padto = rtl_quirk_packet_padto(tp, skb);
1949 ++
1950 ++ /* skb_padto would free the skb on error */
1951 ++ return !__skb_put_padto(skb, padto, false);
1952 + }
1953 +
1954 + return true;
1955 +@@ -4349,6 +4405,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
1956 + if (skb->len < ETH_ZLEN)
1957 + features &= ~NETIF_F_CSUM_MASK;
1958 +
1959 ++ if (rtl_quirk_packet_padto(tp, skb))
1960 ++ features &= ~NETIF_F_CSUM_MASK;
1961 ++
1962 + if (transport_offset > TCPHO_MAX &&
1963 + rtl_chip_supports_csum_v2(tp))
1964 + features &= ~NETIF_F_CSUM_MASK;
1965 +@@ -4694,10 +4753,10 @@ static int rtl8169_close(struct net_device *dev)
1966 +
1967 + cancel_work_sync(&tp->wk.work);
1968 +
1969 +- phy_disconnect(tp->phydev);
1970 +-
1971 + free_irq(pci_irq_vector(pdev, 0), tp);
1972 +
1973 ++ phy_disconnect(tp->phydev);
1974 ++
1975 + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
1976 + tp->RxPhyAddr);
1977 + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
1978 +diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
1979 +index 6bfac1efe037c..4a68da7115d19 100644
1980 +--- a/drivers/net/ipa/gsi.c
1981 ++++ b/drivers/net/ipa/gsi.c
1982 +@@ -1256,7 +1256,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
1983 + /* Hardware requires a 2^n ring size, with alignment equal to size */
1984 + ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
1985 + if (ring->virt && addr % size) {
1986 +- dma_free_coherent(dev, size, ring->virt, ring->addr);
1987 ++ dma_free_coherent(dev, size, ring->virt, addr);
1988 + dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
1989 + size);
1990 + return -EINVAL; /* Not a good error value, but distinct */
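
The gsi hunk frees the rejected ring with addr, the handle this dma_alloc_coherent() call actually returned, rather than ring->addr, which still held a stale value on this error path. A sketch of the corrected pattern with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* A buffer that fails a post-allocation check must be freed with the
 * cpu address and dma handle this allocation returned, never with
 * stale fields from a previous attempt.
 */
static void *alloc_aligned_ring(struct device *dev, size_t size,
                                dma_addr_t *dma)
{
        void *virt = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

        if (virt && *dma % size) {      /* hardware wants size-aligned */
                dma_free_coherent(dev, size, virt, *dma);
                return NULL;
        }
        return virt;
}
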
1991 +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
1992 +index b59032e0859b7..9d208570d059a 100644
1993 +--- a/drivers/nvdimm/dimm_devs.c
1994 ++++ b/drivers/nvdimm/dimm_devs.c
1995 +@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
1996 + }
1997 + static DEVICE_ATTR_RO(state);
1998 +
1999 +-static ssize_t available_slots_show(struct device *dev,
2000 +- struct device_attribute *attr, char *buf)
2001 ++static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
2002 + {
2003 +- struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
2004 ++ struct device *dev;
2005 + ssize_t rc;
2006 + u32 nfree;
2007 +
2008 + if (!ndd)
2009 + return -ENXIO;
2010 +
2011 ++ dev = ndd->dev;
2012 + nvdimm_bus_lock(dev);
2013 + nfree = nd_label_nfree(ndd);
2014 + if (nfree - 1 > nfree) {
2015 +@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev,
2016 + nvdimm_bus_unlock(dev);
2017 + return rc;
2018 + }
2019 ++
2020 ++static ssize_t available_slots_show(struct device *dev,
2021 ++ struct device_attribute *attr, char *buf)
2022 ++{
2023 ++ ssize_t rc;
2024 ++
2025 ++ nd_device_lock(dev);
2026 ++ rc = __available_slots_show(dev_get_drvdata(dev), buf);
2027 ++ nd_device_unlock(dev);
2028 ++
2029 ++ return rc;
2030 ++}
2031 + static DEVICE_ATTR_RO(available_slots);
2032 +
2033 + __weak ssize_t security_show(struct device *dev,
2034 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
2035 +index 6da67f4d641a2..2403b71b601e9 100644
2036 +--- a/drivers/nvdimm/namespace_devs.c
2037 ++++ b/drivers/nvdimm/namespace_devs.c
2038 +@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj,
2039 + return a->mode;
2040 + }
2041 +
2042 +- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
2043 +- || a == &dev_attr_holder.attr
2044 +- || a == &dev_attr_holder_class.attr
2045 +- || a == &dev_attr_force_raw.attr
2046 +- || a == &dev_attr_mode.attr)
2047 ++ /* base is_namespace_io() attributes */
2048 ++ if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
2049 ++ a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
2050 ++ a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
2051 ++ a == &dev_attr_resource.attr)
2052 + return a->mode;
2053 +
2054 + return 0;
2055 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2056 +index a3486c1c27f0c..a32494cde61f7 100644
2057 +--- a/drivers/nvme/host/pci.c
2058 ++++ b/drivers/nvme/host/pci.c
2059 +@@ -3262,6 +3262,8 @@ static const struct pci_device_id nvme_id_table[] = {
2060 + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
2061 + { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
2062 + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
2063 ++ { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
2064 ++ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
2065 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
2066 + .driver_data = NVME_QUIRK_SINGLE_VECTOR },
2067 + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
2068 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
2069 +index dc1f0f6471896..aacf06f0b4312 100644
2070 +--- a/drivers/nvme/target/tcp.c
2071 ++++ b/drivers/nvme/target/tcp.c
2072 +@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
2073 + length = cmd->pdu_len;
2074 + cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
2075 + offset = cmd->rbytes_done;
2076 +- cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
2077 ++ cmd->sg_idx = offset / PAGE_SIZE;
2078 + sg_offset = offset % PAGE_SIZE;
2079 + sg = &cmd->req.sg[cmd->sg_idx];
2080 +
2081 +@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
2082 + length -= iov_len;
2083 + sg = sg_next(sg);
2084 + iov++;
2085 ++ sg_offset = 0;
2086 + }
2087 +
2088 + iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
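
The nvmet-tcp hunk makes two related corrections: the starting scatterlist index must be a floor division, since an offset that lands inside page N belongs to entry N and DIV_ROUND_UP skipped data whenever the offset was not page-aligned; and the intra-page offset only applies to the first entry, hence sg_offset is reset to 0 inside the loop. A worked example:

#include <linux/mm.h>

/* With offset = 5000 and PAGE_SIZE = 4096:
 *   DIV_ROUND_UP(5000, 4096) = 2  -> started one sg entry too late
 *   5000 / 4096              = 1  -> correct starting entry
 *   5000 % 4096              = 904 -> offset inside that entry, valid
 *                                     for the first iteration only
 */
static void locate_sg_start(unsigned int offset,
                            unsigned int *idx, unsigned int *off)
{
        *idx = offset / PAGE_SIZE;
        *off = offset % PAGE_SIZE;      /* reset to 0 after first entry */
}
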
2089 +diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
2090 +index a5f988a9f9482..b5442f979b4d0 100644
2091 +--- a/drivers/thunderbolt/acpi.c
2092 ++++ b/drivers/thunderbolt/acpi.c
2093 +@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
2094 + * managed with the xHCI and the SuperSpeed hub so we create the
2095 + * link from xHCI instead.
2096 + */
2097 +- while (!dev_is_pci(dev))
2098 ++ while (dev && !dev_is_pci(dev))
2099 + dev = dev->parent;
2100 +
2101 + if (!dev)
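
The thunderbolt fix adds a NULL check to the parent walk: a device whose ancestry never reaches a PCI device previously walked off the top of the device tree and dereferenced NULL. A sketch of the hardened walk:

#include <linux/device.h>
#include <linux/pci.h>

/* Stop at the first PCI ancestor, but also stop safely at the root
 * of the device tree when no PCI device sits above us.
 */
static struct device *find_pci_ancestor(struct device *dev)
{
        while (dev && !dev_is_pci(dev))
                dev = dev->parent;
        return dev;     /* NULL when there is no PCI ancestor */
}
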
2102 +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
2103 +index 134dc2005ce97..c9f6e97582885 100644
2104 +--- a/drivers/usb/class/usblp.c
2105 ++++ b/drivers/usb/class/usblp.c
2106 +@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
2107 + if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
2108 + return -EINVAL;
2109 +
2110 +- alts = usblp->protocol[protocol].alt_setting;
2111 +- if (alts < 0)
2112 +- return -EINVAL;
2113 +- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
2114 +- if (r < 0) {
2115 +- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
2116 +- alts, usblp->ifnum);
2117 +- return r;
2118 ++ /* Don't unnecessarily set the interface if there's a single alt. */
2119 ++ if (usblp->intf->num_altsetting > 1) {
2120 ++ alts = usblp->protocol[protocol].alt_setting;
2121 ++ if (alts < 0)
2122 ++ return -EINVAL;
2123 ++ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
2124 ++ if (r < 0) {
2125 ++ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
2126 ++ alts, usblp->ifnum);
2127 ++ return r;
2128 ++ }
2129 + }
2130 +
2131 + usblp->bidir = (usblp->protocol[protocol].epread != NULL);
2132 +diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2133 +index 0a0d11151cfb8..ad4c94366dadf 100644
2134 +--- a/drivers/usb/dwc2/gadget.c
2135 ++++ b/drivers/usb/dwc2/gadget.c
2136 +@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
2137 + static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
2138 + u32 windex)
2139 + {
2140 +- struct dwc2_hsotg_ep *ep;
2141 + int dir = (windex & USB_DIR_IN) ? 1 : 0;
2142 + int idx = windex & 0x7F;
2143 +
2144 +@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
2145 + if (idx > hsotg->num_of_eps)
2146 + return NULL;
2147 +
2148 +- ep = index_to_ep(hsotg, idx, dir);
2149 +-
2150 +- if (idx && ep->dir_in != dir)
2151 +- return NULL;
2152 +-
2153 +- return ep;
2154 ++ return index_to_ep(hsotg, idx, dir);
2155 + }
2156 +
2157 + /**
2158 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
2159 +index 841daec70b6ef..3101f0dcf6ae8 100644
2160 +--- a/drivers/usb/dwc3/core.c
2161 ++++ b/drivers/usb/dwc3/core.c
2162 +@@ -1758,7 +1758,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
2163 + if (PMSG_IS_AUTO(msg))
2164 + break;
2165 +
2166 +- ret = dwc3_core_init(dwc);
2167 ++ ret = dwc3_core_init_for_resume(dwc);
2168 + if (ret)
2169 + return ret;
2170 +
2171 +diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
2172 +index 30313b233680d..99c7fc0d1d597 100644
2173 +--- a/drivers/usb/gadget/legacy/ether.c
2174 ++++ b/drivers/usb/gadget/legacy/ether.c
2175 +@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
2176 + struct usb_descriptor_header *usb_desc;
2177 +
2178 + usb_desc = usb_otg_descriptor_alloc(gadget);
2179 +- if (!usb_desc)
2180 ++ if (!usb_desc) {
2181 ++ status = -ENOMEM;
2182 + goto fail1;
2183 ++ }
2184 + usb_otg_descriptor_init(gadget, usb_desc);
2185 + otg_desc[0] = usb_desc;
2186 + otg_desc[1] = NULL;
2187 +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
2188 +index 6497185ec4e7a..bfd8e77788e29 100644
2189 +--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
2190 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
2191 +@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
2192 + str_array[offset].s = NULL;
2193 +
2194 + ret = ast_vhub_str_alloc_add(vhub, &lang_str);
2195 +- if (ret)
2196 ++ if (ret) {
2197 ++ of_node_put(child);
2198 + break;
2199 ++ }
2200 + }
2201 +
2202 + return ret;
2203 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
2204 +index 45c54d56ecbd5..b45e5bf089979 100644
2205 +--- a/drivers/usb/host/xhci-mtk-sch.c
2206 ++++ b/drivers/usb/host/xhci-mtk-sch.c
2207 +@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
2208 +
2209 + sch_ep->sch_tt = tt;
2210 + sch_ep->ep = ep;
2211 ++ INIT_LIST_HEAD(&sch_ep->endpoint);
2212 ++ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
2213 +
2214 + return sch_ep;
2215 + }
2216 +@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
2217 + sch_ep->bw_budget_table[j];
2218 + }
2219 + }
2220 ++ sch_ep->allocated = used;
2221 + }
2222 +
2223 + static int check_sch_tt(struct usb_device *udev,
2224 +@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
2225 + return 0;
2226 + }
2227 +
2228 ++static void destroy_sch_ep(struct usb_device *udev,
2229 ++ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
2230 ++{
2231 ++	/* only release bandwidth for eps that passed the check in check_sch_bw() */
2232 ++ if (sch_ep->allocated)
2233 ++ update_bus_bw(sch_bw, sch_ep, 0);
2234 ++
2235 ++ list_del(&sch_ep->endpoint);
2236 ++
2237 ++ if (sch_ep->sch_tt) {
2238 ++ list_del(&sch_ep->tt_endpoint);
2239 ++ drop_tt(udev);
2240 ++ }
2241 ++ kfree(sch_ep);
2242 ++}
2243 ++
2244 + static bool need_bw_sch(struct usb_host_endpoint *ep,
2245 + enum usb_device_speed speed, int has_tt)
2246 + {
2247 +@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
2248 +
2249 + mtk->sch_array = sch_array;
2250 +
2251 ++ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
2252 ++
2253 + return 0;
2254 + }
2255 + EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
2256 +@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2257 + struct xhci_ep_ctx *ep_ctx;
2258 + struct xhci_slot_ctx *slot_ctx;
2259 + struct xhci_virt_device *virt_dev;
2260 +- struct mu3h_sch_bw_info *sch_bw;
2261 + struct mu3h_sch_ep_info *sch_ep;
2262 +- struct mu3h_sch_bw_info *sch_array;
2263 + unsigned int ep_index;
2264 +- int bw_index;
2265 +- int ret = 0;
2266 +
2267 + xhci = hcd_to_xhci(hcd);
2268 + virt_dev = xhci->devs[udev->slot_id];
2269 + ep_index = xhci_get_endpoint_index(&ep->desc);
2270 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2271 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2272 +- sch_array = mtk->sch_array;
2273 +
2274 + xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
2275 + __func__, usb_endpoint_type(&ep->desc), udev->speed,
2276 +@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2277 + return 0;
2278 + }
2279 +
2280 +- bw_index = get_bw_index(xhci, udev, ep);
2281 +- sch_bw = &sch_array[bw_index];
2282 +-
2283 + sch_ep = create_sch_ep(udev, ep, ep_ctx);
2284 + if (IS_ERR_OR_NULL(sch_ep))
2285 + return -ENOMEM;
2286 +
2287 + setup_sch_info(udev, ep_ctx, sch_ep);
2288 +
2289 +- ret = check_sch_bw(udev, sch_bw, sch_ep);
2290 +- if (ret) {
2291 +- xhci_err(xhci, "Not enough bandwidth!\n");
2292 +- if (is_fs_or_ls(udev->speed))
2293 +- drop_tt(udev);
2294 +-
2295 +- kfree(sch_ep);
2296 +- return -ENOSPC;
2297 +- }
2298 +-
2299 +- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
2300 +-
2301 +- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
2302 +- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
2303 +- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
2304 +- | EP_BREPEAT(sch_ep->repeat));
2305 +-
2306 +- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
2307 +- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
2308 +- sch_ep->offset, sch_ep->repeat);
2309 ++ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
2310 +
2311 + return 0;
2312 + }
2313 +@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2314 + struct xhci_virt_device *virt_dev;
2315 + struct mu3h_sch_bw_info *sch_array;
2316 + struct mu3h_sch_bw_info *sch_bw;
2317 +- struct mu3h_sch_ep_info *sch_ep;
2318 ++ struct mu3h_sch_ep_info *sch_ep, *tmp;
2319 + int bw_index;
2320 +
2321 + xhci = hcd_to_xhci(hcd);
2322 +@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2323 + bw_index = get_bw_index(xhci, udev, ep);
2324 + sch_bw = &sch_array[bw_index];
2325 +
2326 +- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
2327 ++ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
2328 + if (sch_ep->ep == ep) {
2329 +- update_bus_bw(sch_bw, sch_ep, 0);
2330 +- list_del(&sch_ep->endpoint);
2331 +- if (is_fs_or_ls(udev->speed)) {
2332 +- list_del(&sch_ep->tt_endpoint);
2333 +- drop_tt(udev);
2334 +- }
2335 +- kfree(sch_ep);
2336 ++ destroy_sch_ep(udev, sch_bw, sch_ep);
2337 + break;
2338 + }
2339 + }
2340 + }
2341 + EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
2342 ++
2343 ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2344 ++{
2345 ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
2346 ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2347 ++ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
2348 ++ struct mu3h_sch_bw_info *sch_bw;
2349 ++ struct mu3h_sch_ep_info *sch_ep, *tmp;
2350 ++ int bw_index, ret;
2351 ++
2352 ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
2353 ++
2354 ++ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
2355 ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
2356 ++ sch_bw = &mtk->sch_array[bw_index];
2357 ++
2358 ++ ret = check_sch_bw(udev, sch_bw, sch_ep);
2359 ++ if (ret) {
2360 ++ xhci_err(xhci, "Not enough bandwidth!\n");
2361 ++ return -ENOSPC;
2362 ++ }
2363 ++ }
2364 ++
2365 ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
2366 ++ struct xhci_ep_ctx *ep_ctx;
2367 ++ struct usb_host_endpoint *ep = sch_ep->ep;
2368 ++ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
2369 ++
2370 ++ bw_index = get_bw_index(xhci, udev, ep);
2371 ++ sch_bw = &mtk->sch_array[bw_index];
2372 ++
2373 ++ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
2374 ++
2375 ++ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2376 ++ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
2377 ++ | EP_BCSCOUNT(sch_ep->cs_count)
2378 ++ | EP_BBM(sch_ep->burst_mode));
2379 ++ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
2380 ++ | EP_BREPEAT(sch_ep->repeat));
2381 ++
2382 ++ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
2383 ++ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
2384 ++ sch_ep->offset, sch_ep->repeat);
2385 ++ }
2386 ++
2387 ++ return xhci_check_bandwidth(hcd, udev);
2388 ++}
2389 ++EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
2390 ++
2391 ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2392 ++{
2393 ++ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
2394 ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2395 ++ struct mu3h_sch_bw_info *sch_bw;
2396 ++ struct mu3h_sch_ep_info *sch_ep, *tmp;
2397 ++ int bw_index;
2398 ++
2399 ++ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
2400 ++
2401 ++ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
2402 ++ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
2403 ++ sch_bw = &mtk->sch_array[bw_index];
2404 ++ destroy_sch_ep(udev, sch_bw, sch_ep);
2405 ++ }
2406 ++
2407 ++ xhci_reset_bandwidth(hcd, udev);
2408 ++}
2409 ++EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
2410 +diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
2411 +index 8f321f39ab960..fe010cc61f19b 100644
2412 +--- a/drivers/usb/host/xhci-mtk.c
2413 ++++ b/drivers/usb/host/xhci-mtk.c
2414 +@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
2415 + static int xhci_mtk_setup(struct usb_hcd *hcd);
2416 + static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
2417 + .reset = xhci_mtk_setup,
2418 ++ .check_bandwidth = xhci_mtk_check_bandwidth,
2419 ++ .reset_bandwidth = xhci_mtk_reset_bandwidth,
2420 + };
2421 +
2422 + static struct hc_driver __read_mostly xhci_mtk_hc_driver;
2423 +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
2424 +index a93cfe8179049..cbb09dfea62e0 100644
2425 +--- a/drivers/usb/host/xhci-mtk.h
2426 ++++ b/drivers/usb/host/xhci-mtk.h
2427 +@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
2428 + * @ep_type: endpoint type
2429 + * @maxpkt: max packet size of endpoint
2430 + * @ep: address of usb_host_endpoint struct
2431 ++ * @allocated: the bandwidth is already allocated from bus_bw
2432 + * @offset: which uframe of the interval that transfer should be
2433 + * scheduled first time within the interval
2434 + * @repeat: the time gap between two uframes that transfers are
2435 +@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
2436 + u32 ep_type;
2437 + u32 maxpkt;
2438 + void *ep;
2439 ++ bool allocated;
2440 + /*
2441 + * mtk xHCI scheduling information put into reserved DWs
2442 + * in ep context
2443 +@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
2444 + struct device *dev;
2445 + struct usb_hcd *hcd;
2446 + struct mu3h_sch_bw_info *sch_array;
2447 ++ struct list_head bw_ep_chk_list;
2448 + struct mu3c_ippc_regs __iomem *ippc_regs;
2449 + bool has_ippc;
2450 + int num_u2_ports;
2451 +@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2452 + struct usb_host_endpoint *ep);
2453 + void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
2454 + struct usb_host_endpoint *ep);
2455 ++int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
2456 ++void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
2457 +
2458 + #else
2459 + static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
2460 +@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
2461 + {
2462 + }
2463 +
2464 ++static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
2465 ++ struct usb_device *udev)
2466 ++{
2467 ++ return 0;
2468 ++}
2469 ++
2470 ++static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
2471 ++ struct usb_device *udev)
2472 ++{
2473 ++}
2474 + #endif
2475 +
2476 + #endif /* _XHCI_MTK_H_ */
2477 +diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
2478 +index 60651a50770f9..8ca1a235d1645 100644
2479 +--- a/drivers/usb/host/xhci-mvebu.c
2480 ++++ b/drivers/usb/host/xhci-mvebu.c
2481 +@@ -8,6 +8,7 @@
2482 + #include <linux/mbus.h>
2483 + #include <linux/of.h>
2484 + #include <linux/platform_device.h>
2485 ++#include <linux/phy/phy.h>
2486 +
2487 + #include <linux/usb.h>
2488 + #include <linux/usb/hcd.h>
2489 +@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
2490 + return 0;
2491 + }
2492 +
2493 ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
2494 ++{
2495 ++ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2496 ++ struct device *dev = hcd->self.controller;
2497 ++ struct phy *phy;
2498 ++ int ret;
2499 ++
2500 ++ /* Old bindings miss the PHY handle */
2501 ++ phy = of_phy_get(dev->of_node, "usb3-phy");
2502 ++ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
2503 ++ return -EPROBE_DEFER;
2504 ++ else if (IS_ERR(phy))
2505 ++ goto phy_out;
2506 ++
2507 ++ ret = phy_init(phy);
2508 ++ if (ret)
2509 ++ goto phy_put;
2510 ++
2511 ++ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
2512 ++ if (ret)
2513 ++ goto phy_exit;
2514 ++
2515 ++ ret = phy_power_on(phy);
2516 ++ if (ret == -EOPNOTSUPP) {
2517 ++		/* Skip initialization of XHCI PHY when it is unsupported by firmware */
2518 ++ dev_warn(dev, "PHY unsupported by firmware\n");
2519 ++ xhci->quirks |= XHCI_SKIP_PHY_INIT;
2520 ++ }
2521 ++ if (ret)
2522 ++ goto phy_exit;
2523 ++
2524 ++ phy_power_off(phy);
2525 ++phy_exit:
2526 ++ phy_exit(phy);
2527 ++phy_put:
2528 ++ of_phy_put(phy);
2529 ++phy_out:
2530 ++
2531 ++ return 0;
2532 ++}
2533 ++
2534 + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
2535 + {
2536 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2537 +diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
2538 +index 3be021793cc8b..01bf3fcb3eca5 100644
2539 +--- a/drivers/usb/host/xhci-mvebu.h
2540 ++++ b/drivers/usb/host/xhci-mvebu.h
2541 +@@ -12,6 +12,7 @@ struct usb_hcd;
2542 +
2543 + #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
2544 + int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
2545 ++int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
2546 + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
2547 + #else
2548 + static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
2549 +@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
2550 + return 0;
2551 + }
2552 +
2553 ++static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
2554 ++{
2555 ++ return 0;
2556 ++}
2557 ++
2558 + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
2559 + {
2560 + return 0;
2561 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
2562 +index 4d34f6005381e..c1edcc9b13cec 100644
2563 +--- a/drivers/usb/host/xhci-plat.c
2564 ++++ b/drivers/usb/host/xhci-plat.c
2565 +@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
2566 + priv->plat_start(hcd);
2567 + }
2568 +
2569 ++static int xhci_priv_plat_setup(struct usb_hcd *hcd)
2570 ++{
2571 ++ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
2572 ++
2573 ++ if (!priv->plat_setup)
2574 ++ return 0;
2575 ++
2576 ++ return priv->plat_setup(hcd);
2577 ++}
2578 ++
2579 + static int xhci_priv_init_quirk(struct usb_hcd *hcd)
2580 + {
2581 + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
2582 +@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
2583 + };
2584 +
2585 + static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
2586 ++ .plat_setup = xhci_mvebu_a3700_plat_setup,
2587 + .init_quirk = xhci_mvebu_a3700_init_quirk,
2588 + };
2589 +
2590 +@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
2591 +
2592 + hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
2593 + xhci->shared_hcd->tpl_support = hcd->tpl_support;
2594 +- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
2595 ++
2596 ++ if (priv) {
2597 ++ ret = xhci_priv_plat_setup(hcd);
2598 ++ if (ret)
2599 ++ goto disable_usb_phy;
2600 ++ }
2601 ++
2602 ++ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
2603 + hcd->skip_phy_initialization = 1;
2604 +
2605 + if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
2606 +diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
2607 +index 1fb149d1fbcea..561d0b7bce098 100644
2608 +--- a/drivers/usb/host/xhci-plat.h
2609 ++++ b/drivers/usb/host/xhci-plat.h
2610 +@@ -13,6 +13,7 @@
2611 + struct xhci_plat_priv {
2612 + const char *firmware_name;
2613 + unsigned long long quirks;
2614 ++ int (*plat_setup)(struct usb_hcd *);
2615 + void (*plat_start)(struct usb_hcd *);
2616 + int (*init_quirk)(struct usb_hcd *);
2617 + int (*suspend_quirk)(struct usb_hcd *);
2618 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2619 +index db8612ec82d3e..061d5c51405fb 100644
2620 +--- a/drivers/usb/host/xhci-ring.c
2621 ++++ b/drivers/usb/host/xhci-ring.c
2622 +@@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
2623 + dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
2624 + DMA_FROM_DEVICE);
2625 + /* for IN transfers we need to copy the data from bounce to sg */
2626 +- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
2627 +- seg->bounce_len, seg->bounce_offs);
2628 +- if (len != seg->bounce_len)
2629 +- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
2630 +- len, seg->bounce_len);
2631 ++ if (urb->num_sgs) {
2632 ++ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
2633 ++ seg->bounce_len, seg->bounce_offs);
2634 ++ if (len != seg->bounce_len)
2635 ++ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
2636 ++ len, seg->bounce_len);
2637 ++ } else {
2638 ++ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
2639 ++ seg->bounce_len);
2640 ++ }
2641 + seg->bounce_len = 0;
2642 + seg->bounce_offs = 0;
2643 + }
2644 +@@ -3275,12 +3280,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
2645 +
2646 + /* create a max_pkt-sized bounce buffer pointed to by the last trb */
2647 + if (usb_urb_dir_out(urb)) {
2648 +- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
2649 +- seg->bounce_buf, new_buff_len, enqd_len);
2650 +- if (len != new_buff_len)
2651 +- xhci_warn(xhci,
2652 +- "WARN Wrong bounce buffer write length: %zu != %d\n",
2653 +- len, new_buff_len);
2654 ++ if (urb->num_sgs) {
2655 ++ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
2656 ++ seg->bounce_buf, new_buff_len, enqd_len);
2657 ++ if (len != new_buff_len)
2658 ++ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
2659 ++ len, new_buff_len);
2660 ++ } else {
2661 ++ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
2662 ++ }
2663 ++
2664 + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
2665 + max_pkt, DMA_TO_DEVICE);
2666 + } else {
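
The xhci-ring hunks teach both bounce-buffer copies to handle URBs that carry a linear transfer_buffer rather than a scatter-gather list: sg_pcopy_to_buffer() and sg_pcopy_from_buffer() only walk urb->sg, so when urb->num_sgs is 0 a plain memcpy() is needed. A sketch of the receive-side dispatch with a hypothetical helper:

#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/usb.h>

/* sg_pcopy_from_buffer() only walks a scatterlist; a linear URB
 * (urb->num_sgs == 0) is served with a memcpy() into transfer_buffer.
 */
static void bounce_copy_in(struct urb *urb, const void *bounce,
                           size_t len, size_t offs)
{
        if (urb->num_sgs)
                sg_pcopy_from_buffer(urb->sg, urb->num_sgs, bounce,
                                     len, offs);
        else
                memcpy(urb->transfer_buffer + offs, bounce, len);
}
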
2667 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2668 +index 73f1373d517a2..d17bbb162810a 100644
2669 +--- a/drivers/usb/host/xhci.c
2670 ++++ b/drivers/usb/host/xhci.c
2671 +@@ -2861,7 +2861,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2672 + * else should be touching the xhci->devs[slot_id] structure, so we
2673 + * don't need to take the xhci->lock for manipulating that.
2674 + */
2675 +-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2676 ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2677 + {
2678 + int i;
2679 + int ret = 0;
2680 +@@ -2959,7 +2959,7 @@ command_cleanup:
2681 + return ret;
2682 + }
2683 +
2684 +-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2685 ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2686 + {
2687 + struct xhci_hcd *xhci;
2688 + struct xhci_virt_device *virt_dev;
2689 +@@ -5385,6 +5385,10 @@ void xhci_init_driver(struct hc_driver *drv,
2690 + drv->reset = over->reset;
2691 + if (over->start)
2692 + drv->start = over->start;
2693 ++ if (over->check_bandwidth)
2694 ++ drv->check_bandwidth = over->check_bandwidth;
2695 ++ if (over->reset_bandwidth)
2696 ++ drv->reset_bandwidth = over->reset_bandwidth;
2697 + }
2698 + }
2699 + EXPORT_SYMBOL_GPL(xhci_init_driver);
2700 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2701 +index d90c0d5df3b37..045740ad9c1ec 100644
2702 +--- a/drivers/usb/host/xhci.h
2703 ++++ b/drivers/usb/host/xhci.h
2704 +@@ -1916,6 +1916,8 @@ struct xhci_driver_overrides {
2705 + size_t extra_priv_size;
2706 + int (*reset)(struct usb_hcd *hcd);
2707 + int (*start)(struct usb_hcd *hcd);
2708 ++ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
2709 ++ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
2710 + };
2711 +
2712 + #define XHCI_CFC_DELAY 10
2713 +@@ -2070,6 +2072,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
2714 + void xhci_shutdown(struct usb_hcd *hcd);
2715 + void xhci_init_driver(struct hc_driver *drv,
2716 + const struct xhci_driver_overrides *over);
2717 ++int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
2718 ++void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
2719 + int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
2720 + int xhci_ext_cap_init(struct xhci_hcd *xhci);
2721 +
2722 +diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
2723 +index ac9a81ae82164..e6fa137018082 100644
2724 +--- a/drivers/usb/renesas_usbhs/fifo.c
2725 ++++ b/drivers/usb/renesas_usbhs/fifo.c
2726 +@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
2727 + }
2728 +
2729 + usbhs_pipe_clear_without_sequence(pipe, 0, 0);
2730 ++ usbhs_pipe_running(pipe, 0);
2731 +
2732 + __usbhsf_pkt_del(pkt);
2733 + }
2734 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2735 +index d0c05aa8a0d6e..bf11f86896837 100644
2736 +--- a/drivers/usb/serial/cp210x.c
2737 ++++ b/drivers/usb/serial/cp210x.c
2738 +@@ -64,6 +64,7 @@ static const struct usb_device_id id_table[] = {
2739 + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
2740 + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
2741 + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
2742 ++ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
2743 + { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
2744 + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
2745 + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
2746 +@@ -204,6 +205,7 @@ static const struct usb_device_id id_table[] = {
2747 + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
2748 + { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
2749 + { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
2750 ++ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
2751 + { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
2752 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
2753 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
2754 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2755 +index 3fe959104311b..2049e66f34a3f 100644
2756 +--- a/drivers/usb/serial/option.c
2757 ++++ b/drivers/usb/serial/option.c
2758 +@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
2759 + #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
2760 + #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
2761 + #define CINTERION_PRODUCT_CLS8 0x00b0
2762 ++#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
2763 ++#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
2764 +
2765 + /* Olivetti products */
2766 + #define OLIVETTI_VENDOR_ID 0x0b3c
2767 +@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
2768 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
2769 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
2770 + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
2771 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
2772 ++ .driver_info = RSVD(3)},
2773 ++ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
2774 ++ .driver_info = RSVD(0)},
2775 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
2776 + .driver_info = RSVD(4) },
2777 + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
2778 +diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
2779 +index 5c92a576edae8..08f742fd24099 100644
2780 +--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
2781 ++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
2782 +@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
2783 + struct sg_table sg_head;
2784 + int log_size;
2785 + int nsg;
2786 ++ int nent;
2787 + struct list_head list;
2788 + u64 offset;
2789 + };
2790 +diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
2791 +index 4b6195666c589..d300f799efcd1 100644
2792 +--- a/drivers/vdpa/mlx5/core/mr.c
2793 ++++ b/drivers/vdpa/mlx5/core/mr.c
2794 +@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
2795 + return (npages + 1) / 2;
2796 + }
2797 +
2798 +-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
2799 +-{
2800 +- struct scatterlist *sg;
2801 +- __be64 *pas;
2802 +- int i;
2803 +-
2804 +- pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
2805 +- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
2806 +- (*pas) = cpu_to_be64(sg_dma_address(sg));
2807 +-}
2808 +-
2809 + static void mlx5_set_access_mode(void *mkc, int mode)
2810 + {
2811 + MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
2812 +@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
2813 + static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
2814 + {
2815 + struct scatterlist *sg;
2816 ++ int nsg = mr->nsg;
2817 ++ u64 dma_addr;
2818 ++ u64 dma_len;
2819 ++ int j = 0;
2820 + int i;
2821 +
2822 +- for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
2823 +- mtt[i] = cpu_to_be64(sg_dma_address(sg));
2824 ++ for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
2825 ++ for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
2826 ++ nsg && dma_len;
2827 ++ nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
2828 ++ mtt[j++] = cpu_to_be64(dma_addr);
2829 ++ }
2830 + }
2831 +
2832 + static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
2833 +@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
2834 + return -ENOMEM;
2835 +
2836 + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
2837 +- fill_sg(mr, in);
2838 + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2839 + MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
2840 + MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
2841 +@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
2842 + done:
2843 + mr->log_size = log_entity_size;
2844 + mr->nsg = nsg;
2845 +- err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
2846 +- if (!err)
2847 ++ mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
2848 ++ if (!mr->nent)
2849 + goto err_map;
2850 +
2851 + err = create_direct_mr(mvdev, mr);
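
The mr.c changes account for dma_map_sg_attrs() coalescing adjacent entries: it returns the number of mapped entries (now kept in mr->nent), which can be smaller than the nsg passed in, and populate_mtts() now walks the mapped runs and slices each one into BIT(log_size)-byte pages. A sketch with hypothetical names, assuming each run is a multiple of the page size as in the patch:

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Walk the count dma_map_sg() returned (nent), not the count passed
 * in, and emit exactly npages fixed-size page addresses.
 */
static void fill_page_array(struct scatterlist *sgl, int nent,
                            int log_size, u64 *pages, int npages)
{
        struct scatterlist *sg;
        int i, j = 0;

        for_each_sg(sgl, sg, nent, i) {
                u64 addr = sg_dma_address(sg);
                u64 len = sg_dma_len(sg);

                for (; npages && len; npages--, j++) {
                        pages[j] = addr;
                        addr += 1ULL << log_size;
                        len -= 1ULL << log_size;
                }
        }
}
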
2852 +diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
2853 +index 81b932f72e103..c6529f7c3034a 100644
2854 +--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
2855 ++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
2856 +@@ -77,6 +77,7 @@ struct mlx5_vq_restore_info {
2857 + u64 device_addr;
2858 + u64 driver_addr;
2859 + u16 avail_index;
2860 ++ u16 used_index;
2861 + bool ready;
2862 + struct vdpa_callback cb;
2863 + bool restore;
2864 +@@ -111,6 +112,7 @@ struct mlx5_vdpa_virtqueue {
2865 + u32 virtq_id;
2866 + struct mlx5_vdpa_net *ndev;
2867 + u16 avail_idx;
2868 ++ u16 used_idx;
2869 + int fw_state;
2870 +
2871 + /* keep last in the struct */
2872 +@@ -789,6 +791,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
2873 +
2874 + obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
2875 + MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
2876 ++ MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
2877 + MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
2878 + get_features_12_3(ndev->mvdev.actual_features));
2879 + vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
2880 +@@ -1007,6 +1010,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
2881 + struct mlx5_virtq_attr {
2882 + u8 state;
2883 + u16 available_index;
2884 ++ u16 used_index;
2885 + };
2886 +
2887 + static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
2888 +@@ -1037,6 +1041,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
2889 + memset(attr, 0, sizeof(*attr));
2890 + attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
2891 + attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
2892 ++ attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
2893 + kfree(out);
2894 + return 0;
2895 +
2896 +@@ -1520,6 +1525,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
2897 + }
2898 + }
2899 +
2900 ++static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
2901 ++{
2902 ++ int i;
2903 ++
2904 ++ for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
2905 ++ ndev->vqs[i].avail_idx = 0;
2906 ++ ndev->vqs[i].used_idx = 0;
2907 ++ }
2908 ++}
2909 ++
2910 + /* TODO: cross-endian support */
2911 + static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
2912 + {
2913 +@@ -1595,6 +1610,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
2914 + return err;
2915 +
2916 + ri->avail_index = attr.available_index;
2917 ++ ri->used_index = attr.used_index;
2918 + ri->ready = mvq->ready;
2919 + ri->num_ent = mvq->num_ent;
2920 + ri->desc_addr = mvq->desc_addr;
2921 +@@ -1639,6 +1655,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
2922 + continue;
2923 +
2924 + mvq->avail_idx = ri->avail_index;
2925 ++ mvq->used_idx = ri->used_index;
2926 + mvq->ready = ri->ready;
2927 + mvq->num_ent = ri->num_ent;
2928 + mvq->desc_addr = ri->desc_addr;
2929 +@@ -1753,6 +1770,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
2930 + if (!status) {
2931 + mlx5_vdpa_info(mvdev, "performing device reset\n");
2932 + teardown_driver(ndev);
2933 ++ clear_virtqueues(ndev);
2934 + mlx5_vdpa_destroy_mr(&ndev->mvdev);
2935 + ndev->mvdev.status = 0;
2936 + ndev->mvdev.mlx_features = 0;
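
The thread running through this file is that hw_used_index must follow hw_available_index everywhere: programmed at queue creation, saved and restored around a memory-map change, and zeroed on device reset. A compact sketch of that lifecycle with invented names:

    #include <assert.h>
    #include <stdint.h>

    struct vq { uint16_t avail_idx, used_idx; };
    struct restore_info { uint16_t avail_idx, used_idx; };

    static void save(struct restore_info *ri, const struct vq *q)
    {
            ri->avail_idx = q->avail_idx;
            ri->used_idx = q->used_idx;     /* the field the fix adds */
    }

    static void restore(struct vq *q, const struct restore_info *ri)
    {
            q->avail_idx = ri->avail_idx;
            q->used_idx = ri->used_idx;
    }

    static void reset(struct vq *q)
    {
            q->avail_idx = 0;
            q->used_idx = 0;                /* clear_virtqueues() */
    }

    int main(void)
    {
            struct vq q = { 7, 7 };
            struct restore_info ri;

            save(&ri, &q);                  /* suspend (map change) */
            restore(&q, &ri);               /* resume */
            assert(q.used_idx == 7);        /* continue where we were */
            reset(&q);                      /* reset must start fresh */
            assert(q.used_idx == 0);
            return 0;
    }
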
2937 +diff --git a/fs/afs/main.c b/fs/afs/main.c
2938 +index accdd8970e7c0..b2975256dadbd 100644
2939 +--- a/fs/afs/main.c
2940 ++++ b/fs/afs/main.c
2941 +@@ -193,7 +193,7 @@ static int __init afs_init(void)
2942 + goto error_cache;
2943 + #endif
2944 +
2945 +- ret = register_pernet_subsys(&afs_net_ops);
2946 ++ ret = register_pernet_device(&afs_net_ops);
2947 + if (ret < 0)
2948 + goto error_net;
2949 +
2950 +@@ -213,7 +213,7 @@ static int __init afs_init(void)
2951 + error_proc:
2952 + afs_fs_exit();
2953 + error_fs:
2954 +- unregister_pernet_subsys(&afs_net_ops);
2955 ++ unregister_pernet_device(&afs_net_ops);
2956 + error_net:
2957 + #ifdef CONFIG_AFS_FSCACHE
2958 + fscache_unregister_netfs(&afs_cache_netfs);
2959 +@@ -244,7 +244,7 @@ static void __exit afs_exit(void)
2960 +
2961 + proc_remove(afs_proc_symlink);
2962 + afs_fs_exit();
2963 +- unregister_pernet_subsys(&afs_net_ops);
2964 ++ unregister_pernet_device(&afs_net_ops);
2965 + #ifdef CONFIG_AFS_FSCACHE
2966 + fscache_unregister_netfs(&afs_cache_netfs);
2967 + #endif
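
Background for the subsys-to-device switch: per-net operations registered with register_pernet_device() have their ->exit called in the device phase of namespace teardown, before subsys-level exits run, so the module's per-net cleanup happens while core networking state it may still touch is alive. A sketch of the registration pattern (an illustrative module, not the afs code):

    #include <linux/module.h>
    #include <net/net_namespace.h>

    static int __net_init demo_net_init(struct net *net)
    {
            return 0;       /* allocate per-namespace state here */
    }

    static void __net_exit demo_net_exit(struct net *net)
    {
            /* sockets/routing still usable: subsys exits come later */
    }

    static struct pernet_operations demo_net_ops = {
            .init = demo_net_init,
            .exit = demo_net_exit,
    };

    static int __init demo_init(void)
    {
            return register_pernet_device(&demo_net_ops);
    }

    static void __exit demo_exit(void)
    {
            unregister_pernet_device(&demo_net_ops);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
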
2968 +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2969 +index 398c1eef71906..0d7238cb45b56 100644
2970 +--- a/fs/cifs/dir.c
2971 ++++ b/fs/cifs/dir.c
2972 +@@ -736,6 +736,7 @@ static int
2973 + cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
2974 + {
2975 + struct inode *inode;
2976 ++ int rc;
2977 +
2978 + if (flags & LOOKUP_RCU)
2979 + return -ECHILD;
2980 +@@ -745,8 +746,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
2981 + if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
2982 + CIFS_I(inode)->time = 0; /* force reval */
2983 +
2984 +- if (cifs_revalidate_dentry(direntry))
2985 +- return 0;
2986 ++ rc = cifs_revalidate_dentry(direntry);
2987 ++ if (rc) {
2988 ++ cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
2989 ++ switch (rc) {
2990 ++ case -ENOENT:
2991 ++ case -ESTALE:
2992 ++ /*
2993 ++ * Those errors mean the dentry is invalid
2994 ++ * (file was deleted or recreated)
2995 ++ */
2996 ++ return 0;
2997 ++ default:
2998 ++ /*
2999 ++ * Otherwise some unexpected error happened
3000 ++ * report it as-is to VFS layer
3001 ++ */
3002 ++ return rc;
3003 ++ }
3004 ++ }
3005 + else {
3006 + /*
3007 + * If the inode wasn't known to be a dfs entry when
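
For context, ->d_revalidate() has a three-way contract: return 1 to keep the dentry, 0 to invalidate it, or a negative errno to propagate. Before this hunk, every failure from cifs_revalidate_dentry(), including transient ones like -EIO, collapsed into "invalidate". A userspace sketch of the triage now applied:

    #include <errno.h>

    /* 1 = still valid, 0 = drop the dentry, <0 = propagate the error */
    static int triage_revalidate(int rc)
    {
            if (rc == 0)
                    return 1;
            switch (rc) {
            case -ENOENT:   /* deleted on the server */
            case -ESTALE:   /* recreated: same name, new object */
                    return 0;
            default:
                    return rc;      /* e.g. -EIO: tell the VFS, don't guess */
            }
    }
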
3008 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3009 +index 204a622b89ed3..56ec9fba3925b 100644
3010 +--- a/fs/cifs/smb2pdu.h
3011 ++++ b/fs/cifs/smb2pdu.h
3012 +@@ -286,7 +286,7 @@ struct smb2_negotiate_req {
3013 + __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
3014 + __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
3015 + __le16 Reserved2;
3016 +- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
3017 ++ __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
3018 + } __packed;
3019 +
3020 + /* Dialects */
3021 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
3022 +index b1c2f416b9bd9..9391cd17a2b55 100644
3023 +--- a/fs/cifs/transport.c
3024 ++++ b/fs/cifs/transport.c
3025 +@@ -655,10 +655,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
3026 + spin_lock(&server->req_lock);
3027 + if (*credits < num) {
3028 + /*
3029 +- * Return immediately if not too many requests in flight since
3030 +- * we will likely be stuck on waiting for credits.
3031 ++ * If the server is tight on resources or just gives us less
3032 ++ * credits for other reasons (e.g. requests are coming out of
3033 ++ * order and the server delays granting more credits until it
3034 ++ * processes a missing mid) and we exhausted most available
3035 ++ * credits there may be situations when we try to send
3036 ++ * a compound request but we don't have enough credits. At this
3037 ++ * point the client needs to decide if it should wait for
3038 ++ * additional credits or fail the request. If at least one
3039 ++ * request is in flight there is a high probability that the
3040 ++ * server will return enough credits to satisfy this compound
3041 ++ * request.
3042 ++ *
3043 ++ * Return immediately if no requests in flight since we will be
3044 ++ * stuck on waiting for credits.
3045 + */
3046 +- if (server->in_flight < num - *credits) {
3047 ++ if (server->in_flight == 0) {
3048 + spin_unlock(&server->req_lock);
3049 + return -ENOTSUPP;
3050 + }
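
The rewritten test reduces to a simple rule: a credit shortage is only fatal when nothing is in flight, because any in-flight request is expected to bring credits back with its response. Restated as a standalone predicate:

    #include <stdbool.h>

    static bool should_fail_fast(unsigned int credits, unsigned int needed,
                                 unsigned int in_flight)
    {
            if (credits >= needed)
                    return false;           /* enough credits: just send */
            return in_flight == 0;          /* nobody will grant more */
    }
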
3051 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
3052 +index b5c109703daaf..21c20fd5f9ee7 100644
3053 +--- a/fs/hugetlbfs/inode.c
3054 ++++ b/fs/hugetlbfs/inode.c
3055 +@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
3056 +
3057 + mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3058 +
3059 ++ set_page_huge_active(page);
3060 + /*
3061 + * unlock_page because locked by add_to_page_cache()
3062 +- * page_put due to reference from alloc_huge_page()
3063 ++ * put_page() due to reference from alloc_huge_page()
3064 + */
3065 + unlock_page(page);
3066 + put_page(page);
3067 +diff --git a/fs/io_uring.c b/fs/io_uring.c
3068 +index 907ecaffc3386..3b6307f6bd93d 100644
3069 +--- a/fs/io_uring.c
3070 ++++ b/fs/io_uring.c
3071 +@@ -8782,12 +8782,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
3072 +
3073 + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
3074 + atomic_dec(&task->io_uring->in_idle);
3075 +- /*
3076 +- * If the files that are going away are the ones in the thread
3077 +- * identity, clear them out.
3078 +- */
3079 +- if (task->io_uring->identity->files == files)
3080 +- task->io_uring->identity->files = NULL;
3081 + io_sq_thread_unpark(ctx->sq_data);
3082 + }
3083 + }
3084 +diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
3085 +index 28a075b5f5b2e..d1efa3a5a5032 100644
3086 +--- a/fs/overlayfs/dir.c
3087 ++++ b/fs/overlayfs/dir.c
3088 +@@ -992,8 +992,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
3089 +
3090 + buflen -= thislen;
3091 + memcpy(&buf[buflen], name, thislen);
3092 +- tmp = dget_dlock(d->d_parent);
3093 + spin_unlock(&d->d_lock);
3094 ++ tmp = dget_parent(d);
3095 +
3096 + dput(d);
3097 + d = tmp;
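
The one-line swap above fixes a reference-taking race: dget_dlock() is only safe while holding the d_lock of the dentry whose count is being raised, and here only the child's lock was held. dget_parent() takes a stable reference to ->d_parent without that requirement. The resulting idiom, sketched:

    #include <linux/dcache.h>
    #include <linux/spinlock.h>

    static struct dentry *parent_ref(struct dentry *d)
    {
            spin_lock(&d->d_lock);
            /* ...read the fields of d that need d_lock... */
            spin_unlock(&d->d_lock);

            return dget_parent(d);  /* caller must dput() the result */
    }
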
3098 +diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
3099 +index a1f72ac053e5f..5c5c3972ebd0a 100644
3100 +--- a/fs/overlayfs/file.c
3101 ++++ b/fs/overlayfs/file.c
3102 +@@ -445,8 +445,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
3103 + const struct cred *old_cred;
3104 + int ret;
3105 +
3106 +- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
3107 +- return 0;
3108 ++ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
3109 ++ if (ret <= 0)
3110 ++ return ret;
3111 +
3112 + ret = ovl_real_fdget_meta(file, &real, !datasync);
3113 + if (ret)
3114 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
3115 +index f8880aa2ba0ec..9f7af98ae2005 100644
3116 +--- a/fs/overlayfs/overlayfs.h
3117 ++++ b/fs/overlayfs/overlayfs.h
3118 +@@ -322,6 +322,7 @@ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
3119 + bool ovl_is_metacopy_dentry(struct dentry *dentry);
3120 + char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
3121 + int padding);
3122 ++int ovl_sync_status(struct ovl_fs *ofs);
3123 +
3124 + static inline bool ovl_is_impuredir(struct super_block *sb,
3125 + struct dentry *dentry)
3126 +diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
3127 +index 1b5a2094df8eb..b208eba5d0b64 100644
3128 +--- a/fs/overlayfs/ovl_entry.h
3129 ++++ b/fs/overlayfs/ovl_entry.h
3130 +@@ -79,6 +79,8 @@ struct ovl_fs {
3131 + atomic_long_t last_ino;
3132 + /* Whiteout dentry cache */
3133 + struct dentry *whiteout;
3134 ++ /* r/o snapshot of upperdir sb's only taken on volatile mounts */
3135 ++ errseq_t errseq;
3136 + };
3137 +
3138 + static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
3139 +diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
3140 +index 01620ebae1bd4..f404a78e6b607 100644
3141 +--- a/fs/overlayfs/readdir.c
3142 ++++ b/fs/overlayfs/readdir.c
3143 +@@ -865,7 +865,7 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
3144 +
3145 + struct ovl_dir_file *od = file->private_data;
3146 + struct dentry *dentry = file->f_path.dentry;
3147 +- struct file *realfile = od->realfile;
3148 ++ struct file *old, *realfile = od->realfile;
3149 +
3150 + if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
3151 + return want_upper ? NULL : realfile;
3152 +@@ -874,29 +874,20 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
3153 + * Need to check if we started out being a lower dir, but got copied up
3154 + */
3155 + if (!od->is_upper) {
3156 +- struct inode *inode = file_inode(file);
3157 +-
3158 + realfile = READ_ONCE(od->upperfile);
3159 + if (!realfile) {
3160 + struct path upperpath;
3161 +
3162 + ovl_path_upper(dentry, &upperpath);
3163 + realfile = ovl_dir_open_realfile(file, &upperpath);
3164 ++ if (IS_ERR(realfile))
3165 ++ return realfile;
3166 +
3167 +- inode_lock(inode);
3168 +- if (!od->upperfile) {
3169 +- if (IS_ERR(realfile)) {
3170 +- inode_unlock(inode);
3171 +- return realfile;
3172 +- }
3173 +- smp_store_release(&od->upperfile, realfile);
3174 +- } else {
3175 +- /* somebody has beaten us to it */
3176 +- if (!IS_ERR(realfile))
3177 +- fput(realfile);
3178 +- realfile = od->upperfile;
3179 ++ old = cmpxchg_release(&od->upperfile, NULL, realfile);
3180 ++ if (old) {
3181 ++ fput(realfile);
3182 ++ realfile = old;
3183 + }
3184 +- inode_unlock(inode);
3185 + }
3186 + }
3187 +
3188 +@@ -909,8 +900,9 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
3189 + struct file *realfile;
3190 + int err;
3191 +
3192 +- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
3193 +- return 0;
3194 ++ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
3195 ++ if (err <= 0)
3196 ++ return err;
3197 +
3198 + realfile = ovl_dir_real_file(file, true);
3199 + err = PTR_ERR_OR_ZERO(realfile);
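
What replaced the inode_lock() above is the standard publish-once pattern: create the object with no locks held, install it with a release compare-and-swap, and let the loser of the race drop its copy and use the winner's. A userspace analogue using C11 atomics (names invented for illustration):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct file { int placeholder; };

    static _Atomic(struct file *) upperfile;

    static struct file *get_upperfile(void)
    {
            struct file *cur = atomic_load_explicit(&upperfile,
                                                    memory_order_acquire);
            struct file *expected = NULL;
            struct file *mine;

            if (cur)
                    return cur;

            mine = malloc(sizeof(*mine));   /* ovl_dir_open_realfile() here */
            if (!mine)
                    return NULL;

            if (atomic_compare_exchange_strong_explicit(&upperfile,
                            &expected, mine,
                            memory_order_release, memory_order_acquire))
                    return mine;

            free(mine);             /* lost the race: like fput() */
            return expected;        /* use the winner's pointer */
    }
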
3200 +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3201 +index 290983bcfbb35..d23177a53c95f 100644
3202 +--- a/fs/overlayfs/super.c
3203 ++++ b/fs/overlayfs/super.c
3204 +@@ -261,11 +261,20 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
3205 + struct super_block *upper_sb;
3206 + int ret;
3207 +
3208 +- if (!ovl_upper_mnt(ofs))
3209 +- return 0;
3210 ++ ret = ovl_sync_status(ofs);
3211 ++ /*
3212 ++ * We have to always set the err, because the return value isn't
3213 ++ * checked in syncfs, and instead indirectly return an error via
3214 ++ * the sb's writeback errseq, which VFS inspects after this call.
3215 ++ */
3216 ++ if (ret < 0) {
3217 ++ errseq_set(&sb->s_wb_err, -EIO);
3218 ++ return -EIO;
3219 ++ }
3220 ++
3221 ++ if (!ret)
3222 ++ return ret;
3223 +
3224 +- if (!ovl_should_sync(ofs))
3225 +- return 0;
3226 + /*
3227 + * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
3228 + * All the super blocks will be iterated, including upper_sb.
3229 +@@ -1927,6 +1936,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3230 + sb->s_op = &ovl_super_operations;
3231 +
3232 + if (ofs->config.upperdir) {
3233 ++ struct super_block *upper_sb;
3234 ++
3235 + if (!ofs->config.workdir) {
3236 + pr_err("missing 'workdir'\n");
3237 + goto out_err;
3238 +@@ -1936,6 +1947,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3239 + if (err)
3240 + goto out_err;
3241 +
3242 ++ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
3243 ++ if (!ovl_should_sync(ofs)) {
3244 ++ ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
3245 ++ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
3246 ++ err = -EIO;
3247 ++ pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
3248 ++ goto out_err;
3249 ++ }
3250 ++ }
3251 ++
3252 + err = ovl_get_workdir(sb, ofs, &upperpath);
3253 + if (err)
3254 + goto out_err;
3255 +@@ -1943,9 +1964,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
3256 + if (!ofs->workdir)
3257 + sb->s_flags |= SB_RDONLY;
3258 +
3259 +- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
3260 +- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
3261 +-
3262 ++ sb->s_stack_depth = upper_sb->s_stack_depth;
3263 ++ sb->s_time_gran = upper_sb->s_time_gran;
3264 + }
3265 + oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
3266 + err = PTR_ERR(oe);
3267 +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
3268 +index 23f475627d07f..6e7b8c882045c 100644
3269 +--- a/fs/overlayfs/util.c
3270 ++++ b/fs/overlayfs/util.c
3271 +@@ -950,3 +950,30 @@ err_free:
3272 + kfree(buf);
3273 + return ERR_PTR(res);
3274 + }
3275 ++
3276 ++/*
3277 ++ * ovl_sync_status() - Check fs sync status for volatile mounts
3278 ++ *
3279 ++ * Returns 1 if this is not a volatile mount and a real sync is required.
3280 ++ *
3281 ++ * Returns 0 if syncing can be skipped because mount is volatile, and no errors
3282 ++ * have occurred on the upperdir since the mount.
3283 ++ *
3284 ++ * Returns -errno if it is a volatile mount, and the error that occurred since
3285 ++ * the last mount. If the error code changes, it'll return the latest error
3286 ++ * code.
3287 ++ */
3288 ++
3289 ++int ovl_sync_status(struct ovl_fs *ofs)
3290 ++{
3291 ++ struct vfsmount *mnt;
3292 ++
3293 ++ if (ovl_should_sync(ofs))
3294 ++ return 1;
3295 ++
3296 ++ mnt = ovl_upper_mnt(ofs);
3297 ++ if (!mnt)
3298 ++ return 0;
3299 ++
3300 ++ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
3301 ++}
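
Everything in this file rests on the errseq mechanism: errseq_sample() records a cursor into the superblock's writeback-error stream at mount time, and errseq_check() later reports whether anything new was logged since. A stripped-down userspace analogue (the real errseq_t also packs the error code and a "seen" marker into one word):

    #include <errno.h>
    #include <stdio.h>

    typedef unsigned int errseq_t;

    static errseq_t wb_err;         /* bumped whenever writeback fails */

    static errseq_t errseq_sample(const errseq_t *eseq) { return *eseq; }

    static int errseq_check(const errseq_t *eseq, errseq_t since)
    {
            /* the real one returns the recorded error code */
            return *eseq != since ? -EIO : 0;
    }

    int main(void)
    {
            errseq_t since = errseq_sample(&wb_err);  /* at mount time */

            printf("%d\n", errseq_check(&wb_err, since));  /* 0: clean */
            wb_err++;               /* upper fs hits a writeback error */
            printf("%d\n", errseq_check(&wb_err, since));  /* -EIO forever */
            return 0;
    }
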
3302 +diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
3303 +index f5e92fe9151c3..bd1c39907b924 100644
3304 +--- a/include/drm/drm_dp_mst_helper.h
3305 ++++ b/include/drm/drm_dp_mst_helper.h
3306 +@@ -783,6 +783,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
3307 +
3308 + struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
3309 +
3310 ++int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
3311 +
3312 + int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
3313 +
3314 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3315 +index ebca2ef022127..b5807f23caf80 100644
3316 +--- a/include/linux/hugetlb.h
3317 ++++ b/include/linux/hugetlb.h
3318 +@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
3319 + }
3320 + #endif
3321 +
3322 ++void set_page_huge_active(struct page *page);
3323 ++
3324 + #else /* CONFIG_HUGETLB_PAGE */
3325 + struct hstate {};
3326 +
3327 +diff --git a/include/linux/iommu.h b/include/linux/iommu.h
3328 +index b95a6f8db6ff9..9bbcfe3b0bb12 100644
3329 +--- a/include/linux/iommu.h
3330 ++++ b/include/linux/iommu.h
3331 +@@ -614,7 +614,10 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
3332 +
3333 + static inline void *dev_iommu_priv_get(struct device *dev)
3334 + {
3335 +- return dev->iommu->priv;
3336 ++ if (dev->iommu)
3337 ++ return dev->iommu->priv;
3338 ++ else
3339 ++ return NULL;
3340 + }
3341 +
3342 + static inline void dev_iommu_priv_set(struct device *dev, void *priv)
3343 +diff --git a/include/linux/irq.h b/include/linux/irq.h
3344 +index c54365309e975..a36d35c259963 100644
3345 +--- a/include/linux/irq.h
3346 ++++ b/include/linux/irq.h
3347 +@@ -922,7 +922,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
3348 + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
3349 +
3350 + #define irq_alloc_desc(node) \
3351 +- irq_alloc_descs(-1, 0, 1, node)
3352 ++ irq_alloc_descs(-1, 1, 1, node)
3353 +
3354 + #define irq_alloc_desc_at(at, node) \
3355 + irq_alloc_descs(at, at, 1, node)
3356 +@@ -937,7 +937,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
3357 + __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
3358 +
3359 + #define devm_irq_alloc_desc(dev, node) \
3360 +- devm_irq_alloc_descs(dev, -1, 0, 1, node)
3361 ++ devm_irq_alloc_descs(dev, -1, 1, 1, node)
3362 +
3363 + #define devm_irq_alloc_desc_at(dev, at, node) \
3364 + devm_irq_alloc_descs(dev, at, at, 1, node)
3365 +diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
3366 +index 629abaf25681d..21f21f7f878ce 100644
3367 +--- a/include/linux/kprobes.h
3368 ++++ b/include/linux/kprobes.h
3369 +@@ -251,7 +251,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
3370 + extern bool arch_within_kprobe_blacklist(unsigned long addr);
3371 + extern int arch_populate_kprobe_blacklist(void);
3372 + extern bool arch_kprobe_on_func_entry(unsigned long offset);
3373 +-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
3374 ++extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
3375 +
3376 + extern bool within_kprobe_blacklist(unsigned long addr);
3377 + extern int kprobe_add_ksym_blacklist(unsigned long entry);
3378 +diff --git a/include/linux/msi.h b/include/linux/msi.h
3379 +index 6b584cc4757cd..2a3e997751cea 100644
3380 +--- a/include/linux/msi.h
3381 ++++ b/include/linux/msi.h
3382 +@@ -139,6 +139,12 @@ struct msi_desc {
3383 + list_for_each_entry((desc), dev_to_msi_list((dev)), list)
3384 + #define for_each_msi_entry_safe(desc, tmp, dev) \
3385 + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
3386 ++#define for_each_msi_vector(desc, __irq, dev) \
3387 ++ for_each_msi_entry((desc), (dev)) \
3388 ++ if ((desc)->irq) \
3389 ++ for (__irq = (desc)->irq; \
3390 ++ __irq < ((desc)->irq + (desc)->nvec_used); \
3391 ++ __irq++)
3392 +
3393 + #ifdef CONFIG_IRQ_MSI_IOMMU
3394 + static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
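
The new for_each_msi_vector() visits every Linux IRQ a descriptor owns, not just the first, which is what lets the rewritten allocation and cleanup paths in kernel/irq/msi.c below reach all vectors of a multi-MSI entry. Its shape, rendered as a standalone userspace demo over an array instead of the device list:

    #include <stdio.h>

    struct msi_desc { unsigned int irq; unsigned int nvec_used; };

    #define for_each_msi_vector(desc, __irq, descs, n)              \
            for (desc = (descs); desc < (descs) + (n); desc++)      \
                    if ((desc)->irq)                                \
                            for (__irq = (desc)->irq;               \
                                 __irq < (desc)->irq + (desc)->nvec_used; \
                                 __irq++)

    int main(void)
    {
            struct msi_desc descs[] = { { 32, 4 }, { 0, 0 }, { 40, 1 } };
            struct msi_desc *desc;
            unsigned int irq;

            for_each_msi_vector(desc, irq, descs, 3)
                    printf("irq %u\n", irq);  /* 32..35, then 40 */
            return 0;
    }
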
3395 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
3396 +index 0f21617f1a668..966ed89803274 100644
3397 +--- a/include/linux/tracepoint.h
3398 ++++ b/include/linux/tracepoint.h
3399 +@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
3400 + \
3401 + it_func_ptr = \
3402 + rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
3403 +- do { \
3404 +- it_func = (it_func_ptr)->func; \
3405 +- __data = (it_func_ptr)->data; \
3406 +- ((void(*)(void *, proto))(it_func))(__data, args); \
3407 +- } while ((++it_func_ptr)->func); \
3408 ++ if (it_func_ptr) { \
3409 ++ do { \
3410 ++ it_func = (it_func_ptr)->func; \
3411 ++ __data = (it_func_ptr)->data; \
3412 ++ ((void(*)(void *, proto))(it_func))(__data, args); \
3413 ++ } while ((++it_func_ptr)->func); \
3414 ++ } \
3415 + return 0; \
3416 + } \
3417 + DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
3418 +diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
3419 +index 938eaf9517e26..76dad53a410ac 100644
3420 +--- a/include/linux/vmalloc.h
3421 ++++ b/include/linux/vmalloc.h
3422 +@@ -24,7 +24,8 @@ struct notifier_block; /* in notifier.h */
3423 + #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
3424 + #define VM_NO_GUARD 0x00000040 /* don't add guard page */
3425 + #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
3426 +-#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */
3427 ++#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
3428 ++#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
3429 +
3430 + /*
3431 + * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
3432 +@@ -37,12 +38,6 @@ struct notifier_block; /* in notifier.h */
3433 + * determine which allocations need the module shadow freed.
3434 + */
3435 +
3436 +-/*
3437 +- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
3438 +- * vfree_atomic().
3439 +- */
3440 +-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
3441 +-
3442 + /* bits [20..32] reserved for arch specific ioremap internals */
3443 +
3444 + /*
3445 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
3446 +index d8fd8676fc724..3648164faa060 100644
3447 +--- a/include/net/sch_generic.h
3448 ++++ b/include/net/sch_generic.h
3449 +@@ -1155,7 +1155,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
3450 + old = *pold;
3451 + *pold = new;
3452 + if (old != NULL)
3453 +- qdisc_tree_flush_backlog(old);
3454 ++ qdisc_purge_queue(old);
3455 + sch_tree_unlock(sch);
3456 +
3457 + return old;
3458 +diff --git a/include/net/udp.h b/include/net/udp.h
3459 +index 295d52a735982..949ae14a54250 100644
3460 +--- a/include/net/udp.h
3461 ++++ b/include/net/udp.h
3462 +@@ -178,7 +178,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
3463 + int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
3464 +
3465 + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
3466 +- netdev_features_t features);
3467 ++ netdev_features_t features, bool is_ipv6);
3468 +
3469 + static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
3470 + {
3471 +diff --git a/init/init_task.c b/init/init_task.c
3472 +index 15f6eb93a04fa..16d14c2ebb552 100644
3473 +--- a/init/init_task.c
3474 ++++ b/init/init_task.c
3475 +@@ -198,7 +198,8 @@ struct task_struct init_task
3476 + .lockdep_recursion = 0,
3477 + #endif
3478 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3479 +- .ret_stack = NULL,
3480 ++ .ret_stack = NULL,
3481 ++ .tracing_graph_pause = ATOMIC_INIT(0),
3482 + #endif
3483 + #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
3484 + .trace_recursion = 0,
3485 +diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
3486 +index dbc1dbdd2cbf0..c2a501cd90eba 100644
3487 +--- a/kernel/bpf/bpf_inode_storage.c
3488 ++++ b/kernel/bpf/bpf_inode_storage.c
3489 +@@ -125,8 +125,12 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
3490 +
3491 + fd = *(int *)key;
3492 + f = fget_raw(fd);
3493 +- if (!f || !inode_storage_ptr(f->f_inode))
3494 ++ if (!f)
3495 ++ return -EBADF;
3496 ++ if (!inode_storage_ptr(f->f_inode)) {
3497 ++ fput(f);
3498 + return -EBADF;
3499 ++ }
3500 +
3501 + sdata = bpf_local_storage_update(f->f_inode,
3502 + (struct bpf_local_storage_map *)map,
3503 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
3504 +index 96555a8a2c545..6aa9e10c6335a 100644
3505 +--- a/kernel/bpf/cgroup.c
3506 ++++ b/kernel/bpf/cgroup.c
3507 +@@ -1442,6 +1442,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
3508 + goto out;
3509 + }
3510 +
3511 ++ if (ctx.optlen < 0) {
3512 ++ ret = -EFAULT;
3513 ++ goto out;
3514 ++ }
3515 ++
3516 + if (copy_from_user(ctx.optval, optval,
3517 + min(ctx.optlen, max_optlen)) != 0) {
3518 + ret = -EFAULT;
3519 +@@ -1459,7 +1464,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
3520 + goto out;
3521 + }
3522 +
3523 +- if (ctx.optlen > max_optlen) {
3524 ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) {
3525 + ret = -EFAULT;
3526 + goto out;
3527 + }
3528 +diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile
3529 +index 23ee310b6eb49..1951332dd15f5 100644
3530 +--- a/kernel/bpf/preload/Makefile
3531 ++++ b/kernel/bpf/preload/Makefile
3532 +@@ -4,8 +4,11 @@ LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
3533 + LIBBPF_A = $(obj)/libbpf.a
3534 + LIBBPF_OUT = $(abspath $(obj))
3535 +
3536 ++# Although not in use by libbpf's Makefile, set $(O) so that the "dummy" test
3537 ++# in tools/scripts/Makefile.include always succeeds when building the kernel
3538 ++# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
3539 + $(LIBBPF_A):
3540 +- $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
3541 ++ $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
3542 +
3543 + userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
3544 + -I $(srctree)/tools/lib/ -Wno-unused-result
3545 +diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
3546 +index 2c0c4d6d0f83a..d924676c8781b 100644
3547 +--- a/kernel/irq/msi.c
3548 ++++ b/kernel/irq/msi.c
3549 +@@ -436,22 +436,22 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
3550 +
3551 + can_reserve = msi_check_reservation_mode(domain, info, dev);
3552 +
3553 +- for_each_msi_entry(desc, dev) {
3554 +- virq = desc->irq;
3555 +- if (desc->nvec_used == 1)
3556 +- dev_dbg(dev, "irq %d for MSI\n", virq);
3557 +- else
3558 ++ /*
3559 ++ * This flag is set by the PCI layer as we need to activate
3560 ++ * the MSI entries before the PCI layer enables MSI in the
3561 ++ * card. Otherwise the card latches a random msi message.
3562 ++ */
3563 ++ if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
3564 ++ goto skip_activate;
3565 ++
3566 ++ for_each_msi_vector(desc, i, dev) {
3567 ++ if (desc->irq == i) {
3568 ++ virq = desc->irq;
3569 + dev_dbg(dev, "irq [%d-%d] for MSI\n",
3570 + virq, virq + desc->nvec_used - 1);
3571 +- /*
3572 +- * This flag is set by the PCI layer as we need to activate
3573 +- * the MSI entries before the PCI layer enables MSI in the
3574 +- * card. Otherwise the card latches a random msi message.
3575 +- */
3576 +- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
3577 +- continue;
3578 ++ }
3579 +
3580 +- irq_data = irq_domain_get_irq_data(domain, desc->irq);
3581 ++ irq_data = irq_domain_get_irq_data(domain, i);
3582 + if (!can_reserve) {
3583 + irqd_clr_can_reserve(irq_data);
3584 + if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
3585 +@@ -462,28 +462,24 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
3586 + goto cleanup;
3587 + }
3588 +
3589 ++skip_activate:
3590 + /*
3591 + * If these interrupts use reservation mode, clear the activated bit
3592 + * so request_irq() will assign the final vector.
3593 + */
3594 + if (can_reserve) {
3595 +- for_each_msi_entry(desc, dev) {
3596 +- irq_data = irq_domain_get_irq_data(domain, desc->irq);
3597 ++ for_each_msi_vector(desc, i, dev) {
3598 ++ irq_data = irq_domain_get_irq_data(domain, i);
3599 + irqd_clr_activated(irq_data);
3600 + }
3601 + }
3602 + return 0;
3603 +
3604 + cleanup:
3605 +- for_each_msi_entry(desc, dev) {
3606 +- struct irq_data *irqd;
3607 +-
3608 +- if (desc->irq == virq)
3609 +- break;
3610 +-
3611 +- irqd = irq_domain_get_irq_data(domain, desc->irq);
3612 +- if (irqd_is_activated(irqd))
3613 +- irq_domain_deactivate_irq(irqd);
3614 ++ for_each_msi_vector(desc, i, dev) {
3615 ++ irq_data = irq_domain_get_irq_data(domain, i);
3616 ++ if (irqd_is_activated(irq_data))
3617 ++ irq_domain_deactivate_irq(irq_data);
3618 + }
3619 + msi_domain_free_irqs(domain, dev);
3620 + return ret;
3621 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
3622 +index 41fdbb7953c60..911c77ef5bbcd 100644
3623 +--- a/kernel/kprobes.c
3624 ++++ b/kernel/kprobes.c
3625 +@@ -2082,28 +2082,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
3626 + return !offset;
3627 + }
3628 +
3629 +-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
3630 ++/**
3631 ++ * kprobe_on_func_entry() -- check whether given address is function entry
3632 ++ * @addr: Target address
3633 ++ * @sym: Target symbol name
3634 ++ * @offset: The offset from the symbol or the address
3635 ++ *
3636 ++ * This checks whether the given @addr+@offset or @sym+@offset is on the
3637 ++ * function entry address or not.
3638 ++ * This returns 0 if it is the function entry, or -EINVAL if it is not.
3639 ++ * And also it returns -ENOENT if it fails the symbol or address lookup.
3640 ++ * Caller must pass @addr or @sym (either one must be NULL), or this
3641 ++ * returns -EINVAL.
3642 ++ */
3643 ++int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
3644 + {
3645 + kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
3646 +
3647 + if (IS_ERR(kp_addr))
3648 +- return false;
3649 ++ return PTR_ERR(kp_addr);
3650 +
3651 +- if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
3652 +- !arch_kprobe_on_func_entry(offset))
3653 +- return false;
3654 ++ if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
3655 ++ return -ENOENT;
3656 +
3657 +- return true;
3658 ++ if (!arch_kprobe_on_func_entry(offset))
3659 ++ return -EINVAL;
3660 ++
3661 ++ return 0;
3662 + }
3663 +
3664 + int register_kretprobe(struct kretprobe *rp)
3665 + {
3666 +- int ret = 0;
3667 ++ int ret;
3668 + struct kretprobe_instance *inst;
3669 + int i;
3670 + void *addr;
3671 +
3672 +- if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
3673 ++ ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
3674 ++ if (ret)
3675 ++ return ret;
3676 ++
3677 ++ /* If only rp->kp.addr is specified, check reregistering kprobes */
3678 ++ if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
3679 + return -EINVAL;
3680 +
3681 + if (kretprobe_blacklist_size) {
3682 +diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
3683 +index 5658f13037b3d..a58da91eadb5c 100644
3684 +--- a/kernel/trace/fgraph.c
3685 ++++ b/kernel/trace/fgraph.c
3686 +@@ -395,7 +395,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3687 + }
3688 +
3689 + if (t->ret_stack == NULL) {
3690 +- atomic_set(&t->tracing_graph_pause, 0);
3691 + atomic_set(&t->trace_overrun, 0);
3692 + t->curr_ret_stack = -1;
3693 + t->curr_ret_depth = -1;
3694 +@@ -490,7 +489,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3695 + static void
3696 + graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3697 + {
3698 +- atomic_set(&t->tracing_graph_pause, 0);
3699 + atomic_set(&t->trace_overrun, 0);
3700 + t->ftrace_timestamp = 0;
3701 + /* make curr_ret_stack visible before we add the ret_stack */
3702 +diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
3703 +index 10bbb0f381d56..ee4571b624bcb 100644
3704 +--- a/kernel/trace/trace_irqsoff.c
3705 ++++ b/kernel/trace/trace_irqsoff.c
3706 +@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
3707 + /* non overwrite screws up the latency tracers */
3708 + set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
3709 + set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
3710 ++ /* without pause, we will produce garbage if another latency occurs */
3711 ++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
3712 +
3713 + tr->max_latency = 0;
3714 + irqsoff_trace = tr;
3715 +@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
3716 + {
3717 + int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
3718 + int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
3719 ++ int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
3720 +
3721 + stop_irqsoff_tracer(tr, is_graph(tr));
3722 +
3723 + set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
3724 + set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
3725 ++ set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
3726 + ftrace_reset_array_ops(tr);
3727 +
3728 + irqsoff_busy = false;
3729 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
3730 +index 5fff39541b8ae..68150b9cbde92 100644
3731 +--- a/kernel/trace/trace_kprobe.c
3732 ++++ b/kernel/trace/trace_kprobe.c
3733 +@@ -221,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
3734 + {
3735 + struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
3736 +
3737 +- return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
3738 ++ return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
3739 + tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
3740 +- tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
3741 ++ tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
3742 + }
3743 +
3744 + bool trace_kprobe_error_injectable(struct trace_event_call *call)
3745 +@@ -828,9 +828,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
3746 + }
3747 + if (is_return)
3748 + flags |= TPARG_FL_RETURN;
3749 +- if (kprobe_on_func_entry(NULL, symbol, offset))
3750 ++ ret = kprobe_on_func_entry(NULL, symbol, offset);
3751 ++ if (ret == 0)
3752 + flags |= TPARG_FL_FENTRY;
3753 +- if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
3754 ++ /* Defer the ENOENT case until register kprobe */
3755 ++ if (ret == -EINVAL && is_return) {
3756 + trace_probe_log_err(0, BAD_RETPROBE);
3757 + goto parse_error;
3758 + }
3759 +diff --git a/mm/compaction.c b/mm/compaction.c
3760 +index 13cb7a961b319..0846d4ffa3387 100644
3761 +--- a/mm/compaction.c
3762 ++++ b/mm/compaction.c
3763 +@@ -1302,7 +1302,7 @@ fast_isolate_freepages(struct compact_control *cc)
3764 + {
3765 + unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
3766 + unsigned int nr_scanned = 0;
3767 +- unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
3768 ++ unsigned long low_pfn, min_pfn, highest = 0;
3769 + unsigned long nr_isolated = 0;
3770 + unsigned long distance;
3771 + struct page *page = NULL;
3772 +@@ -1347,6 +1347,7 @@ fast_isolate_freepages(struct compact_control *cc)
3773 + struct page *freepage;
3774 + unsigned long flags;
3775 + unsigned int order_scanned = 0;
3776 ++ unsigned long high_pfn = 0;
3777 +
3778 + if (!area->nr_free)
3779 + continue;
3780 +diff --git a/mm/filemap.c b/mm/filemap.c
3781 +index 0b2067b3c3283..125b69f59caad 100644
3782 +--- a/mm/filemap.c
3783 ++++ b/mm/filemap.c
3784 +@@ -835,6 +835,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
3785 + XA_STATE(xas, &mapping->i_pages, offset);
3786 + int huge = PageHuge(page);
3787 + int error;
3788 ++ bool charged = false;
3789 +
3790 + VM_BUG_ON_PAGE(!PageLocked(page), page);
3791 + VM_BUG_ON_PAGE(PageSwapBacked(page), page);
3792 +@@ -848,6 +849,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
3793 + error = mem_cgroup_charge(page, current->mm, gfp);
3794 + if (error)
3795 + goto error;
3796 ++ charged = true;
3797 + }
3798 +
3799 + gfp &= GFP_RECLAIM_MASK;
3800 +@@ -896,6 +898,8 @@ unlock:
3801 +
3802 + if (xas_error(&xas)) {
3803 + error = xas_error(&xas);
3804 ++ if (charged)
3805 ++ mem_cgroup_uncharge(page);
3806 + goto error;
3807 + }
3808 +
3809 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3810 +index 85eda66eb625d..4a78514830d5a 100644
3811 +--- a/mm/huge_memory.c
3812 ++++ b/mm/huge_memory.c
3813 +@@ -2188,7 +2188,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3814 + {
3815 + spinlock_t *ptl;
3816 + struct mmu_notifier_range range;
3817 +- bool was_locked = false;
3818 ++ bool do_unlock_page = false;
3819 + pmd_t _pmd;
3820 +
3821 + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
3822 +@@ -2204,7 +2204,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
3823 + VM_BUG_ON(freeze && !page);
3824 + if (page) {
3825 + VM_WARN_ON_ONCE(!PageLocked(page));
3826 +- was_locked = true;
3827 + if (page != pmd_page(*pmd))
3828 + goto out;
3829 + }
3830 +@@ -2213,19 +2212,29 @@ repeat:
3831 + if (pmd_trans_huge(*pmd)) {
3832 + if (!page) {
3833 + page = pmd_page(*pmd);
3834 +- if (unlikely(!trylock_page(page))) {
3835 +- get_page(page);
3836 +- _pmd = *pmd;
3837 +- spin_unlock(ptl);
3838 +- lock_page(page);
3839 +- spin_lock(ptl);
3840 +- if (unlikely(!pmd_same(*pmd, _pmd))) {
3841 +- unlock_page(page);
3842 ++ /*
3843 ++ * An anonymous page must be locked, to ensure that a
3844 ++ * concurrent reuse_swap_page() sees stable mapcount;
3845 ++ * but reuse_swap_page() is not used on shmem or file,
3846 ++ * and page lock must not be taken when zap_pmd_range()
3847 ++ * calls __split_huge_pmd() while i_mmap_lock is held.
3848 ++ */
3849 ++ if (PageAnon(page)) {
3850 ++ if (unlikely(!trylock_page(page))) {
3851 ++ get_page(page);
3852 ++ _pmd = *pmd;
3853 ++ spin_unlock(ptl);
3854 ++ lock_page(page);
3855 ++ spin_lock(ptl);
3856 ++ if (unlikely(!pmd_same(*pmd, _pmd))) {
3857 ++ unlock_page(page);
3858 ++ put_page(page);
3859 ++ page = NULL;
3860 ++ goto repeat;
3861 ++ }
3862 + put_page(page);
3863 +- page = NULL;
3864 +- goto repeat;
3865 + }
3866 +- put_page(page);
3867 ++ do_unlock_page = true;
3868 + }
3869 + }
3870 + if (PageMlocked(page))
3871 +@@ -2235,7 +2244,7 @@ repeat:
3872 + __split_huge_pmd_locked(vma, pmd, range.start, freeze);
3873 + out:
3874 + spin_unlock(ptl);
3875 +- if (!was_locked && page)
3876 ++ if (do_unlock_page)
3877 + unlock_page(page);
3878 + /*
3879 + * No need to double call mmu_notifier->invalidate_range() callback.
3880 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3881 +index 9a3f06cdcc2a8..26909396898b6 100644
3882 +--- a/mm/hugetlb.c
3883 ++++ b/mm/hugetlb.c
3884 +@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
3885 + static int num_fault_mutexes;
3886 + struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
3887 +
3888 ++static inline bool PageHugeFreed(struct page *head)
3889 ++{
3890 ++ return page_private(head + 4) == -1UL;
3891 ++}
3892 ++
3893 ++static inline void SetPageHugeFreed(struct page *head)
3894 ++{
3895 ++ set_page_private(head + 4, -1UL);
3896 ++}
3897 ++
3898 ++static inline void ClearPageHugeFreed(struct page *head)
3899 ++{
3900 ++ set_page_private(head + 4, 0);
3901 ++}
3902 ++
3903 + /* Forward declaration */
3904 + static int hugetlb_acct_memory(struct hstate *h, long delta);
3905 +
3906 +@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
3907 + list_move(&page->lru, &h->hugepage_freelists[nid]);
3908 + h->free_huge_pages++;
3909 + h->free_huge_pages_node[nid]++;
3910 ++ SetPageHugeFreed(page);
3911 + }
3912 +
3913 + static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
3914 +@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
3915 +
3916 + list_move(&page->lru, &h->hugepage_activelist);
3917 + set_page_refcounted(page);
3918 ++ ClearPageHugeFreed(page);
3919 + h->free_huge_pages--;
3920 + h->free_huge_pages_node[nid]--;
3921 + return page;
3922 +@@ -1344,12 +1361,11 @@ struct hstate *size_to_hstate(unsigned long size)
3923 + */
3924 + bool page_huge_active(struct page *page)
3925 + {
3926 +- VM_BUG_ON_PAGE(!PageHuge(page), page);
3927 +- return PageHead(page) && PagePrivate(&page[1]);
3928 ++ return PageHeadHuge(page) && PagePrivate(&page[1]);
3929 + }
3930 +
3931 + /* never called for tail page */
3932 +-static void set_page_huge_active(struct page *page)
3933 ++void set_page_huge_active(struct page *page)
3934 + {
3935 + VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
3936 + SetPagePrivate(&page[1]);
3937 +@@ -1505,6 +1521,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
3938 + spin_lock(&hugetlb_lock);
3939 + h->nr_huge_pages++;
3940 + h->nr_huge_pages_node[nid]++;
3941 ++ ClearPageHugeFreed(page);
3942 + spin_unlock(&hugetlb_lock);
3943 + }
3944 +
3945 +@@ -1755,6 +1772,7 @@ int dissolve_free_huge_page(struct page *page)
3946 + {
3947 + int rc = -EBUSY;
3948 +
3949 ++retry:
3950 + /* Not to disrupt normal path by vainly holding hugetlb_lock */
3951 + if (!PageHuge(page))
3952 + return 0;
3953 +@@ -1771,6 +1789,26 @@ int dissolve_free_huge_page(struct page *page)
3954 + int nid = page_to_nid(head);
3955 + if (h->free_huge_pages - h->resv_huge_pages == 0)
3956 + goto out;
3957 ++
3958 ++ /*
3959 ++ * We should make sure that the page is already on the free list
3960 ++ * when it is dissolved.
3961 ++ */
3962 ++ if (unlikely(!PageHugeFreed(head))) {
3963 ++ spin_unlock(&hugetlb_lock);
3964 ++ cond_resched();
3965 ++
3966 ++ /*
3967 ++ * Theoretically, we should return -EBUSY when we
3968 ++ * encounter this race. In fact, we have a chance
3969 ++ * to successfully dissolve the page if we do a
3970 ++ * retry. Because the race window is quite small.
3971 ++ * If we seize this opportunity, it is an optimization
3972 ++ * for increasing the success rate of dissolving page.
3973 ++ */
3974 ++ goto retry;
3975 ++ }
3976 ++
3977 + /*
3978 + * Move PageHWPoison flag from head page to the raw error page,
3979 + * which makes any subpages rather than the error page reusable.
3980 +@@ -5556,9 +5594,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
3981 + {
3982 + bool ret = true;
3983 +
3984 +- VM_BUG_ON_PAGE(!PageHead(page), page);
3985 + spin_lock(&hugetlb_lock);
3986 +- if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3987 ++ if (!PageHeadHuge(page) || !page_huge_active(page) ||
3988 ++ !get_page_unless_zero(page)) {
3989 + ret = false;
3990 + goto unlock;
3991 + }
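
The new head+4 page_private marker distinguishes "counted as a free huge page" from "actually on the free list", and dissolve_free_huge_page() now retries across that small window instead of failing. A userspace analogue of the retry with a stand-in flag:

    #include <sched.h>
    #include <stdbool.h>

    struct hpage { bool freed; };  /* stands in for page_private(head + 4) */

    /* Spins until the freeing side has set the marker; the kernel drops
     * hugetlb_lock and calls cond_resched() at the same point. */
    static int dissolve(struct hpage *p)
    {
            while (!p->freed)
                    sched_yield();
            return 0;       /* now safe to unlink from the free list */
    }
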
3992 +diff --git a/mm/memblock.c b/mm/memblock.c
3993 +index b68ee86788af9..10bd7d1ef0f49 100644
3994 +--- a/mm/memblock.c
3995 ++++ b/mm/memblock.c
3996 +@@ -275,14 +275,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
3997 + *
3998 + * Find @size free area aligned to @align in the specified range and node.
3999 + *
4000 +- * When allocation direction is bottom-up, the @start should be greater
4001 +- * than the end of the kernel image. Otherwise, it will be trimmed. The
4002 +- * reason is that we want the bottom-up allocation just near the kernel
4003 +- * image so it is highly likely that the allocated memory and the kernel
4004 +- * will reside in the same node.
4005 +- *
4006 +- * If bottom-up allocation failed, will try to allocate memory top-down.
4007 +- *
4008 + * Return:
4009 + * Found address on success, 0 on failure.
4010 + */
4011 +@@ -291,8 +283,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
4012 + phys_addr_t end, int nid,
4013 + enum memblock_flags flags)
4014 + {
4015 +- phys_addr_t kernel_end, ret;
4016 +-
4017 + /* pump up @end */
4018 + if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
4019 + end == MEMBLOCK_ALLOC_KASAN)
4020 +@@ -301,40 +291,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
4021 + /* avoid allocating the first page */
4022 + start = max_t(phys_addr_t, start, PAGE_SIZE);
4023 + end = max(start, end);
4024 +- kernel_end = __pa_symbol(_end);
4025 +-
4026 +- /*
4027 +- * try bottom-up allocation only when bottom-up mode
4028 +- * is set and @end is above the kernel image.
4029 +- */
4030 +- if (memblock_bottom_up() && end > kernel_end) {
4031 +- phys_addr_t bottom_up_start;
4032 +-
4033 +- /* make sure we will allocate above the kernel */
4034 +- bottom_up_start = max(start, kernel_end);
4035 +
4036 +- /* ok, try bottom-up allocation first */
4037 +- ret = __memblock_find_range_bottom_up(bottom_up_start, end,
4038 +- size, align, nid, flags);
4039 +- if (ret)
4040 +- return ret;
4041 +-
4042 +- /*
4043 +- * we always limit bottom-up allocation above the kernel,
4044 +- * but top-down allocation doesn't have the limit, so
4045 +- * retrying top-down allocation may succeed when bottom-up
4046 +- * allocation failed.
4047 +- *
4048 +- * bottom-up allocation is expected to be fail very rarely,
4049 +- * so we use WARN_ONCE() here to see the stack trace if
4050 +- * fail happens.
4051 +- */
4052 +- WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
4053 +- "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
4054 +- }
4055 +-
4056 +- return __memblock_find_range_top_down(start, end, size, align, nid,
4057 +- flags);
4058 ++ if (memblock_bottom_up())
4059 ++ return __memblock_find_range_bottom_up(start, end, size, align,
4060 ++ nid, flags);
4061 ++ else
4062 ++ return __memblock_find_range_top_down(start, end, size, align,
4063 ++ nid, flags);
4064 + }
4065 +
4066 + /**
4067 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
4068 +index 9500d28a43b0e..2fe4bbb6b80cf 100644
4069 +--- a/net/core/neighbour.c
4070 ++++ b/net/core/neighbour.c
4071 +@@ -1245,13 +1245,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
4072 + old = neigh->nud_state;
4073 + err = -EPERM;
4074 +
4075 +- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
4076 +- (old & (NUD_NOARP | NUD_PERMANENT)))
4077 +- goto out;
4078 + if (neigh->dead) {
4079 + NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
4080 ++ new = old;
4081 + goto out;
4082 + }
4083 ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
4084 ++ (old & (NUD_NOARP | NUD_PERMANENT)))
4085 ++ goto out;
4086 +
4087 + ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
4088 +
4089 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
4090 +index 64594aa755f05..76a420c76f16e 100644
4091 +--- a/net/ipv4/ip_tunnel.c
4092 ++++ b/net/ipv4/ip_tunnel.c
4093 +@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
4094 + }
4095 +
4096 + dev->needed_headroom = t_hlen + hlen;
4097 +- mtu -= (dev->hard_header_len + t_hlen);
4098 ++ mtu -= t_hlen;
4099 +
4100 + if (mtu < IPV4_MIN_MTU)
4101 + mtu = IPV4_MIN_MTU;
4102 +@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
4103 + nt = netdev_priv(dev);
4104 + t_hlen = nt->hlen + sizeof(struct iphdr);
4105 + dev->min_mtu = ETH_MIN_MTU;
4106 +- dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
4107 ++ dev->max_mtu = IP_MAX_MTU - t_hlen;
4108 + ip_tunnel_add(itn, nt);
4109 + return nt;
4110 +
4111 +@@ -488,11 +488,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
4112 + int mtu;
4113 +
4114 + tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
4115 +- pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
4116 ++ pkt_size = skb->len - tunnel_hlen;
4117 +
4118 + if (df)
4119 +- mtu = dst_mtu(&rt->dst) - dev->hard_header_len
4120 +- - sizeof(struct iphdr) - tunnel_hlen;
4121 ++ mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
4122 + else
4123 + mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
4124 +
4125 +@@ -972,7 +971,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
4126 + {
4127 + struct ip_tunnel *tunnel = netdev_priv(dev);
4128 + int t_hlen = tunnel->hlen + sizeof(struct iphdr);
4129 +- int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
4130 ++ int max_mtu = IP_MAX_MTU - t_hlen;
4131 +
4132 + if (new_mtu < ETH_MIN_MTU)
4133 + return -EINVAL;
4134 +@@ -1149,10 +1148,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
4135 +
4136 + mtu = ip_tunnel_bind_dev(dev);
4137 + if (tb[IFLA_MTU]) {
4138 +- unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
4139 ++ unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
4140 +
4141 +- mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
4142 +- (unsigned int)(max - sizeof(struct iphdr)));
4143 ++ mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
4144 + }
4145 +
4146 + err = dev_set_mtu(dev, mtu);
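
A worked example of the corrected MTU budget, assuming a plain 4-byte GRE header for illustration: only the tunnel's own headers are charged against the tunnel MTU; the underlying device's hard_header_len belongs to the link layer below and no longer is.

    #include <stdio.h>

    #define IP_MAX_MTU 65535u

    int main(void)
    {
            unsigned int gre_hlen = 4;              /* no key/csum/seq */
            unsigned int t_hlen = gre_hlen + 20;    /* + sizeof(iphdr) */

            printf("max_mtu = %u\n", IP_MAX_MTU - t_hlen);  /* 65511 */
            return 0;
    }
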
4147 +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
4148 +index c62805cd31319..cfdaac4a57e41 100644
4149 +--- a/net/ipv4/udp_offload.c
4150 ++++ b/net/ipv4/udp_offload.c
4151 +@@ -184,8 +184,67 @@ out_unlock:
4152 + }
4153 + EXPORT_SYMBOL(skb_udp_tunnel_segment);
4154 +
4155 ++static void __udpv4_gso_segment_csum(struct sk_buff *seg,
4156 ++ __be32 *oldip, __be32 *newip,
4157 ++ __be16 *oldport, __be16 *newport)
4158 ++{
4159 ++ struct udphdr *uh;
4160 ++ struct iphdr *iph;
4161 ++
4162 ++ if (*oldip == *newip && *oldport == *newport)
4163 ++ return;
4164 ++
4165 ++ uh = udp_hdr(seg);
4166 ++ iph = ip_hdr(seg);
4167 ++
4168 ++ if (uh->check) {
4169 ++ inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
4170 ++ true);
4171 ++ inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
4172 ++ false);
4173 ++ if (!uh->check)
4174 ++ uh->check = CSUM_MANGLED_0;
4175 ++ }
4176 ++ *oldport = *newport;
4177 ++
4178 ++ csum_replace4(&iph->check, *oldip, *newip);
4179 ++ *oldip = *newip;
4180 ++}
4181 ++
4182 ++static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
4183 ++{
4184 ++ struct sk_buff *seg;
4185 ++ struct udphdr *uh, *uh2;
4186 ++ struct iphdr *iph, *iph2;
4187 ++
4188 ++ seg = segs;
4189 ++ uh = udp_hdr(seg);
4190 ++ iph = ip_hdr(seg);
4191 ++
4192 ++ if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
4193 ++ (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
4194 ++ (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
4195 ++ (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
4196 ++ return segs;
4197 ++
4198 ++ while ((seg = seg->next)) {
4199 ++ uh2 = udp_hdr(seg);
4200 ++ iph2 = ip_hdr(seg);
4201 ++
4202 ++ __udpv4_gso_segment_csum(seg,
4203 ++ &iph2->saddr, &iph->saddr,
4204 ++ &uh2->source, &uh->source);
4205 ++ __udpv4_gso_segment_csum(seg,
4206 ++ &iph2->daddr, &iph->daddr,
4207 ++ &uh2->dest, &uh->dest);
4208 ++ }
4209 ++
4210 ++ return segs;
4211 ++}
4212 ++
4213 + static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
4214 +- netdev_features_t features)
4215 ++ netdev_features_t features,
4216 ++ bool is_ipv6)
4217 + {
4218 + unsigned int mss = skb_shinfo(skb)->gso_size;
4219 +
4220 +@@ -195,11 +254,11 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
4221 +
4222 + udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
4223 +
4224 +- return skb;
4225 ++ return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
4226 + }
4227 +
4228 + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
4229 +- netdev_features_t features)
4230 ++ netdev_features_t features, bool is_ipv6)
4231 + {
4232 + struct sock *sk = gso_skb->sk;
4233 + unsigned int sum_truesize = 0;
4234 +@@ -211,7 +270,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
4235 + __be16 newlen;
4236 +
4237 + if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
4238 +- return __udp_gso_segment_list(gso_skb, features);
4239 ++ return __udp_gso_segment_list(gso_skb, features, is_ipv6);
4240 +
4241 + mss = skb_shinfo(gso_skb)->gso_size;
4242 + if (gso_skb->len <= sizeof(*uh) + mss)
4243 +@@ -325,7 +384,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
4244 + goto out;
4245 +
4246 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
4247 +- return __udp_gso_segment(skb, features);
4248 ++ return __udp_gso_segment(skb, features, false);
4249 +
4250 + mss = skb_shinfo(skb)->gso_size;
4251 + if (unlikely(skb->len <= mss))
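
The helpers added above lean on incremental checksum updates (csum_replace4(), inet_proto_csum_replace4()): when one field of a header changes, the 16-bit ones'-complement checksum can be patched per RFC 1624 instead of recomputed over the whole header. A self-contained demonstration of the 16-bit case:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t csum_replace2(uint16_t check, uint16_t old, uint16_t new)
    {
            uint32_t sum = (uint16_t)~check + (uint16_t)~old + new;

            sum = (sum & 0xffff) + (sum >> 16);     /* fold carries twice */
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    static uint16_t csum_full(const uint16_t *w, int n)
    {
            uint32_t sum = 0;

            while (n--)
                    sum += *w++;
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            uint16_t hdr[4] = { 0x4500, 0x0054, 0xc0a8, 0x0001 };
            uint16_t check = csum_full(hdr, 4);

            hdr[3] = 0x0002;        /* rewrite one 16-bit field */
            check = csum_replace2(check, 0x0001, 0x0002);
            assert(check == csum_full(hdr, 4));
            return 0;
    }
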
4252 +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
4253 +index f9e888d1b9af8..ebee748f25b9e 100644
4254 +--- a/net/ipv6/udp_offload.c
4255 ++++ b/net/ipv6/udp_offload.c
4256 +@@ -46,7 +46,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
4257 + goto out;
4258 +
4259 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
4260 +- return __udp_gso_segment(skb, features);
4261 ++ return __udp_gso_segment(skb, features, true);
4262 +
4263 + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
4264 + * do checksum of UDP packets sent as multiple IP fragments.
4265 +diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c
4266 +index 7a4d0715d1c32..a966d29c772d9 100644
4267 +--- a/net/lapb/lapb_out.c
4268 ++++ b/net/lapb/lapb_out.c
4269 +@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb)
4270 + skb = skb_dequeue(&lapb->write_queue);
4271 +
4272 + do {
4273 +- if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
4274 ++ skbn = skb_copy(skb, GFP_ATOMIC);
4275 ++ if (!skbn) {
4276 + skb_queue_head(&lapb->write_queue, skb);
4277 + break;
4278 + }
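
The swap from skb_clone() to skb_copy() matters because a clone shares the payload buffer with the skb left on LAPB's write queue: if a lower layer modifies the transmitted buffer in place, the queued copy needed for retransmission is corrupted too. A userspace illustration of shared versus private data:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char *data; size_t len; };

    static struct buf clone_buf(struct buf *b)  /* skb_clone(): shared */
    {
            return *b;
    }

    static struct buf copy_buf(struct buf *b)   /* skb_copy(): private */
    {
            struct buf c = { malloc(b->len), b->len };

            memcpy(c.data, b->data, b->len);
            return c;
    }

    int main(void)
    {
            unsigned char payload[4] = { 1, 2, 3, 4 };
            struct buf queued = { payload, sizeof(payload) };
            struct buf cloned = clone_buf(&queued);
            struct buf copied = copy_buf(&queued);

            cloned.data[0] = 0xff;          /* lower layer rewrites header */
            assert(queued.data[0] == 0xff); /* shared: queue corrupted */

            copied.data[1] = 0xee;
            assert(queued.data[1] == 2);    /* private: queue intact */

            free(copied.data);
            return 0;
    }
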
4279 +diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
4280 +index c9a8a2433e8ac..48322e45e7ddb 100644
4281 +--- a/net/mac80211/driver-ops.c
4282 ++++ b/net/mac80211/driver-ops.c
4283 +@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local,
4284 + } else if (old_state == IEEE80211_STA_AUTH &&
4285 + new_state == IEEE80211_STA_ASSOC) {
4286 + ret = drv_sta_add(local, sdata, &sta->sta);
4287 +- if (ret == 0)
4288 ++ if (ret == 0) {
4289 + sta->uploaded = true;
4290 ++ if (rcu_access_pointer(sta->sta.rates))
4291 ++ drv_sta_rate_tbl_update(local, sdata, &sta->sta);
4292 ++ }
4293 + } else if (old_state == IEEE80211_STA_ASSOC &&
4294 + new_state == IEEE80211_STA_AUTH) {
4295 + drv_sta_remove(local, sdata, &sta->sta);
4296 +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
4297 +index 45927202c71c6..63652c39c8e07 100644
4298 +--- a/net/mac80211/rate.c
4299 ++++ b/net/mac80211/rate.c
4300 +@@ -960,7 +960,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
4301 + if (old)
4302 + kfree_rcu(old, rcu_head);
4303 +
4304 +- drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
4305 ++ if (sta->uploaded)
4306 ++ drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
4307 +
4308 + ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
4309 +
4310 +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
4311 +index 0a2f4817ec6cf..41671af6b33f9 100644
4312 +--- a/net/rxrpc/af_rxrpc.c
4313 ++++ b/net/rxrpc/af_rxrpc.c
4314 +@@ -990,7 +990,7 @@ static int __init af_rxrpc_init(void)
4315 + goto error_security;
4316 + }
4317 +
4318 +- ret = register_pernet_subsys(&rxrpc_net_ops);
4319 ++ ret = register_pernet_device(&rxrpc_net_ops);
4320 + if (ret)
4321 + goto error_pernet;
4322 +
4323 +@@ -1035,7 +1035,7 @@ error_key_type:
4324 + error_sock:
4325 + proto_unregister(&rxrpc_proto);
4326 + error_proto:
4327 +- unregister_pernet_subsys(&rxrpc_net_ops);
4328 ++ unregister_pernet_device(&rxrpc_net_ops);
4329 + error_pernet:
4330 + rxrpc_exit_security();
4331 + error_security:
4332 +@@ -1057,7 +1057,7 @@ static void __exit af_rxrpc_exit(void)
4333 + unregister_key_type(&key_type_rxrpc);
4334 + sock_unregister(PF_RXRPC);
4335 + proto_unregister(&rxrpc_proto);
4336 +- unregister_pernet_subsys(&rxrpc_net_ops);
4337 ++ unregister_pernet_device(&rxrpc_net_ops);
4338 + ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
4339 + ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
4340 +
4341 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
4342 +index 4404c491eb388..fa7b7ae2c2c5f 100644
4343 +--- a/net/sunrpc/svcsock.c
4344 ++++ b/net/sunrpc/svcsock.c
4345 +@@ -1113,14 +1113,15 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
4346 + unsigned int offset, len, remaining;
4347 + struct bio_vec *bvec;
4348 +
4349 +- bvec = xdr->bvec;
4350 +- offset = xdr->page_base;
4351 ++ bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
4352 ++ offset = offset_in_page(xdr->page_base);
4353 + remaining = xdr->page_len;
4354 + flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
4355 + while (remaining > 0) {
4356 + if (remaining <= PAGE_SIZE && tail->iov_len == 0)
4357 + flags = 0;
4358 +- len = min(remaining, bvec->bv_len);
4359 ++
4360 ++ len = min(remaining, bvec->bv_len - offset);
4361 + ret = kernel_sendpage(sock, bvec->bv_page,
4362 + bvec->bv_offset + offset,
4363 + len, flags);
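
The fix above accounts for xdr->page_base possibly exceeding one page: the send must start at bvec index page_base >> PAGE_SHIFT with an in-page offset, and the first chunk must be capped at bv_len - offset. A worked example, assuming PAGE_SHIFT = 12 for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)
    #define offset_in_page(p) ((p) & (PAGE_SIZE - 1))

    int main(void)
    {
            unsigned int page_base = 5000;  /* starts inside second page */

            printf("bvec index %u, offset %u\n",
                   page_base >> PAGE_SHIFT,         /* 1 */
                   offset_in_page(page_base));      /* 904 */
            return 0;
    }
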
4364 +diff --git a/scripts/Makefile b/scripts/Makefile
4365 +index b5418ec587fbd..9de3c03b94aa7 100644
4366 +--- a/scripts/Makefile
4367 ++++ b/scripts/Makefile
4368 +@@ -3,6 +3,9 @@
4369 + # scripts contains sources for various helper programs used throughout
4370 + # the kernel for the build process.
4371 +
4372 ++CRYPTO_LIBS = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
4373 ++CRYPTO_CFLAGS = $(shell pkg-config --cflags libcrypto 2> /dev/null)
4374 ++
4375 + hostprogs-always-$(CONFIG_BUILD_BIN2C) += bin2c
4376 + hostprogs-always-$(CONFIG_KALLSYMS) += kallsyms
4377 + hostprogs-always-$(BUILD_C_RECORDMCOUNT) += recordmcount
4378 +@@ -14,8 +17,9 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
4379 +
4380 + HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
4381 + HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
4382 +-HOSTLDLIBS_sign-file = -lcrypto
4383 +-HOSTLDLIBS_extract-cert = -lcrypto
4384 ++HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
4385 ++HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
4386 ++HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
4387 +
4388 + ifdef CONFIG_UNWINDER_ORC
4389 + ifeq ($(ARCH),x86_64)