From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.0 commit in: /
Date: Wed, 17 Apr 2019 07:32:38
Message-Id: 1555486337.f5cf400c13c66c3c62cc0b83cd9894bec6c56983.alicef@gentoo
1 commit: f5cf400c13c66c3c62cc0b83cd9894bec6c56983
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 17 07:31:29 2019 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 17 07:32:17 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f5cf400c
7
8 Linux Patch 5.0.8
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1007_linux-5.0.8.patch | 36018 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 36022 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 0545dfc..2dd07a5 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -71,6 +71,10 @@ Patch: 1006_linux-5.0.7.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.0.7
23
24 +Patch: 1007_linux-5.0.8.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.0.8
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1007_linux-5.0.8.patch b/1007_linux-5.0.8.patch
33 new file mode 100644
34 index 0000000..2e45798
35 --- /dev/null
36 +++ b/1007_linux-5.0.8.patch
37 @@ -0,0 +1,36018 @@
38 +diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
39 +index e133ccd60228..acfe3d0f78d1 100644
40 +--- a/Documentation/DMA-API.txt
41 ++++ b/Documentation/DMA-API.txt
42 +@@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you
43 + wish to take advantage of it, you should issue a dma_set_mask()
44 + call to set the mask to the value returned.
45 +
46 ++::
47 ++
48 ++ size_t
49 ++ dma_direct_max_mapping_size(struct device *dev);
50 ++
51 ++Returns the maximum size of a mapping for the device. The size parameter
52 ++of the mapping functions like dma_map_single(), dma_map_page() and
53 ++others should not be larger than the returned value.
54 +
55 + Part Id - Streaming DMA mappings
56 + --------------------------------
57 +diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
58 +index 525452726d31..b9e060c5b61e 100644
59 +--- a/Documentation/arm/kernel_mode_neon.txt
60 ++++ b/Documentation/arm/kernel_mode_neon.txt
61 +@@ -6,7 +6,7 @@ TL;DR summary
62 + * Use only NEON instructions, or VFP instructions that don't rely on support
63 + code
64 + * Isolate your NEON code in a separate compilation unit, and compile it with
65 +- '-mfpu=neon -mfloat-abi=softfp'
66 ++ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
67 + * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
68 + NEON code
69 + * Don't sleep in your NEON code, and be aware that it will be executed with
70 +@@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
71 + Therefore, the recommended and only supported way of using NEON/VFP in the
72 + kernel is by adhering to the following rules:
73 + * isolate the NEON code in a separate compilation unit and compile it with
74 +- '-mfpu=neon -mfloat-abi=softfp';
75 ++ '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
76 + * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
77 + into the unit containing the NEON code from a compilation unit which is *not*
78 + built with the GCC flag '-mfpu=neon' set.
79 +diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
80 +index 1f09d043d086..ddb8ce5333ba 100644
81 +--- a/Documentation/arm64/silicon-errata.txt
82 ++++ b/Documentation/arm64/silicon-errata.txt
83 +@@ -44,6 +44,8 @@ stable kernels.
84 +
85 + | Implementor | Component | Erratum ID | Kconfig |
86 + +----------------+-----------------+-----------------+-----------------------------+
87 ++| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
88 ++| | | | |
89 + | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
90 + | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
91 + | ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 |
92 +diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
93 +index a10c1f89037d..e1fe02f3e3e9 100644
94 +--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
95 ++++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
96 +@@ -11,11 +11,13 @@ New driver handles the following
97 +
98 + Required properties:
99 + - compatible: Must be "samsung,exynos-adc-v1"
100 +- for exynos4412/5250 controllers.
101 ++ for Exynos5250 controllers.
102 + Must be "samsung,exynos-adc-v2" for
103 + future controllers.
104 + Must be "samsung,exynos3250-adc" for
105 + controllers compatible with ADC of Exynos3250.
106 ++ Must be "samsung,exynos4212-adc" for
107 ++ controllers compatible with ADC of Exynos4212 and Exynos4412.
108 + Must be "samsung,exynos7-adc" for
109 + the ADC in Exynos7 and compatibles
110 + Must be "samsung,s3c2410-adc" for
111 +diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
112 +index 0de6f6145cc6..7ba8cd567f84 100644
113 +--- a/Documentation/process/stable-kernel-rules.rst
114 ++++ b/Documentation/process/stable-kernel-rules.rst
115 +@@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree
116 + - If the patch covers files in net/ or drivers/net please follow netdev stable
117 + submission guidelines as described in
118 + :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
119 ++ after first checking the stable networking queue at
120 ++ https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
121 ++ to ensure the requested patch is not already queued up.
122 + - Security patches should not be handled (solely) by the -stable review
123 + process but should follow the procedures in
124 + :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
125 +diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
126 +index 356156f5c52d..ba8927c0d45c 100644
127 +--- a/Documentation/virtual/kvm/api.txt
128 ++++ b/Documentation/virtual/kvm/api.txt
129 +@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
130 +
131 + - VM ioctls: These query and set attributes that affect an entire virtual
132 + machine, for example memory layout. In addition a VM ioctl is used to
133 +- create virtual cpus (vcpus).
134 ++ create virtual cpus (vcpus) and devices.
135 +
136 + Only run VM ioctls from the same process (address space) that was used
137 + to create the VM.
138 +@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
139 + Only run vcpu ioctls from the same thread that was used to create the
140 + vcpu.
141 +
142 ++ - device ioctls: These query and set attributes that control the operation
143 ++ of a single device.
144 ++
145 ++ device ioctls must be issued from the same process (address space) that
146 ++ was used to create the VM.
147 +
148 + 2. File descriptors
149 + -------------------
150 +@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
151 + open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
152 + can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
153 + handle will create a VM file descriptor which can be used to issue VM
154 +-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
155 +-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
156 +-fd can be used to control the vcpu, including the important task of
157 +-actually running guest code.
158 ++ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
159 ++create a virtual cpu or device and return a file descriptor pointing to
160 ++the new resource. Finally, ioctls on a vcpu or device fd can be used
161 ++to control the vcpu or device. For vcpus, this includes the important
162 ++task of actually running guest code.
163 +
164 + In general file descriptors can be migrated among processes by means
165 + of fork() and the SCM_RIGHTS facility of unix domain socket. These
166 +diff --git a/Makefile b/Makefile
167 +index d5713e7b1e50..f7666051de66 100644
168 +--- a/Makefile
169 ++++ b/Makefile
170 +@@ -1,7 +1,7 @@
171 + # SPDX-License-Identifier: GPL-2.0
172 + VERSION = 5
173 + PATCHLEVEL = 0
174 +-SUBLEVEL = 0
175 ++SUBLEVEL = 8
176 + EXTRAVERSION =
177 + NAME = Shy Crocodile
178 +
179 +@@ -15,19 +15,6 @@ NAME = Shy Crocodile
180 + PHONY := _all
181 + _all:
182 +
183 +-# Do not use make's built-in rules and variables
184 +-# (this increases performance and avoids hard-to-debug behaviour)
185 +-MAKEFLAGS += -rR
186 +-
187 +-# Avoid funny character set dependencies
188 +-unexport LC_ALL
189 +-LC_COLLATE=C
190 +-LC_NUMERIC=C
191 +-export LC_COLLATE LC_NUMERIC
192 +-
193 +-# Avoid interference with shell env settings
194 +-unexport GREP_OPTIONS
195 +-
196 + # We are using a recursive build, so we need to do a little thinking
197 + # to get the ordering right.
198 + #
199 +@@ -44,6 +31,21 @@ unexport GREP_OPTIONS
200 + # descending is started. They are now explicitly listed as the
201 + # prepare rule.
202 +
203 ++ifneq ($(sub-make-done),1)
204 ++
205 ++# Do not use make's built-in rules and variables
206 ++# (this increases performance and avoids hard-to-debug behaviour)
207 ++MAKEFLAGS += -rR
208 ++
209 ++# Avoid funny character set dependencies
210 ++unexport LC_ALL
211 ++LC_COLLATE=C
212 ++LC_NUMERIC=C
213 ++export LC_COLLATE LC_NUMERIC
214 ++
215 ++# Avoid interference with shell env settings
216 ++unexport GREP_OPTIONS
217 ++
218 + # Beautify output
219 + # ---------------------------------------------------------------------------
220 + #
221 +@@ -112,7 +114,6 @@ export quiet Q KBUILD_VERBOSE
222 +
223 + # KBUILD_SRC is not intended to be used by the regular user (for now),
224 + # it is set on invocation of make with KBUILD_OUTPUT or O= specified.
225 +-ifeq ($(KBUILD_SRC),)
226 +
227 + # OK, Make called in directory where kernel src resides
228 + # Do we want to locate output files in a separate directory?
229 +@@ -142,6 +143,24 @@ $(if $(KBUILD_OUTPUT),, \
230 + # 'sub-make' below.
231 + MAKEFLAGS += --include-dir=$(CURDIR)
232 +
233 ++need-sub-make := 1
234 ++else
235 ++
236 ++# Do not print "Entering directory ..." at all for in-tree build.
237 ++MAKEFLAGS += --no-print-directory
238 ++
239 ++endif # ifneq ($(KBUILD_OUTPUT),)
240 ++
241 ++ifneq ($(filter 3.%,$(MAKE_VERSION)),)
242 ++# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
243 ++# We need to invoke sub-make to avoid implicit rules in the top Makefile.
244 ++need-sub-make := 1
245 ++# Cancel implicit rules for this Makefile.
246 ++$(lastword $(MAKEFILE_LIST)): ;
247 ++endif
248 ++
249 ++ifeq ($(need-sub-make),1)
250 ++
251 + PHONY += $(MAKECMDGOALS) sub-make
252 +
253 + $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
254 +@@ -149,16 +168,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
255 +
256 + # Invoke a second make in the output directory, passing relevant variables
257 + sub-make:
258 +- $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
259 ++ $(Q)$(MAKE) sub-make-done=1 \
260 ++ $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
261 + -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
262 +
263 +-# Leave processing to above invocation of make
264 +-skip-makefile := 1
265 +-endif # ifneq ($(KBUILD_OUTPUT),)
266 +-endif # ifeq ($(KBUILD_SRC),)
267 ++endif # need-sub-make
268 ++endif # sub-make-done
269 +
270 + # We process the rest of the Makefile if this is the final invocation of make
271 +-ifeq ($(skip-makefile),)
272 ++ifeq ($(need-sub-make),)
273 +
274 + # Do not print "Entering directory ...",
275 + # but we want to display it when entering to the output directory
276 +@@ -492,7 +510,7 @@ endif
277 + ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
278 + ifneq ($(CROSS_COMPILE),)
279 + CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
280 +-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
281 ++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
282 + CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
283 + GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
284 + endif
285 +@@ -625,12 +643,15 @@ ifeq ($(may-sync-config),1)
286 + -include include/config/auto.conf.cmd
287 +
288 + # To avoid any implicit rule to kick in, define an empty command
289 +-$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
290 ++$(KCONFIG_CONFIG): ;
291 +
292 + # The actual configuration files used during the build are stored in
293 + # include/generated/ and include/config/. Update them if .config is newer than
294 + # include/config/auto.conf (which mirrors .config).
295 +-include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
296 ++#
297 ++# This exploits the 'multi-target pattern rule' trick.
298 ++# The syncconfig should be executed only once to make all the targets.
299 ++%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
300 + $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
301 + else
302 + # External modules and some install targets need include/generated/autoconf.h
303 +@@ -944,9 +965,11 @@ mod_sign_cmd = true
304 + endif
305 + export mod_sign_cmd
306 +
307 ++HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
308 ++
309 + ifdef CONFIG_STACK_VALIDATION
310 + has_libelf := $(call try-run,\
311 +- echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
312 ++ echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
313 + ifeq ($(has_libelf),1)
314 + objtool_target := tools/objtool FORCE
315 + else
316 +@@ -1754,7 +1777,7 @@ $(cmd_files): ; # Do not try to update included dependency files
317 +
318 + endif # ifeq ($(config-targets),1)
319 + endif # ifeq ($(mixed-targets),1)
320 +-endif # skip-makefile
321 ++endif # need-sub-make
322 +
323 + PHONY += FORCE
324 + FORCE:
325 +diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
326 +index 7b56a53be5e3..e09558edae73 100644
327 +--- a/arch/alpha/kernel/syscalls/syscall.tbl
328 ++++ b/arch/alpha/kernel/syscalls/syscall.tbl
329 +@@ -451,3 +451,4 @@
330 + 520 common preadv2 sys_preadv2
331 + 521 common pwritev2 sys_pwritev2
332 + 522 common statx sys_statx
333 ++523 common io_pgetevents sys_io_pgetevents
334 +diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
335 +index dce5be5df97b..edcff79879e7 100644
336 +--- a/arch/arm/boot/dts/am335x-evm.dts
337 ++++ b/arch/arm/boot/dts/am335x-evm.dts
338 +@@ -57,6 +57,24 @@
339 + enable-active-high;
340 + };
341 +
342 ++ /* TPS79501 */
343 ++ v1_8d_reg: fixedregulator-v1_8d {
344 ++ compatible = "regulator-fixed";
345 ++ regulator-name = "v1_8d";
346 ++ vin-supply = <&vbat>;
347 ++ regulator-min-microvolt = <1800000>;
348 ++ regulator-max-microvolt = <1800000>;
349 ++ };
350 ++
351 ++ /* TPS79501 */
352 ++ v3_3d_reg: fixedregulator-v3_3d {
353 ++ compatible = "regulator-fixed";
354 ++ regulator-name = "v3_3d";
355 ++ vin-supply = <&vbat>;
356 ++ regulator-min-microvolt = <3300000>;
357 ++ regulator-max-microvolt = <3300000>;
358 ++ };
359 ++
360 + matrix_keypad: matrix_keypad0 {
361 + compatible = "gpio-matrix-keypad";
362 + debounce-delay-ms = <5>;
363 +@@ -499,10 +517,10 @@
364 + status = "okay";
365 +
366 + /* Regulators */
367 +- AVDD-supply = <&vaux2_reg>;
368 +- IOVDD-supply = <&vaux2_reg>;
369 +- DRVDD-supply = <&vaux2_reg>;
370 +- DVDD-supply = <&vbat>;
371 ++ AVDD-supply = <&v3_3d_reg>;
372 ++ IOVDD-supply = <&v3_3d_reg>;
373 ++ DRVDD-supply = <&v3_3d_reg>;
374 ++ DVDD-supply = <&v1_8d_reg>;
375 + };
376 + };
377 +
378 +diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
379 +index b128998097ce..2c2d8b5b8cf5 100644
380 +--- a/arch/arm/boot/dts/am335x-evmsk.dts
381 ++++ b/arch/arm/boot/dts/am335x-evmsk.dts
382 +@@ -73,6 +73,24 @@
383 + enable-active-high;
384 + };
385 +
386 ++ /* TPS79518 */
387 ++ v1_8d_reg: fixedregulator-v1_8d {
388 ++ compatible = "regulator-fixed";
389 ++ regulator-name = "v1_8d";
390 ++ vin-supply = <&vbat>;
391 ++ regulator-min-microvolt = <1800000>;
392 ++ regulator-max-microvolt = <1800000>;
393 ++ };
394 ++
395 ++ /* TPS78633 */
396 ++ v3_3d_reg: fixedregulator-v3_3d {
397 ++ compatible = "regulator-fixed";
398 ++ regulator-name = "v3_3d";
399 ++ vin-supply = <&vbat>;
400 ++ regulator-min-microvolt = <3300000>;
401 ++ regulator-max-microvolt = <3300000>;
402 ++ };
403 ++
404 + leds {
405 + pinctrl-names = "default";
406 + pinctrl-0 = <&user_leds_s0>;
407 +@@ -501,10 +519,10 @@
408 + status = "okay";
409 +
410 + /* Regulators */
411 +- AVDD-supply = <&vaux2_reg>;
412 +- IOVDD-supply = <&vaux2_reg>;
413 +- DRVDD-supply = <&vaux2_reg>;
414 +- DVDD-supply = <&vbat>;
415 ++ AVDD-supply = <&v3_3d_reg>;
416 ++ IOVDD-supply = <&v3_3d_reg>;
417 ++ DRVDD-supply = <&v3_3d_reg>;
418 ++ DVDD-supply = <&v1_8d_reg>;
419 + };
420 + };
421 +
422 +diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
423 +index 608d17454179..5892a9f7622f 100644
424 +--- a/arch/arm/boot/dts/exynos3250.dtsi
425 ++++ b/arch/arm/boot/dts/exynos3250.dtsi
426 +@@ -168,6 +168,9 @@
427 + interrupt-controller;
428 + #interrupt-cells = <3>;
429 + interrupt-parent = <&gic>;
430 ++ clock-names = "clkout8";
431 ++ clocks = <&cmu CLK_FIN_PLL>;
432 ++ #clock-cells = <1>;
433 + };
434 +
435 + mipi_phy: video-phy {
436 +diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
437 +index 3a9eb1e91c45..8a64c4e8c474 100644
438 +--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
439 ++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
440 +@@ -49,7 +49,7 @@
441 + };
442 +
443 + emmc_pwrseq: pwrseq {
444 +- pinctrl-0 = <&sd1_cd>;
445 ++ pinctrl-0 = <&emmc_rstn>;
446 + pinctrl-names = "default";
447 + compatible = "mmc-pwrseq-emmc";
448 + reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
449 +@@ -165,12 +165,6 @@
450 + cpu0-supply = <&buck2_reg>;
451 + };
452 +
453 +-/* RSTN signal for eMMC */
454 +-&sd1_cd {
455 +- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
456 +- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
457 +-};
458 +-
459 + &pinctrl_1 {
460 + gpio_power_key: power_key {
461 + samsung,pins = "gpx1-3";
462 +@@ -188,6 +182,11 @@
463 + samsung,pins = "gpx3-7";
464 + samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
465 + };
466 ++
467 ++ emmc_rstn: emmc-rstn {
468 ++ samsung,pins = "gpk1-2";
469 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
470 ++ };
471 + };
472 +
473 + &ehci {
474 +diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
475 +index bf09eab90f8a..6bf3661293ee 100644
476 +--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
477 ++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
478 +@@ -468,7 +468,7 @@
479 + buck8_reg: BUCK8 {
480 + regulator-name = "vdd_1.8v_ldo";
481 + regulator-min-microvolt = <800000>;
482 +- regulator-max-microvolt = <1500000>;
483 ++ regulator-max-microvolt = <2000000>;
484 + regulator-always-on;
485 + regulator-boot-on;
486 + };
487 +diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
488 +index b7303a4e4236..ed0d6fb20122 100644
489 +--- a/arch/arm/boot/dts/lpc32xx.dtsi
490 ++++ b/arch/arm/boot/dts/lpc32xx.dtsi
491 +@@ -230,7 +230,7 @@
492 + status = "disabled";
493 + };
494 +
495 +- i2s1: i2s@2009C000 {
496 ++ i2s1: i2s@2009c000 {
497 + compatible = "nxp,lpc3220-i2s";
498 + reg = <0x2009C000 0x1000>;
499 + };
500 +@@ -273,7 +273,7 @@
501 + status = "disabled";
502 + };
503 +
504 +- i2c1: i2c@400A0000 {
505 ++ i2c1: i2c@400a0000 {
506 + compatible = "nxp,pnx-i2c";
507 + reg = <0x400A0000 0x100>;
508 + interrupt-parent = <&sic1>;
509 +@@ -284,7 +284,7 @@
510 + clocks = <&clk LPC32XX_CLK_I2C1>;
511 + };
512 +
513 +- i2c2: i2c@400A8000 {
514 ++ i2c2: i2c@400a8000 {
515 + compatible = "nxp,pnx-i2c";
516 + reg = <0x400A8000 0x100>;
517 + interrupt-parent = <&sic1>;
518 +@@ -295,7 +295,7 @@
519 + clocks = <&clk LPC32XX_CLK_I2C2>;
520 + };
521 +
522 +- mpwm: mpwm@400E8000 {
523 ++ mpwm: mpwm@400e8000 {
524 + compatible = "nxp,lpc3220-motor-pwm";
525 + reg = <0x400E8000 0x78>;
526 + status = "disabled";
527 +@@ -394,7 +394,7 @@
528 + #gpio-cells = <3>; /* bank, pin, flags */
529 + };
530 +
531 +- timer4: timer@4002C000 {
532 ++ timer4: timer@4002c000 {
533 + compatible = "nxp,lpc3220-timer";
534 + reg = <0x4002C000 0x1000>;
535 + interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
536 +@@ -412,7 +412,7 @@
537 + status = "disabled";
538 + };
539 +
540 +- watchdog: watchdog@4003C000 {
541 ++ watchdog: watchdog@4003c000 {
542 + compatible = "nxp,pnx4008-wdt";
543 + reg = <0x4003C000 0x1000>;
544 + clocks = <&clk LPC32XX_CLK_WDOG>;
545 +@@ -451,7 +451,7 @@
546 + status = "disabled";
547 + };
548 +
549 +- timer1: timer@4004C000 {
550 ++ timer1: timer@4004c000 {
551 + compatible = "nxp,lpc3220-timer";
552 + reg = <0x4004C000 0x1000>;
553 + interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
554 +@@ -475,7 +475,7 @@
555 + status = "disabled";
556 + };
557 +
558 +- pwm1: pwm@4005C000 {
559 ++ pwm1: pwm@4005c000 {
560 + compatible = "nxp,lpc3220-pwm";
561 + reg = <0x4005C000 0x4>;
562 + clocks = <&clk LPC32XX_CLK_PWM1>;
563 +@@ -484,7 +484,7 @@
564 + status = "disabled";
565 + };
566 +
567 +- pwm2: pwm@4005C004 {
568 ++ pwm2: pwm@4005c004 {
569 + compatible = "nxp,lpc3220-pwm";
570 + reg = <0x4005C004 0x4>;
571 + clocks = <&clk LPC32XX_CLK_PWM2>;
572 +diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
573 +index 22d775460767..dc125769fe85 100644
574 +--- a/arch/arm/boot/dts/meson8b.dtsi
575 ++++ b/arch/arm/boot/dts/meson8b.dtsi
576 +@@ -270,9 +270,7 @@
577 + groups = "eth_tx_clk",
578 + "eth_tx_en",
579 + "eth_txd1_0",
580 +- "eth_txd1_1",
581 + "eth_txd0_0",
582 +- "eth_txd0_1",
583 + "eth_rx_clk",
584 + "eth_rx_dv",
585 + "eth_rxd1",
586 +@@ -281,7 +279,9 @@
587 + "eth_mdc",
588 + "eth_ref_clk",
589 + "eth_txd2",
590 +- "eth_txd3";
591 ++ "eth_txd3",
592 ++ "eth_rxd3",
593 ++ "eth_rxd2";
594 + function = "ethernet";
595 + bias-disable;
596 + };
597 +diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
598 +index aa107ee41b8b..ef653c3209bc 100644
599 +--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
600 ++++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
601 +@@ -254,6 +254,7 @@
602 + };
603 +
604 + vccio_sd: LDO_REG5 {
605 ++ regulator-boot-on;
606 + regulator-min-microvolt = <1800000>;
607 + regulator-max-microvolt = <3300000>;
608 + regulator-name = "vccio_sd";
609 +@@ -430,7 +431,7 @@
610 + bus-width = <4>;
611 + cap-mmc-highspeed;
612 + cap-sd-highspeed;
613 +- card-detect-delay = <200>;
614 ++ broken-cd;
615 + disable-wp; /* wp not hooked up */
616 + pinctrl-names = "default";
617 + pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
618 +diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
619 +index ca7d52daa8fb..09868dcee34b 100644
620 +--- a/arch/arm/boot/dts/rk3288.dtsi
621 ++++ b/arch/arm/boot/dts/rk3288.dtsi
622 +@@ -70,7 +70,7 @@
623 + compatible = "arm,cortex-a12";
624 + reg = <0x501>;
625 + resets = <&cru SRST_CORE1>;
626 +- operating-points = <&cpu_opp_table>;
627 ++ operating-points-v2 = <&cpu_opp_table>;
628 + #cooling-cells = <2>; /* min followed by max */
629 + clock-latency = <40000>;
630 + clocks = <&cru ARMCLK>;
631 +@@ -80,7 +80,7 @@
632 + compatible = "arm,cortex-a12";
633 + reg = <0x502>;
634 + resets = <&cru SRST_CORE2>;
635 +- operating-points = <&cpu_opp_table>;
636 ++ operating-points-v2 = <&cpu_opp_table>;
637 + #cooling-cells = <2>; /* min followed by max */
638 + clock-latency = <40000>;
639 + clocks = <&cru ARMCLK>;
640 +@@ -90,7 +90,7 @@
641 + compatible = "arm,cortex-a12";
642 + reg = <0x503>;
643 + resets = <&cru SRST_CORE3>;
644 +- operating-points = <&cpu_opp_table>;
645 ++ operating-points-v2 = <&cpu_opp_table>;
646 + #cooling-cells = <2>; /* min followed by max */
647 + clock-latency = <40000>;
648 + clocks = <&cru ARMCLK>;
649 +diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
650 +index 1c01a6f843d8..28a2e45752fe 100644
651 +--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
652 ++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
653 +@@ -518,7 +518,7 @@
654 + #define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
655 + #define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
656 + #define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
657 +-#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
658 ++#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
659 + #define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
660 + #define PIN_PC10 74
661 + #define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)
662 +diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
663 +index ce45ba0c0687..16019b5961e7 100644
664 +--- a/arch/arm/crypto/crct10dif-ce-core.S
665 ++++ b/arch/arm/crypto/crct10dif-ce-core.S
666 +@@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
667 + vext.8 q10, qzr, q0, #4
668 +
669 + // receive the initial 64B data, xor the initial crc value
670 +- vld1.64 {q0-q1}, [arg2, :128]!
671 +- vld1.64 {q2-q3}, [arg2, :128]!
672 +- vld1.64 {q4-q5}, [arg2, :128]!
673 +- vld1.64 {q6-q7}, [arg2, :128]!
674 ++ vld1.64 {q0-q1}, [arg2]!
675 ++ vld1.64 {q2-q3}, [arg2]!
676 ++ vld1.64 {q4-q5}, [arg2]!
677 ++ vld1.64 {q6-q7}, [arg2]!
678 + CPU_LE( vrev64.8 q0, q0 )
679 + CPU_LE( vrev64.8 q1, q1 )
680 + CPU_LE( vrev64.8 q2, q2 )
681 +@@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 )
682 + _fold_64_B_loop:
683 +
684 + .macro fold64, reg1, reg2
685 +- vld1.64 {q11-q12}, [arg2, :128]!
686 ++ vld1.64 {q11-q12}, [arg2]!
687 +
688 + vmull.p64 q8, \reg1\()h, d21
689 + vmull.p64 \reg1, \reg1\()l, d20
690 +@@ -238,7 +238,7 @@ _16B_reduction_loop:
691 + vmull.p64 q7, d15, d21
692 + veor.8 q7, q7, q8
693 +
694 +- vld1.64 {q0}, [arg2, :128]!
695 ++ vld1.64 {q0}, [arg2]!
696 + CPU_LE( vrev64.8 q0, q0 )
697 + vswp d0, d1
698 + veor.8 q7, q7, q0
699 +@@ -335,7 +335,7 @@ _less_than_128:
700 + vmov.i8 q0, #0
701 + vmov s3, arg1_low32 // get the initial crc value
702 +
703 +- vld1.64 {q7}, [arg2, :128]!
704 ++ vld1.64 {q7}, [arg2]!
705 + CPU_LE( vrev64.8 q7, q7 )
706 + vswp d14, d15
707 + veor.8 q7, q7, q0
708 +diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
709 +index d428355cf38d..14c19c70a841 100644
710 +--- a/arch/arm/crypto/crct10dif-ce-glue.c
711 ++++ b/arch/arm/crypto/crct10dif-ce-glue.c
712 +@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
713 + unsigned int length)
714 + {
715 + u16 *crc = shash_desc_ctx(desc);
716 +- unsigned int l;
717 +
718 +- if (!may_use_simd()) {
719 +- *crc = crc_t10dif_generic(*crc, data, length);
720 ++ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
721 ++ kernel_neon_begin();
722 ++ *crc = crc_t10dif_pmull(*crc, data, length);
723 ++ kernel_neon_end();
724 + } else {
725 +- if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
726 +- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
727 +- ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
728 +-
729 +- *crc = crc_t10dif_generic(*crc, data, l);
730 +-
731 +- length -= l;
732 +- data += l;
733 +- }
734 +- if (length > 0) {
735 +- kernel_neon_begin();
736 +- *crc = crc_t10dif_pmull(*crc, data, length);
737 +- kernel_neon_end();
738 +- }
739 ++ *crc = crc_t10dif_generic(*crc, data, length);
740 + }
741 ++
742 + return 0;
743 + }
744 +
745 +diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
746 +index 69772e742a0a..83ae97c049d9 100644
747 +--- a/arch/arm/include/asm/barrier.h
748 ++++ b/arch/arm/include/asm/barrier.h
749 +@@ -11,6 +11,8 @@
750 + #define sev() __asm__ __volatile__ ("sev" : : : "memory")
751 + #define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
752 + #define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
753 ++#else
754 ++#define wfe() do { } while (0)
755 + #endif
756 +
757 + #if __LINUX_ARM_ARCH__ >= 7
758 +diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
759 +index 120f4c9bbfde..57fe73ea0f72 100644
760 +--- a/arch/arm/include/asm/processor.h
761 ++++ b/arch/arm/include/asm/processor.h
762 +@@ -89,7 +89,11 @@ extern void release_thread(struct task_struct *);
763 + unsigned long get_wchan(struct task_struct *p);
764 +
765 + #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
766 +-#define cpu_relax() smp_mb()
767 ++#define cpu_relax() \
768 ++ do { \
769 ++ smp_mb(); \
770 ++ __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
771 ++ } while (0)
772 + #else
773 + #define cpu_relax() barrier()
774 + #endif
775 +diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
776 +index 187ccf6496ad..2cb00d15831b 100644
777 +--- a/arch/arm/include/asm/v7m.h
778 ++++ b/arch/arm/include/asm/v7m.h
779 +@@ -49,7 +49,7 @@
780 + * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
781 + */
782 + #define EXC_RET_STACK_MASK 0x00000004
783 +-#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
784 ++#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
785 +
786 + /* Cache related definitions */
787 +
788 +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
789 +index 773424843d6e..62db1c9746cb 100644
790 +--- a/arch/arm/kernel/entry-header.S
791 ++++ b/arch/arm/kernel/entry-header.S
792 +@@ -127,7 +127,8 @@
793 + */
794 + .macro v7m_exception_slow_exit ret_r0
795 + cpsid i
796 +- ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
797 ++ ldr lr, =exc_ret
798 ++ ldr lr, [lr]
799 +
800 + @ read original r12, sp, lr, pc and xPSR
801 + add r12, sp, #S_IP
802 +diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
803 +index abcf47848525..19d2dcd6530d 100644
804 +--- a/arch/arm/kernel/entry-v7m.S
805 ++++ b/arch/arm/kernel/entry-v7m.S
806 +@@ -146,3 +146,7 @@ ENTRY(vector_table)
807 + .rept CONFIG_CPU_V7M_NUM_IRQ
808 + .long __irq_entry @ External Interrupts
809 + .endr
810 ++ .align 2
811 ++ .globl exc_ret
812 ++exc_ret:
813 ++ .space 4
814 +diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
815 +index dd2eb5f76b9f..76300f3813e8 100644
816 +--- a/arch/arm/kernel/machine_kexec.c
817 ++++ b/arch/arm/kernel/machine_kexec.c
818 +@@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
819 +
820 + set_cpu_online(smp_processor_id(), false);
821 + atomic_dec(&waiting_for_crash_ipi);
822 +- while (1)
823 ++
824 ++ while (1) {
825 + cpu_relax();
826 ++ wfe();
827 ++ }
828 + }
829 +
830 + void crash_smp_send_stop(void)
831 +diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
832 +index 1d6f5ea522f4..a3ce7c5365fa 100644
833 +--- a/arch/arm/kernel/smp.c
834 ++++ b/arch/arm/kernel/smp.c
835 +@@ -604,8 +604,10 @@ static void ipi_cpu_stop(unsigned int cpu)
836 + local_fiq_disable();
837 + local_irq_disable();
838 +
839 +- while (1)
840 ++ while (1) {
841 + cpu_relax();
842 ++ wfe();
843 ++ }
844 + }
845 +
846 + static DEFINE_PER_CPU(struct completion *, cpu_completion);
847 +diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
848 +index 0bee233fef9a..314cfb232a63 100644
849 +--- a/arch/arm/kernel/unwind.c
850 ++++ b/arch/arm/kernel/unwind.c
851 +@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
852 + static const struct unwind_idx *__origin_unwind_idx;
853 + extern const struct unwind_idx __stop_unwind_idx[];
854 +
855 +-static DEFINE_SPINLOCK(unwind_lock);
856 ++static DEFINE_RAW_SPINLOCK(unwind_lock);
857 + static LIST_HEAD(unwind_tables);
858 +
859 + /* Convert a prel31 symbol to an absolute address */
860 +@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
861 + /* module unwind tables */
862 + struct unwind_table *table;
863 +
864 +- spin_lock_irqsave(&unwind_lock, flags);
865 ++ raw_spin_lock_irqsave(&unwind_lock, flags);
866 + list_for_each_entry(table, &unwind_tables, list) {
867 + if (addr >= table->begin_addr &&
868 + addr < table->end_addr) {
869 +@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
870 + break;
871 + }
872 + }
873 +- spin_unlock_irqrestore(&unwind_lock, flags);
874 ++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
875 + }
876 +
877 + pr_debug("%s: idx = %p\n", __func__, idx);
878 +@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
879 + tab->begin_addr = text_addr;
880 + tab->end_addr = text_addr + text_size;
881 +
882 +- spin_lock_irqsave(&unwind_lock, flags);
883 ++ raw_spin_lock_irqsave(&unwind_lock, flags);
884 + list_add_tail(&tab->list, &unwind_tables);
885 +- spin_unlock_irqrestore(&unwind_lock, flags);
886 ++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
887 +
888 + return tab;
889 + }
890 +@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
891 + if (!tab)
892 + return;
893 +
894 +- spin_lock_irqsave(&unwind_lock, flags);
895 ++ raw_spin_lock_irqsave(&unwind_lock, flags);
896 + list_del(&tab->list);
897 +- spin_unlock_irqrestore(&unwind_lock, flags);
898 ++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
899 +
900 + kfree(tab);
901 + }
902 +diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
903 +index ad25fd1872c7..0bff0176db2c 100644
904 +--- a/arch/arm/lib/Makefile
905 ++++ b/arch/arm/lib/Makefile
906 +@@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
907 + $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
908 +
909 + ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
910 +- NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
911 ++ NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
912 + CFLAGS_xor-neon.o += $(NEON_FLAGS)
913 + obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
914 + endif
915 +diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
916 +index 2c40aeab3eaa..c691b901092f 100644
917 +--- a/arch/arm/lib/xor-neon.c
918 ++++ b/arch/arm/lib/xor-neon.c
919 +@@ -14,7 +14,7 @@
920 + MODULE_LICENSE("GPL");
921 +
922 + #ifndef __ARM_NEON__
923 +-#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
924 ++#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
925 + #endif
926 +
927 + /*
928 +diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
929 +index bfeb25aaf9a2..326e870d7123 100644
930 +--- a/arch/arm/mach-imx/cpuidle-imx6q.c
931 ++++ b/arch/arm/mach-imx/cpuidle-imx6q.c
932 +@@ -16,30 +16,23 @@
933 + #include "cpuidle.h"
934 + #include "hardware.h"
935 +
936 +-static atomic_t master = ATOMIC_INIT(0);
937 +-static DEFINE_SPINLOCK(master_lock);
938 ++static int num_idle_cpus = 0;
939 ++static DEFINE_SPINLOCK(cpuidle_lock);
940 +
941 + static int imx6q_enter_wait(struct cpuidle_device *dev,
942 + struct cpuidle_driver *drv, int index)
943 + {
944 +- if (atomic_inc_return(&master) == num_online_cpus()) {
945 +- /*
946 +- * With this lock, we prevent other cpu to exit and enter
947 +- * this function again and become the master.
948 +- */
949 +- if (!spin_trylock(&master_lock))
950 +- goto idle;
951 ++ spin_lock(&cpuidle_lock);
952 ++ if (++num_idle_cpus == num_online_cpus())
953 + imx6_set_lpm(WAIT_UNCLOCKED);
954 +- cpu_do_idle();
955 +- imx6_set_lpm(WAIT_CLOCKED);
956 +- spin_unlock(&master_lock);
957 +- goto done;
958 +- }
959 ++ spin_unlock(&cpuidle_lock);
960 +
961 +-idle:
962 + cpu_do_idle();
963 +-done:
964 +- atomic_dec(&master);
965 ++
966 ++ spin_lock(&cpuidle_lock);
967 ++ if (num_idle_cpus-- == num_online_cpus())
968 ++ imx6_set_lpm(WAIT_CLOCKED);
969 ++ spin_unlock(&cpuidle_lock);
970 +
971 + return index;
972 + }
973 +diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
974 +index c4c0a8ea11e4..ee410ae7369e 100644
975 +--- a/arch/arm/mach-omap1/board-ams-delta.c
976 ++++ b/arch/arm/mach-omap1/board-ams-delta.c
977 +@@ -182,6 +182,7 @@ static struct resource latch1_resources[] = {
978 +
979 + static struct bgpio_pdata latch1_pdata = {
980 + .label = LATCH1_LABEL,
981 ++ .base = -1,
982 + .ngpio = LATCH1_NGPIO,
983 + };
984 +
985 +@@ -219,6 +220,7 @@ static struct resource latch2_resources[] = {
986 +
987 + static struct bgpio_pdata latch2_pdata = {
988 + .label = LATCH2_LABEL,
989 ++ .base = -1,
990 + .ngpio = LATCH2_NGPIO,
991 + };
992 +
993 +diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
994 +index 058a37e6d11c..fd6e0671f957 100644
995 +--- a/arch/arm/mach-omap2/prm_common.c
996 ++++ b/arch/arm/mach-omap2/prm_common.c
997 +@@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
998 +
999 + prm_ll_data->reset_system();
1000 +
1001 +- while (1)
1002 ++ while (1) {
1003 + cpu_relax();
1004 ++ wfe();
1005 ++ }
1006 + }
1007 +
1008 + /**
1009 +diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
1010 +index 058ce73137e8..5d819b6ea428 100644
1011 +--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
1012 ++++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
1013 +@@ -65,16 +65,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
1014 +
1015 + switch (val) {
1016 + case CPUFREQ_PRECHANGE:
1017 +- if (old_dvs & !new_dvs ||
1018 +- cur_dvs & !new_dvs) {
1019 ++ if ((old_dvs && !new_dvs) ||
1020 ++ (cur_dvs && !new_dvs)) {
1021 + pr_debug("%s: exiting dvs\n", __func__);
1022 + cur_dvs = false;
1023 + gpio_set_value(OSIRIS_GPIO_DVS, 1);
1024 + }
1025 + break;
1026 + case CPUFREQ_POSTCHANGE:
1027 +- if (!old_dvs & new_dvs ||
1028 +- !cur_dvs & new_dvs) {
1029 ++ if ((!old_dvs && new_dvs) ||
1030 ++ (!cur_dvs && new_dvs)) {
1031 + pr_debug("entering dvs\n");
1032 + cur_dvs = true;
1033 + gpio_set_value(OSIRIS_GPIO_DVS, 0);
1034 +diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
1035 +index 8e50daa99151..dc526ef2e9b3 100644
1036 +--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
1037 ++++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
1038 +@@ -40,6 +40,7 @@
1039 + struct regulator_quirk {
1040 + struct list_head list;
1041 + const struct of_device_id *id;
1042 ++ struct device_node *np;
1043 + struct of_phandle_args irq_args;
1044 + struct i2c_msg i2c_msg;
1045 + bool shared; /* IRQ line is shared */
1046 +@@ -101,6 +102,9 @@ static int regulator_quirk_notify(struct notifier_block *nb,
1047 + if (!pos->shared)
1048 + continue;
1049 +
1050 ++ if (pos->np->parent != client->dev.parent->of_node)
1051 ++ continue;
1052 ++
1053 + dev_info(&client->dev, "clearing %s@0x%02x interrupts\n",
1054 + pos->id->compatible, pos->i2c_msg.addr);
1055 +
1056 +@@ -165,6 +169,7 @@ static int __init rcar_gen2_regulator_quirk(void)
1057 + memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg));
1058 +
1059 + quirk->id = id;
1060 ++ quirk->np = np;
1061 + quirk->i2c_msg.addr = addr;
1062 +
1063 + ret = of_irq_parse_one(np, 0, argsa);
1064 +diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
1065 +index b03202cddddb..f74cdce6d4da 100644
1066 +--- a/arch/arm/mm/copypage-v4mc.c
1067 ++++ b/arch/arm/mm/copypage-v4mc.c
1068 +@@ -45,6 +45,7 @@ static void mc_copy_user_page(void *from, void *to)
1069 + int tmp;
1070 +
1071 + asm volatile ("\
1072 ++ .syntax unified\n\
1073 + ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1074 + 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
1075 + stmia %1!, {r2, r3, ip, lr} @ 4\n\
1076 +@@ -56,7 +57,7 @@ static void mc_copy_user_page(void *from, void *to)
1077 + ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1078 + subs %2, %2, #1 @ 1\n\
1079 + stmia %1!, {r2, r3, ip, lr} @ 4\n\
1080 +- ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
1081 ++ ldmiane %0!, {r2, r3, ip, lr} @ 4\n\
1082 + bne 1b @ "
1083 + : "+&r" (from), "+&r" (to), "=&r" (tmp)
1084 + : "2" (PAGE_SIZE / 64)
1085 +diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
1086 +index cd3e165afeed..6d336740aae4 100644
1087 +--- a/arch/arm/mm/copypage-v4wb.c
1088 ++++ b/arch/arm/mm/copypage-v4wb.c
1089 +@@ -27,6 +27,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
1090 + int tmp;
1091 +
1092 + asm volatile ("\
1093 ++ .syntax unified\n\
1094 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1095 + 1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
1096 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
1097 +@@ -38,7 +39,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
1098 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1099 + subs %2, %2, #1 @ 1\n\
1100 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
1101 +- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
1102 ++ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
1103 + bne 1b @ 1\n\
1104 + mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
1105 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
1106 +diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
1107 +index 8614572e1296..3851bb396442 100644
1108 +--- a/arch/arm/mm/copypage-v4wt.c
1109 ++++ b/arch/arm/mm/copypage-v4wt.c
1110 +@@ -25,6 +25,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
1111 + int tmp;
1112 +
1113 + asm volatile ("\
1114 ++ .syntax unified\n\
1115 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1116 + 1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
1117 + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
1118 +@@ -34,7 +35,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
1119 + ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1120 + subs %2, %2, #1 @ 1\n\
1121 + stmia %0!, {r3, r4, ip, lr} @ 4\n\
1122 +- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
1123 ++ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
1124 + bne 1b @ 1\n\
1125 + mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
1126 + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
1127 +diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
1128 +index 47a5acc64433..92e84181933a 100644
1129 +--- a/arch/arm/mm/proc-v7m.S
1130 ++++ b/arch/arm/mm/proc-v7m.S
1131 +@@ -139,6 +139,9 @@ __v7m_setup_cont:
1132 + cpsie i
1133 + svc #0
1134 + 1: cpsid i
1135 ++ ldr r0, =exc_ret
1136 ++ orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
1137 ++ str lr, [r0]
1138 + ldmia sp, {r0-r3, r12}
1139 + str r5, [r12, #11 * 4] @ restore the original SVC vector entry
1140 + mov lr, r6 @ restore LR
1141 +diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
1142 +index 610235028cc7..c14205cd6bf5 100644
1143 +--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
1144 ++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
1145 +@@ -118,6 +118,7 @@
1146 + reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
1147 + clocks = <&pmic>;
1148 + clock-names = "ext_clock";
1149 ++ post-power-on-delay-ms = <10>;
1150 + power-off-delay-us = <10>;
1151 + };
1152 +
1153 +@@ -300,7 +301,6 @@
1154 +
1155 + dwmmc_0: dwmmc0@f723d000 {
1156 + cap-mmc-highspeed;
1157 +- mmc-hs200-1_8v;
1158 + non-removable;
1159 + bus-width = <0x8>;
1160 + vmmc-supply = <&ldo19>;
1161 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
1162 +index 040b36ef0dd2..520ed8e474be 100644
1163 +--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
1164 ++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
1165 +@@ -46,8 +46,7 @@
1166 +
1167 + vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
1168 + compatible = "regulator-fixed";
1169 +- enable-active-high;
1170 +- gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
1171 ++ gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
1172 + pinctrl-names = "default";
1173 + pinctrl-0 = <&usb20_host_drv>;
1174 + regulator-name = "vcc_host1_5v";
1175 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
1176 +index ecd7f19c3542..97aa65455b4a 100644
1177 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
1178 ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
1179 +@@ -1431,11 +1431,11 @@
1180 +
1181 + sdmmc0 {
1182 + sdmmc0_clk: sdmmc0-clk {
1183 +- rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
1184 ++ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
1185 + };
1186 +
1187 + sdmmc0_cmd: sdmmc0-cmd {
1188 +- rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
1189 ++ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
1190 + };
1191 +
1192 + sdmmc0_dectn: sdmmc0-dectn {
1193 +@@ -1447,14 +1447,14 @@
1194 + };
1195 +
1196 + sdmmc0_bus1: sdmmc0-bus1 {
1197 +- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
1198 ++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
1199 + };
1200 +
1201 + sdmmc0_bus4: sdmmc0-bus4 {
1202 +- rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
1203 +- <1 RK_PA1 1 &pcfg_pull_up_4ma>,
1204 +- <1 RK_PA2 1 &pcfg_pull_up_4ma>,
1205 +- <1 RK_PA3 1 &pcfg_pull_up_4ma>;
1206 ++ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
1207 ++ <1 RK_PA1 1 &pcfg_pull_up_8ma>,
1208 ++ <1 RK_PA2 1 &pcfg_pull_up_8ma>,
1209 ++ <1 RK_PA3 1 &pcfg_pull_up_8ma>;
1210 + };
1211 +
1212 + sdmmc0_gpio: sdmmc0-gpio {
1213 +@@ -1628,50 +1628,50 @@
1214 + rgmiim1_pins: rgmiim1-pins {
1215 + rockchip,pins =
1216 + /* mac_txclk */
1217 +- <1 RK_PB4 2 &pcfg_pull_none_12ma>,
1218 ++ <1 RK_PB4 2 &pcfg_pull_none_8ma>,
1219 + /* mac_rxclk */
1220 +- <1 RK_PB5 2 &pcfg_pull_none_2ma>,
1221 ++ <1 RK_PB5 2 &pcfg_pull_none_4ma>,
1222 + /* mac_mdio */
1223 +- <1 RK_PC3 2 &pcfg_pull_none_2ma>,
1224 ++ <1 RK_PC3 2 &pcfg_pull_none_4ma>,
1225 + /* mac_txen */
1226 +- <1 RK_PD1 2 &pcfg_pull_none_12ma>,
1227 ++ <1 RK_PD1 2 &pcfg_pull_none_8ma>,
1228 + /* mac_clk */
1229 +- <1 RK_PC5 2 &pcfg_pull_none_2ma>,
1230 ++ <1 RK_PC5 2 &pcfg_pull_none_4ma>,
1231 + /* mac_rxdv */
1232 +- <1 RK_PC6 2 &pcfg_pull_none_2ma>,
1233 ++ <1 RK_PC6 2 &pcfg_pull_none_4ma>,
1234 + /* mac_mdc */
1235 +- <1 RK_PC7 2 &pcfg_pull_none_2ma>,
1236 ++ <1 RK_PC7 2 &pcfg_pull_none_4ma>,
1237 + /* mac_rxd1 */
1238 +- <1 RK_PB2 2 &pcfg_pull_none_2ma>,
1239 ++ <1 RK_PB2 2 &pcfg_pull_none_4ma>,
1240 + /* mac_rxd0 */
1241 +- <1 RK_PB3 2 &pcfg_pull_none_2ma>,
1242 ++ <1 RK_PB3 2 &pcfg_pull_none_4ma>,
1243 + /* mac_txd1 */
1244 +- <1 RK_PB0 2 &pcfg_pull_none_12ma>,
1245 ++ <1 RK_PB0 2 &pcfg_pull_none_8ma>,
1246 + /* mac_txd0 */
1247 +- <1 RK_PB1 2 &pcfg_pull_none_12ma>,
1248 ++ <1 RK_PB1 2 &pcfg_pull_none_8ma>,
1249 + /* mac_rxd3 */
1250 +- <1 RK_PB6 2 &pcfg_pull_none_2ma>,
1251 ++ <1 RK_PB6 2 &pcfg_pull_none_4ma>,
1252 + /* mac_rxd2 */
1253 +- <1 RK_PB7 2 &pcfg_pull_none_2ma>,
1254 ++ <1 RK_PB7 2 &pcfg_pull_none_4ma>,
1255 + /* mac_txd3 */
1256 +- <1 RK_PC0 2 &pcfg_pull_none_12ma>,
1257 ++ <1 RK_PC0 2 &pcfg_pull_none_8ma>,
1258 + /* mac_txd2 */
1259 +- <1 RK_PC1 2 &pcfg_pull_none_12ma>,
1260 ++ <1 RK_PC1 2 &pcfg_pull_none_8ma>,
1261 +
1262 + /* mac_txclk */
1263 +- <0 RK_PB0 1 &pcfg_pull_none>,
1264 ++ <0 RK_PB0 1 &pcfg_pull_none_8ma>,
1265 + /* mac_txen */
1266 +- <0 RK_PB4 1 &pcfg_pull_none>,
1267 ++ <0 RK_PB4 1 &pcfg_pull_none_8ma>,
1268 + /* mac_clk */
1269 +- <0 RK_PD0 1 &pcfg_pull_none>,
1270 ++ <0 RK_PD0 1 &pcfg_pull_none_4ma>,
1271 + /* mac_txd1 */
1272 +- <0 RK_PC0 1 &pcfg_pull_none>,
1273 ++ <0 RK_PC0 1 &pcfg_pull_none_8ma>,
1274 + /* mac_txd0 */
1275 +- <0 RK_PC1 1 &pcfg_pull_none>,
1276 ++ <0 RK_PC1 1 &pcfg_pull_none_8ma>,
1277 + /* mac_txd3 */
1278 +- <0 RK_PC7 1 &pcfg_pull_none>,
1279 ++ <0 RK_PC7 1 &pcfg_pull_none_8ma>,
1280 + /* mac_txd2 */
1281 +- <0 RK_PC6 1 &pcfg_pull_none>;
1282 ++ <0 RK_PC6 1 &pcfg_pull_none_8ma>;
1283 + };
1284 +
1285 + rmiim1_pins: rmiim1-pins {
1286 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
1287 +index 13a0a028df98..e5699d0d91e4 100644
1288 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
1289 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
1290 +@@ -101,6 +101,7 @@
1291 + sdio_pwrseq: sdio-pwrseq {
1292 + compatible = "mmc-pwrseq-simple";
1293 + reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
1294 ++ post-power-on-delay-ms = <10>;
1295 + };
1296 + };
1297 +
1298 +diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
1299 +index e3a375c4cb83..1b151442dac1 100644
1300 +--- a/arch/arm64/crypto/aes-ce-ccm-core.S
1301 ++++ b/arch/arm64/crypto/aes-ce-ccm-core.S
1302 +@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
1303 + beq 10f
1304 + ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
1305 + b 7b
1306 +-8: mov w7, w8
1307 ++8: cbz w8, 91f
1308 ++ mov w7, w8
1309 + add w8, w8, #16
1310 + 9: ext v1.16b, v1.16b, v1.16b, #1
1311 + adds w7, w7, #1
1312 + bne 9b
1313 +- eor v0.16b, v0.16b, v1.16b
1314 ++91: eor v0.16b, v0.16b, v1.16b
1315 + st1 {v0.16b}, [x0]
1316 + 10: str w8, [x3]
1317 + ret
1318 +diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
1319 +index 68b11aa690e4..986191e8c058 100644
1320 +--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
1321 ++++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
1322 +@@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
1323 + abytes -= added;
1324 + }
1325 +
1326 +- while (abytes > AES_BLOCK_SIZE) {
1327 ++ while (abytes >= AES_BLOCK_SIZE) {
1328 + __aes_arm64_encrypt(key->key_enc, mac, mac,
1329 + num_rounds(key));
1330 + crypto_xor(mac, in, AES_BLOCK_SIZE);
1331 +@@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
1332 + num_rounds(key));
1333 + crypto_xor(mac, in, abytes);
1334 + *macp = abytes;
1335 +- } else {
1336 +- *macp = 0;
1337 + }
1338 + }
1339 + }
1340 +diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
1341 +index e613a87f8b53..8432c8d0dea6 100644
1342 +--- a/arch/arm64/crypto/aes-neonbs-core.S
1343 ++++ b/arch/arm64/crypto/aes-neonbs-core.S
1344 +@@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
1345 +
1346 + 8: next_ctr v0
1347 + st1 {v0.16b}, [x24]
1348 +- cbz x23, 0f
1349 ++ cbz x23, .Lctr_done
1350 +
1351 + cond_yield_neon 98b
1352 + b 99b
1353 +
1354 +-0: frame_pop
1355 ++.Lctr_done:
1356 ++ frame_pop
1357 + ret
1358 +
1359 + /*
1360 + * If we are handling the tail of the input (x6 != NULL), return the
1361 + * final keystream block back to the caller.
1362 + */
1363 ++0: cbz x25, 8b
1364 ++ st1 {v0.16b}, [x25]
1365 ++ b 8b
1366 + 1: cbz x25, 8b
1367 + st1 {v1.16b}, [x25]
1368 + b 8b
1369 +diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
1370 +index b461d62023f2..567c24f3d224 100644
1371 +--- a/arch/arm64/crypto/crct10dif-ce-glue.c
1372 ++++ b/arch/arm64/crypto/crct10dif-ce-glue.c
1373 +@@ -39,26 +39,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
1374 + unsigned int length)
1375 + {
1376 + u16 *crc = shash_desc_ctx(desc);
1377 +- unsigned int l;
1378 +
1379 +- if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
1380 +- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
1381 +- ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
1382 +-
1383 +- *crc = crc_t10dif_generic(*crc, data, l);
1384 +-
1385 +- length -= l;
1386 +- data += l;
1387 +- }
1388 +-
1389 +- if (length > 0) {
1390 +- if (may_use_simd()) {
1391 +- kernel_neon_begin();
1392 +- *crc = crc_t10dif_pmull(*crc, data, length);
1393 +- kernel_neon_end();
1394 +- } else {
1395 +- *crc = crc_t10dif_generic(*crc, data, length);
1396 +- }
1397 ++ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
1398 ++ kernel_neon_begin();
1399 ++ *crc = crc_t10dif_pmull(*crc, data, length);
1400 ++ kernel_neon_end();
1401 ++ } else {
1402 ++ *crc = crc_t10dif_generic(*crc, data, length);
1403 + }
1404 +
1405 + return 0;
1406 +diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
1407 +index cccb83ad7fa8..e1d95f08f8e1 100644
1408 +--- a/arch/arm64/include/asm/futex.h
1409 ++++ b/arch/arm64/include/asm/futex.h
1410 +@@ -30,8 +30,8 @@ do { \
1411 + " prfm pstl1strm, %2\n" \
1412 + "1: ldxr %w1, %2\n" \
1413 + insn "\n" \
1414 +-"2: stlxr %w3, %w0, %2\n" \
1415 +-" cbnz %w3, 1b\n" \
1416 ++"2: stlxr %w0, %w3, %2\n" \
1417 ++" cbnz %w0, 1b\n" \
1418 + " dmb ish\n" \
1419 + "3:\n" \
1420 + " .pushsection .fixup,\"ax\"\n" \
1421 +@@ -50,30 +50,30 @@ do { \
1422 + static inline int
1423 + arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
1424 + {
1425 +- int oldval = 0, ret, tmp;
1426 ++ int oldval, ret, tmp;
1427 + u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
1428 +
1429 + pagefault_disable();
1430 +
1431 + switch (op) {
1432 + case FUTEX_OP_SET:
1433 +- __futex_atomic_op("mov %w0, %w4",
1434 ++ __futex_atomic_op("mov %w3, %w4",
1435 + ret, oldval, uaddr, tmp, oparg);
1436 + break;
1437 + case FUTEX_OP_ADD:
1438 +- __futex_atomic_op("add %w0, %w1, %w4",
1439 ++ __futex_atomic_op("add %w3, %w1, %w4",
1440 + ret, oldval, uaddr, tmp, oparg);
1441 + break;
1442 + case FUTEX_OP_OR:
1443 +- __futex_atomic_op("orr %w0, %w1, %w4",
1444 ++ __futex_atomic_op("orr %w3, %w1, %w4",
1445 + ret, oldval, uaddr, tmp, oparg);
1446 + break;
1447 + case FUTEX_OP_ANDN:
1448 +- __futex_atomic_op("and %w0, %w1, %w4",
1449 ++ __futex_atomic_op("and %w3, %w1, %w4",
1450 + ret, oldval, uaddr, tmp, ~oparg);
1451 + break;
1452 + case FUTEX_OP_XOR:
1453 +- __futex_atomic_op("eor %w0, %w1, %w4",
1454 ++ __futex_atomic_op("eor %w3, %w1, %w4",
1455 + ret, oldval, uaddr, tmp, oparg);
1456 + break;
1457 + default:
1458 +diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
1459 +index 1473fc2f7ab7..89691c86640a 100644
1460 +--- a/arch/arm64/include/asm/hardirq.h
1461 ++++ b/arch/arm64/include/asm/hardirq.h
1462 +@@ -17,8 +17,12 @@
1463 + #define __ASM_HARDIRQ_H
1464 +
1465 + #include <linux/cache.h>
1466 ++#include <linux/percpu.h>
1467 + #include <linux/threads.h>
1468 ++#include <asm/barrier.h>
1469 + #include <asm/irq.h>
1470 ++#include <asm/kvm_arm.h>
1471 ++#include <asm/sysreg.h>
1472 +
1473 + #define NR_IPI 7
1474 +
1475 +@@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
1476 +
1477 + #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
1478 +
1479 ++struct nmi_ctx {
1480 ++ u64 hcr;
1481 ++};
1482 ++
1483 ++DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
1484 ++
1485 ++#define arch_nmi_enter() \
1486 ++ do { \
1487 ++ if (is_kernel_in_hyp_mode()) { \
1488 ++ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
1489 ++ nmi_ctx->hcr = read_sysreg(hcr_el2); \
1490 ++ if (!(nmi_ctx->hcr & HCR_TGE)) { \
1491 ++ write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
1492 ++ isb(); \
1493 ++ } \
1494 ++ } \
1495 ++ } while (0)
1496 ++
1497 ++#define arch_nmi_exit() \
1498 ++ do { \
1499 ++ if (is_kernel_in_hyp_mode()) { \
1500 ++ struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
1501 ++ if (!(nmi_ctx->hcr & HCR_TGE)) \
1502 ++ write_sysreg(nmi_ctx->hcr, hcr_el2); \
1503 ++ } \
1504 ++ } while (0)
1505 ++
1506 + static inline void ack_bad_irq(unsigned int irq)
1507 + {
1508 + extern unsigned long irq_err_count;
1509 +diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
1510 +index 905e1bb0e7bd..cd9f4e9d04d3 100644
1511 +--- a/arch/arm64/include/asm/module.h
1512 ++++ b/arch/arm64/include/asm/module.h
1513 +@@ -73,4 +73,9 @@ static inline bool is_forbidden_offset_for_adrp(void *place)
1514 + struct plt_entry get_plt_entry(u64 dst, void *pc);
1515 + bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
1516 +
1517 ++static inline bool plt_entry_is_initialized(const struct plt_entry *e)
1518 ++{
1519 ++ return e->adrp || e->add || e->br;
1520 ++}
1521 ++
1522 + #endif /* __ASM_MODULE_H */
1523 +diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
1524 +index 8e4431a8821f..07b298120182 100644
1525 +--- a/arch/arm64/kernel/ftrace.c
1526 ++++ b/arch/arm64/kernel/ftrace.c
1527 +@@ -107,8 +107,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
1528 + trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
1529 + if (!plt_entries_equal(mod->arch.ftrace_trampoline,
1530 + &trampoline)) {
1531 +- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
1532 +- &(struct plt_entry){})) {
1533 ++ if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
1534 + pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
1535 + return -EINVAL;
1536 + }
1537 +diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
1538 +index 780a12f59a8f..92fa81798fb9 100644
1539 +--- a/arch/arm64/kernel/irq.c
1540 ++++ b/arch/arm64/kernel/irq.c
1541 +@@ -33,6 +33,9 @@
1542 +
1543 + unsigned long irq_err_count;
1544 +
1545 ++/* Only access this in an NMI enter/exit */
1546 ++DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);
1547 ++
1548 + DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
1549 +
1550 + int arch_show_interrupts(struct seq_file *p, int prec)
1551 +diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
1552 +index ce46c4cdf368..691854b77c7f 100644
1553 +--- a/arch/arm64/kernel/kgdb.c
1554 ++++ b/arch/arm64/kernel/kgdb.c
1555 +@@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
1556 +
1557 + static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
1558 + {
1559 ++ if (user_mode(regs))
1560 ++ return DBG_HOOK_ERROR;
1561 ++
1562 + kgdb_handle_exception(1, SIGTRAP, 0, regs);
1563 +- return 0;
1564 ++ return DBG_HOOK_HANDLED;
1565 + }
1566 + NOKPROBE_SYMBOL(kgdb_brk_fn)
1567 +
1568 + static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
1569 + {
1570 ++ if (user_mode(regs))
1571 ++ return DBG_HOOK_ERROR;
1572 ++
1573 + compiled_break = 1;
1574 + kgdb_handle_exception(1, SIGTRAP, 0, regs);
1575 +
1576 +- return 0;
1577 ++ return DBG_HOOK_HANDLED;
1578 + }
1579 + NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
1580 +
1581 + static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
1582 + {
1583 +- if (!kgdb_single_step)
1584 ++ if (user_mode(regs) || !kgdb_single_step)
1585 + return DBG_HOOK_ERROR;
1586 +
1587 + kgdb_handle_exception(1, SIGTRAP, 0, regs);
1588 +- return 0;
1589 ++ return DBG_HOOK_HANDLED;
1590 + }
1591 + NOKPROBE_SYMBOL(kgdb_step_brk_fn);
1592 +
1593 +diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
1594 +index f17afb99890c..7fb6f3aa5ceb 100644
1595 +--- a/arch/arm64/kernel/probes/kprobes.c
1596 ++++ b/arch/arm64/kernel/probes/kprobes.c
1597 +@@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
1598 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
1599 + int retval;
1600 +
1601 ++ if (user_mode(regs))
1602 ++ return DBG_HOOK_ERROR;
1603 ++
1604 + /* return error if this is not our step */
1605 + retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
1606 +
1607 +@@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
1608 + int __kprobes
1609 + kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
1610 + {
1611 ++ if (user_mode(regs))
1612 ++ return DBG_HOOK_ERROR;
1613 ++
1614 + kprobe_handler(regs);
1615 + return DBG_HOOK_HANDLED;
1616 + }
1617 +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
1618 +index 4e2fb877f8d5..92bfeb3e8d7c 100644
1619 +--- a/arch/arm64/kernel/traps.c
1620 ++++ b/arch/arm64/kernel/traps.c
1621 +@@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
1622 + void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
1623 + {
1624 + struct stackframe frame;
1625 +- int skip;
1626 ++ int skip = 0;
1627 +
1628 + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
1629 +
1630 ++ if (regs) {
1631 ++ if (user_mode(regs))
1632 ++ return;
1633 ++ skip = 1;
1634 ++ }
1635 ++
1636 + if (!tsk)
1637 + tsk = current;
1638 +
1639 +@@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
1640 + frame.graph = 0;
1641 + #endif
1642 +
1643 +- skip = !!regs;
1644 + printk("Call trace:\n");
1645 + do {
1646 + /* skip until specified stack frame */
1647 +@@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
1648 + return ret;
1649 +
1650 + print_modules();
1651 +- __show_regs(regs);
1652 + pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
1653 + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
1654 + end_of_stack(tsk));
1655 ++ show_regs(regs);
1656 +
1657 +- if (!user_mode(regs)) {
1658 +- dump_backtrace(regs, tsk);
1659 ++ if (!user_mode(regs))
1660 + dump_instr(KERN_EMERG, regs);
1661 +- }
1662 +
1663 + return ret;
1664 + }
1665 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
1666 +index c936aa40c3f4..b6dac3a68508 100644
1667 +--- a/arch/arm64/kvm/sys_regs.c
1668 ++++ b/arch/arm64/kvm/sys_regs.c
1669 +@@ -1476,7 +1476,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
1670 +
1671 + { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
1672 + { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
1673 +- { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
1674 ++ { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
1675 + };
1676 +
1677 + static bool trap_dbgidr(struct kvm_vcpu *vcpu,
1678 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
1679 +index efb7b2cbead5..ef46925096f0 100644
1680 +--- a/arch/arm64/mm/fault.c
1681 ++++ b/arch/arm64/mm/fault.c
1682 +@@ -824,11 +824,12 @@ void __init hook_debug_fault_code(int nr,
1683 + debug_fault_info[nr].name = name;
1684 + }
1685 +
1686 +-asmlinkage int __exception do_debug_exception(unsigned long addr,
1687 ++asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
1688 + unsigned int esr,
1689 + struct pt_regs *regs)
1690 + {
1691 + const struct fault_info *inf = esr_to_debug_fault_info(esr);
1692 ++ unsigned long pc = instruction_pointer(regs);
1693 + int rv;
1694 +
1695 + /*
1696 +@@ -838,14 +839,14 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
1697 + if (interrupts_enabled(regs))
1698 + trace_hardirqs_off();
1699 +
1700 +- if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
1701 ++ if (user_mode(regs) && !is_ttbr0_addr(pc))
1702 + arm64_apply_bp_hardening();
1703 +
1704 +- if (!inf->fn(addr, esr, regs)) {
1705 ++ if (!inf->fn(addr_if_watchpoint, esr, regs)) {
1706 + rv = 1;
1707 + } else {
1708 + arm64_notify_die(inf->name, regs,
1709 +- inf->sig, inf->code, (void __user *)addr, esr);
1710 ++ inf->sig, inf->code, (void __user *)pc, esr);
1711 + rv = 0;
1712 + }
1713 +
1714 +diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
1715 +index d637445737b7..9a9cd81e66c1 100644
1716 +--- a/arch/csky/include/asm/syscall.h
1717 ++++ b/arch/csky/include/asm/syscall.h
1718 +@@ -49,10 +49,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
1719 + if (i == 0) {
1720 + args[0] = regs->orig_a0;
1721 + args++;
1722 +- i++;
1723 + n--;
1724 ++ } else {
1725 ++ i--;
1726 + }
1727 +- memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
1728 ++ memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
1729 + }
1730 +
1731 + static inline void
1732 +@@ -63,10 +64,11 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
1733 + if (i == 0) {
1734 + regs->orig_a0 = args[0];
1735 + args++;
1736 +- i++;
1737 + n--;
1738 ++ } else {
1739 ++ i--;
1740 + }
1741 +- memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
1742 ++ memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
1743 + }
1744 +
1745 + static inline int
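
The csky change fixes a classic pointer-scaling bug: &regs->a1 already has pointer type, so arithmetic on it is scaled by the element size automatically, and the old &regs->a1 + i * sizeof(regs->a1) stepped sizeof(unsigned long) times too far. The index handling changes with it: argument 0 is orig_a0, so once past it the register index is one less than the argument index, hence i-- instead of the old i++. A standalone illustration (hypothetical register array):

    unsigned long a_regs[6];                 /* stand-in for a1.. */
    unsigned long *wrong, *right;

    wrong = a_regs + 1 * sizeof(a_regs[0]);  /* == &a_regs[4] on 32-bit! */
    right = a_regs + 1;                      /* == &a_regs[1]            */
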
1746 +diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
1747 +index f801f3708a89..ba0f26cfad61 100644
1748 +--- a/arch/h8300/Makefile
1749 ++++ b/arch/h8300/Makefile
1750 +@@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y)
1751 + CHECKFLAGS += -msize-long
1752 +
1753 + ifeq ($(CROSS_COMPILE),)
1754 +-CROSS_COMPILE := h8300-unknown-linux-
1755 ++CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
1756 + endif
1757 +
1758 + core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
1759 +diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
1760 +index f00ca53f8c14..482513b9af2c 100644
1761 +--- a/arch/m68k/Makefile
1762 ++++ b/arch/m68k/Makefile
1763 +@@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
1764 + cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
1765 +
1766 + KBUILD_AFLAGS += $(cpuflags-y)
1767 +-KBUILD_CFLAGS += $(cpuflags-y) -pipe
1768 ++KBUILD_CFLAGS += $(cpuflags-y)
1769 ++
1770 ++KBUILD_CFLAGS += -pipe -ffreestanding
1771 ++
1772 + ifdef CONFIG_MMU
1773 + # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
1774 + KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
1775 +diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
1776 +index e77672539e8e..e4456e450f94 100644
1777 +--- a/arch/mips/include/asm/jump_label.h
1778 ++++ b/arch/mips/include/asm/jump_label.h
1779 +@@ -21,15 +21,15 @@
1780 + #endif
1781 +
1782 + #ifdef CONFIG_CPU_MICROMIPS
1783 +-#define NOP_INSN "nop32"
1784 ++#define B_INSN "b32"
1785 + #else
1786 +-#define NOP_INSN "nop"
1787 ++#define B_INSN "b"
1788 + #endif
1789 +
1790 + static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
1791 + {
1792 +- asm_volatile_goto("1:\t" NOP_INSN "\n\t"
1793 +- "nop\n\t"
1794 ++ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
1795 ++ "2:\tnop\n\t"
1796 + ".pushsection __jump_table, \"aw\"\n\t"
1797 + WORD_INSN " 1b, %l[l_yes], %0\n\t"
1798 + ".popsection\n\t"
1799 +diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
1800 +index d2abd98471e8..41204a49cf95 100644
1801 +--- a/arch/mips/include/asm/kvm_host.h
1802 ++++ b/arch/mips/include/asm/kvm_host.h
1803 +@@ -1134,7 +1134,7 @@ static inline void kvm_arch_hardware_unsetup(void) {}
1804 + static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1805 + static inline void kvm_arch_free_memslot(struct kvm *kvm,
1806 + struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
1807 +-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
1808 ++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1809 + static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1810 + static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
1811 + static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
1812 +diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
1813 +index ba150c755fcc..85b6c60f285d 100644
1814 +--- a/arch/mips/kernel/irq.c
1815 ++++ b/arch/mips/kernel/irq.c
1816 +@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
1817 + void __init init_IRQ(void)
1818 + {
1819 + int i;
1820 ++ unsigned int order = get_order(IRQ_STACK_SIZE);
1821 +
1822 + for (i = 0; i < NR_IRQS; i++)
1823 + irq_set_noprobe(i);
1824 +@@ -62,8 +63,7 @@ void __init init_IRQ(void)
1825 + arch_init_irq();
1826 +
1827 + for_each_possible_cpu(i) {
1828 +- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
1829 +- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
1830 ++ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
1831 +
1832 + irq_stack[i] = s;
1833 + pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
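
__get_free_pages() takes an allocation order (log2 of the page count), not a page count, so passing IRQ_STACK_SIZE / PAGE_SIZE over-allocated exponentially. A worked example, assuming 4 KiB pages and a hypothetical 16 KiB IRQ stack:

    #include <linux/gfp.h>     /* __get_free_pages() */
    #include <asm/page.h>      /* PAGE_SIZE, get_order() */

    unsigned int pages = (16 * 1024) / PAGE_SIZE;  /* 4 pages          */
    unsigned int order = get_order(16 * 1024);     /* 2, i.e. 2^2 = 4  */

    /* The old code passed 'pages' as the order: 2^4 = 16 pages = 64 KiB. */
    void *stack = (void *)__get_free_pages(GFP_KERNEL, order);
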
1834 +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
1835 +index cb7e9ed7a453..33ee0d18fb0a 100644
1836 +--- a/arch/mips/kernel/vmlinux.lds.S
1837 ++++ b/arch/mips/kernel/vmlinux.lds.S
1838 +@@ -140,6 +140,13 @@ SECTIONS
1839 + PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
1840 + #endif
1841 +
1842 ++#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
1843 ++ .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
1844 ++ *(.appended_dtb)
1845 ++ KEEP(*(.appended_dtb))
1846 ++ }
1847 ++#endif
1848 ++
1849 + #ifdef CONFIG_RELOCATABLE
1850 + . = ALIGN(4);
1851 +
1852 +@@ -164,11 +171,6 @@ SECTIONS
1853 + __appended_dtb = .;
1854 + /* leave space for appended DTB */
1855 + . += 0x100000;
1856 +-#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
1857 +- .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
1858 +- *(.appended_dtb)
1859 +- KEEP(*(.appended_dtb))
1860 +- }
1861 + #endif
1862 + /*
1863 + * Align to 64K in attempt to eliminate holes before the
1864 +diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
1865 +index 9e33e45aa17c..b213cecb8e3a 100644
1866 +--- a/arch/mips/loongson64/lemote-2f/irq.c
1867 ++++ b/arch/mips/loongson64/lemote-2f/irq.c
1868 +@@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
1869 + static struct irqaction cascade_irqaction = {
1870 + .handler = no_action,
1871 + .name = "cascade",
1872 +- .flags = IRQF_NO_THREAD,
1873 ++ .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
1874 + };
1875 +
1876 + void __init mach_init_irq(void)
1877 +diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
1878 +index 2a27b275ab09..9ff033d261ab 100644
1879 +--- a/arch/parisc/include/asm/ptrace.h
1880 ++++ b/arch/parisc/include/asm/ptrace.h
1881 +@@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
1882 +
1883 + static inline unsigned long regs_return_value(struct pt_regs *regs)
1884 + {
1885 +- return regs->gr[20];
1886 ++ return regs->gr[28];
1887 + }
1888 +
1889 + static inline void instruction_pointer_set(struct pt_regs *regs,
1890 + unsigned long val)
1891 + {
1892 +- regs->iaoq[0] = val;
1893 ++ regs->iaoq[0] = val;
1894 ++ regs->iaoq[1] = val + 4;
1895 + }
1896 +
1897 + /* Query offset/name of register from its name/offset */
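
PA-RISC keeps a two-entry instruction address offset queue: iaoq[0] is the instruction being executed and iaoq[1] the next one. Updating only the front entry leaves the back entry pointing just past the old PC, so a redirected task would execute one stray instruction before reaching the new target; hence the helper now programs both entries. (gr[28] is the return-value register, ret0, in the PA-RISC calling convention, which is why regs_return_value() reads it rather than gr[20].) A minimal usage sketch, e.g. from a hypothetical probe handler:

    instruction_pointer_set(regs, new_pc);
    /* afterwards: regs->iaoq[0] == new_pc, regs->iaoq[1] == new_pc + 4 */
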
1898 +diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
1899 +index eb39e7e380d7..841db71958cd 100644
1900 +--- a/arch/parisc/kernel/process.c
1901 ++++ b/arch/parisc/kernel/process.c
1902 +@@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
1903 +
1904 + static int __init parisc_idle_init(void)
1905 + {
1906 +- const char *marker;
1907 +-
1908 +- /* check QEMU/SeaBIOS marker in PAGE0 */
1909 +- marker = (char *) &PAGE0->pad0;
1910 +- running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
1911 +-
1912 + if (!running_on_qemu)
1913 + cpu_idle_poll_ctrl(1);
1914 +
1915 +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
1916 +index f2cf86ac279b..25946624ce6a 100644
1917 +--- a/arch/parisc/kernel/setup.c
1918 ++++ b/arch/parisc/kernel/setup.c
1919 +@@ -396,6 +396,9 @@ void __init start_parisc(void)
1920 + int ret, cpunum;
1921 + struct pdc_coproc_cfg coproc_cfg;
1922 +
1923 ++ /* check QEMU/SeaBIOS marker in PAGE0 */
1924 ++ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
1925 ++
1926 + cpunum = smp_processor_id();
1927 +
1928 + init_cpu_topology();
1929 +diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
1930 +index 5b0177733994..46130ef4941c 100644
1931 +--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
1932 ++++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
1933 +@@ -35,6 +35,14 @@ static inline int hstate_get_psize(struct hstate *hstate)
1934 + #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1935 + static inline bool gigantic_page_supported(void)
1936 + {
1937 ++ /*
1938 ++	 * We use gigantic page reservation with hypervisor assist in some cases.
1939 ++	 * We cannot use runtime allocation of gigantic pages on those platforms,
1940 ++	 * i.e. hash translation mode LPARs.
1941 ++ */
1942 ++ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
1943 ++ return false;
1944 ++
1945 + return true;
1946 + }
1947 + #endif
1948 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
1949 +index 0f98f00da2ea..19693b8add93 100644
1950 +--- a/arch/powerpc/include/asm/kvm_host.h
1951 ++++ b/arch/powerpc/include/asm/kvm_host.h
1952 +@@ -837,7 +837,7 @@ struct kvm_vcpu_arch {
1953 + static inline void kvm_arch_hardware_disable(void) {}
1954 + static inline void kvm_arch_hardware_unsetup(void) {}
1955 + static inline void kvm_arch_sync_events(struct kvm *kvm) {}
1956 +-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
1957 ++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
1958 + static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
1959 + static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
1960 + static inline void kvm_arch_exit(void) {}
1961 +diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
1962 +index 2f3ff7a27881..d85fcfea32ca 100644
1963 +--- a/arch/powerpc/include/asm/powernv.h
1964 ++++ b/arch/powerpc/include/asm/powernv.h
1965 +@@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
1966 + unsigned long *flags, unsigned long *status,
1967 + int count);
1968 +
1969 ++void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val);
1970 ++
1971 + void pnv_tm_init(void);
1972 + #else
1973 + static inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { }
1974 +diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
1975 +index 19a8834e0398..0690a306f6ca 100644
1976 +--- a/arch/powerpc/include/asm/ppc-opcode.h
1977 ++++ b/arch/powerpc/include/asm/ppc-opcode.h
1978 +@@ -302,6 +302,7 @@
1979 + /* Misc instructions for BPF compiler */
1980 + #define PPC_INST_LBZ 0x88000000
1981 + #define PPC_INST_LD 0xe8000000
1982 ++#define PPC_INST_LDX 0x7c00002a
1983 + #define PPC_INST_LHZ 0xa0000000
1984 + #define PPC_INST_LWZ 0x80000000
1985 + #define PPC_INST_LHBRX 0x7c00062c
1986 +@@ -309,6 +310,7 @@
1987 + #define PPC_INST_STB 0x98000000
1988 + #define PPC_INST_STH 0xb0000000
1989 + #define PPC_INST_STD 0xf8000000
1990 ++#define PPC_INST_STDX 0x7c00012a
1991 + #define PPC_INST_STDU 0xf8000001
1992 + #define PPC_INST_STW 0x90000000
1993 + #define PPC_INST_STWU 0x94000000
1994 +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
1995 +index a4a718dbfec6..f85e2b01c3df 100644
1996 +--- a/arch/powerpc/include/asm/topology.h
1997 ++++ b/arch/powerpc/include/asm/topology.h
1998 +@@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {}
1999 + #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
2000 + #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
2001 + #define topology_core_id(cpu) (cpu_to_core_id(cpu))
2002 ++
2003 ++int dlpar_cpu_readd(int cpu);
2004 + #endif
2005 + #endif
2006 +
2007 +diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
2008 +index 1afe90ade595..bbc06bd72b1f 100644
2009 +--- a/arch/powerpc/include/asm/vdso_datapage.h
2010 ++++ b/arch/powerpc/include/asm/vdso_datapage.h
2011 +@@ -82,10 +82,10 @@ struct vdso_data {
2012 + __u32 icache_block_size; /* L1 i-cache block size */
2013 + __u32 dcache_log_block_size; /* L1 d-cache log block size */
2014 + __u32 icache_log_block_size; /* L1 i-cache log block size */
2015 +- __s32 wtom_clock_sec; /* Wall to monotonic clock */
2016 +- __s32 wtom_clock_nsec;
2017 +- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
2018 +- __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
2019 ++ __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
2020 ++ __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
2021 ++ __s64 wtom_clock_sec; /* Wall to monotonic clock sec */
2022 ++ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
2023 + __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
2024 + __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
2025 + };
2026 +diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
2027 +index 0768dfd8a64e..fdd528cdb2ee 100644
2028 +--- a/arch/powerpc/kernel/entry_32.S
2029 ++++ b/arch/powerpc/kernel/entry_32.S
2030 +@@ -745,6 +745,9 @@ fast_exception_return:
2031 + mtcr r10
2032 + lwz r10,_LINK(r11)
2033 + mtlr r10
2034 ++	/* Clear the exception_marker on the stack to avoid confusing the stack unwinder */
2035 ++ li r10, 0
2036 ++ stw r10, 8(r11)
2037 + REST_GPR(10, r11)
2038 + #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
2039 + mtspr SPRN_NRI, r0
2040 +@@ -982,6 +985,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
2041 + mtcrf 0xFF,r10
2042 + mtlr r11
2043 +
2044 ++	/* Clear the exception_marker on the stack to avoid confusing the stack unwinder */
2045 ++ li r10, 0
2046 ++ stw r10, 8(r1)
2047 + /*
2048 + * Once we put values in SRR0 and SRR1, we are in a state
2049 + * where exceptions are not recoverable, since taking an
2050 +@@ -1021,6 +1027,9 @@ exc_exit_restart_end:
2051 + mtlr r11
2052 + lwz r10,_CCR(r1)
2053 + mtcrf 0xff,r10
2054 ++	/* Clear the exception_marker on the stack to avoid confusing the stack unwinder */
2055 ++ li r10, 0
2056 ++ stw r10, 8(r1)
2057 + REST_2GPRS(9, r1)
2058 + .globl exc_exit_restart
2059 + exc_exit_restart:
2060 +diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
2061 +index 435927f549c4..a2c168b395d2 100644
2062 +--- a/arch/powerpc/kernel/entry_64.S
2063 ++++ b/arch/powerpc/kernel/entry_64.S
2064 +@@ -1002,6 +1002,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2065 + ld r2,_NIP(r1)
2066 + mtspr SPRN_SRR0,r2
2067 +
2068 ++ /*
2069 ++ * Leaving a stale exception_marker on the stack can confuse
2070 ++ * the reliable stack unwinder later on. Clear it.
2071 ++ */
2072 ++ li r2,0
2073 ++ std r2,STACK_FRAME_OVERHEAD-16(r1)
2074 ++
2075 + ld r0,GPR0(r1)
2076 + ld r2,GPR2(r1)
2077 + ld r3,GPR3(r1)
2078 +diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2079 +index afb638778f44..447defdd4503 100644
2080 +--- a/arch/powerpc/kernel/exceptions-64e.S
2081 ++++ b/arch/powerpc/kernel/exceptions-64e.S
2082 +@@ -349,6 +349,7 @@ ret_from_mc_except:
2083 + #define GEN_BTB_FLUSH
2084 + #define CRIT_BTB_FLUSH
2085 + #define DBG_BTB_FLUSH
2086 ++#define MC_BTB_FLUSH
2087 + #define GDBELL_BTB_FLUSH
2088 + #endif
2089 +
2090 +diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2091 +index 9e253ce27e08..4fee6c9887db 100644
2092 +--- a/arch/powerpc/kernel/exceptions-64s.S
2093 ++++ b/arch/powerpc/kernel/exceptions-64s.S
2094 +@@ -612,11 +612,17 @@ EXC_COMMON_BEGIN(data_access_slb_common)
2095 + ld r4,PACA_EXSLB+EX_DAR(r13)
2096 + std r4,_DAR(r1)
2097 + addi r3,r1,STACK_FRAME_OVERHEAD
2098 ++BEGIN_MMU_FTR_SECTION
2099 ++ /* HPT case, do SLB fault */
2100 + bl do_slb_fault
2101 + cmpdi r3,0
2102 + bne- 1f
2103 + b fast_exception_return
2104 + 1: /* Error case */
2105 ++MMU_FTR_SECTION_ELSE
2106 ++ /* Radix case, access is outside page table range */
2107 ++ li r3,-EFAULT
2108 ++ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
2109 + std r3,RESULT(r1)
2110 + bl save_nvgprs
2111 + RECONCILE_IRQ_STATE(r10, r11)
2112 +@@ -661,11 +667,17 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
2113 + EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
2114 + ld r4,_NIP(r1)
2115 + addi r3,r1,STACK_FRAME_OVERHEAD
2116 ++BEGIN_MMU_FTR_SECTION
2117 ++ /* HPT case, do SLB fault */
2118 + bl do_slb_fault
2119 + cmpdi r3,0
2120 + bne- 1f
2121 + b fast_exception_return
2122 + 1: /* Error case */
2123 ++MMU_FTR_SECTION_ELSE
2124 ++ /* Radix case, access is outside page table range */
2125 ++ li r3,-EFAULT
2126 ++ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
2127 + std r3,RESULT(r1)
2128 + bl save_nvgprs
2129 + RECONCILE_IRQ_STATE(r10, r11)
2130 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2131 +index ce393df243aa..71bad4b6f80d 100644
2132 +--- a/arch/powerpc/kernel/process.c
2133 ++++ b/arch/powerpc/kernel/process.c
2134 +@@ -176,7 +176,7 @@ static void __giveup_fpu(struct task_struct *tsk)
2135 +
2136 + save_fpu(tsk);
2137 + msr = tsk->thread.regs->msr;
2138 +- msr &= ~MSR_FP;
2139 ++ msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
2140 + #ifdef CONFIG_VSX
2141 + if (cpu_has_feature(CPU_FTR_VSX))
2142 + msr &= ~MSR_VSX;
2143 +diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
2144 +index cdd5d1d3ae41..d9ac7d94656e 100644
2145 +--- a/arch/powerpc/kernel/ptrace.c
2146 ++++ b/arch/powerpc/kernel/ptrace.c
2147 +@@ -33,6 +33,7 @@
2148 + #include <linux/hw_breakpoint.h>
2149 + #include <linux/perf_event.h>
2150 + #include <linux/context_tracking.h>
2151 ++#include <linux/nospec.h>
2152 +
2153 + #include <linux/uaccess.h>
2154 + #include <linux/pkeys.h>
2155 +@@ -274,6 +275,8 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
2156 + */
2157 + int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
2158 + {
2159 ++ unsigned int regs_max;
2160 ++
2161 + if ((task->thread.regs == NULL) || !data)
2162 + return -EIO;
2163 +
2164 +@@ -297,7 +300,9 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
2165 + }
2166 + #endif
2167 +
2168 +- if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) {
2169 ++ regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
2170 ++ if (regno < regs_max) {
2171 ++ regno = array_index_nospec(regno, regs_max);
2172 + *data = ((unsigned long *)task->thread.regs)[regno];
2173 + return 0;
2174 + }
2175 +@@ -321,6 +326,7 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
2176 + return set_user_dscr(task, data);
2177 +
2178 + if (regno <= PT_MAX_PUT_REG) {
2179 ++ regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
2180 + ((unsigned long *)task->thread.regs)[regno] = data;
2181 + return 0;
2182 + }
2183 +@@ -561,6 +567,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
2184 + /*
2185 + * Copy out only the low-order word of vrsave.
2186 + */
2187 ++ int start, end;
2188 + union {
2189 + elf_vrreg_t reg;
2190 + u32 word;
2191 +@@ -569,8 +576,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
2192 +
2193 + vrsave.word = target->thread.vrsave;
2194 +
2195 ++ start = 33 * sizeof(vector128);
2196 ++ end = start + sizeof(vrsave);
2197 + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
2198 +- 33 * sizeof(vector128), -1);
2199 ++ start, end);
2200 + }
2201 +
2202 + return ret;
2203 +@@ -608,6 +617,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
2204 + /*
2205 + * We use only the first word of vrsave.
2206 + */
2207 ++ int start, end;
2208 + union {
2209 + elf_vrreg_t reg;
2210 + u32 word;
2211 +@@ -616,8 +626,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
2212 +
2213 + vrsave.word = target->thread.vrsave;
2214 +
2215 ++ start = 33 * sizeof(vector128);
2216 ++ end = start + sizeof(vrsave);
2217 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
2218 +- 33 * sizeof(vector128), -1);
2219 ++ start, end);
2220 + if (!ret)
2221 + target->thread.vrsave = vrsave.word;
2222 + }
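
The ptrace hunks are Spectre-v1 hardening: a bounds check alone does not stop the CPU from speculatively indexing past the array before the branch resolves, so the validated index is additionally clamped with array_index_nospec() from <linux/nospec.h>. The pattern, sketched standalone:

    #include <linux/nospec.h>

    /* Minimal sketch of the pattern used in ptrace_get_reg() above. */
    static unsigned long read_reg(unsigned long *regs, unsigned int regno,
                                  unsigned int regs_max)
    {
            if (regno >= regs_max)
                    return 0;
            /* index stays within [0, regs_max) even under misspeculation */
            regno = array_index_nospec(regno, regs_max);
            return regs[regno];
    }

The vr_get()/vr_set() hunks are a related tightening: user_regset_copyout()/copyin() get an explicit end offset for the vrsave word instead of the open-ended -1.
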
2223 +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
2224 +index 9b8631533e02..b33bafb8fcea 100644
2225 +--- a/arch/powerpc/kernel/security.c
2226 ++++ b/arch/powerpc/kernel/security.c
2227 +@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
2228 + bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
2229 + ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
2230 +
2231 +- if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
2232 +- bool comma = false;
2233 ++ if (bcs || ccd) {
2234 + seq_buf_printf(&s, "Mitigation: ");
2235 +
2236 +- if (bcs) {
2237 ++ if (bcs)
2238 + seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
2239 +- comma = true;
2240 +- }
2241 +
2242 +- if (ccd) {
2243 +- if (comma)
2244 +- seq_buf_printf(&s, ", ");
2245 +- seq_buf_printf(&s, "Indirect branch cache disabled");
2246 +- comma = true;
2247 +- }
2248 +-
2249 +- if (comma)
2250 ++ if (bcs && ccd)
2251 + seq_buf_printf(&s, ", ");
2252 +
2253 +- seq_buf_printf(&s, "Software count cache flush");
2254 ++ if (ccd)
2255 ++ seq_buf_printf(&s, "Indirect branch cache disabled");
2256 ++ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
2257 ++ seq_buf_printf(&s, "Mitigation: Software count cache flush");
2258 +
2259 + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
2260 +- seq_buf_printf(&s, "(hardware accelerated)");
2261 ++ seq_buf_printf(&s, " (hardware accelerated)");
2262 + } else if (btb_flush_enabled) {
2263 + seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
2264 + } else {
2265 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
2266 +index 3f15edf25a0d..6e521a3f67ca 100644
2267 +--- a/arch/powerpc/kernel/smp.c
2268 ++++ b/arch/powerpc/kernel/smp.c
2269 +@@ -358,13 +358,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
2270 + * NMI IPIs may not be recoverable, so should not be used as ongoing part of
2271 + * a running system. They can be used for crash, debug, halt/reboot, etc.
2272 + *
2273 +- * NMI IPIs are globally single threaded. No more than one in progress at
2274 +- * any time.
2275 +- *
2276 + * The IPI call waits with interrupts disabled until all targets enter the
2277 +- * NMI handler, then the call returns.
2278 ++ * NMI handler, then returns. Subsequent IPIs can be issued before targets
2279 ++ * have returned from their handlers, so there is no guarantee about
2280 ++ * concurrency or re-entrancy.
2281 + *
2282 +- * No new NMI can be initiated until targets exit the handler.
2283 ++ * A new NMI can be issued before all targets exit the handler.
2284 + *
2285 + * The IPI call may time out without all targets entering the NMI handler.
2286 + * In that case, there is some logic to recover (and ignore subsequent
2287 +@@ -375,7 +374,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
2288 +
2289 + static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
2290 + static struct cpumask nmi_ipi_pending_mask;
2291 +-static int nmi_ipi_busy_count = 0;
2292 ++static bool nmi_ipi_busy = false;
2293 + static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
2294 +
2295 + static void nmi_ipi_lock_start(unsigned long *flags)
2296 +@@ -414,7 +413,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
2297 + */
2298 + int smp_handle_nmi_ipi(struct pt_regs *regs)
2299 + {
2300 +- void (*fn)(struct pt_regs *);
2301 ++ void (*fn)(struct pt_regs *) = NULL;
2302 + unsigned long flags;
2303 + int me = raw_smp_processor_id();
2304 + int ret = 0;
2305 +@@ -425,29 +424,17 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
2306 + * because the caller may have timed out.
2307 + */
2308 + nmi_ipi_lock_start(&flags);
2309 +- if (!nmi_ipi_busy_count)
2310 +- goto out;
2311 +- if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
2312 +- goto out;
2313 +-
2314 +- fn = nmi_ipi_function;
2315 +- if (!fn)
2316 +- goto out;
2317 +-
2318 +- cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
2319 +- nmi_ipi_busy_count++;
2320 +- nmi_ipi_unlock();
2321 +-
2322 +- ret = 1;
2323 +-
2324 +- fn(regs);
2325 +-
2326 +- nmi_ipi_lock();
2327 +- if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
2328 +- nmi_ipi_busy_count--;
2329 +-out:
2330 ++ if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
2331 ++ cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
2332 ++ fn = READ_ONCE(nmi_ipi_function);
2333 ++ WARN_ON_ONCE(!fn);
2334 ++ ret = 1;
2335 ++ }
2336 + nmi_ipi_unlock_end(&flags);
2337 +
2338 ++ if (fn)
2339 ++ fn(regs);
2340 ++
2341 + return ret;
2342 + }
2343 +
2344 +@@ -473,7 +460,7 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
2345 + * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
2346 + * - fn is the target callback function.
2347 + * - delay_us > 0 is the delay before giving up waiting for targets to
2348 +- * complete executing the handler, == 0 specifies indefinite delay.
2349 ++ * begin executing the handler, == 0 specifies indefinite delay.
2350 + */
2351 + int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
2352 + {
2353 +@@ -487,31 +474,33 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
2354 + if (unlikely(!smp_ops))
2355 + return 0;
2356 +
2357 +- /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
2358 + nmi_ipi_lock_start(&flags);
2359 +- while (nmi_ipi_busy_count) {
2360 ++ while (nmi_ipi_busy) {
2361 + nmi_ipi_unlock_end(&flags);
2362 +- spin_until_cond(nmi_ipi_busy_count == 0);
2363 ++ spin_until_cond(!nmi_ipi_busy);
2364 + nmi_ipi_lock_start(&flags);
2365 + }
2366 +-
2367 ++ nmi_ipi_busy = true;
2368 + nmi_ipi_function = fn;
2369 +
2370 ++ WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
2371 ++
2372 + if (cpu < 0) {
2373 + /* ALL_OTHERS */
2374 + cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
2375 + cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
2376 + } else {
2377 +- /* cpumask starts clear */
2378 + cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
2379 + }
2380 +- nmi_ipi_busy_count++;
2381 ++
2382 + nmi_ipi_unlock();
2383 +
2384 ++ /* Interrupts remain hard disabled */
2385 ++
2386 + do_smp_send_nmi_ipi(cpu, safe);
2387 +
2388 + nmi_ipi_lock();
2389 +- /* nmi_ipi_busy_count is held here, so unlock/lock is okay */
2390 ++ /* nmi_ipi_busy is set here, so unlock/lock is okay */
2391 + while (!cpumask_empty(&nmi_ipi_pending_mask)) {
2392 + nmi_ipi_unlock();
2393 + udelay(1);
2394 +@@ -523,29 +512,15 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool
2395 + }
2396 + }
2397 +
2398 +- while (nmi_ipi_busy_count > 1) {
2399 +- nmi_ipi_unlock();
2400 +- udelay(1);
2401 +- nmi_ipi_lock();
2402 +- if (delay_us) {
2403 +- delay_us--;
2404 +- if (!delay_us)
2405 +- break;
2406 +- }
2407 +- }
2408 +-
2409 + if (!cpumask_empty(&nmi_ipi_pending_mask)) {
2410 + /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
2411 + ret = 0;
2412 + cpumask_clear(&nmi_ipi_pending_mask);
2413 + }
2414 +- if (nmi_ipi_busy_count > 1) {
2415 +- /* Timeout waiting for CPUs to execute fn */
2416 +- ret = 0;
2417 +- nmi_ipi_busy_count = 1;
2418 +- }
2419 +
2420 +- nmi_ipi_busy_count--;
2421 ++ nmi_ipi_function = NULL;
2422 ++ nmi_ipi_busy = false;
2423 ++
2424 + nmi_ipi_unlock_end(&flags);
2425 +
2426 + return ret;
2427 +@@ -613,17 +588,8 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
2428 + static void nmi_stop_this_cpu(struct pt_regs *regs)
2429 + {
2430 + /*
2431 +- * This is a special case because it never returns, so the NMI IPI
2432 +- * handling would never mark it as done, which makes any later
2433 +- * smp_send_nmi_ipi() call spin forever. Mark it done now.
2434 +- *
2435 + * IRQs are already hard disabled by the smp_handle_nmi_ipi.
2436 + */
2437 +- nmi_ipi_lock();
2438 +- if (nmi_ipi_busy_count > 1)
2439 +- nmi_ipi_busy_count--;
2440 +- nmi_ipi_unlock();
2441 +-
2442 + spin_begin();
2443 + while (1)
2444 + spin_cpu_relax();
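
The NMI IPI rework replaces the nmi_ipi_busy_count handshake (which handlers had to decrement, with a special case for handlers that never return, like nmi_stop_this_cpu()) with plain caller ownership: the sender holds nmi_ipi_busy for the whole operation, and targets merely clear their bit in nmi_ipi_pending_mask on entry to the handler. A condensed sketch of the caller side, with the lock/unlock dance elided:

    /* sender, cf. __smp_send_nmi_ipi() above (locking elided) */
    while (nmi_ipi_busy)            /* serialize concurrent senders */
            cpu_relax();
    nmi_ipi_busy = true;            /* caller now owns the machinery */
    nmi_ipi_function = fn;
    /* ... fill nmi_ipi_pending_mask and fire the IPIs ... */
    /* wait (with optional timeout) for targets to clear their bit,
     * i.e. to have *begun* fn(), not to have finished it */
    nmi_ipi_function = NULL;
    nmi_ipi_busy = false;
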
2445 +diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2446 +index 64936b60d521..7a1de34f38c8 100644
2447 +--- a/arch/powerpc/kernel/traps.c
2448 ++++ b/arch/powerpc/kernel/traps.c
2449 +@@ -763,15 +763,15 @@ void machine_check_exception(struct pt_regs *regs)
2450 + if (check_io_access(regs))
2451 + goto bail;
2452 +
2453 +- /* Must die if the interrupt is not recoverable */
2454 +- if (!(regs->msr & MSR_RI))
2455 +- nmi_panic(regs, "Unrecoverable Machine check");
2456 +-
2457 + if (!nested)
2458 + nmi_exit();
2459 +
2460 + die("Machine check", regs, SIGBUS);
2461 +
2462 ++ /* Must die if the interrupt is not recoverable */
2463 ++ if (!(regs->msr & MSR_RI))
2464 ++ nmi_panic(regs, "Unrecoverable Machine check");
2465 ++
2466 + return;
2467 +
2468 + bail:
2469 +@@ -1542,8 +1542,8 @@ bail:
2470 +
2471 + void StackOverflow(struct pt_regs *regs)
2472 + {
2473 +- printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
2474 +- current, regs->gpr[1]);
2475 ++ pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
2476 ++ current->comm, task_pid_nr(current), regs->gpr[1]);
2477 + debugger(regs);
2478 + show_regs(regs);
2479 + panic("kernel stack overflow");
2480 +diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
2481 +index a4ed9edfd5f0..1f324c28705b 100644
2482 +--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
2483 ++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
2484 +@@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
2485 + * At this point, r4,r5 contain our sec/nsec values.
2486 + */
2487 +
2488 +- lwa r6,WTOM_CLOCK_SEC(r3)
2489 ++ ld r6,WTOM_CLOCK_SEC(r3)
2490 + lwa r9,WTOM_CLOCK_NSEC(r3)
2491 +
2492 + /* We now have our result in r6,r9. We create a fake dependency
2493 +@@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
2494 + bne cr6,75f
2495 +
2496 + /* CLOCK_MONOTONIC_COARSE */
2497 +- lwa r6,WTOM_CLOCK_SEC(r3)
2498 ++ ld r6,WTOM_CLOCK_SEC(r3)
2499 + lwa r9,WTOM_CLOCK_NSEC(r3)
2500 +
2501 + /* check if counter has updated */
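
The vdso_datapage and gettimeofday.S hunks are one fix: the wall-to-monotonic offset wtom_clock_sec is roughly the negative of the wall-clock epoch seconds, which runs out of __s32 range in 2038, so the field is widened to __s64 (with the struct reshuffled to keep the remaining 32-bit members packed) and the assembly switches from lwa, a sign-extending 32-bit load, to ld, a full 64-bit load. Rough arithmetic, for illustration:

    /* seconds since 1970-01-01 on 2038-01-19  ~  2^31 = 2147483648   */
    /* INT32_MAX                               =        2147483647    */
    /* => an __s32 offset of ~ -epoch seconds wraps; an __s64 does not */
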
2502 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
2503 +index 9b8d50a7cbaf..45b06e239d1f 100644
2504 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
2505 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
2506 +@@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
2507 + #define STACK_SLOT_DAWR (SFS-56)
2508 + #define STACK_SLOT_DAWRX (SFS-64)
2509 + #define STACK_SLOT_HFSCR (SFS-72)
2510 ++#define STACK_SLOT_AMR (SFS-80)
2511 ++#define STACK_SLOT_UAMOR (SFS-88)
2512 + /* the following is used by the P9 short path */
2513 + #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
2514 +
2515 +@@ -726,11 +728,9 @@ BEGIN_FTR_SECTION
2516 + mfspr r5, SPRN_TIDR
2517 + mfspr r6, SPRN_PSSCR
2518 + mfspr r7, SPRN_PID
2519 +- mfspr r8, SPRN_IAMR
2520 + std r5, STACK_SLOT_TID(r1)
2521 + std r6, STACK_SLOT_PSSCR(r1)
2522 + std r7, STACK_SLOT_PID(r1)
2523 +- std r8, STACK_SLOT_IAMR(r1)
2524 + mfspr r5, SPRN_HFSCR
2525 + std r5, STACK_SLOT_HFSCR(r1)
2526 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2527 +@@ -738,11 +738,18 @@ BEGIN_FTR_SECTION
2528 + mfspr r5, SPRN_CIABR
2529 + mfspr r6, SPRN_DAWR
2530 + mfspr r7, SPRN_DAWRX
2531 ++ mfspr r8, SPRN_IAMR
2532 + std r5, STACK_SLOT_CIABR(r1)
2533 + std r6, STACK_SLOT_DAWR(r1)
2534 + std r7, STACK_SLOT_DAWRX(r1)
2535 ++ std r8, STACK_SLOT_IAMR(r1)
2536 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2537 +
2538 ++ mfspr r5, SPRN_AMR
2539 ++ std r5, STACK_SLOT_AMR(r1)
2540 ++ mfspr r6, SPRN_UAMOR
2541 ++ std r6, STACK_SLOT_UAMOR(r1)
2542 ++
2543 + BEGIN_FTR_SECTION
2544 + /* Set partition DABR */
2545 + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
2546 +@@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
2547 + mtspr SPRN_PSPB, r0
2548 + mtspr SPRN_WORT, r0
2549 + BEGIN_FTR_SECTION
2550 +- mtspr SPRN_IAMR, r0
2551 + mtspr SPRN_TCSCR, r0
2552 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
2553 + li r0, 1
2554 + sldi r0, r0, 31
2555 + mtspr SPRN_MMCRS, r0
2556 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
2557 +-8:
2558 +
2559 +- /* Save and reset AMR and UAMOR before turning on the MMU */
2560 ++ /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
2561 ++ ld r8, STACK_SLOT_IAMR(r1)
2562 ++ mtspr SPRN_IAMR, r8
2563 ++
2564 ++8: /* Power7 jumps back in here */
2565 + mfspr r5,SPRN_AMR
2566 + mfspr r6,SPRN_UAMOR
2567 + std r5,VCPU_AMR(r9)
2568 + std r6,VCPU_UAMOR(r9)
2569 +- li r6,0
2570 +- mtspr SPRN_AMR,r6
2571 ++ ld r5,STACK_SLOT_AMR(r1)
2572 ++ ld r6,STACK_SLOT_UAMOR(r1)
2573 ++ mtspr SPRN_AMR, r5
2574 + mtspr SPRN_UAMOR, r6
2575 +
2576 + /* Switch DSCR back to host value */
2577 +@@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION
2578 + ld r5, STACK_SLOT_TID(r1)
2579 + ld r6, STACK_SLOT_PSSCR(r1)
2580 + ld r7, STACK_SLOT_PID(r1)
2581 +- ld r8, STACK_SLOT_IAMR(r1)
2582 + mtspr SPRN_TIDR, r5
2583 + mtspr SPRN_PSSCR, r6
2584 + mtspr SPRN_PID, r7
2585 +- mtspr SPRN_IAMR, r8
2586 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2587 +
2588 + #ifdef CONFIG_PPC_RADIX_MMU
2589 +diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
2590 +index 844d8e774492..b7f6f6e0b6e8 100644
2591 +--- a/arch/powerpc/lib/memcmp_64.S
2592 ++++ b/arch/powerpc/lib/memcmp_64.S
2593 +@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
2594 + beq .Lzero
2595 +
2596 + .Lcmp_rest_lt8bytes:
2597 +- /* Here we have only less than 8 bytes to compare with. at least s1
2598 +- * Address is aligned with 8 bytes.
2599 +- * The next double words are load and shift right with appropriate
2600 +- * bits.
2601 ++ /*
2602 ++ * Here we have less than 8 bytes to compare. At least s1 is aligned to
2603 ++ * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
2604 ++ * page boundary, otherwise we might read past the end of the buffer and
2605 ++ * trigger a page fault. We use 4K as the conservative minimum page
2606 ++ * size. If we detect that case we go to the byte-by-byte loop.
2607 ++ *
2608 ++ * Otherwise the next double word is loaded from s1 and s2, and shifted
2609 ++ * right to compare the appropriate bits.
2610 + */
2611 ++ clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
2612 ++ cmpdi r6,0xff8
2613 ++ bgt .Lshort
2614 ++
2615 + subfic r6,r5,8
2616 + slwi r6,r6,3
2617 + LD rA,0,r3
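
In C terms, the new guard before the 8-byte tail compare reads as follows (s2's alignment is unknown; 4 KiB is the conservative minimum page size, and 0xff8 = 4096 - 8):

    if (((unsigned long)s2 & 0xfff) > 0xff8)  /* would s2 + 7 cross a page? */
            goto short_cmp;                   /* fall back to byte-by-byte  */

At exactly 0xff8 the 8-byte load ends on the last byte of the page, so only strictly greater offsets are rejected.
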
2618 +diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
2619 +index 2486bee0f93e..97c7a39ebc00 100644
2620 +--- a/arch/powerpc/mm/hugetlbpage-radix.c
2621 ++++ b/arch/powerpc/mm/hugetlbpage-radix.c
2622 +@@ -1,6 +1,7 @@
2623 + // SPDX-License-Identifier: GPL-2.0
2624 + #include <linux/mm.h>
2625 + #include <linux/hugetlb.h>
2626 ++#include <linux/security.h>
2627 + #include <asm/pgtable.h>
2628 + #include <asm/pgalloc.h>
2629 + #include <asm/cacheflush.h>
2630 +@@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
2631 + if (addr) {
2632 + addr = ALIGN(addr, huge_page_size(h));
2633 + vma = find_vma(mm, addr);
2634 +- if (high_limit - len >= addr &&
2635 ++ if (high_limit - len >= addr && addr >= mmap_min_addr &&
2636 + (!vma || addr + len <= vm_start_gap(vma)))
2637 + return addr;
2638 + }
2639 +@@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
2640 + */
2641 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
2642 + info.length = len;
2643 +- info.low_limit = PAGE_SIZE;
2644 ++ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
2645 + info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
2646 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
2647 + info.align_offset = 0;
2648 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
2649 +index 87f0dd004295..b5d1c45c1475 100644
2650 +--- a/arch/powerpc/mm/numa.c
2651 ++++ b/arch/powerpc/mm/numa.c
2652 +@@ -1460,13 +1460,6 @@ static void reset_topology_timer(void)
2653 +
2654 + #ifdef CONFIG_SMP
2655 +
2656 +-static void stage_topology_update(int core_id)
2657 +-{
2658 +- cpumask_or(&cpu_associativity_changes_mask,
2659 +- &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
2660 +- reset_topology_timer();
2661 +-}
2662 +-
2663 + static int dt_update_callback(struct notifier_block *nb,
2664 + unsigned long action, void *data)
2665 + {
2666 +@@ -1479,7 +1472,7 @@ static int dt_update_callback(struct notifier_block *nb,
2667 + !of_prop_cmp(update->prop->name, "ibm,associativity")) {
2668 + u32 core_id;
2669 + of_property_read_u32(update->dn, "reg", &core_id);
2670 +- stage_topology_update(core_id);
2671 ++ rc = dlpar_cpu_readd(core_id);
2672 + rc = NOTIFY_OK;
2673 + }
2674 + break;
2675 +diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
2676 +index bc3914d54e26..5986df48359b 100644
2677 +--- a/arch/powerpc/mm/slb.c
2678 ++++ b/arch/powerpc/mm/slb.c
2679 +@@ -69,6 +69,11 @@ static void assert_slb_presence(bool present, unsigned long ea)
2680 + if (!cpu_has_feature(CPU_FTR_ARCH_206))
2681 + return;
2682 +
2683 ++ /*
2684 ++ * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
2685 ++ * ignores all other bits from 0-27, so just clear them all.
2686 ++ */
2687 ++ ea &= ~((1UL << 28) - 1);
2688 + asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
2689 +
2690 + WARN_ON(present == (tmp == 0));
2691 +diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
2692 +index c2d5192ed64f..e52e30bf7d86 100644
2693 +--- a/arch/powerpc/net/bpf_jit.h
2694 ++++ b/arch/powerpc/net/bpf_jit.h
2695 +@@ -51,6 +51,8 @@
2696 + #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
2697 + #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
2698 + ___PPC_RA(base) | ((i) & 0xfffc))
2699 ++#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
2700 ++ ___PPC_RA(base) | ___PPC_RB(b))
2701 + #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
2702 + ___PPC_RA(base) | ((i) & 0xfffc))
2703 + #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
2704 +@@ -65,7 +67,9 @@
2705 + #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
2706 + ___PPC_RA(base) | IMM_L(i))
2707 + #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
2708 +- ___PPC_RA(base) | IMM_L(i))
2709 ++ ___PPC_RA(base) | ((i) & 0xfffc))
2710 ++#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
2711 ++ ___PPC_RA(base) | ___PPC_RB(b))
2712 + #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
2713 + ___PPC_RA(base) | IMM_L(i))
2714 + #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
2715 +@@ -85,17 +89,6 @@
2716 + ___PPC_RA(a) | ___PPC_RB(b))
2717 + #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
2718 + ___PPC_RA(a) | ___PPC_RB(b))
2719 +-
2720 +-#ifdef CONFIG_PPC64
2721 +-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
2722 +-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
2723 +-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
2724 +-#else
2725 +-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
2726 +-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
2727 +-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
2728 +-#endif
2729 +-
2730 + #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
2731 + #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
2732 + #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
2733 +diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
2734 +index 6f4daacad296..ade04547703f 100644
2735 +--- a/arch/powerpc/net/bpf_jit32.h
2736 ++++ b/arch/powerpc/net/bpf_jit32.h
2737 +@@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
2738 + #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
2739 + #endif
2740 +
2741 ++#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
2742 ++#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
2743 ++#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
2744 ++
2745 + #define SEEN_DATAREF 0x10000 /* might call external helpers */
2746 + #define SEEN_XREG 0x20000 /* X reg is used */
2747 + #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
2748 +diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
2749 +index 3609be4692b3..47f441f351a6 100644
2750 +--- a/arch/powerpc/net/bpf_jit64.h
2751 ++++ b/arch/powerpc/net/bpf_jit64.h
2752 +@@ -68,6 +68,26 @@ static const int b2p[] = {
2753 + /* PPC NVR range -- update this if we ever use NVRs below r27 */
2754 + #define BPF_PPC_NVR_MIN 27
2755 +
2756 ++/*
2757 ++ * WARNING: These may clobber TMP_REG_2 when the offset is not at a word
2758 ++ * boundary, so ensure that register isn't already in use.
2759 ++ */
2760 ++#define PPC_BPF_LL(r, base, i) do { \
2761 ++ if ((i) % 4) { \
2762 ++ PPC_LI(b2p[TMP_REG_2], (i)); \
2763 ++ PPC_LDX(r, base, b2p[TMP_REG_2]); \
2764 ++ } else \
2765 ++ PPC_LD(r, base, i); \
2766 ++ } while(0)
2767 ++#define PPC_BPF_STL(r, base, i) do { \
2768 ++ if ((i) % 4) { \
2769 ++ PPC_LI(b2p[TMP_REG_2], (i)); \
2770 ++ PPC_STDX(r, base, b2p[TMP_REG_2]); \
2771 ++ } else \
2772 ++ PPC_STD(r, base, i); \
2773 ++ } while(0)
2774 ++#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
2775 ++
2776 + #define SEEN_FUNC 0x1000 /* might call external helpers */
2777 + #define SEEN_STACK 0x2000 /* uses BPF stack */
2778 + #define SEEN_TAILCALL 0x4000 /* uses tail calls */
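
On ppc64, ld and std are DS-form instructions: the low two bits of the 16-bit displacement field belong to the extended opcode, so the encodable displacement is implicitly a multiple of 4 (which is also why PPC_LD/PPC_STD above mask the immediate with 0xfffc). Offsets that are not word-aligned cannot be encoded at all, so the new PPC_BPF_LL/PPC_BPF_STL macros fall back to the indexed forms through TMP_REG_2. Illustrative expansions:

    PPC_BPF_LL(r, base, 8);  /* 8 % 4 == 0: emits  ld   r, 8(base)          */
    PPC_BPF_LL(r, base, 6);  /* 6 % 4 != 0: emits  li   tmp2, 6             */
                             /*             then   ldx  r, base, tmp2       */
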
2779 +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
2780 +index 7ce57657d3b8..b1a116eecae2 100644
2781 +--- a/arch/powerpc/net/bpf_jit_comp64.c
2782 ++++ b/arch/powerpc/net/bpf_jit_comp64.c
2783 +@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
2784 + * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
2785 + * goto out;
2786 + */
2787 +- PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
2788 ++ PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
2789 + PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
2790 + PPC_BCC(COND_GT, out);
2791 +
2792 +@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
2793 + /* prog = array->ptrs[index]; */
2794 + PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
2795 + PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
2796 +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
2797 ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
2798 +
2799 + /*
2800 + * if (prog == NULL)
2801 +@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
2802 + PPC_BCC(COND_EQ, out);
2803 +
2804 + /* goto *(prog->bpf_func + prologue_size); */
2805 +- PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
2806 ++ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
2807 + #ifdef PPC64_ELF_ABI_v1
2808 + /* skip past the function descriptor */
2809 + PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
2810 +@@ -606,7 +606,7 @@ bpf_alu32_trunc:
2811 + * the instructions generated will remain the
2812 + * same across all passes
2813 + */
2814 +- PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
2815 ++ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
2816 + PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
2817 + PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
2818 + break;
2819 +@@ -662,7 +662,7 @@ emit_clear:
2820 + PPC_LI32(b2p[TMP_REG_1], imm);
2821 + src_reg = b2p[TMP_REG_1];
2822 + }
2823 +- PPC_STD(src_reg, dst_reg, off);
2824 ++ PPC_BPF_STL(src_reg, dst_reg, off);
2825 + break;
2826 +
2827 + /*
2828 +@@ -709,7 +709,7 @@ emit_clear:
2829 + break;
2830 + /* dst = *(u64 *)(ul) (src + off) */
2831 + case BPF_LDX | BPF_MEM | BPF_DW:
2832 +- PPC_LD(dst_reg, src_reg, off);
2833 ++ PPC_BPF_LL(dst_reg, src_reg, off);
2834 + break;
2835 +
2836 + /*
2837 +diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
2838 +index 4a9a72d01c3c..35be81fd2dc2 100644
2839 +--- a/arch/powerpc/platforms/44x/Kconfig
2840 ++++ b/arch/powerpc/platforms/44x/Kconfig
2841 +@@ -180,6 +180,7 @@ config CURRITUCK
2842 + depends on PPC_47x
2843 + select SWIOTLB
2844 + select 476FPE
2845 ++ select FORCE_PCI
2846 + select PPC4xx_PCI_EXPRESS
2847 + help
2848 + This option enables support for the IBM Currituck (476fpe) evaluation board
2849 +diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
2850 +index 3d1ecd211776..8137f77abad5 100644
2851 +--- a/arch/powerpc/platforms/83xx/suspend-asm.S
2852 ++++ b/arch/powerpc/platforms/83xx/suspend-asm.S
2853 +@@ -26,13 +26,13 @@
2854 + #define SS_MSR 0x74
2855 + #define SS_SDR1 0x78
2856 + #define SS_LR 0x7c
2857 +-#define SS_SPRG 0x80 /* 4 SPRGs */
2858 +-#define SS_DBAT 0x90 /* 8 DBATs */
2859 +-#define SS_IBAT 0xd0 /* 8 IBATs */
2860 +-#define SS_TB 0x110
2861 +-#define SS_CR 0x118
2862 +-#define SS_GPREG 0x11c /* r12-r31 */
2863 +-#define STATE_SAVE_SIZE 0x16c
2864 ++#define SS_SPRG 0x80 /* 8 SPRGs */
2865 ++#define SS_DBAT 0xa0 /* 8 DBATs */
2866 ++#define SS_IBAT 0xe0 /* 8 IBATs */
2867 ++#define SS_TB 0x120
2868 ++#define SS_CR 0x128
2869 ++#define SS_GPREG 0x12c /* r12-r31 */
2870 ++#define STATE_SAVE_SIZE 0x17c
2871 +
2872 + .section .data
2873 + .align 5
2874 +@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
2875 + stw r7, SS_SPRG+12(r3)
2876 + stw r8, SS_SDR1(r3)
2877 +
2878 ++ mfspr r4, SPRN_SPRG4
2879 ++ mfspr r5, SPRN_SPRG5
2880 ++ mfspr r6, SPRN_SPRG6
2881 ++ mfspr r7, SPRN_SPRG7
2882 ++
2883 ++ stw r4, SS_SPRG+16(r3)
2884 ++ stw r5, SS_SPRG+20(r3)
2885 ++ stw r6, SS_SPRG+24(r3)
2886 ++ stw r7, SS_SPRG+28(r3)
2887 ++
2888 + mfspr r4, SPRN_DBAT0U
2889 + mfspr r5, SPRN_DBAT0L
2890 + mfspr r6, SPRN_DBAT1U
2891 +@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
2892 + mtspr SPRN_IBAT7U, r6
2893 + mtspr SPRN_IBAT7L, r7
2894 +
2895 ++ lwz r4, SS_SPRG+16(r3)
2896 ++ lwz r5, SS_SPRG+20(r3)
2897 ++ lwz r6, SS_SPRG+24(r3)
2898 ++ lwz r7, SS_SPRG+28(r3)
2899 ++
2900 ++ mtspr SPRN_SPRG4, r4
2901 ++ mtspr SPRN_SPRG5, r5
2902 ++ mtspr SPRN_SPRG6, r6
2903 ++ mtspr SPRN_SPRG7, r7
2904 ++
2905 + lwz r4, SS_SPRG+0(r3)
2906 + lwz r5, SS_SPRG+4(r3)
2907 + lwz r6, SS_SPRG+8(r3)
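
The new state-save offsets follow from inserting four more 32-bit SPRGs (0x10 bytes) at SS_SPRG; a quick check against the #defines above:

    SS_DBAT  = SS_SPRG + 8*4           = 0x80 + 0x20 = 0xa0
    SS_IBAT  = SS_DBAT + 8*2*4         = 0xa0 + 0x40 = 0xe0   /* 8 DBATs, U+L */
    SS_TB    = SS_IBAT + 8*2*4         = 0xe0 + 0x40 = 0x120
    SS_CR    = SS_TB + 8               = 0x128
    SS_GPREG = SS_CR + 4               = 0x12c
    STATE_SAVE_SIZE = SS_GPREG + 20*4  = 0x12c + 0x50 = 0x17c /* r12..r31 */
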
2908 +diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
2909 +index ecf703ee3a76..ac4ee88efc80 100644
2910 +--- a/arch/powerpc/platforms/embedded6xx/wii.c
2911 ++++ b/arch/powerpc/platforms/embedded6xx/wii.c
2912 +@@ -83,6 +83,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
2913 + /* MEM2 64MB@0x10000000 */
2914 + delta = wii_hole_start + wii_hole_size;
2915 + size = top - delta;
2916 ++
2917 ++ if (__map_without_bats)
2918 ++ return delta;
2919 ++
2920 + for (bl = 128<<10; bl < max_size; bl <<= 1) {
2921 + if (bl * 2 > size)
2922 + break;
2923 +diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
2924 +index 35f699ebb662..e52f9b06dd9c 100644
2925 +--- a/arch/powerpc/platforms/powernv/idle.c
2926 ++++ b/arch/powerpc/platforms/powernv/idle.c
2927 +@@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
2928 + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2929 +
2930 + #ifdef CONFIG_HOTPLUG_CPU
2931 +-static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
2932 ++
2933 ++void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
2934 + {
2935 + u64 pir = get_hard_smp_processor_id(cpu);
2936 +
2937 +@@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
2938 + {
2939 + unsigned long srr1;
2940 + u32 idle_states = pnv_get_supported_cpuidle_states();
2941 +- u64 lpcr_val;
2942 +-
2943 +- /*
2944 +- * We don't want to take decrementer interrupts while we are
2945 +- * offline, so clear LPCR:PECE1. We keep PECE2 (and
2946 +- * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
2947 +- *
2948 +- * If the CPU gets woken up by a special wakeup, ensure that
2949 +- * the SLW engine sets LPCR with decrementer bit cleared, else
2950 +- * the CPU will come back to the kernel due to a spurious
2951 +- * wakeup.
2952 +- */
2953 +- lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
2954 +- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
2955 +
2956 + __ppc64_runlatch_off();
2957 +
2958 +@@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
2959 +
2960 + __ppc64_runlatch_on();
2961 +
2962 +- /*
2963 +- * Re-enable decrementer interrupts in LPCR.
2964 +- *
2965 +- * Further, we want stop states to be woken up by decrementer
2966 +- * for non-hotplug cases. So program the LPCR via stop api as
2967 +- * well.
2968 +- */
2969 +- lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
2970 +- pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
2971 +-
2972 + return srr1;
2973 + }
2974 + #endif
2975 +diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
2976 +index acd3206dfae3..06628c71cef6 100644
2977 +--- a/arch/powerpc/platforms/powernv/opal-msglog.c
2978 ++++ b/arch/powerpc/platforms/powernv/opal-msglog.c
2979 +@@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
2980 + }
2981 +
2982 + static struct bin_attribute opal_msglog_attr = {
2983 +- .attr = {.name = "msglog", .mode = 0444},
2984 ++ .attr = {.name = "msglog", .mode = 0400},
2985 + .read = opal_msglog_read
2986 + };
2987 +
2988 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
2989 +index 697449afb3f7..e28f03e1eb5e 100644
2990 +--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
2991 ++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
2992 +@@ -313,7 +313,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2993 + page_shift);
2994 + tbl->it_level_size = 1ULL << (level_shift - 3);
2995 + tbl->it_indirect_levels = levels - 1;
2996 +- tbl->it_allocated_size = total_allocated;
2997 + tbl->it_userspace = uas;
2998 + tbl->it_nid = nid;
2999 +
3000 +diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
3001 +index 145373f0e5dc..2d62c58f9a4c 100644
3002 +--- a/arch/powerpc/platforms/powernv/pci-ioda.c
3003 ++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
3004 +@@ -2594,8 +2594,13 @@ static long pnv_pci_ioda2_create_table_userspace(
3005 + int num, __u32 page_shift, __u64 window_size, __u32 levels,
3006 + struct iommu_table **ptbl)
3007 + {
3008 +- return pnv_pci_ioda2_create_table(table_group,
3009 ++ long ret = pnv_pci_ioda2_create_table(table_group,
3010 + num, page_shift, window_size, levels, true, ptbl);
3011 ++
3012 ++ if (!ret)
3013 ++ (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
3014 ++ page_shift, window_size, levels);
3015 ++ return ret;
3016 + }
3017 +
3018 + static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
3019 +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
3020 +index 0d354e19ef92..db09c7022635 100644
3021 +--- a/arch/powerpc/platforms/powernv/smp.c
3022 ++++ b/arch/powerpc/platforms/powernv/smp.c
3023 +@@ -39,6 +39,7 @@
3024 + #include <asm/cpuidle.h>
3025 + #include <asm/kexec.h>
3026 + #include <asm/reg.h>
3027 ++#include <asm/powernv.h>
3028 +
3029 + #include "powernv.h"
3030 +
3031 +@@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void)
3032 + {
3033 + unsigned int cpu;
3034 + unsigned long srr1, wmask;
3035 ++ u64 lpcr_val;
3036 +
3037 + /* Standard hot unplug procedure */
3038 + /*
3039 +@@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void)
3040 + if (cpu_has_feature(CPU_FTR_ARCH_207S))
3041 + wmask = SRR1_WAKEMASK_P8;
3042 +
3043 ++ /*
3044 ++ * We don't want to take decrementer interrupts while we are
3045 ++ * offline, so clear LPCR:PECE1. We keep PECE2 (and
3046 ++ * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in.
3047 ++ *
3048 ++ * If the CPU gets woken up by a special wakeup, ensure that
3049 ++ * the SLW engine sets LPCR with decrementer bit cleared, else
3050 ++ * the CPU will come back to the kernel due to a spurious
3051 ++ * wakeup.
3052 ++ */
3053 ++ lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
3054 ++ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
3055 ++
3056 + while (!generic_check_cpu_restart(cpu)) {
3057 + /*
3058 + * Clear IPI flag, since we don't handle IPIs while
3059 +@@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void)
3060 +
3061 + }
3062 +
3063 ++ /*
3064 ++ * Re-enable decrementer interrupts in LPCR.
3065 ++ *
3066 ++ * Further, we want stop states to be woken up by decrementer
3067 ++ * for non-hotplug cases. So program the LPCR via stop api as
3068 ++ * well.
3069 ++ */
3070 ++ lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
3071 ++ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
3072 ++
3073 + DBG("CPU%d coming online...\n", cpu);
3074 + }
3075 +
3076 +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
3077 +index 2f8e62163602..97feb6e79f1a 100644
3078 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
3079 ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
3080 +@@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
3081 + return rc;
3082 + }
3083 +
3084 ++int dlpar_cpu_readd(int cpu)
3085 ++{
3086 ++ struct device_node *dn;
3087 ++ struct device *dev;
3088 ++ u32 drc_index;
3089 ++ int rc;
3090 ++
3091 ++ dev = get_cpu_device(cpu);
3092 ++ dn = dev->of_node;
3093 ++
3094 ++ rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
3095 ++
3096 ++ rc = dlpar_cpu_remove_by_index(drc_index);
3097 ++ if (!rc)
3098 ++ rc = dlpar_cpu_add(drc_index);
3099 ++
3100 ++ return rc;
3101 ++}
3102 ++
3103 + int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
3104 + {
3105 + u32 count, drc_index;
3106 +diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
3107 +index 6ed22127391b..921f12182f3e 100644
3108 +--- a/arch/powerpc/platforms/pseries/pseries_energy.c
3109 ++++ b/arch/powerpc/platforms/pseries/pseries_energy.c
3110 +@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
3111 +
3112 + ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
3113 + } else {
3114 +- const __be32 *indexes;
3115 +-
3116 +- indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
3117 +- if (indexes == NULL)
3118 +- goto err_of_node_put;
3119 ++ u32 nr_drc_indexes, thread_drc_index;
3120 +
3121 + /*
3122 +- * The first element indexes[0] is the number of drc_indexes
3123 +- * returned in the list. Hence thread_index+1 will get the
3124 +- * drc_index corresponding to core number thread_index.
3125 ++ * The first element of ibm,drc-indexes array is the
3126 ++ * number of drc_indexes returned in the list. Hence
3127 ++ * thread_index+1 will get the drc_index corresponding
3128 ++ * to core number thread_index.
3129 + */
3130 +- ret = indexes[thread_index + 1];
3131 ++ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
3132 ++ 0, &nr_drc_indexes);
3133 ++ if (rc)
3134 ++ goto err_of_node_put;
3135 ++
3136 ++ WARN_ON_ONCE(thread_index > nr_drc_indexes);
3137 ++ rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
3138 ++ thread_index + 1,
3139 ++ &thread_drc_index);
3140 ++ if (rc)
3141 ++ goto err_of_node_put;
3142 ++
3143 ++ ret = thread_drc_index;
3144 + }
3145 +
3146 + rc = 0;
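
The hunk above replaces a raw of_get_property() dereference with of_property_read_u32_index(), which bounds-checks the access and converts the big-endian cell for the caller. As a rough illustration only (the helper and layout below are simplified stand-ins, not the kernel DT API), an "ibm,drc-indexes"-style property is a big-endian u32 array whose element 0 holds the count of entries that follow:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>  /* ntohl()/htonl() for the big-endian cells */

/* Simplified stand-in for of_property_read_u32_index(): bounds-check,
 * then convert the requested big-endian cell to host order. */
static int read_u32_index(const uint32_t *prop, unsigned int nelems,
                          unsigned int index, uint32_t *out)
{
    if (index >= nelems)
        return -1;
    *out = ntohl(prop[index]);
    return 0;
}

int main(void)
{
    /* Element 0 is the entry count; three drc indexes follow. */
    uint32_t prop[] = { htonl(3), htonl(0x100), htonl(0x101), htonl(0x102) };
    uint32_t count, drc;

    read_u32_index(prop, 4, 0, &count);
    if (read_u32_index(prop, 4, 1 + 1, &drc) == 0) /* thread_index == 1 */
        printf("count=%u drc_index=%#x\n", count, drc);
    return 0;
}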
3147 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
3148 +index d97d52772789..452dcfd7e5dd 100644
3149 +--- a/arch/powerpc/platforms/pseries/ras.c
3150 ++++ b/arch/powerpc/platforms/pseries/ras.c
3151 +@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
3152 + "UE",
3153 + "SLB",
3154 + "ERAT",
3155 ++ "Unknown",
3156 + "TLB",
3157 + "D-Cache",
3158 + "Unknown",
3159 +diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
3160 +index 9deea5ee13f6..27f1e6415036 100644
3161 +--- a/arch/powerpc/xmon/ppc-dis.c
3162 ++++ b/arch/powerpc/xmon/ppc-dis.c
3163 +@@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
3164 + dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
3165 + | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
3166 + | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
3167 +- | PPC_OPCODE_VSX | PPC_OPCODE_VSX3),
3168 ++ | PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
3169 +
3170 + /* Get the major opcode of the insn. */
3171 + opcode = NULL;
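
The one-character ppc-dis.c fix above replaces a stray comma operator with a semicolon. The comma version still compiled because the next assignment simply became the right-hand operand of the comma expression; a minimal standalone sketch of the same pitfall:

#include <stdio.h>

int main(void)
{
    int dialect = 0, opcode = -1;

    dialect |= 0x4,  /* accidental trailing comma: still one statement... */
    opcode = 0;      /* ...this assignment is the comma operator's RHS */

    printf("dialect=%d opcode=%d\n", dialect, opcode); /* dialect=4 opcode=0 */
    return 0;
}

Harmless in this instance, but the comma silently changes meaning if a conditional or declaration follows, which is why the patch tidies it.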
3172 +diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
3173 +index bba3da6ef157..6ea9e1804233 100644
3174 +--- a/arch/riscv/include/asm/syscall.h
3175 ++++ b/arch/riscv/include/asm/syscall.h
3176 +@@ -79,10 +79,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
3177 + if (i == 0) {
3178 + args[0] = regs->orig_a0;
3179 + args++;
3180 +- i++;
3181 + n--;
3182 ++ } else {
3183 ++ i--;
3184 + }
3185 +- memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
3186 ++ memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
3187 + }
3188 +
3189 + static inline void syscall_set_arguments(struct task_struct *task,
3190 +@@ -94,10 +95,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
3191 + if (i == 0) {
3192 + regs->orig_a0 = args[0];
3193 + args++;
3194 +- i++;
3195 + n--;
3196 +- }
3197 +- memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
3198 ++ } else {
3199 ++ i--;
3200 ++ }
3201 ++ memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
3202 + }
3203 +
3204 + static inline int syscall_get_arch(void)
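
The riscv syscall hunk above fixes double scaling: &regs->a1 + i * sizeof(regs->a1) multiplies the offset by the element size twice, because C pointer arithmetic already scales by sizeof(*ptr). A small userspace sketch of the corrected arithmetic (illustrative names, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned long regs[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    unsigned long args[3];

    /* Correct: pointer arithmetic already scales by the element size,
     * so &regs[1] + 2 points at regs[3]. */
    memcpy(args, &regs[1] + 2, sizeof(args));
    printf("%lu %lu %lu\n", args[0], args[1], args[2]); /* 13 14 15 */

    /* The buggy form, &regs[1] + 2 * sizeof(regs[1]), would point 16
     * elements past regs[1] on LP64 -- far out of bounds. */
    return 0;
}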
3205 +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
3206 +index d5d24889c3bc..c2b8c8c6c9be 100644
3207 +--- a/arch/s390/include/asm/kvm_host.h
3208 ++++ b/arch/s390/include/asm/kvm_host.h
3209 +@@ -878,7 +878,7 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
3210 + static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
3211 + static inline void kvm_arch_free_memslot(struct kvm *kvm,
3212 + struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
3213 +-static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {}
3214 ++static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
3215 + static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
3216 + static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
3217 + struct kvm_memory_slot *slot) {}
3218 +diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
3219 +index bfabeb1889cc..1266194afb02 100644
3220 +--- a/arch/s390/kernel/perf_cpum_sf.c
3221 ++++ b/arch/s390/kernel/perf_cpum_sf.c
3222 +@@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb)
3223 +
3224 + /*
3225 + * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
3226 +- * @cpu: On which to allocate, -1 means current
3227 ++ * @event: Event the buffer is set up for, event->cpu == -1 means current
3228 + * @pages: Array of pointers to buffer pages passed from perf core
3229 + * @nr_pages: Total pages
3230 + * @snapshot: Flag for snapshot mode
3231 +@@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb)
3232 + *
3233 + * Return the private AUX buffer structure if success or NULL if fails.
3234 + */
3235 +-static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
3236 +- bool snapshot)
3237 ++static void *aux_buffer_setup(struct perf_event *event, void **pages,
3238 ++ int nr_pages, bool snapshot)
3239 + {
3240 + struct sf_buffer *sfb;
3241 + struct aux_buffer *aux;
3242 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3243 +index 7ed90a759135..01a3f4964d57 100644
3244 +--- a/arch/s390/kernel/setup.c
3245 ++++ b/arch/s390/kernel/setup.c
3246 +@@ -369,7 +369,7 @@ void __init arch_call_rest_init(void)
3247 + : : [_frame] "a" (frame));
3248 + }
3249 +
3250 +-static void __init setup_lowcore(void)
3251 ++static void __init setup_lowcore_dat_off(void)
3252 + {
3253 + struct lowcore *lc;
3254 +
3255 +@@ -380,19 +380,16 @@ static void __init setup_lowcore(void)
3256 + lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
3257 + lc->restart_psw.mask = PSW_KERNEL_BITS;
3258 + lc->restart_psw.addr = (unsigned long) restart_int_handler;
3259 +- lc->external_new_psw.mask = PSW_KERNEL_BITS |
3260 +- PSW_MASK_DAT | PSW_MASK_MCHECK;
3261 ++ lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
3262 + lc->external_new_psw.addr = (unsigned long) ext_int_handler;
3263 + lc->svc_new_psw.mask = PSW_KERNEL_BITS |
3264 +- PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
3265 ++ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
3266 + lc->svc_new_psw.addr = (unsigned long) system_call;
3267 +- lc->program_new_psw.mask = PSW_KERNEL_BITS |
3268 +- PSW_MASK_DAT | PSW_MASK_MCHECK;
3269 ++ lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
3270 + lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
3271 + lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
3272 + lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
3273 +- lc->io_new_psw.mask = PSW_KERNEL_BITS |
3274 +- PSW_MASK_DAT | PSW_MASK_MCHECK;
3275 ++ lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
3276 + lc->io_new_psw.addr = (unsigned long) io_int_handler;
3277 + lc->clock_comparator = clock_comparator_max;
3278 + lc->nodat_stack = ((unsigned long) &init_thread_union)
3279 +@@ -452,6 +449,16 @@ static void __init setup_lowcore(void)
3280 + lowcore_ptr[0] = lc;
3281 + }
3282 +
3283 ++static void __init setup_lowcore_dat_on(void)
3284 ++{
3285 ++ __ctl_clear_bit(0, 28);
3286 ++ S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
3287 ++ S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
3288 ++ S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
3289 ++ S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
3290 ++ __ctl_set_bit(0, 28);
3291 ++}
3292 ++
3293 + static struct resource code_resource = {
3294 + .name = "Kernel code",
3295 + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
3296 +@@ -1072,7 +1079,7 @@ void __init setup_arch(char **cmdline_p)
3297 + #endif
3298 +
3299 + setup_resources();
3300 +- setup_lowcore();
3301 ++ setup_lowcore_dat_off();
3302 + smp_fill_possible_mask();
3303 + cpu_detect_mhz_feature();
3304 + cpu_init();
3305 +@@ -1085,6 +1092,12 @@ void __init setup_arch(char **cmdline_p)
3306 + */
3307 + paging_init();
3308 +
3309 ++ /*
3317 3310 ++ * Now that paging_init() has created the kernel page table,
3318 3311 ++ * the new PSWs in lowcore can run with DAT enabled.
3312 ++ */
3313 ++ setup_lowcore_dat_on();
3314 ++
3315 + /* Setup default console */
3316 + conmode_default();
3317 + set_preferred_console();
3318 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
3319 +index 68261430fe6e..64d5a3327030 100644
3320 +--- a/arch/x86/Kconfig
3321 ++++ b/arch/x86/Kconfig
3322 +@@ -2221,14 +2221,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
3323 + If unsure, leave at the default value.
3324 +
3325 + config HOTPLUG_CPU
3326 +- bool "Support for hot-pluggable CPUs"
3327 ++ def_bool y
3328 + depends on SMP
3329 +- ---help---
3330 +- Say Y here to allow turning CPUs off and on. CPUs can be
3331 +- controlled through /sys/devices/system/cpu.
3332 +- ( Note: power management support will enable this option
3333 +- automatically on SMP systems. )
3334 +- Say N if you want to disable CPU hotplug.
3335 +
3336 + config BOOTPARAM_HOTPLUG_CPU0
3337 + bool "Set default setting of cpu0_hotpluggable"
3338 +diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
3339 +index 9b5adae9cc40..e2839b5c246c 100644
3340 +--- a/arch/x86/boot/Makefile
3341 ++++ b/arch/x86/boot/Makefile
3342 +@@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
3343 + AFLAGS_header.o += -I$(objtree)/$(obj)
3344 + $(obj)/header.o: $(obj)/zoffset.h
3345 +
3346 +-LDFLAGS_setup.elf := -T
3347 ++LDFLAGS_setup.elf := -m elf_i386 -T
3348 + $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
3349 + $(call if_changed,ld)
3350 +
3351 +diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
3352 +index 9e2157371491..f8debf7aeb4c 100644
3353 +--- a/arch/x86/boot/compressed/pgtable_64.c
3354 ++++ b/arch/x86/boot/compressed/pgtable_64.c
3355 +@@ -1,5 +1,7 @@
3356 ++#include <linux/efi.h>
3357 + #include <asm/e820/types.h>
3358 + #include <asm/processor.h>
3359 ++#include <asm/efi.h>
3360 + #include "pgtable.h"
3361 + #include "../string.h"
3362 +
3363 +@@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option);
3364 +
3365 + static unsigned long find_trampoline_placement(void)
3366 + {
3367 +- unsigned long bios_start, ebda_start;
3368 ++ unsigned long bios_start = 0, ebda_start = 0;
3369 + unsigned long trampoline_start;
3370 + struct boot_e820_entry *entry;
3371 ++ char *signature;
3372 + int i;
3373 +
3374 + /*
3375 +@@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void)
3376 + * This code is based on reserve_bios_regions().
3377 + */
3378 +
3379 +- ebda_start = *(unsigned short *)0x40e << 4;
3380 +- bios_start = *(unsigned short *)0x413 << 10;
3381 ++ /*
3382 ++ * EFI systems may not provide legacy ROM. The memory may not be mapped
3383 ++ * at all.
3384 ++ *
3385 ++ * Only look for values in the legacy ROM for non-EFI systems.
3386 ++ */
3387 ++ signature = (char *)&boot_params->efi_info.efi_loader_signature;
3388 ++ if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
3389 ++ strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
3390 ++ ebda_start = *(unsigned short *)0x40e << 4;
3391 ++ bios_start = *(unsigned short *)0x413 << 10;
3392 ++ }
3393 +
3394 + if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
3395 + bios_start = BIOS_START_MAX;
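
The guard above makes the decompression stub read the EBDA and base-memory BDA words only on legacy BIOS boots, where that low memory is known to be mapped. A sketch of the signature test: "EL32" and "EL64" are the loader-signature strings the hunk compares against; the rest is illustrative:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *signature = "EL64"; /* stand-in for boot_params data */

    if (strncmp(signature, "EL32", 4) && strncmp(signature, "EL64", 4))
        printf("legacy BIOS boot: read EBDA word at 0x40e, base memory at 0x413\n");
    else
        printf("EFI boot: skip the legacy ROM area, fall back to BIOS_START_MAX\n");
    return 0;
}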
3396 +diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
3397 +index 2a356b948720..3ea71b871813 100644
3398 +--- a/arch/x86/crypto/aegis128-aesni-glue.c
3399 ++++ b/arch/x86/crypto/aegis128-aesni-glue.c
3400 +@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
3401 + }
3402 +
3403 + static void crypto_aegis128_aesni_process_crypt(
3404 +- struct aegis_state *state, struct aead_request *req,
3405 ++ struct aegis_state *state, struct skcipher_walk *walk,
3406 + const struct aegis_crypt_ops *ops)
3407 + {
3408 +- struct skcipher_walk walk;
3409 +- u8 *src, *dst;
3410 +- unsigned int chunksize, base;
3411 +-
3412 +- ops->skcipher_walk_init(&walk, req, false);
3413 +-
3414 +- while (walk.nbytes) {
3415 +- src = walk.src.virt.addr;
3416 +- dst = walk.dst.virt.addr;
3417 +- chunksize = walk.nbytes;
3418 +-
3419 +- ops->crypt_blocks(state, chunksize, src, dst);
3420 +-
3421 +- base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
3422 +- src += base;
3423 +- dst += base;
3424 +- chunksize &= AEGIS128_BLOCK_SIZE - 1;
3425 +-
3426 +- if (chunksize > 0)
3427 +- ops->crypt_tail(state, chunksize, src, dst);
3428 ++ while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
3429 ++ ops->crypt_blocks(state,
3430 ++ round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
3431 ++ walk->src.virt.addr, walk->dst.virt.addr);
3432 ++ skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
3433 ++ }
3434 +
3435 +- skcipher_walk_done(&walk, 0);
3436 ++ if (walk->nbytes) {
3437 ++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
3438 ++ walk->dst.virt.addr);
3439 ++ skcipher_walk_done(walk, 0);
3440 + }
3441 + }
3442 +
3443 +@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
3444 + {
3445 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3446 + struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
3447 ++ struct skcipher_walk walk;
3448 + struct aegis_state state;
3449 +
3450 ++ ops->skcipher_walk_init(&walk, req, true);
3451 ++
3452 + kernel_fpu_begin();
3453 +
3454 + crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
3455 + crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
3456 +- crypto_aegis128_aesni_process_crypt(&state, req, ops);
3457 ++ crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
3458 + crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
3459 +
3460 + kernel_fpu_end();
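
The rewritten loop above (and the matching aegis128l/aegis256 and morus hunks that follow) lets the walk do the bookkeeping: whole blocks are consumed with round_down(), the sub-block remainder is handed back through skcipher_walk_done(), and crypt_tail() runs once at the end. A standalone sketch of just that control flow, assuming a linear buffer:

#include <stdio.h>

#define BLOCK 16 /* stand-in for AEGIS128_BLOCK_SIZE */

int main(void)
{
    unsigned int nbytes = 70;

    /* Whole blocks first; the remainder is "handed back", which is what
     * skcipher_walk_done(walk, walk->nbytes % BLOCK) does above. */
    while (nbytes >= BLOCK) {
        unsigned int full = nbytes - (nbytes % BLOCK); /* round_down() */
        printf("crypt_blocks(%u bytes)\n", full);
        nbytes %= BLOCK;
    }
    if (nbytes)
        printf("crypt_tail(%u bytes)\n", nbytes); /* final 6 bytes */
    return 0;
}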
3461 +diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
3462 +index dbe8bb980da1..1b1b39c66c5e 100644
3463 +--- a/arch/x86/crypto/aegis128l-aesni-glue.c
3464 ++++ b/arch/x86/crypto/aegis128l-aesni-glue.c
3465 +@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
3466 + }
3467 +
3468 + static void crypto_aegis128l_aesni_process_crypt(
3469 +- struct aegis_state *state, struct aead_request *req,
3470 ++ struct aegis_state *state, struct skcipher_walk *walk,
3471 + const struct aegis_crypt_ops *ops)
3472 + {
3473 +- struct skcipher_walk walk;
3474 +- u8 *src, *dst;
3475 +- unsigned int chunksize, base;
3476 +-
3477 +- ops->skcipher_walk_init(&walk, req, false);
3478 +-
3479 +- while (walk.nbytes) {
3480 +- src = walk.src.virt.addr;
3481 +- dst = walk.dst.virt.addr;
3482 +- chunksize = walk.nbytes;
3483 +-
3484 +- ops->crypt_blocks(state, chunksize, src, dst);
3485 +-
3486 +- base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
3487 +- src += base;
3488 +- dst += base;
3489 +- chunksize &= AEGIS128L_BLOCK_SIZE - 1;
3490 +-
3491 +- if (chunksize > 0)
3492 +- ops->crypt_tail(state, chunksize, src, dst);
3493 ++ while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
3494 ++ ops->crypt_blocks(state, round_down(walk->nbytes,
3495 ++ AEGIS128L_BLOCK_SIZE),
3496 ++ walk->src.virt.addr, walk->dst.virt.addr);
3497 ++ skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
3498 ++ }
3499 +
3500 +- skcipher_walk_done(&walk, 0);
3501 ++ if (walk->nbytes) {
3502 ++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
3503 ++ walk->dst.virt.addr);
3504 ++ skcipher_walk_done(walk, 0);
3505 + }
3506 + }
3507 +
3508 +@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
3509 + {
3510 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3511 + struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
3512 ++ struct skcipher_walk walk;
3513 + struct aegis_state state;
3514 +
3515 ++ ops->skcipher_walk_init(&walk, req, true);
3516 ++
3517 + kernel_fpu_begin();
3518 +
3519 + crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
3520 + crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
3521 +- crypto_aegis128l_aesni_process_crypt(&state, req, ops);
3522 ++ crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
3523 + crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
3524 +
3525 + kernel_fpu_end();
3526 +diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
3527 +index 8bebda2de92f..6227ca3220a0 100644
3528 +--- a/arch/x86/crypto/aegis256-aesni-glue.c
3529 ++++ b/arch/x86/crypto/aegis256-aesni-glue.c
3530 +@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
3531 + }
3532 +
3533 + static void crypto_aegis256_aesni_process_crypt(
3534 +- struct aegis_state *state, struct aead_request *req,
3535 ++ struct aegis_state *state, struct skcipher_walk *walk,
3536 + const struct aegis_crypt_ops *ops)
3537 + {
3538 +- struct skcipher_walk walk;
3539 +- u8 *src, *dst;
3540 +- unsigned int chunksize, base;
3541 +-
3542 +- ops->skcipher_walk_init(&walk, req, false);
3543 +-
3544 +- while (walk.nbytes) {
3545 +- src = walk.src.virt.addr;
3546 +- dst = walk.dst.virt.addr;
3547 +- chunksize = walk.nbytes;
3548 +-
3549 +- ops->crypt_blocks(state, chunksize, src, dst);
3550 +-
3551 +- base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
3552 +- src += base;
3553 +- dst += base;
3554 +- chunksize &= AEGIS256_BLOCK_SIZE - 1;
3555 +-
3556 +- if (chunksize > 0)
3557 +- ops->crypt_tail(state, chunksize, src, dst);
3558 ++ while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
3559 ++ ops->crypt_blocks(state,
3560 ++ round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
3561 ++ walk->src.virt.addr, walk->dst.virt.addr);
3562 ++ skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
3563 ++ }
3564 +
3565 +- skcipher_walk_done(&walk, 0);
3566 ++ if (walk->nbytes) {
3567 ++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
3568 ++ walk->dst.virt.addr);
3569 ++ skcipher_walk_done(walk, 0);
3570 + }
3571 + }
3572 +
3573 +@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
3574 + {
3575 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3576 + struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
3577 ++ struct skcipher_walk walk;
3578 + struct aegis_state state;
3579 +
3580 ++ ops->skcipher_walk_init(&walk, req, true);
3581 ++
3582 + kernel_fpu_begin();
3583 +
3584 + crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
3585 + crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
3586 +- crypto_aegis256_aesni_process_crypt(&state, req, ops);
3587 ++ crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
3588 + crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
3589 +
3590 + kernel_fpu_end();
3591 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
3592 +index 1321700d6647..ae30c8b6ec4d 100644
3593 +--- a/arch/x86/crypto/aesni-intel_glue.c
3594 ++++ b/arch/x86/crypto/aesni-intel_glue.c
3595 +@@ -821,11 +821,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
3596 + scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
3597 + }
3598 +
3599 +- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
3600 +- scatterwalk_start(&src_sg_walk, src_sg);
3601 +- if (req->src != req->dst) {
3602 +- dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
3603 +- scatterwalk_start(&dst_sg_walk, dst_sg);
3604 ++ if (left) {
3605 ++ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
3606 ++ scatterwalk_start(&src_sg_walk, src_sg);
3607 ++ if (req->src != req->dst) {
3608 ++ dst_sg = scatterwalk_ffwd(dst_start, req->dst,
3609 ++ req->assoclen);
3610 ++ scatterwalk_start(&dst_sg_walk, dst_sg);
3611 ++ }
3612 + }
3613 +
3614 + kernel_fpu_begin();
3615 +diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
3616 +index 0dccdda1eb3a..7e600f8bcdad 100644
3617 +--- a/arch/x86/crypto/morus1280_glue.c
3618 ++++ b/arch/x86/crypto/morus1280_glue.c
3619 +@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
3620 +
3621 + static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
3622 + struct morus1280_ops ops,
3623 +- struct aead_request *req)
3624 ++ struct skcipher_walk *walk)
3625 + {
3626 +- struct skcipher_walk walk;
3627 +- u8 *cursor_src, *cursor_dst;
3628 +- unsigned int chunksize, base;
3629 +-
3630 +- ops.skcipher_walk_init(&walk, req, false);
3631 +-
3632 +- while (walk.nbytes) {
3633 +- cursor_src = walk.src.virt.addr;
3634 +- cursor_dst = walk.dst.virt.addr;
3635 +- chunksize = walk.nbytes;
3636 +-
3637 +- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
3638 +-
3639 +- base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
3640 +- cursor_src += base;
3641 +- cursor_dst += base;
3642 +- chunksize &= MORUS1280_BLOCK_SIZE - 1;
3643 +-
3644 +- if (chunksize > 0)
3645 +- ops.crypt_tail(state, cursor_src, cursor_dst,
3646 +- chunksize);
3647 ++ while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
3648 ++ ops.crypt_blocks(state, walk->src.virt.addr,
3649 ++ walk->dst.virt.addr,
3650 ++ round_down(walk->nbytes,
3651 ++ MORUS1280_BLOCK_SIZE));
3652 ++ skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
3653 ++ }
3654 +
3655 +- skcipher_walk_done(&walk, 0);
3656 ++ if (walk->nbytes) {
3657 ++ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
3658 ++ walk->nbytes);
3659 ++ skcipher_walk_done(walk, 0);
3660 + }
3661 + }
3662 +
3663 +@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
3664 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3665 + struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
3666 + struct morus1280_state state;
3667 ++ struct skcipher_walk walk;
3668 ++
3669 ++ ops.skcipher_walk_init(&walk, req, true);
3670 +
3671 + kernel_fpu_begin();
3672 +
3673 + ctx->ops->init(&state, &ctx->key, req->iv);
3674 + crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
3675 +- crypto_morus1280_glue_process_crypt(&state, ops, req);
3676 ++ crypto_morus1280_glue_process_crypt(&state, ops, &walk);
3677 + ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
3678 +
3679 + kernel_fpu_end();
3680 +diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
3681 +index 7b58fe4d9bd1..cb3a81732016 100644
3682 +--- a/arch/x86/crypto/morus640_glue.c
3683 ++++ b/arch/x86/crypto/morus640_glue.c
3684 +@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
3685 +
3686 + static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
3687 + struct morus640_ops ops,
3688 +- struct aead_request *req)
3689 ++ struct skcipher_walk *walk)
3690 + {
3691 +- struct skcipher_walk walk;
3692 +- u8 *cursor_src, *cursor_dst;
3693 +- unsigned int chunksize, base;
3694 +-
3695 +- ops.skcipher_walk_init(&walk, req, false);
3696 +-
3697 +- while (walk.nbytes) {
3698 +- cursor_src = walk.src.virt.addr;
3699 +- cursor_dst = walk.dst.virt.addr;
3700 +- chunksize = walk.nbytes;
3701 +-
3702 +- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
3703 +-
3704 +- base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
3705 +- cursor_src += base;
3706 +- cursor_dst += base;
3707 +- chunksize &= MORUS640_BLOCK_SIZE - 1;
3708 +-
3709 +- if (chunksize > 0)
3710 +- ops.crypt_tail(state, cursor_src, cursor_dst,
3711 +- chunksize);
3712 ++ while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
3713 ++ ops.crypt_blocks(state, walk->src.virt.addr,
3714 ++ walk->dst.virt.addr,
3715 ++ round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
3716 ++ skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
3717 ++ }
3718 +
3719 +- skcipher_walk_done(&walk, 0);
3720 ++ if (walk->nbytes) {
3721 ++ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
3722 ++ walk->nbytes);
3723 ++ skcipher_walk_done(walk, 0);
3724 + }
3725 + }
3726 +
3727 +@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
3728 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3729 + struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
3730 + struct morus640_state state;
3731 ++ struct skcipher_walk walk;
3732 ++
3733 ++ ops.skcipher_walk_init(&walk, req, true);
3734 +
3735 + kernel_fpu_begin();
3736 +
3737 + ctx->ops->init(&state, &ctx->key, req->iv);
3738 + crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
3739 +- crypto_morus640_glue_process_crypt(&state, ops, req);
3740 ++ crypto_morus640_glue_process_crypt(&state, ops, &walk);
3741 + ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
3742 +
3743 + kernel_fpu_end();
3744 +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
3745 +index 7d2d7c801dba..0ecfac84ba91 100644
3746 +--- a/arch/x86/events/amd/core.c
3747 ++++ b/arch/x86/events/amd/core.c
3748 +@@ -3,10 +3,14 @@
3749 + #include <linux/types.h>
3750 + #include <linux/init.h>
3751 + #include <linux/slab.h>
3752 ++#include <linux/delay.h>
3753 + #include <asm/apicdef.h>
3754 ++#include <asm/nmi.h>
3755 +
3756 + #include "../perf_event.h"
3757 +
3758 ++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
3759 ++
3760 + static __initconst const u64 amd_hw_cache_event_ids
3761 + [PERF_COUNT_HW_CACHE_MAX]
3762 + [PERF_COUNT_HW_CACHE_OP_MAX]
3763 +@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
3764 + }
3765 + }
3766 +
3767 ++/*
3768 ++ * When a PMC counter overflows, an NMI is used to process the event and
3769 ++ * reset the counter. NMI latency can result in the counter being updated
3770 ++ * before the NMI can run, which can result in what appear to be spurious
3771 ++ * NMIs. This function is intended to wait for the NMI to run and reset
3772 ++ * the counter to avoid possible unhandled NMI messages.
3773 ++ */
3774 ++#define OVERFLOW_WAIT_COUNT 50
3775 ++
3776 ++static void amd_pmu_wait_on_overflow(int idx)
3777 ++{
3778 ++ unsigned int i;
3779 ++ u64 counter;
3780 ++
3781 ++ /*
3782 ++ * Wait for the counter to be reset if it has overflowed. This loop
3783 ++ * should exit very, very quickly, but just in case, don't wait
3784 ++ * forever...
3785 ++ */
3786 ++ for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
3787 ++ rdmsrl(x86_pmu_event_addr(idx), counter);
3788 ++ if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
3789 ++ break;
3790 ++
3791 ++ /* Might be in IRQ context, so can't sleep */
3792 ++ udelay(1);
3793 ++ }
3794 ++}
3795 ++
3796 ++static void amd_pmu_disable_all(void)
3797 ++{
3798 ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3799 ++ int idx;
3800 ++
3801 ++ x86_pmu_disable_all();
3802 ++
3803 ++ /*
3804 ++ * This shouldn't be called from NMI context, but add a safeguard here
3805 ++ * to return, since if we're in NMI context we can't wait for an NMI
3806 ++ * to reset an overflowed counter value.
3807 ++ */
3808 ++ if (in_nmi())
3809 ++ return;
3810 ++
3811 ++ /*
3812 ++ * Check each counter for overflow and wait for it to be reset by the
3813 ++ * NMI if it has overflowed. This relies on the fact that all active
3821 3814 ++ * counters are always enabled when this function is called and
3815 ++ * ARCH_PERFMON_EVENTSEL_INT is always set.
3816 ++ */
3817 ++ for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3818 ++ if (!test_bit(idx, cpuc->active_mask))
3819 ++ continue;
3820 ++
3821 ++ amd_pmu_wait_on_overflow(idx);
3822 ++ }
3823 ++}
3824 ++
3825 ++static void amd_pmu_disable_event(struct perf_event *event)
3826 ++{
3827 ++ x86_pmu_disable_event(event);
3828 ++
3829 ++ /*
3830 ++ * This can be called from NMI context (via x86_pmu_stop). The counter
3831 ++ * may have overflowed, but either way, we'll never see it get reset
3832 ++ * by the NMI if we're already in the NMI. And the NMI latency support
3833 ++ * below will take care of any pending NMI that might have been
3834 ++ * generated by the overflow.
3835 ++ */
3836 ++ if (in_nmi())
3837 ++ return;
3838 ++
3839 ++ amd_pmu_wait_on_overflow(event->hw.idx);
3840 ++}
3841 ++
3842 ++/*
3843 ++ * Because of NMI latency, if multiple PMC counters are active or other sources
3844 ++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
3845 ++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
3846 ++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
3847 ++ * back-to-back NMI support won't be active. This PMC handler needs to take into
3855 3848 ++ * account that this can occur; otherwise, this could result in unknown NMI
3856 3849 ++ * messages being issued. Examples of this are PMC overflow while in the NMI
3850 ++ * handler when multiple PMCs are active or PMC overflow while handling some
3851 ++ * other source of an NMI.
3852 ++ *
3853 ++ * Attempt to mitigate this by using the number of active PMCs to determine
3854 ++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
3855 ++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
3856 ++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
3857 ++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
3858 ++ */
3859 ++static int amd_pmu_handle_irq(struct pt_regs *regs)
3860 ++{
3861 ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3862 ++ int active, handled;
3863 ++
3864 ++ /*
3865 ++ * Obtain the active count before calling x86_pmu_handle_irq() since
3866 ++ * it is possible that x86_pmu_handle_irq() may make a counter
3867 ++ * inactive (through x86_pmu_stop).
3868 ++ */
3869 ++ active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
3870 ++
3871 ++ /* Process any counter overflows */
3872 ++ handled = x86_pmu_handle_irq(regs);
3873 ++
3874 ++ /*
3875 ++ * If a counter was handled, record the number of possible remaining
3876 ++ * NMIs that can occur.
3877 ++ */
3878 ++ if (handled) {
3879 ++ this_cpu_write(perf_nmi_counter,
3880 ++ min_t(unsigned int, 2, active));
3881 ++
3882 ++ return handled;
3883 ++ }
3884 ++
3885 ++ if (!this_cpu_read(perf_nmi_counter))
3886 ++ return NMI_DONE;
3887 ++
3888 ++ this_cpu_dec(perf_nmi_counter);
3889 ++
3890 ++ return NMI_HANDLED;
3891 ++}
3892 ++
3893 + static struct event_constraint *
3894 + amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3895 + struct perf_event *event)
3896 +@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
3897 +
3898 + static __initconst const struct x86_pmu amd_pmu = {
3899 + .name = "AMD",
3900 +- .handle_irq = x86_pmu_handle_irq,
3901 +- .disable_all = x86_pmu_disable_all,
3902 ++ .handle_irq = amd_pmu_handle_irq,
3903 ++ .disable_all = amd_pmu_disable_all,
3904 + .enable_all = x86_pmu_enable_all,
3905 + .enable = x86_pmu_enable_event,
3906 +- .disable = x86_pmu_disable_event,
3907 ++ .disable = amd_pmu_disable_event,
3908 + .hw_config = amd_pmu_hw_config,
3909 + .schedule_events = x86_schedule_events,
3910 + .eventsel = MSR_K7_EVNTSEL0,
3911 +@@ -732,7 +862,7 @@ void amd_pmu_enable_virt(void)
3912 + cpuc->perf_ctr_virt_mask = 0;
3913 +
3914 + /* Reload all events */
3915 +- x86_pmu_disable_all();
3916 ++ amd_pmu_disable_all();
3917 + x86_pmu_enable_all(0);
3918 + }
3919 + EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
3920 +@@ -750,7 +880,7 @@ void amd_pmu_disable_virt(void)
3921 + cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
3922 +
3923 + /* Reload all events */
3924 +- x86_pmu_disable_all();
3925 ++ amd_pmu_disable_all();
3926 + x86_pmu_enable_all(0);
3927 + }
3928 + EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
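
A compact model of the accounting described in the comment above: once the handler services at least one overflow, it banks min(active, 2) credits and spends them on subsequent otherwise-unexplained NMIs that late-arriving overflow NMIs may produce. This is a simplified single-threaded sketch with illustrative names, not the per-CPU kernel code:

#include <stdio.h>

static unsigned int nmi_credit; /* models the per-CPU perf_nmi_counter */

static int handle_pmc_nmi(int overflows_handled, int active_counters)
{
    if (overflows_handled) {
        nmi_credit = active_counters < 2 ? active_counters : 2;
        return 1;   /* NMI_HANDLED */
    }
    if (!nmi_credit)
        return 0;   /* NMI_DONE: would be reported as an unknown NMI */
    nmi_credit--;
    return 1;       /* absorb one late, already-serviced NMI */
}

int main(void)
{
    printf("%d\n", handle_pmc_nmi(1, 4)); /* overflow handled, credit = 2 */
    printf("%d\n", handle_pmc_nmi(0, 4)); /* absorbed */
    printf("%d\n", handle_pmc_nmi(0, 4)); /* absorbed */
    printf("%d\n", handle_pmc_nmi(0, 4)); /* credit exhausted: unknown NMI */
    return 0;
}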
3929 +diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
3930 +index b684f0294f35..81911e11a15d 100644
3931 +--- a/arch/x86/events/core.c
3932 ++++ b/arch/x86/events/core.c
3933 +@@ -1349,8 +1349,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
3934 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3935 + struct hw_perf_event *hwc = &event->hw;
3936 +
3937 +- if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
3938 ++ if (test_bit(hwc->idx, cpuc->active_mask)) {
3939 + x86_pmu.disable(event);
3940 ++ __clear_bit(hwc->idx, cpuc->active_mask);
3941 + cpuc->events[hwc->idx] = NULL;
3942 + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
3943 + hwc->state |= PERF_HES_STOPPED;
3944 +@@ -1447,16 +1448,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
3945 + apic_write(APIC_LVTPC, APIC_DM_NMI);
3946 +
3947 + for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3948 +- if (!test_bit(idx, cpuc->active_mask)) {
3949 +- /*
3950 +- * Though we deactivated the counter some cpus
3951 +- * might still deliver spurious interrupts still
3952 +- * in flight. Catch them:
3953 +- */
3954 +- if (__test_and_clear_bit(idx, cpuc->running))
3955 +- handled++;
3956 ++ if (!test_bit(idx, cpuc->active_mask))
3957 + continue;
3958 +- }
3959 +
3960 + event = cpuc->events[idx];
3961 +
3962 +@@ -1995,7 +1988,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
3963 + */
3964 + static void free_fake_cpuc(struct cpu_hw_events *cpuc)
3965 + {
3966 +- kfree(cpuc->shared_regs);
3967 ++ intel_cpuc_finish(cpuc);
3968 + kfree(cpuc);
3969 + }
3970 +
3971 +@@ -2007,14 +2000,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
3972 + cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
3973 + if (!cpuc)
3974 + return ERR_PTR(-ENOMEM);
3975 +-
3976 +- /* only needed, if we have extra_regs */
3977 +- if (x86_pmu.extra_regs) {
3978 +- cpuc->shared_regs = allocate_shared_regs(cpu);
3979 +- if (!cpuc->shared_regs)
3980 +- goto error;
3981 +- }
3982 + cpuc->is_fake = 1;
3983 ++
3984 ++ if (intel_cpuc_prepare(cpuc, cpu))
3985 ++ goto error;
3986 ++
3987 + return cpuc;
3988 + error:
3989 + free_fake_cpuc(cpuc);
3990 +diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
3991 +index a01ef1b0f883..7cdd7b13bbda 100644
3992 +--- a/arch/x86/events/intel/bts.c
3993 ++++ b/arch/x86/events/intel/bts.c
3994 +@@ -77,10 +77,12 @@ static size_t buf_size(struct page *page)
3995 + }
3996 +
3997 + static void *
3998 +-bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
3999 ++bts_buffer_setup_aux(struct perf_event *event, void **pages,
4000 ++ int nr_pages, bool overwrite)
4001 + {
4002 + struct bts_buffer *buf;
4003 + struct page *page;
4004 ++ int cpu = event->cpu;
4005 + int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
4006 + unsigned long offset;
4007 + size_t size = nr_pages << PAGE_SHIFT;
4008 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
4009 +index 730978dff63f..2480feb07df3 100644
4010 +--- a/arch/x86/events/intel/core.c
4011 ++++ b/arch/x86/events/intel/core.c
4012 +@@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added)
4013 + intel_pmu_enable_all(added);
4014 + }
4015 +
4016 ++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
4017 ++{
4018 ++ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
4019 ++
4020 ++ if (cpuc->tfa_shadow != val) {
4021 ++ cpuc->tfa_shadow = val;
4022 ++ wrmsrl(MSR_TSX_FORCE_ABORT, val);
4023 ++ }
4024 ++}
4025 ++
4026 ++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
4027 ++{
4028 ++ /*
4029 ++ * We're going to use PMC3, make sure TFA is set before we touch it.
4030 ++ */
4031 ++ if (cntr == 3 && !cpuc->is_fake)
4032 ++ intel_set_tfa(cpuc, true);
4033 ++}
4034 ++
4035 ++static void intel_tfa_pmu_enable_all(int added)
4036 ++{
4037 ++ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4038 ++
4039 ++ /*
4040 ++ * If we find PMC3 is no longer used when we enable the PMU, we can
4041 ++ * clear TFA.
4042 ++ */
4043 ++ if (!test_bit(3, cpuc->active_mask))
4044 ++ intel_set_tfa(cpuc, false);
4045 ++
4046 ++ intel_pmu_enable_all(added);
4047 ++}
4048 ++
4049 + static void enable_counter_freeze(void)
4050 + {
4051 + update_debugctlmsr(get_debugctlmsr() |
4052 +@@ -2768,6 +2801,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
4053 + raw_spin_unlock(&excl_cntrs->lock);
4054 + }
4055 +
4056 ++static struct event_constraint *
4057 ++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
4058 ++{
4059 ++ WARN_ON_ONCE(!cpuc->constraint_list);
4060 ++
4061 ++ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
4062 ++ struct event_constraint *cx;
4063 ++
4064 ++ /*
4065 ++ * grab pre-allocated constraint entry
4066 ++ */
4067 ++ cx = &cpuc->constraint_list[idx];
4068 ++
4069 ++ /*
4070 ++ * initialize dynamic constraint
4071 ++ * with static constraint
4072 ++ */
4073 ++ *cx = *c;
4074 ++
4075 ++ /*
4076 ++ * mark constraint as dynamic
4077 ++ */
4078 ++ cx->flags |= PERF_X86_EVENT_DYNAMIC;
4079 ++ c = cx;
4080 ++ }
4081 ++
4082 ++ return c;
4083 ++}
4084 ++
4085 + static struct event_constraint *
4086 + intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
4087 + int idx, struct event_constraint *c)
4088 +@@ -2798,27 +2860,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
4089 + * only needed when constraint has not yet
4090 + * been cloned (marked dynamic)
4091 + */
4092 +- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
4093 +- struct event_constraint *cx;
4094 +-
4095 +- /*
4096 +- * grab pre-allocated constraint entry
4097 +- */
4098 +- cx = &cpuc->constraint_list[idx];
4099 +-
4100 +- /*
4101 +- * initialize dynamic constraint
4102 +- * with static constraint
4103 +- */
4104 +- *cx = *c;
4105 +-
4106 +- /*
4107 +- * mark constraint as dynamic, so we
4108 +- * can free it later on
4109 +- */
4110 +- cx->flags |= PERF_X86_EVENT_DYNAMIC;
4111 +- c = cx;
4112 +- }
4113 ++ c = dyn_constraint(cpuc, c, idx);
4114 +
4115 + /*
4116 + * From here on, the constraint is dynamic.
4117 +@@ -3345,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4118 + return c;
4119 + }
4120 +
4121 ++static bool allow_tsx_force_abort = true;
4122 ++
4123 ++static struct event_constraint *
4124 ++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4125 ++ struct perf_event *event)
4126 ++{
4127 ++ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4128 ++
4129 ++ /*
4130 ++ * Without TFA we must not use PMC3.
4131 ++ */
4132 ++ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
4133 ++ c = dyn_constraint(cpuc, c, idx);
4134 ++ c->idxmsk64 &= ~(1ULL << 3);
4135 ++ c->weight--;
4136 ++ }
4137 ++
4138 ++ return c;
4139 ++}
4140 ++
4141 + /*
4142 + * Broadwell:
4143 + *
4144 +@@ -3398,7 +3460,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
4145 + return x86_event_sysfs_show(page, config, event);
4146 + }
4147 +
4148 +-struct intel_shared_regs *allocate_shared_regs(int cpu)
4149 ++static struct intel_shared_regs *allocate_shared_regs(int cpu)
4150 + {
4151 + struct intel_shared_regs *regs;
4152 + int i;
4153 +@@ -3430,23 +3492,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4154 + return c;
4155 + }
4156 +
4157 +-static int intel_pmu_cpu_prepare(int cpu)
4158 +-{
4159 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4160 +
4161 ++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4162 ++{
4163 + if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4164 + cpuc->shared_regs = allocate_shared_regs(cpu);
4165 + if (!cpuc->shared_regs)
4166 + goto err;
4167 + }
4168 +
4169 +- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4170 ++ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4171 + size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4172 +
4173 +- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
4174 ++ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4175 + if (!cpuc->constraint_list)
4176 + goto err_shared_regs;
4177 ++ }
4178 +
4179 ++ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4180 + cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4181 + if (!cpuc->excl_cntrs)
4182 + goto err_constraint_list;
4183 +@@ -3468,6 +3531,11 @@ err:
4184 + return -ENOMEM;
4185 + }
4186 +
4187 ++static int intel_pmu_cpu_prepare(int cpu)
4188 ++{
4189 ++ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4190 ++}
4191 ++
4192 + static void flip_smm_bit(void *data)
4193 + {
4194 + unsigned long set = *(unsigned long *)data;
4195 +@@ -3542,9 +3610,8 @@ static void intel_pmu_cpu_starting(int cpu)
4196 + }
4197 + }
4198 +
4199 +-static void free_excl_cntrs(int cpu)
4200 ++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4201 + {
4202 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4203 + struct intel_excl_cntrs *c;
4204 +
4205 + c = cpuc->excl_cntrs;
4206 +@@ -3552,9 +3619,10 @@ static void free_excl_cntrs(int cpu)
4207 + if (c->core_id == -1 || --c->refcnt == 0)
4208 + kfree(c);
4209 + cpuc->excl_cntrs = NULL;
4210 +- kfree(cpuc->constraint_list);
4211 +- cpuc->constraint_list = NULL;
4212 + }
4213 ++
4214 ++ kfree(cpuc->constraint_list);
4215 ++ cpuc->constraint_list = NULL;
4216 + }
4217 +
4218 + static void intel_pmu_cpu_dying(int cpu)
4219 +@@ -3565,9 +3633,8 @@ static void intel_pmu_cpu_dying(int cpu)
4220 + disable_counter_freeze();
4221 + }
4222 +
4223 +-static void intel_pmu_cpu_dead(int cpu)
4224 ++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4225 + {
4226 +- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4227 + struct intel_shared_regs *pc;
4228 +
4229 + pc = cpuc->shared_regs;
4230 +@@ -3577,7 +3644,12 @@ static void intel_pmu_cpu_dead(int cpu)
4231 + cpuc->shared_regs = NULL;
4232 + }
4233 +
4234 +- free_excl_cntrs(cpu);
4235 ++ free_excl_cntrs(cpuc);
4236 ++}
4237 ++
4238 ++static void intel_pmu_cpu_dead(int cpu)
4239 ++{
4240 ++ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
4241 + }
4242 +
4243 + static void intel_pmu_sched_task(struct perf_event_context *ctx,
4244 +@@ -4070,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
4245 + NULL
4246 + };
4247 +
4248 ++static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
4249 ++
4250 + static struct attribute *intel_pmu_attrs[] = {
4251 + &dev_attr_freeze_on_smi.attr,
4252 ++ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
4253 + NULL,
4254 + };
4255 +
4256 +@@ -4564,6 +4639,15 @@ __init int intel_pmu_init(void)
4257 + tsx_attr = hsw_tsx_events_attrs;
4258 + intel_pmu_pebs_data_source_skl(
4259 + boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
4260 ++
4261 ++ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
4262 ++ x86_pmu.flags |= PMU_FL_TFA;
4263 ++ x86_pmu.get_event_constraints = tfa_get_event_constraints;
4264 ++ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
4265 ++ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
4266 ++ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
4267 ++ }
4268 ++
4269 + pr_cont("Skylake events, ");
4270 + name = "skylake";
4271 + break;
4272 +@@ -4715,7 +4799,7 @@ static __init int fixup_ht_bug(void)
4273 + hardlockup_detector_perf_restart();
4274 +
4275 + for_each_online_cpu(c)
4276 +- free_excl_cntrs(c);
4277 ++ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
4278 +
4279 + cpus_read_unlock();
4280 + pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
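
tfa_get_event_constraints() above narrows a copy of the static constraint rather than the shared one: PMC3's bit is cleared from the index mask and the weight (the number of usable counters) drops by one. A minimal sketch of that mask surgery, with a toy type standing in for struct event_constraint:

#include <stdio.h>

struct constraint {
    unsigned long long idxmsk; /* one bit per usable PMC */
    int weight;                /* popcount of idxmsk */
};

int main(void)
{
    struct constraint c = { .idxmsk = 0xfULL, .weight = 4 }; /* PMC0-PMC3 */

    /* Drop PMC3 from the (copied) constraint, as the dynamic constraint
     * above does when TFA must not be enabled. */
    if (c.idxmsk & (1ULL << 3)) {
        c.idxmsk &= ~(1ULL << 3);
        c.weight--;
    }
    printf("mask=%#llx weight=%d\n", c.idxmsk, c.weight); /* 0x7, 3 */
    return 0;
}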
4281 +diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
4282 +index 9494ca68fd9d..c0e86ff21f81 100644
4283 +--- a/arch/x86/events/intel/pt.c
4284 ++++ b/arch/x86/events/intel/pt.c
4285 +@@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
4286 + * Return: Our private PT buffer structure.
4287 + */
4288 + static void *
4289 +-pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
4290 ++pt_buffer_setup_aux(struct perf_event *event, void **pages,
4291 ++ int nr_pages, bool snapshot)
4292 + {
4293 + struct pt_buffer *buf;
4294 +- int node, ret;
4295 ++ int node, ret, cpu = event->cpu;
4296 +
4297 + if (!nr_pages)
4298 + return NULL;
4299 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
4300 +index 27a461414b30..2690135bf83f 100644
4301 +--- a/arch/x86/events/intel/uncore.c
4302 ++++ b/arch/x86/events/intel/uncore.c
4303 +@@ -740,6 +740,7 @@ static int uncore_pmu_event_init(struct perf_event *event)
4304 + /* fixed counters have event field hardcoded to zero */
4305 + hwc->config = 0ULL;
4306 + } else if (is_freerunning_event(event)) {
4307 ++ hwc->config = event->attr.config;
4308 + if (!check_valid_freerunning_event(box, event))
4309 + return -EINVAL;
4310 + event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
4311 +diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
4312 +index cb46d602a6b8..853a49a8ccf6 100644
4313 +--- a/arch/x86/events/intel/uncore.h
4314 ++++ b/arch/x86/events/intel/uncore.h
4315 +@@ -292,8 +292,8 @@ static inline
4316 + unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
4317 + struct perf_event *event)
4318 + {
4319 +- unsigned int type = uncore_freerunning_type(event->attr.config);
4320 +- unsigned int idx = uncore_freerunning_idx(event->attr.config);
4321 ++ unsigned int type = uncore_freerunning_type(event->hw.config);
4322 ++ unsigned int idx = uncore_freerunning_idx(event->hw.config);
4323 + struct intel_uncore_pmu *pmu = box->pmu;
4324 +
4325 + return pmu->type->freerunning[type].counter_base +
4326 +@@ -377,7 +377,7 @@ static inline
4327 + unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
4328 + struct perf_event *event)
4329 + {
4330 +- unsigned int type = uncore_freerunning_type(event->attr.config);
4331 ++ unsigned int type = uncore_freerunning_type(event->hw.config);
4332 +
4333 + return box->pmu->type->freerunning[type].bits;
4334 + }
4335 +@@ -385,7 +385,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
4336 + static inline int uncore_num_freerunning(struct intel_uncore_box *box,
4337 + struct perf_event *event)
4338 + {
4339 +- unsigned int type = uncore_freerunning_type(event->attr.config);
4340 ++ unsigned int type = uncore_freerunning_type(event->hw.config);
4341 +
4342 + return box->pmu->type->freerunning[type].num_counters;
4343 + }
4344 +@@ -399,8 +399,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
4345 + static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
4346 + struct perf_event *event)
4347 + {
4348 +- unsigned int type = uncore_freerunning_type(event->attr.config);
4349 +- unsigned int idx = uncore_freerunning_idx(event->attr.config);
4350 ++ unsigned int type = uncore_freerunning_type(event->hw.config);
4351 ++ unsigned int idx = uncore_freerunning_idx(event->hw.config);
4352 +
4353 + return (type < uncore_num_freerunning_types(box, event)) &&
4354 + (idx < uncore_num_freerunning(box, event));
4355 +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
4356 +index 2593b0d7aeee..ef7faf486a1a 100644
4357 +--- a/arch/x86/events/intel/uncore_snb.c
4358 ++++ b/arch/x86/events/intel/uncore_snb.c
4359 +@@ -448,9 +448,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event)
4360 +
4361 + /* must be done before validate_group */
4362 + event->hw.event_base = base;
4363 +- event->hw.config = cfg;
4364 + event->hw.idx = idx;
4365 +
4366 ++ /* Convert to standard encoding format for freerunning counters */
4367 ++ event->hw.config = ((cfg - 1) << 8) | 0x10ff;
4368 ++
4369 + /* no group validation needed, we have free running counters */
4370 +
4371 + return 0;
4372 +diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
4373 +index d46fd6754d92..acd72e669c04 100644
4374 +--- a/arch/x86/events/perf_event.h
4375 ++++ b/arch/x86/events/perf_event.h
4376 +@@ -242,6 +242,11 @@ struct cpu_hw_events {
4377 + struct intel_excl_cntrs *excl_cntrs;
4378 + int excl_thread_id; /* 0 or 1 */
4379 +
4380 ++ /*
4381 ++ * SKL TSX_FORCE_ABORT shadow
4382 ++ */
4383 ++ u64 tfa_shadow;
4384 ++
4385 + /*
4386 + * AMD specific bits
4387 + */
4388 +@@ -681,6 +686,7 @@ do { \
4389 + #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
4390 + #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
4391 + #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
4392 ++#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
4393 +
4394 + #define EVENT_VAR(_id) event_attr_##_id
4395 + #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
4396 +@@ -889,7 +895,8 @@ struct event_constraint *
4397 + x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4398 + struct perf_event *event);
4399 +
4400 +-struct intel_shared_regs *allocate_shared_regs(int cpu);
4401 ++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
4402 ++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
4403 +
4404 + int intel_pmu_init(void);
4405 +
4406 +@@ -1025,9 +1032,13 @@ static inline int intel_pmu_init(void)
4407 + return 0;
4408 + }
4409 +
4410 +-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
4411 ++static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4412 ++{
4413 ++ return 0;
4414 ++}
4415 ++
4416 ++static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4417 + {
4418 +- return NULL;
4419 + }
4420 +
4421 + static inline int is_ht_workaround_enabled(void)
4422 +diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
4423 +index 7abb09e2eeb8..d3f42b6bbdac 100644
4424 +--- a/arch/x86/hyperv/hv_init.c
4425 ++++ b/arch/x86/hyperv/hv_init.c
4426 +@@ -406,6 +406,13 @@ void hyperv_cleanup(void)
4427 + /* Reset our OS id */
4428 + wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
4429 +
4430 ++ /*
4438 4431 ++ * Reset the hypercall page reference before resetting the page,
4439 4432 ++ * so that hypercall operations fail safely rather than
4440 4433 ++ * panic the kernel for using an invalid hypercall page.
4434 ++ */
4435 ++ hv_hypercall_pg = NULL;
4436 ++
4437 + /* Reset the hypercall page */
4438 + hypercall_msr.as_uint64 = 0;
4439 + wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
4440 +diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
4441 +index ad7b210aa3f6..8e790ec219a5 100644
4442 +--- a/arch/x86/include/asm/bitops.h
4443 ++++ b/arch/x86/include/asm/bitops.h
4444 +@@ -36,22 +36,17 @@
4445 + * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
4446 + */
4447 +
4448 +-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
4449 +-/* Technically wrong, but this avoids compilation errors on some gcc
4450 +- versions. */
4451 +-#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
4452 +-#else
4453 +-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
4454 +-#endif
4455 ++#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
4456 ++#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
4457 +
4458 +-#define ADDR BITOP_ADDR(addr)
4459 ++#define ADDR RLONG_ADDR(addr)
4460 +
4461 + /*
4462 + * We do the locked ops that don't return the old value as
4463 + * a mask operation on a byte.
4464 + */
4465 + #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
4466 +-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
4467 ++#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
4468 + #define CONST_MASK(nr) (1 << ((nr) & 7))
4469 +
4470 + /**
4471 +@@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
4472 + : "memory");
4473 + } else {
4474 + asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
4475 +- : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
4476 ++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
4477 + }
4478 + }
4479 +
4480 +@@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
4481 + */
4482 + static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
4483 + {
4484 +- asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
4485 ++ asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
4486 + }
4487 +
4488 + /**
4489 +@@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
4490 + : "iq" ((u8)~CONST_MASK(nr)));
4491 + } else {
4492 + asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
4493 +- : BITOP_ADDR(addr)
4494 +- : "Ir" (nr));
4495 ++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
4496 + }
4497 + }
4498 +
4499 +@@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
4500 +
4501 + static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
4502 + {
4503 +- asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
4504 ++ asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
4505 + }
4506 +
4507 + static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
4508 +@@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
4509 + bool negative;
4510 + asm volatile(LOCK_PREFIX "andb %2,%1"
4511 + CC_SET(s)
4512 +- : CC_OUT(s) (negative), ADDR
4513 ++ : CC_OUT(s) (negative), WBYTE_ADDR(addr)
4514 + : "ir" ((char) ~(1 << nr)) : "memory");
4515 + return negative;
4516 + }
4517 +@@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
4518 + * __clear_bit() is non-atomic and implies release semantics before the memory
4519 + * operation. It can be used for an unlock if no other CPUs can concurrently
4520 + * modify other bits in the word.
4521 +- *
4522 +- * No memory barrier is required here, because x86 cannot reorder stores past
4523 +- * older loads. Same principle as spin_unlock.
4524 + */
4525 + static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
4526 + {
4527 +- barrier();
4528 + __clear_bit(nr, addr);
4529 + }
4530 +
4531 +@@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
4532 + */
4533 + static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
4534 + {
4535 +- asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
4536 ++ asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
4537 + }
4538 +
4539 + /**
4540 +@@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
4541 + : "iq" ((u8)CONST_MASK(nr)));
4542 + } else {
4543 + asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
4544 +- : BITOP_ADDR(addr)
4545 +- : "Ir" (nr));
4546 ++ : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
4547 + }
4548 + }
4549 +
4550 +@@ -248,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
4551 +
4552 + asm(__ASM_SIZE(bts) " %2,%1"
4553 + CC_SET(c)
4554 +- : CC_OUT(c) (oldbit), ADDR
4555 +- : "Ir" (nr));
4556 ++ : CC_OUT(c) (oldbit)
4557 ++ : ADDR, "Ir" (nr) : "memory");
4558 + return oldbit;
4559 + }
4560 +
4561 +@@ -288,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
4562 +
4563 + asm volatile(__ASM_SIZE(btr) " %2,%1"
4564 + CC_SET(c)
4565 +- : CC_OUT(c) (oldbit), ADDR
4566 +- : "Ir" (nr));
4567 ++ : CC_OUT(c) (oldbit)
4568 ++ : ADDR, "Ir" (nr) : "memory");
4569 + return oldbit;
4570 + }
4571 +
4572 +@@ -300,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
4573 +
4574 + asm volatile(__ASM_SIZE(btc) " %2,%1"
4575 + CC_SET(c)
4576 +- : CC_OUT(c) (oldbit), ADDR
4577 +- : "Ir" (nr) : "memory");
4578 ++ : CC_OUT(c) (oldbit)
4579 ++ : ADDR, "Ir" (nr) : "memory");
4580 +
4581 + return oldbit;
4582 + }
4583 +@@ -332,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
4584 + asm volatile(__ASM_SIZE(bt) " %2,%1"
4585 + CC_SET(c)
4586 + : CC_OUT(c) (oldbit)
4587 +- : "m" (*(unsigned long *)addr), "Ir" (nr));
4588 ++ : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
4589 +
4590 + return oldbit;
4591 + }
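
The bitops hunk above drops the old "+m" output operand in favor of a plain "m" input plus an explicit "memory" clobber: bts/btr/btc with a register bit number may write outside the one long named in the constraint, so telling the compiler only that long changes was never quite true. A stand-alone user-space sketch of the same constraint pattern — illustrative only, not the kernel's macros, and assuming an x86-64 build:

#include <stdio.h>

/* The word is passed as a plain "m" input; the "memory" clobber covers
 * the store, which may land outside the named long when the bit number
 * is in a register. */
static inline void sketch_set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile("btsq %1,%0"
		     : : "m" (*(volatile long *)addr), "Ir" (nr)
		     : "memory");
}

int main(void)
{
	unsigned long word = 0;

	sketch_set_bit(5, &word);
	printf("word = %#lx\n", word);	/* prints 0x20 */
	return 0;
}
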
4592 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
4593 +index 6d6122524711..981ff9479648 100644
4594 +--- a/arch/x86/include/asm/cpufeatures.h
4595 ++++ b/arch/x86/include/asm/cpufeatures.h
4596 +@@ -344,6 +344,7 @@
4597 + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
4598 + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
4599 + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
4600 ++#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
4601 + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
4602 + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
4603 + #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
4604 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
4605 +index 180373360e34..71d763ad2637 100644
4606 +--- a/arch/x86/include/asm/kvm_host.h
4607 ++++ b/arch/x86/include/asm/kvm_host.h
4608 +@@ -352,6 +352,7 @@ struct kvm_mmu_page {
4609 + };
4610 +
4611 + struct kvm_pio_request {
4612 ++ unsigned long linear_rip;
4613 + unsigned long count;
4614 + int in;
4615 + int port;
4616 +@@ -570,6 +571,7 @@ struct kvm_vcpu_arch {
4617 + bool tpr_access_reporting;
4618 + u64 ia32_xss;
4619 + u64 microcode_version;
4620 ++ u64 arch_capabilities;
4621 +
4622 + /*
4623 + * Paging state of the vcpu
4624 +@@ -1255,7 +1257,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
4625 + struct kvm_memory_slot *slot,
4626 + gfn_t gfn_offset, unsigned long mask);
4627 + void kvm_mmu_zap_all(struct kvm *kvm);
4628 +-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
4629 ++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
4630 + unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
4631 + void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
4632 +
4633 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
4634 +index 8e40c2446fd1..ca5bc0eacb95 100644
4635 +--- a/arch/x86/include/asm/msr-index.h
4636 ++++ b/arch/x86/include/asm/msr-index.h
4637 +@@ -666,6 +666,12 @@
4638 +
4639 + #define MSR_IA32_TSC_DEADLINE 0x000006E0
4640 +
4641 ++
4642 ++#define MSR_TSX_FORCE_ABORT 0x0000010F
4643 ++
4644 ++#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
4645 ++#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
4646 ++
4647 + /* P4/Xeon+ specific */
4648 + #define MSR_IA32_MCG_EAX 0x00000180
4649 + #define MSR_IA32_MCG_EBX 0x00000181
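
X86_FEATURE_TSX_FORCE_ABORT and the MSR_TSX_FORCE_ABORT definitions added above exist to work around an Intel erratum in which RTM transactions interfere with performance counter 3. A hedged sketch of how a consumer (for instance the Intel PMU driver) can use the two definitions together — the helper name here is made up:

#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>

/* Hypothetical consumer: forcing every RTM transaction to abort frees
 * performance counter 3 for general use on affected parts. */
static void sketch_set_tfa(bool on)
{
	if (!boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT))
		return;

	wrmsrl(MSR_TSX_FORCE_ABORT, on ? MSR_TFA_RTM_FORCE_ABORT : 0);
}
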
4650 +diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
4651 +index 55d392c6bd29..2fd165f1cffa 100644
4652 +--- a/arch/x86/include/asm/string_32.h
4653 ++++ b/arch/x86/include/asm/string_32.h
4654 +@@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
4655 + * No 3D Now!
4656 + */
4657 +
4658 +-#if (__GNUC__ >= 4)
4659 + #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
4660 +-#else
4661 +-#define memcpy(t, f, n) \
4662 +- (__builtin_constant_p((n)) \
4663 +- ? __constant_memcpy((t), (f), (n)) \
4664 +- : __memcpy((t), (f), (n)))
4665 +-#endif
4666 +
4667 + #endif
4668 + #endif /* !CONFIG_FORTIFY_SOURCE */
4669 +@@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
4670 +
4671 + {
4672 + int d0, d1;
4673 +-#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
4674 +- /* Workaround for broken gcc 4.0 */
4675 +- register unsigned long eax asm("%eax") = pattern;
4676 +-#else
4677 + unsigned long eax = pattern;
4678 +-#endif
4679 +
4680 + switch (count % 4) {
4681 + case 0:
4682 +@@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
4683 + #define __HAVE_ARCH_MEMSET
4684 + extern void *memset(void *, int, size_t);
4685 + #ifndef CONFIG_FORTIFY_SOURCE
4686 +-#if (__GNUC__ >= 4)
4687 + #define memset(s, c, count) __builtin_memset(s, c, count)
4688 +-#else
4689 +-#define memset(s, c, count) \
4690 +- (__builtin_constant_p(c) \
4691 +- ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
4692 +- (count)) \
4693 +- : __memset((s), (c), (count)))
4694 +-#endif
4695 + #endif /* !CONFIG_FORTIFY_SOURCE */
4696 +
4697 + #define __HAVE_ARCH_MEMSET16
4698 +diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
4699 +index 4e4194e21a09..75314c3dbe47 100644
4700 +--- a/arch/x86/include/asm/string_64.h
4701 ++++ b/arch/x86/include/asm/string_64.h
4702 +@@ -14,21 +14,6 @@
4703 + extern void *memcpy(void *to, const void *from, size_t len);
4704 + extern void *__memcpy(void *to, const void *from, size_t len);
4705 +
4706 +-#ifndef CONFIG_FORTIFY_SOURCE
4707 +-#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
4708 +-#define memcpy(dst, src, len) \
4709 +-({ \
4710 +- size_t __len = (len); \
4711 +- void *__ret; \
4712 +- if (__builtin_constant_p(len) && __len >= 64) \
4713 +- __ret = __memcpy((dst), (src), __len); \
4714 +- else \
4715 +- __ret = __builtin_memcpy((dst), (src), __len); \
4716 +- __ret; \
4717 +-})
4718 +-#endif
4719 +-#endif /* !CONFIG_FORTIFY_SOURCE */
4720 +-
4721 + #define __HAVE_ARCH_MEMSET
4722 + void *memset(void *s, int c, size_t n);
4723 + void *__memset(void *s, int c, size_t n);
4724 +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
4725 +index c1334aaaa78d..f3aed639dccd 100644
4726 +--- a/arch/x86/include/asm/uaccess.h
4727 ++++ b/arch/x86/include/asm/uaccess.h
4728 +@@ -76,7 +76,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
4729 + #endif
4730 +
4731 + /**
4732 +- * access_ok: - Checks if a user space pointer is valid
4733 ++ * access_ok - Checks if a user space pointer is valid
4734 + * @addr: User space pointer to start of block to check
4735 + * @size: Size of block to check
4736 + *
4737 +@@ -85,12 +85,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
4738 + *
4739 + * Checks if a pointer to a block of memory in user space is valid.
4740 + *
4741 +- * Returns true (nonzero) if the memory block may be valid, false (zero)
4742 +- * if it is definitely invalid.
4743 +- *
4744 + * Note that, depending on architecture, this function probably just
4745 + * checks that the pointer is in the user space range - after calling
4746 + * this function, memory access functions may still return -EFAULT.
4747 ++ *
4748 ++ * Return: true (nonzero) if the memory block may be valid, false (zero)
4749 ++ * if it is definitely invalid.
4750 + */
4751 + #define access_ok(addr, size) \
4752 + ({ \
4753 +@@ -135,7 +135,7 @@ extern int __get_user_bad(void);
4754 + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
4755 +
4756 + /**
4757 +- * get_user: - Get a simple variable from user space.
4758 ++ * get_user - Get a simple variable from user space.
4759 + * @x: Variable to store result.
4760 + * @ptr: Source address, in user space.
4761 + *
4762 +@@ -149,7 +149,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
4763 + * @ptr must have pointer-to-simple-variable type, and the result of
4764 + * dereferencing @ptr must be assignable to @x without a cast.
4765 + *
4766 +- * Returns zero on success, or -EFAULT on error.
4767 ++ * Return: zero on success, or -EFAULT on error.
4768 + * On error, the variable @x is set to zero.
4769 + */
4770 + /*
4771 +@@ -227,7 +227,7 @@ extern void __put_user_4(void);
4772 + extern void __put_user_8(void);
4773 +
4774 + /**
4775 +- * put_user: - Write a simple value into user space.
4776 ++ * put_user - Write a simple value into user space.
4777 + * @x: Value to copy to user space.
4778 + * @ptr: Destination address, in user space.
4779 + *
4780 +@@ -241,7 +241,7 @@ extern void __put_user_8(void);
4781 + * @ptr must have pointer-to-simple-variable type, and @x must be assignable
4782 + * to the result of dereferencing @ptr.
4783 + *
4784 +- * Returns zero on success, or -EFAULT on error.
4785 ++ * Return: zero on success, or -EFAULT on error.
4786 + */
4787 + #define put_user(x, ptr) \
4788 + ({ \
4789 +@@ -503,7 +503,7 @@ struct __large_struct { unsigned long buf[100]; };
4790 + } while (0)
4791 +
4792 + /**
4793 +- * __get_user: - Get a simple variable from user space, with less checking.
4794 ++ * __get_user - Get a simple variable from user space, with less checking.
4795 + * @x: Variable to store result.
4796 + * @ptr: Source address, in user space.
4797 + *
4798 +@@ -520,7 +520,7 @@ struct __large_struct { unsigned long buf[100]; };
4799 + * Caller must check the pointer with access_ok() before calling this
4800 + * function.
4801 + *
4802 +- * Returns zero on success, or -EFAULT on error.
4803 ++ * Return: zero on success, or -EFAULT on error.
4804 + * On error, the variable @x is set to zero.
4805 + */
4806 +
4807 +@@ -528,7 +528,7 @@ struct __large_struct { unsigned long buf[100]; };
4808 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
4809 +
4810 + /**
4811 +- * __put_user: - Write a simple value into user space, with less checking.
4812 ++ * __put_user - Write a simple value into user space, with less checking.
4813 + * @x: Value to copy to user space.
4814 + * @ptr: Destination address, in user space.
4815 + *
4816 +@@ -545,7 +545,7 @@ struct __large_struct { unsigned long buf[100]; };
4817 + * Caller must check the pointer with access_ok() before calling this
4818 + * function.
4819 + *
4820 +- * Returns zero on success, or -EFAULT on error.
4821 ++ * Return: zero on success, or -EFAULT on error.
4822 + */
4823 +
4824 + #define __put_user(x, ptr) \
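
The uaccess.h changes above are kernel-doc fixes: the "Returns ..." prose becomes a proper "Return:" section. The contract itself is unchanged — zero on success, -EFAULT on a faulting user pointer. A small sketch of that documented convention; the handler is hypothetical:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl-style handler: both helpers yield 0 on success
 * or -EFAULT when the user pointer faults. */
static long sketch_add_user_value(u32 __user *uptr, u32 *total)
{
	u32 val;

	if (get_user(val, uptr))
		return -EFAULT;

	*total += val;
	return put_user(*total, uptr);	/* 0 or -EFAULT */
}
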
4825 +diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
4826 +index 1f86e1b0a5cd..499578f7e6d7 100644
4827 +--- a/arch/x86/include/asm/unwind.h
4828 ++++ b/arch/x86/include/asm/unwind.h
4829 +@@ -23,6 +23,12 @@ struct unwind_state {
4830 + #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
4831 + bool got_irq;
4832 + unsigned long *bp, *orig_sp, ip;
4833 ++ /*
4834 ++ * If non-NULL: The current frame is incomplete and doesn't contain a
4835 ++ * valid BP. When looking for the next frame, use this instead of the
4836 ++ * non-existent saved BP.
4837 ++ */
4838 ++ unsigned long *next_bp;
4839 + struct pt_regs *regs;
4840 + #else
4841 + unsigned long *sp;
4842 +diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
4843 +index ef05bea7010d..6b5c710846f5 100644
4844 +--- a/arch/x86/include/asm/xen/hypercall.h
4845 ++++ b/arch/x86/include/asm/xen/hypercall.h
4846 +@@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
4847 + __HYPERCALL_DECLS;
4848 + __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
4849 +
4850 ++ if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
4851 ++ return -EINVAL;
4852 ++
4853 + asm volatile(CALL_NOSPEC
4854 + : __HYPERCALL_5PARAM
4855 + : [thunk_target] "a" (&hypercall_page[call])
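
The new check in xen_single_call() rejects hypercall numbers that would index past the single hypercall page (PAGE_SIZE divided by the 32-byte stub size) before the number is used to form an indirect-branch target. The general shape of the fix, sketched with hypothetical names:

#include <linux/errno.h>

#define STUB_BYTES	32		/* sizeof(hypercall_page[0]) */
#define NR_STUBS	(4096 / STUB_BYTES)

/* Hypothetical stand-in for the dispatch: bound the index before it
 * selects an indirect-call target inside the stub page. */
static long sketch_hypercall(unsigned int call, const char *page)
{
	if (call >= NR_STUBS)
		return -EINVAL;

	return ((long (*)(void))(unsigned long)(page + call * STUB_BYTES))();
}
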
4856 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
4857 +index 69f6bbb41be0..01004bfb1a1b 100644
4858 +--- a/arch/x86/kernel/cpu/amd.c
4859 ++++ b/arch/x86/kernel/cpu/amd.c
4860 +@@ -819,11 +819,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
4861 + static void init_amd_zn(struct cpuinfo_x86 *c)
4862 + {
4863 + set_cpu_cap(c, X86_FEATURE_ZEN);
4864 +- /*
4865 +- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
4866 +- * all up to and including B1.
4867 +- */
4868 +- if (c->x86_model <= 1 && c->x86_stepping <= 1)
4869 ++
4870 ++ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
4871 ++ if (!cpu_has(c, X86_FEATURE_CPB))
4872 + set_cpu_cap(c, X86_FEATURE_CPB);
4873 + }
4874 +
4875 +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
4876 +index 8257a59704ae..763d4264d16a 100644
4877 +--- a/arch/x86/kernel/ftrace.c
4878 ++++ b/arch/x86/kernel/ftrace.c
4879 +@@ -49,7 +49,7 @@ int ftrace_arch_code_modify_post_process(void)
4880 + union ftrace_code_union {
4881 + char code[MCOUNT_INSN_SIZE];
4882 + struct {
4883 +- unsigned char e8;
4884 ++ unsigned char op;
4885 + int offset;
4886 + } __attribute__((packed));
4887 + };
4888 +@@ -59,20 +59,23 @@ static int ftrace_calc_offset(long ip, long addr)
4889 + return (int)(addr - ip);
4890 + }
4891 +
4892 +-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
4893 ++static unsigned char *
4894 ++ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
4895 + {
4896 + static union ftrace_code_union calc;
4897 +
4898 +- calc.e8 = 0xe8;
4899 ++ calc.op = op;
4900 + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
4901 +
4902 +- /*
4903 +- * No locking needed, this must be called via kstop_machine
4904 +- * which in essence is like running on a uniprocessor machine.
4905 +- */
4906 + return calc.code;
4907 + }
4908 +
4909 ++static unsigned char *
4910 ++ftrace_call_replace(unsigned long ip, unsigned long addr)
4911 ++{
4912 ++ return ftrace_text_replace(0xe8, ip, addr);
4913 ++}
4914 ++
4915 + static inline int
4916 + within(unsigned long addr, unsigned long start, unsigned long end)
4917 + {
4918 +@@ -664,22 +667,6 @@ int __init ftrace_dyn_arch_init(void)
4919 + return 0;
4920 + }
4921 +
4922 +-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
4923 +-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
4924 +-{
4925 +- static union ftrace_code_union calc;
4926 +-
4927 +- /* Jmp not a call (ignore the .e8) */
4928 +- calc.e8 = 0xe9;
4929 +- calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
4930 +-
4931 +- /*
4932 +- * ftrace external locks synchronize the access to the static variable.
4933 +- */
4934 +- return calc.code;
4935 +-}
4936 +-#endif
4937 +-
4938 + /* Currently only x86_64 supports dynamic trampolines */
4939 + #ifdef CONFIG_X86_64
4940 +
4941 +@@ -891,8 +878,8 @@ static void *addr_from_call(void *ptr)
4942 + return NULL;
4943 +
4944 + /* Make sure this is a call */
4945 +- if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
4946 +- pr_warn("Expected e8, got %x\n", calc.e8);
4947 ++ if (WARN_ON_ONCE(calc.op != 0xe8)) {
4948 ++ pr_warn("Expected e8, got %x\n", calc.op);
4949 + return NULL;
4950 + }
4951 +
4952 +@@ -963,6 +950,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
4953 + #ifdef CONFIG_DYNAMIC_FTRACE
4954 + extern void ftrace_graph_call(void);
4955 +
4956 ++static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
4957 ++{
4958 ++ return ftrace_text_replace(0xe9, ip, addr);
4959 ++}
4960 ++
4961 + static int ftrace_mod_jmp(unsigned long ip, void *func)
4962 + {
4963 + unsigned char *new;
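
ftrace_text_replace() folds the old call- and jmp-specific helpers into one: both emit a 5-byte instruction whose only difference is the opcode (0xe8 for call, 0xe9 for jmp), followed by a 32-bit displacement relative to the next instruction. The arithmetic, sketched stand-alone:

#include <stdint.h>
#include <string.h>

#define INSN_SIZE 5	/* one opcode byte + 32-bit displacement */

/* Encode "call rel32" or "jmp rel32" at ip, targeting addr.  The
 * displacement is measured from the end of the instruction, hence
 * the "+ INSN_SIZE". */
static void sketch_text_replace(uint8_t op, unsigned long ip,
				unsigned long addr, uint8_t buf[INSN_SIZE])
{
	int32_t offset = (int32_t)(addr - (ip + INSN_SIZE));

	buf[0] = op;
	memcpy(buf + 1, &offset, sizeof(offset));
}
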
4964 +diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
4965 +index 53917a3ebf94..1f3b77367948 100644
4966 +--- a/arch/x86/kernel/kexec-bzimage64.c
4967 ++++ b/arch/x86/kernel/kexec-bzimage64.c
4968 +@@ -218,6 +218,9 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
4969 + params->screen_info.ext_mem_k = 0;
4970 + params->alt_mem_k = 0;
4971 +
4972 ++ /* Always fill in RSDP: it is either 0 or a valid value */
4973 ++ params->acpi_rsdp_addr = boot_params.acpi_rsdp_addr;
4974 ++
4975 + /* Default APM info */
4976 + memset(&params->apm_bios_info, 0, sizeof(params->apm_bios_info));
4977 +
4978 +@@ -256,7 +259,6 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
4979 + setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz,
4980 + efi_setup_data_offset);
4981 + #endif
4982 +-
4983 + /* Setup EDD info */
4984 + memcpy(params->eddbuf, boot_params.eddbuf,
4985 + EDDMAXNR * sizeof(struct edd_info));
4986 +diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
4987 +index 6adf6e6c2933..544bd41a514c 100644
4988 +--- a/arch/x86/kernel/kprobes/opt.c
4989 ++++ b/arch/x86/kernel/kprobes/opt.c
4990 +@@ -141,6 +141,11 @@ asm (
4991 +
4992 + void optprobe_template_func(void);
4993 + STACK_FRAME_NON_STANDARD(optprobe_template_func);
4994 ++NOKPROBE_SYMBOL(optprobe_template_func);
4995 ++NOKPROBE_SYMBOL(optprobe_template_entry);
4996 ++NOKPROBE_SYMBOL(optprobe_template_val);
4997 ++NOKPROBE_SYMBOL(optprobe_template_call);
4998 ++NOKPROBE_SYMBOL(optprobe_template_end);
4999 +
5000 + #define TMPL_MOVE_IDX \
5001 + ((long)optprobe_template_val - (long)optprobe_template_entry)
5002 +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
5003 +index e811d4d1c824..d908a37bf3f3 100644
5004 +--- a/arch/x86/kernel/kvmclock.c
5005 ++++ b/arch/x86/kernel/kvmclock.c
5006 +@@ -104,12 +104,8 @@ static u64 kvm_sched_clock_read(void)
5007 +
5008 + static inline void kvm_sched_clock_init(bool stable)
5009 + {
5010 +- if (!stable) {
5011 +- pv_ops.time.sched_clock = kvm_clock_read;
5012 ++ if (!stable)
5013 + clear_sched_clock_stable();
5014 +- return;
5015 +- }
5016 +-
5017 + kvm_sched_clock_offset = kvm_clock_read();
5018 + pv_ops.time.sched_clock = kvm_sched_clock_read;
5019 +
5020 +diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
5021 +index 3dc26f95d46e..9b9fd4826e7a 100644
5022 +--- a/arch/x86/kernel/unwind_frame.c
5023 ++++ b/arch/x86/kernel/unwind_frame.c
5024 +@@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_state *state)
5025 + }
5026 +
5027 + /* Get the next frame pointer: */
5028 +- if (state->regs)
5029 ++ if (state->next_bp) {
5030 ++ next_bp = state->next_bp;
5031 ++ state->next_bp = NULL;
5032 ++ } else if (state->regs) {
5033 + next_bp = (unsigned long *)state->regs->bp;
5034 +- else
5035 ++ } else {
5036 + next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
5037 ++ }
5038 +
5039 + /* Move to the next frame if it's safe: */
5040 + if (!update_stack_state(state, next_bp))
5041 +@@ -398,6 +402,21 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
5042 +
5043 + bp = get_frame_pointer(task, regs);
5044 +
5045 ++ /*
5046 ++ * If we crash with IP==0, the last successfully executed instruction
5047 ++ * was probably an indirect function call with a NULL function pointer.
5048 ++ * That means that SP points into the middle of an incomplete frame:
5049 ++ * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we
5050 ++ * would have written a frame pointer if we hadn't crashed.
5051 ++ * Pretend that the frame is complete and that BP points to it, but save
5052 ++ * the real BP so that we can use it when looking for the next frame.
5053 ++ */
5054 ++ if (regs && regs->ip == 0 &&
5055 ++ (unsigned long *)kernel_stack_pointer(regs) >= first_frame) {
5056 ++ state->next_bp = bp;
5057 ++ bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1;
5058 ++ }
5059 ++
5060 + /* Initialize stack info and make sure the frame data is accessible: */
5061 + get_stack_info(bp, state->task, &state->stack_info,
5062 + &state->stack_mask);
5063 +@@ -410,7 +429,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
5064 + */
5065 + while (!unwind_done(state) &&
5066 + (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
5067 +- state->bp < first_frame))
5068 ++ (state->next_bp == NULL && state->bp < first_frame)))
5069 + unwind_next_frame(state);
5070 + }
5071 + EXPORT_SYMBOL_GPL(__unwind_start);
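
The unwind_frame changes above handle a crash at IP==0, i.e. a call through a NULL function pointer. The failure mode, reproduced in miniature (sketch):

static void sketch_null_call(void)
{
	void (*fp)(void) = NULL;

	fp();	/* CALL pushes a return address, then jumps to 0: the
		 * fault hits with IP == 0 and *SP == return address,
		 * one word short of a complete frame -- exactly the
		 * state the new __unwind_start() code reconstructs. */
}
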
5072 +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
5073 +index 26038eacf74a..89be1be1790c 100644
5074 +--- a/arch/x86/kernel/unwind_orc.c
5075 ++++ b/arch/x86/kernel/unwind_orc.c
5076 +@@ -113,6 +113,20 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip)
5077 + }
5078 + #endif
5079 +
5080 ++/*
5081 ++ * If we crash with IP==0, the last successfully executed instruction
5082 ++ * was probably an indirect function call with a NULL function pointer,
5083 ++ * and we don't have unwind information for NULL.
5084 ++ * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
5085 ++ * pointer into its parent and then continue normally from there.
5086 ++ */
5087 ++static struct orc_entry null_orc_entry = {
5088 ++ .sp_offset = sizeof(long),
5089 ++ .sp_reg = ORC_REG_SP,
5090 ++ .bp_reg = ORC_REG_UNDEFINED,
5091 ++ .type = ORC_TYPE_CALL
5092 ++};
5093 ++
5094 + static struct orc_entry *orc_find(unsigned long ip)
5095 + {
5096 + static struct orc_entry *orc;
5097 +@@ -120,6 +134,9 @@ static struct orc_entry *orc_find(unsigned long ip)
5098 + if (!orc_init)
5099 + return NULL;
5100 +
5101 ++ if (ip == 0)
5102 ++ return &null_orc_entry;
5103 ++
5104 + /* For non-init vmlinux addresses, use the fast lookup table: */
5105 + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
5106 + unsigned int idx, start, stop;
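
null_orc_entry describes that same truncated IP==0 frame in ORC terms. Decoded by hand as a sketch, with x86-64 word size assumed:

/* sp_reg = ORC_REG_SP, sp_offset = sizeof(long), type = ORC_TYPE_CALL:
 * the caller's stack pointer is SP plus one word and the return
 * address is the word at SP -- the state left by "call *%rax" with
 * %rax == 0. */
static unsigned long sketch_unwind_ip0(unsigned long sp,
				       unsigned long *ret_ip)
{
	*ret_ip = *(unsigned long *)sp;	/* return address pushed by CALL */
	return sp + sizeof(long);	/* previous SP */
}
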
5107 +diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
5108 +index 0d618ee634ac..ee3b5c7d662e 100644
5109 +--- a/arch/x86/kernel/vmlinux.lds.S
5110 ++++ b/arch/x86/kernel/vmlinux.lds.S
5111 +@@ -401,7 +401,7 @@ SECTIONS
5112 + * Per-cpu symbols which need to be offset from __per_cpu_load
5113 + * for the boot processor.
5114 + */
5115 +-#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
5116 ++#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
5117 + INIT_PER_CPU(gdt_page);
5118 + INIT_PER_CPU(irq_stack_union);
5119 +
5120 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
5121 +index f2d1d230d5b8..9ab33cab9486 100644
5122 +--- a/arch/x86/kvm/mmu.c
5123 ++++ b/arch/x86/kvm/mmu.c
5124 +@@ -5635,13 +5635,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5125 + {
5126 + struct kvm_memslots *slots;
5127 + struct kvm_memory_slot *memslot;
5128 +- bool flush_tlb = true;
5129 +- bool flush = false;
5130 + int i;
5131 +
5132 +- if (kvm_available_flush_tlb_with_range())
5133 +- flush_tlb = false;
5134 +-
5135 + spin_lock(&kvm->mmu_lock);
5136 + for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5137 + slots = __kvm_memslots(kvm, i);
5138 +@@ -5653,17 +5648,12 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5139 + if (start >= end)
5140 + continue;
5141 +
5142 +- flush |= slot_handle_level_range(kvm, memslot,
5143 +- kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
5144 +- PT_MAX_HUGEPAGE_LEVEL, start,
5145 +- end - 1, flush_tlb);
5146 ++ slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
5147 ++ PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
5148 ++ start, end - 1, true);
5149 + }
5150 + }
5151 +
5152 +- if (flush)
5153 +- kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
5154 +- gfn_end - gfn_start + 1);
5155 +-
5156 + spin_unlock(&kvm->mmu_lock);
5157 + }
5158 +
5159 +@@ -5901,13 +5891,30 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5160 + return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5161 + }
5162 +
5163 +-void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
5164 ++void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
5165 + {
5166 ++ gen &= MMIO_GEN_MASK;
5167 ++
5168 ++ /*
5169 ++ * Shift to eliminate the "update in-progress" flag, which isn't
5170 ++ * included in the spte's generation number.
5171 ++ */
5172 ++ gen >>= 1;
5173 ++
5174 ++ /*
5175 ++ * Generation numbers are incremented in multiples of the number of
5176 ++ * address spaces in order to provide unique generations across all
5177 ++ * address spaces. Strip what is effectively the address space
5178 ++ * modifier prior to checking for a wrap of the MMIO generation so
5179 ++ * that a wrap in any address space is detected.
5180 ++ */
5181 ++ gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
5182 ++
5183 + /*
5184 +- * The very rare case: if the generation-number is round,
5185 ++ * The very rare case: if the MMIO generation number has wrapped,
5186 + * zap all shadow pages.
5187 + */
5188 +- if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
5189 ++ if (unlikely(gen == 0)) {
5190 + kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5191 + kvm_mmu_invalidate_zap_all_pages(kvm);
5192 + }
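
kvm_mmu_invalidate_mmio_sptes() now receives the raw memslots generation and normalizes it itself, per the comments in the hunk. A stand-alone sketch of that normalization — the mask width and address-space count below are illustrative placeholders, not the exact kernel constants:

#include <stdint.h>

#define SKETCH_MMIO_GEN_MASK	((1ull << 19) - 1)	/* illustrative */
#define SKETCH_NR_ADDR_SPACES	2			/* illustrative */

static uint64_t sketch_normalize_gen(uint64_t slots_generation)
{
	uint64_t gen = slots_generation & SKETCH_MMIO_GEN_MASK;

	gen >>= 1;	/* bit 0 is the "update in progress" flag */

	/* Generations advance in steps of the address-space count so
	 * each address space gets a unique value; strip that modifier
	 * before testing for a wrap. */
	gen &= ~((uint64_t)SKETCH_NR_ADDR_SPACES - 1);

	return gen;	/* 0 means the generation wrapped: zap all */
}
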
5193 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
5194 +index f13a3a24d360..a9b8e38d78ad 100644
5195 +--- a/arch/x86/kvm/svm.c
5196 ++++ b/arch/x86/kvm/svm.c
5197 +@@ -6422,11 +6422,11 @@ e_free:
5198 + return ret;
5199 + }
5200 +
5201 +-static int get_num_contig_pages(int idx, struct page **inpages,
5202 +- unsigned long npages)
5203 ++static unsigned long get_num_contig_pages(unsigned long idx,
5204 ++ struct page **inpages, unsigned long npages)
5205 + {
5206 + unsigned long paddr, next_paddr;
5207 +- int i = idx + 1, pages = 1;
5208 ++ unsigned long i = idx + 1, pages = 1;
5209 +
5210 + /* find the number of contiguous pages starting from idx */
5211 + paddr = __sme_page_pa(inpages[idx]);
5212 +@@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
5213 +
5214 + static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
5215 + {
5216 +- unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
5217 ++ unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
5218 + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
5219 + struct kvm_sev_launch_update_data params;
5220 + struct sev_data_launch_update_data *data;
5221 + struct page **inpages;
5222 +- int i, ret, pages;
5223 ++ int ret;
5224 +
5225 + if (!sev_guest(kvm))
5226 + return -ENOTTY;
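
The SEV change above widens the page-counting types from int to unsigned long: a guest can register more than INT_MAX pages, and the old int counters could overflow. The counting loop in isolation, with the wider types and the page size hardcoded for the sketch:

#include <stdint.h>

/* Count physically contiguous pages starting at idx; every counter is
 * unsigned long because npages may exceed INT_MAX. */
static unsigned long sketch_contig_pages(unsigned long idx,
					 const uint64_t *page_paddr,
					 unsigned long npages)
{
	unsigned long i = idx + 1, pages = 1;

	while (i < npages && page_paddr[i] == page_paddr[i - 1] + 4096) {
		pages++;
		i++;
	}
	return pages;
}
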
5227 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
5228 +index d737a51a53ca..f90b3a948291 100644
5229 +--- a/arch/x86/kvm/vmx/nested.c
5230 ++++ b/arch/x86/kvm/vmx/nested.c
5231 +@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
5232 + }
5233 + }
5234 +
5235 ++static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
5236 ++ int msr;
5237 ++
5238 ++ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
5239 ++ unsigned word = msr / BITS_PER_LONG;
5240 ++
5241 ++ msr_bitmap[word] = ~0;
5242 ++ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
5243 ++ }
5244 ++}
5245 ++
5246 + /*
5247 + * Merge L0's and L1's MSR bitmap, return false to indicate that
5248 + * we do not use the hardware.
5249 +@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
5250 + return false;
5251 +
5252 + msr_bitmap_l1 = (unsigned long *)kmap(page);
5253 +- if (nested_cpu_has_apic_reg_virt(vmcs12)) {
5254 +- /*
5255 +- * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
5256 +- * just lets the processor take the value from the virtual-APIC page;
5257 +- * take those 256 bits directly from the L1 bitmap.
5258 +- */
5259 +- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
5260 +- unsigned word = msr / BITS_PER_LONG;
5261 +- msr_bitmap_l0[word] = msr_bitmap_l1[word];
5262 +- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
5263 +- }
5264 +- } else {
5265 +- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
5266 +- unsigned word = msr / BITS_PER_LONG;
5267 +- msr_bitmap_l0[word] = ~0;
5268 +- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
5269 +- }
5270 +- }
5271 +
5272 +- nested_vmx_disable_intercept_for_msr(
5273 +- msr_bitmap_l1, msr_bitmap_l0,
5274 +- X2APIC_MSR(APIC_TASKPRI),
5275 +- MSR_TYPE_W);
5276 ++ /*
5277 ++ * To keep the control flow simple, pay eight 8-byte writes (sixteen
5278 ++ * 4-byte writes on 32-bit systems) up front to enable intercepts for
5279 ++ * the x2APIC MSR range and selectively disable them below.
5280 ++ */
5281 ++ enable_x2apic_msr_intercepts(msr_bitmap_l0);
5282 ++
5283 ++ if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
5284 ++ if (nested_cpu_has_apic_reg_virt(vmcs12)) {
5285 ++ /*
5286 ++ * L0 need not intercept reads for MSRs between 0x800
5287 ++ * and 0x8ff, it just lets the processor take the value
5288 ++ * from the virtual-APIC page; take those 256 bits
5289 ++ * directly from the L1 bitmap.
5290 ++ */
5291 ++ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
5292 ++ unsigned word = msr / BITS_PER_LONG;
5293 ++
5294 ++ msr_bitmap_l0[word] = msr_bitmap_l1[word];
5295 ++ }
5296 ++ }
5297 +
5298 +- if (nested_cpu_has_vid(vmcs12)) {
5299 +- nested_vmx_disable_intercept_for_msr(
5300 +- msr_bitmap_l1, msr_bitmap_l0,
5301 +- X2APIC_MSR(APIC_EOI),
5302 +- MSR_TYPE_W);
5303 + nested_vmx_disable_intercept_for_msr(
5304 + msr_bitmap_l1, msr_bitmap_l0,
5305 +- X2APIC_MSR(APIC_SELF_IPI),
5306 +- MSR_TYPE_W);
5307 ++ X2APIC_MSR(APIC_TASKPRI),
5308 ++ MSR_TYPE_R | MSR_TYPE_W);
5309 ++
5310 ++ if (nested_cpu_has_vid(vmcs12)) {
5311 ++ nested_vmx_disable_intercept_for_msr(
5312 ++ msr_bitmap_l1, msr_bitmap_l0,
5313 ++ X2APIC_MSR(APIC_EOI),
5314 ++ MSR_TYPE_W);
5315 ++ nested_vmx_disable_intercept_for_msr(
5316 ++ msr_bitmap_l1, msr_bitmap_l0,
5317 ++ X2APIC_MSR(APIC_SELF_IPI),
5318 ++ MSR_TYPE_W);
5319 ++ }
5320 + }
5321 +
5322 + if (spec_ctrl)
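
The rewritten merge logic leans on the layout of the 4 KiB VMX MSR bitmap: read-intercept bits for the low MSR range occupy the first 1 KiB, and the matching write-intercept bits start 0x800 bytes further in, which is where the "word + (0x800 / sizeof(long))" index comes from. The kernel loop above sets whole words (256 MSRs at a time); a per-bit sketch of the same geometry, not the kernel helpers:

#define SKETCH_BITS_PER_LONG	(8 * sizeof(long))

/* Set both the read- and write-intercept bit for one low-range MSR,
 * e.g. an x2APIC MSR in 0x800..0x8ff. */
static void sketch_intercept_both(unsigned long *msr_bitmap,
				  unsigned int msr)
{
	unsigned int word = msr / SKETCH_BITS_PER_LONG;
	unsigned int bit  = msr % SKETCH_BITS_PER_LONG;

	msr_bitmap[word] |= 1UL << bit;				/* read  */
	msr_bitmap[word + 0x800 / sizeof(long)] |= 1UL << bit;	/* write */
}
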
5323 +@@ -2765,7 +2781,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
5324 + "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
5325 +
5326 + /* Check if vmlaunch or vmresume is needed */
5327 +- "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
5328 ++ "cmpb $0, %c[launched](%% " _ASM_CX")\n\t"
5329 +
5330 + "call vmx_vmenter\n\t"
5331 +
5332 +@@ -4035,25 +4051,50 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
5333 + /* Addr = segment_base + offset */
5334 + /* offset = base + [index * scale] + displacement */
5335 + off = exit_qualification; /* holds the displacement */
5336 ++ if (addr_size == 1)
5337 ++ off = (gva_t)sign_extend64(off, 31);
5338 ++ else if (addr_size == 0)
5339 ++ off = (gva_t)sign_extend64(off, 15);
5340 + if (base_is_valid)
5341 + off += kvm_register_read(vcpu, base_reg);
5342 + if (index_is_valid)
5343 + off += kvm_register_read(vcpu, index_reg)<<scaling;
5344 + vmx_get_segment(vcpu, &s, seg_reg);
5345 +- *ret = s.base + off;
5346 +
5347 ++ /*
5348 ++ * The effective address, i.e. @off, of a memory operand is truncated
5349 ++ * based on the address size of the instruction. Note that this is
5350 ++ * the *effective address*, i.e. the address prior to accounting for
5351 ++ * the segment's base.
5352 ++ */
5353 + if (addr_size == 1) /* 32 bit */
5354 +- *ret &= 0xffffffff;
5355 ++ off &= 0xffffffff;
5356 ++ else if (addr_size == 0) /* 16 bit */
5357 ++ off &= 0xffff;
5358 +
5359 + /* Checks for #GP/#SS exceptions. */
5360 + exn = false;
5361 + if (is_long_mode(vcpu)) {
5362 ++ /*
5363 ++ * The virtual/linear address is never truncated in 64-bit
5364 ++ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
5365 ++ * address when using FS/GS with a non-zero base.
5366 ++ */
5367 ++ *ret = s.base + off;
5368 ++
5369 + /* Long mode: #GP(0)/#SS(0) if the memory address is in a
5370 + * non-canonical form. This is the only check on the memory
5371 + * destination for long mode!
5372 + */
5373 + exn = is_noncanonical_address(*ret, vcpu);
5374 + } else if (is_protmode(vcpu)) {
5375 ++ /*
5376 ++ * When not in long mode, the virtual/linear address is
5377 ++ * unconditionally truncated to 32 bits regardless of the
5378 ++ * address size.
5379 ++ */
5380 ++ *ret = (s.base + off) & 0xffffffff;
5381 ++
5382 + /* Protected mode: apply checks for segment validity in the
5383 + * following order:
5384 + * - segment type check (#GP(0) may be thrown)
5385 +@@ -4077,10 +4118,16 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
5386 + /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
5387 + */
5388 + exn = (s.unusable != 0);
5389 +- /* Protected mode: #GP(0)/#SS(0) if the memory
5390 +- * operand is outside the segment limit.
5391 ++
5392 ++ /*
5393 ++ * Protected mode: #GP(0)/#SS(0) if the memory operand is
5394 ++ * outside the segment limit. All CPUs that support VMX ignore
5395 ++ * limit checks for flat segments, i.e. segments with base==0,
5396 ++ * limit==0xffffffff and of type expand-up data or code.
5397 + */
5398 +- exn = exn || (off + sizeof(u64) > s.limit);
5399 ++ if (!(s.base == 0 && s.limit == 0xffffffff &&
5400 ++ ((s.type & 8) || !(s.type & 4))))
5401 ++ exn = exn || (off + sizeof(u64) > s.limit);
5402 + }
5403 + if (exn) {
5404 + kvm_queue_exception_e(vcpu,
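
The get_vmx_mem_address() hunk fixes operand decoding in three steps: the displacement is sign-extended to the instruction's address size, the resulting effective address (before the segment base) is truncated to that size, and the final linear address is only ever truncated outside 64-bit mode. The rules in isolation, as a sketch:

#include <stdint.h>

/* addr_size: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. */
static uint64_t sketch_vmx_linear_addr(uint64_t disp, uint64_t base,
				       uint64_t index, unsigned int scale,
				       int addr_size, uint64_t seg_base,
				       int long_mode)
{
	uint64_t off = disp;

	/* The displacement is sign-extended to the address size... */
	if (addr_size == 1)
		off = (uint64_t)(int32_t)off;
	else if (addr_size == 0)
		off = (uint64_t)(int16_t)off;

	off += base + (index << scale);

	/* ...and the *effective* address is truncated to it. */
	if (addr_size == 1)
		off &= 0xffffffff;
	else if (addr_size == 0)
		off &= 0xffff;

	/* The linear address is never truncated in 64-bit mode, but is
	 * always 32 bits outside it. */
	if (long_mode)
		return seg_base + off;
	return (seg_base + off) & 0xffffffff;
}
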
5405 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
5406 +index 30a6bcd735ec..a0a770816429 100644
5407 +--- a/arch/x86/kvm/vmx/vmx.c
5408 ++++ b/arch/x86/kvm/vmx/vmx.c
5409 +@@ -1679,12 +1679,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
5410 +
5411 + msr_info->data = to_vmx(vcpu)->spec_ctrl;
5412 + break;
5413 +- case MSR_IA32_ARCH_CAPABILITIES:
5414 +- if (!msr_info->host_initiated &&
5415 +- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
5416 +- return 1;
5417 +- msr_info->data = to_vmx(vcpu)->arch_capabilities;
5418 +- break;
5419 + case MSR_IA32_SYSENTER_CS:
5420 + msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
5421 + break;
5422 +@@ -1891,11 +1885,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
5423 + vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
5424 + MSR_TYPE_W);
5425 + break;
5426 +- case MSR_IA32_ARCH_CAPABILITIES:
5427 +- if (!msr_info->host_initiated)
5428 +- return 1;
5429 +- vmx->arch_capabilities = data;
5430 +- break;
5431 + case MSR_IA32_CR_PAT:
5432 + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
5433 + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
5434 +@@ -4083,8 +4072,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
5435 + ++vmx->nmsrs;
5436 + }
5437 +
5438 +- vmx->arch_capabilities = kvm_get_arch_capabilities();
5439 +-
5440 + vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
5441 +
5442 + /* 22.2.1, 20.8.1 */
5443 +@@ -6399,7 +6386,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
5444 + "mov %%" _ASM_AX", %%cr2 \n\t"
5445 + "3: \n\t"
5446 + /* Check if vmlaunch or vmresume is needed */
5447 +- "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
5448 ++ "cmpb $0, %c[launched](%%" _ASM_CX ") \n\t"
5449 + /* Load guest registers. Don't clobber flags. */
5450 + "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
5451 + "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
5452 +@@ -6449,10 +6436,15 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
5453 + "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
5454 + "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
5455 + "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
5456 ++
5457 + /*
5458 +- * Clear host registers marked as clobbered to prevent
5459 +- * speculative use.
5460 +- */
5461 ++ * Clear all general purpose registers (except RSP, which is loaded by
5462 ++ * the CPU during VM-Exit) to prevent speculative use of the guest's
5463 ++ * values, even those that are saved/loaded via the stack. In theory,
5464 ++ * an L1 cache miss when restoring registers could lead to speculative
5465 ++ * execution with the guest's values. Zeroing XORs are dirt cheap,
5466 ++ * i.e. the extra paranoia is essentially free.
5467 ++ */
5468 + "xor %%r8d, %%r8d \n\t"
5469 + "xor %%r9d, %%r9d \n\t"
5470 + "xor %%r10d, %%r10d \n\t"
5471 +@@ -6467,8 +6459,11 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
5472 +
5473 + "xor %%eax, %%eax \n\t"
5474 + "xor %%ebx, %%ebx \n\t"
5475 ++ "xor %%ecx, %%ecx \n\t"
5476 ++ "xor %%edx, %%edx \n\t"
5477 + "xor %%esi, %%esi \n\t"
5478 + "xor %%edi, %%edi \n\t"
5479 ++ "xor %%ebp, %%ebp \n\t"
5480 + "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
5481 + : ASM_CALL_CONSTRAINT
5482 + : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
5483 +diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
5484 +index 0ac0a64c7790..1abae731c3e4 100644
5485 +--- a/arch/x86/kvm/vmx/vmx.h
5486 ++++ b/arch/x86/kvm/vmx/vmx.h
5487 +@@ -191,7 +191,6 @@ struct vcpu_vmx {
5488 + u64 msr_guest_kernel_gs_base;
5489 + #endif
5490 +
5491 +- u64 arch_capabilities;
5492 + u64 spec_ctrl;
5493 +
5494 + u32 vm_entry_controls_shadow;
5495 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
5496 +index 941f932373d0..7ee802a92bc8 100644
5497 +--- a/arch/x86/kvm/x86.c
5498 ++++ b/arch/x86/kvm/x86.c
5499 +@@ -2443,6 +2443,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
5500 + if (msr_info->host_initiated)
5501 + vcpu->arch.microcode_version = data;
5502 + break;
5503 ++ case MSR_IA32_ARCH_CAPABILITIES:
5504 ++ if (!msr_info->host_initiated)
5505 ++ return 1;
5506 ++ vcpu->arch.arch_capabilities = data;
5507 ++ break;
5508 + case MSR_EFER:
5509 + return set_efer(vcpu, data);
5510 + case MSR_K7_HWCR:
5511 +@@ -2747,6 +2752,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
5512 + case MSR_IA32_UCODE_REV:
5513 + msr_info->data = vcpu->arch.microcode_version;
5514 + break;
5515 ++ case MSR_IA32_ARCH_CAPABILITIES:
5516 ++ if (!msr_info->host_initiated &&
5517 ++ !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
5518 ++ return 1;
5519 ++ msr_info->data = vcpu->arch.arch_capabilities;
5520 ++ break;
5521 + case MSR_IA32_TSC:
5522 + msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
5523 + break;
5524 +@@ -6522,14 +6533,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
5525 + }
5526 + EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
5527 +
5528 ++static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
5529 ++{
5530 ++ vcpu->arch.pio.count = 0;
5531 ++
5532 ++ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
5533 ++ return 1;
5534 ++
5535 ++ return kvm_skip_emulated_instruction(vcpu);
5536 ++}
5537 ++
5538 + static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
5539 + unsigned short port)
5540 + {
5541 + unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
5542 + int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
5543 + size, port, &val, 1);
5544 +- /* do not return to emulator after return from userspace */
5545 +- vcpu->arch.pio.count = 0;
5546 ++
5547 ++ if (!ret) {
5548 ++ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
5549 ++ vcpu->arch.complete_userspace_io = complete_fast_pio_out;
5550 ++ }
5551 + return ret;
5552 + }
5553 +
5554 +@@ -6540,6 +6564,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
5555 + /* We should only ever be called with arch.pio.count equal to 1 */
5556 + BUG_ON(vcpu->arch.pio.count != 1);
5557 +
5558 ++ if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
5559 ++ vcpu->arch.pio.count = 0;
5560 ++ return 1;
5561 ++ }
5562 ++
5563 + /* For size less than 4 we merge, else we zero extend */
5564 + val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
5565 + : 0;
5566 +@@ -6552,7 +6581,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
5567 + vcpu->arch.pio.port, &val, 1);
5568 + kvm_register_write(vcpu, VCPU_REGS_RAX, val);
5569 +
5570 +- return 1;
5571 ++ return kvm_skip_emulated_instruction(vcpu);
5572 + }
5573 +
5574 + static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
5575 +@@ -6571,6 +6600,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
5576 + return ret;
5577 + }
5578 +
5579 ++ vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
5580 + vcpu->arch.complete_userspace_io = complete_fast_pio_in;
5581 +
5582 + return 0;
5583 +@@ -6578,16 +6608,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
5584 +
5585 + int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
5586 + {
5587 +- int ret = kvm_skip_emulated_instruction(vcpu);
5588 ++ int ret;
5589 +
5590 +- /*
5591 +- * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
5592 +- * KVM_EXIT_DEBUG here.
5593 +- */
5594 + if (in)
5595 +- return kvm_fast_pio_in(vcpu, size, port) && ret;
5596 ++ ret = kvm_fast_pio_in(vcpu, size, port);
5597 + else
5598 +- return kvm_fast_pio_out(vcpu, size, port) && ret;
5599 ++ ret = kvm_fast_pio_out(vcpu, size, port);
5600 ++ return ret && kvm_skip_emulated_instruction(vcpu);
5601 + }
5602 + EXPORT_SYMBOL_GPL(kvm_fast_pio);
5603 +
5604 +@@ -8725,6 +8752,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5605 +
5606 + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5607 + {
5608 ++ vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
5609 + vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
5610 + kvm_vcpu_mtrr_init(vcpu);
5611 + vcpu_load(vcpu);
5612 +@@ -9348,13 +9376,13 @@ out_free:
5613 + return -ENOMEM;
5614 + }
5615 +
5616 +-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
5617 ++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
5618 + {
5619 + /*
5620 + * memslots->generation has been incremented.
5621 + * mmio generation may have reached its maximum value.
5622 + */
5623 +- kvm_mmu_invalidate_mmio_sptes(kvm, slots);
5624 ++ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
5625 + }
5626 +
5627 + int kvm_arch_prepare_memory_region(struct kvm *kvm,
5628 +diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
5629 +index 224cd0a47568..20ede17202bf 100644
5630 +--- a/arch/x86/kvm/x86.h
5631 ++++ b/arch/x86/kvm/x86.h
5632 +@@ -181,6 +181,11 @@ static inline bool emul_is_noncanonical_address(u64 la,
5633 + static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
5634 + gva_t gva, gfn_t gfn, unsigned access)
5635 + {
5636 ++ u64 gen = kvm_memslots(vcpu->kvm)->generation;
5637 ++
5638 ++ if (unlikely(gen & 1))
5639 ++ return;
5640 ++
5641 + /*
5642 + * If this is a shadow nested page table, the "GVA" is
5643 + * actually a nGPA.
5644 +@@ -188,7 +193,7 @@ static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
5645 + vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
5646 + vcpu->arch.access = access;
5647 + vcpu->arch.mmio_gfn = gfn;
5648 +- vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
5649 ++ vcpu->arch.mmio_gen = gen;
5650 + }
5651 +
5652 + static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
5653 +diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
5654 +index bfd94e7812fc..7d290777246d 100644
5655 +--- a/arch/x86/lib/usercopy_32.c
5656 ++++ b/arch/x86/lib/usercopy_32.c
5657 +@@ -54,13 +54,13 @@ do { \
5658 + } while (0)
5659 +
5660 + /**
5661 +- * clear_user: - Zero a block of memory in user space.
5662 ++ * clear_user - Zero a block of memory in user space.
5663 + * @to: Destination address, in user space.
5664 + * @n: Number of bytes to zero.
5665 + *
5666 + * Zero a block of memory in user space.
5667 + *
5668 +- * Returns number of bytes that could not be cleared.
5669 ++ * Return: number of bytes that could not be cleared.
5670 + * On success, this will be zero.
5671 + */
5672 + unsigned long
5673 +@@ -74,14 +74,14 @@ clear_user(void __user *to, unsigned long n)
5674 + EXPORT_SYMBOL(clear_user);
5675 +
5676 + /**
5677 +- * __clear_user: - Zero a block of memory in user space, with less checking.
5678 ++ * __clear_user - Zero a block of memory in user space, with less checking.
5679 + * @to: Destination address, in user space.
5680 + * @n: Number of bytes to zero.
5681 + *
5682 + * Zero a block of memory in user space. Caller must check
5683 + * the specified block with access_ok() before calling this function.
5684 + *
5685 +- * Returns number of bytes that could not be cleared.
5686 ++ * Return: number of bytes that could not be cleared.
5687 + * On success, this will be zero.
5688 + */
5689 + unsigned long
5690 +diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
5691 +index 30a5111ae5fd..527e69b12002 100644
5692 +--- a/arch/x86/pci/fixup.c
5693 ++++ b/arch/x86/pci/fixup.c
5694 +@@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev)
5695 + DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
5696 + PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
5697 +
5698 ++static void quirk_intel_th_dnv(struct pci_dev *dev)
5699 ++{
5700 ++ struct resource *r = &dev->resource[4];
5701 ++
5702 ++ /*
5703 ++ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
5704 ++ * appears to be 4 MB in reality.
5705 ++ */
5706 ++ if (r->end == r->start + 0x7ff) {
5707 ++ r->start = 0;
5708 ++ r->end = 0x3fffff;
5709 ++ r->flags |= IORESOURCE_UNSET;
5710 ++ }
5711 ++}
5712 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
5713 ++
5714 + #ifdef CONFIG_PHYS_ADDR_T_64BIT
5715 +
5716 + #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
5717 +diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
5718 +index 17456a1d3f04..6c571ae86947 100644
5719 +--- a/arch/x86/platform/efi/quirks.c
5720 ++++ b/arch/x86/platform/efi/quirks.c
5721 +@@ -717,7 +717,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
5722 + * "efi_mm" cannot be used to check if the page fault had occurred
5723 + * in the firmware context because efi=old_map doesn't use efi_pgd.
5724 + */
5725 +- if (efi_rts_work.efi_rts_id == NONE)
5726 ++ if (efi_rts_work.efi_rts_id == EFI_NONE)
5727 + return;
5728 +
5729 + /*
5730 +@@ -742,7 +742,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
5731 + * because this case occurs *very* rarely and hence could be improved
5732 + * on a need by basis.
5733 + */
5734 +- if (efi_rts_work.efi_rts_id == RESET_SYSTEM) {
5735 ++ if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) {
5736 + pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
5737 + machine_real_restart(MRR_BIOS);
5738 + return;
5739 +diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
5740 +index 4463fa72db94..96cb20de08af 100644
5741 +--- a/arch/x86/realmode/rm/Makefile
5742 ++++ b/arch/x86/realmode/rm/Makefile
5743 +@@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
5744 + targets += realmode.lds
5745 + $(obj)/realmode.lds: $(obj)/pasyms.h
5746 +
5747 +-LDFLAGS_realmode.elf := --emit-relocs -T
5748 ++LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
5749 + CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
5750 +
5751 + targets += realmode.elf
5752 +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
5753 +index 0f4fe206dcc2..20701977e6c0 100644
5754 +--- a/arch/x86/xen/mmu_pv.c
5755 ++++ b/arch/x86/xen/mmu_pv.c
5756 +@@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void)
5757 + pt = early_memremap(pt_phys, PAGE_SIZE);
5758 + clear_page(pt);
5759 + for (idx_pte = 0;
5760 +- idx_pte < min(n_pte, PTRS_PER_PTE);
5761 +- idx_pte++) {
5762 +- set_pte(pt + idx_pte,
5763 +- pfn_pte(p2m_pfn, PAGE_KERNEL));
5764 ++ idx_pte < min(n_pte, PTRS_PER_PTE);
5765 ++ idx_pte++) {
5766 ++ pt[idx_pte] = pfn_pte(p2m_pfn,
5767 ++ PAGE_KERNEL);
5768 + p2m_pfn++;
5769 + }
5770 + n_pte -= PTRS_PER_PTE;
5771 +@@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void)
5772 + make_lowmem_page_readonly(__va(pt_phys));
5773 + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
5774 + PFN_DOWN(pt_phys));
5775 +- set_pmd(pmd + idx_pt,
5776 +- __pmd(_PAGE_TABLE | pt_phys));
5777 ++ pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
5778 + pt_phys += PAGE_SIZE;
5779 + }
5780 + n_pt -= PTRS_PER_PMD;
5781 +@@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void)
5782 + make_lowmem_page_readonly(__va(pmd_phys));
5783 + pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
5784 + PFN_DOWN(pmd_phys));
5785 +- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
5786 ++ pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
5787 + pmd_phys += PAGE_SIZE;
5788 + }
5789 + n_pmd -= PTRS_PER_PUD;
5790 +diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
5791 +index 74969a437a37..2e73395f0560 100644
5792 +--- a/arch/xtensa/kernel/process.c
5793 ++++ b/arch/xtensa/kernel/process.c
5794 +@@ -321,8 +321,8 @@ unsigned long get_wchan(struct task_struct *p)
5795 +
5796 + /* Stack layout: sp-4: ra, sp-3: sp' */
5797 +
5798 +- pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
5799 +- sp = *(unsigned long *)sp - 3;
5800 ++ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
5801 ++ sp = SPILL_SLOT(sp, 1);
5802 + } while (count++ < 16);
5803 + return 0;
5804 + }
5805 +diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
5806 +index 174c11f13bba..b9f82510c650 100644
5807 +--- a/arch/xtensa/kernel/stacktrace.c
5808 ++++ b/arch/xtensa/kernel/stacktrace.c
5809 +@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
5810 + return 1;
5811 + }
5812 +
5813 ++/*
5814 ++ * level == 0 is for the return address from the caller of this function,
5815 ++ * not from this function itself.
5816 ++ */
5817 + unsigned long return_address(unsigned level)
5818 + {
5819 + struct return_addr_data r = {
5820 +- .skip = level + 1,
5821 ++ .skip = level,
5822 + };
5823 + walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
5824 + return r.addr;
5825 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
5826 +index cd307767a134..e5ed28629271 100644
5827 +--- a/block/bfq-iosched.c
5828 ++++ b/block/bfq-iosched.c
5829 +@@ -747,6 +747,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5830 +
5831 + inc_counter:
5832 + bfqq->weight_counter->num_active++;
5833 ++ bfqq->ref++;
5834 + }
5835 +
5836 + /*
5837 +@@ -771,6 +772,7 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
5838 +
5839 + reset_entity_pointer:
5840 + bfqq->weight_counter = NULL;
5841 ++ bfq_put_queue(bfqq);
5842 + }
5843 +
5844 + /*
5845 +@@ -782,9 +784,6 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
5846 + {
5847 + struct bfq_entity *entity = bfqq->entity.parent;
5848 +
5849 +- __bfq_weights_tree_remove(bfqd, bfqq,
5850 +- &bfqd->queue_weights_tree);
5851 +-
5852 + for_each_entity(entity) {
5853 + struct bfq_sched_data *sd = entity->my_sched_data;
5854 +
5855 +@@ -818,6 +817,15 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
5856 + bfqd->num_groups_with_pending_reqs--;
5857 + }
5858 + }
5859 ++
5860 ++ /*
5861 ++ * Next function is invoked last, because it causes bfqq to be
5862 ++ * freed if the following holds: bfqq is not in service and
5863 ++ * has no dispatched request. DO NOT use bfqq after the next
5864 ++ * function invocation.
5865 ++ */
5866 ++ __bfq_weights_tree_remove(bfqd, bfqq,
5867 ++ &bfqd->queue_weights_tree);
5868 + }
5869 +
5870 + /*
5871 +@@ -1011,7 +1019,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
5872 +
5873 + static int bfqq_process_refs(struct bfq_queue *bfqq)
5874 + {
5875 +- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
5876 ++ return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
5877 ++ (bfqq->weight_counter != NULL);
5878 + }
5879 +
5880 + /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
5881 +@@ -2224,7 +2233,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5882 +
5883 + if (in_service_bfqq && in_service_bfqq != bfqq &&
5884 + likely(in_service_bfqq != &bfqd->oom_bfqq) &&
5885 +- bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
5886 ++ bfq_rq_close_to_sector(io_struct, request,
5887 ++ bfqd->in_serv_last_pos) &&
5888 + bfqq->entity.parent == in_service_bfqq->entity.parent &&
5889 + bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
5890 + new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
5891 +@@ -2764,6 +2774,8 @@ update_rate_and_reset:
5892 + bfq_update_rate_reset(bfqd, rq);
5893 + update_last_values:
5894 + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
5895 ++ if (RQ_BFQQ(rq) == bfqd->in_service_queue)
5896 ++ bfqd->in_serv_last_pos = bfqd->last_position;
5897 + bfqd->last_dispatch = now_ns;
5898 + }
5899 +
5900 +diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
5901 +index 0b02bf302de0..746bd570b85a 100644
5902 +--- a/block/bfq-iosched.h
5903 ++++ b/block/bfq-iosched.h
5904 +@@ -537,6 +537,9 @@ struct bfq_data {
5905 + /* on-disk position of the last served request */
5906 + sector_t last_position;
5907 +
5908 ++ /* position of the last served request for the in-service queue */
5909 ++ sector_t in_serv_last_pos;
5910 ++
5911 + /* time of last request completion (ns) */
5912 + u64 last_completion;
5913 +
5914 +diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
5915 +index 72adbbe975d5..4aab1a8191f0 100644
5916 +--- a/block/bfq-wf2q.c
5917 ++++ b/block/bfq-wf2q.c
5918 +@@ -1667,15 +1667,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
5919 +
5920 + bfqd->busy_queues--;
5921 +
5922 +- if (!bfqq->dispatched)
5923 +- bfq_weights_tree_remove(bfqd, bfqq);
5924 +-
5925 + if (bfqq->wr_coeff > 1)
5926 + bfqd->wr_busy_queues--;
5927 +
5928 + bfqg_stats_update_dequeue(bfqq_group(bfqq));
5929 +
5930 + bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
5931 ++
5932 ++ if (!bfqq->dispatched)
5933 ++ bfq_weights_tree_remove(bfqd, bfqq);
5934 + }
5935 +
5936 + /*
5937 +diff --git a/block/bio.c b/block/bio.c
5938 +index 4db1008309ed..a06f58bd4c72 100644
5939 +--- a/block/bio.c
5940 ++++ b/block/bio.c
5941 +@@ -1238,8 +1238,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
5942 + }
5943 + }
5944 +
5945 +- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
5946 ++ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
5947 ++ if (!map_data)
5948 ++ __free_page(page);
5949 + break;
5950 ++ }
5951 +
5952 + len -= bytes;
5953 + offset = 0;
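
The bio fix closes a page leak: when bio_add_pc_page() refuses a freshly allocated bounce page, nothing else holds a reference to it, so the producer must free it before bailing out (a page from caller-supplied map_data stays owned by the caller, hence the !map_data test). The general pattern, with a hypothetical consumer standing in:

#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical stand-in for bio_add_pc_page() coming up short. */
int consumer_take_page(struct page *page);

static int sketch_fill_pages(void)
{
	for (;;) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;

		if (consumer_take_page(page) < 0) {
			/* The consumer refused the page and took no
			 * reference, so it must be freed here -- this
			 * is the leak the hunk closes. */
			__free_page(page);
			break;
		}
	}
	return 0;
}
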
5954 +diff --git a/block/blk-core.c b/block/blk-core.c
5955 +index 6b78ec56a4f2..5bde73a49399 100644
5956 +--- a/block/blk-core.c
5957 ++++ b/block/blk-core.c
5958 +@@ -1246,8 +1246,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
5959 + */
5960 + blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
5961 + {
5962 +- blk_qc_t unused;
5963 +-
5964 + if (blk_cloned_rq_check_limits(q, rq))
5965 + return BLK_STS_IOERR;
5966 +
5967 +@@ -1263,7 +1261,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
5968 + * bypass a potential scheduler on the bottom device for
5969 + * insert.
5970 + */
5971 +- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
5972 ++ return blk_mq_request_issue_directly(rq, true);
5973 + }
5974 + EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
5975 +
5976 +diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
5977 +index 140933e4a7d1..0c98b6c1ca49 100644
5978 +--- a/block/blk-mq-sched.c
5979 ++++ b/block/blk-mq-sched.c
5980 +@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
5981 + * busy in case of 'none' scheduler, and this way may save
5982 + * us one extra enqueue & dequeue to sw queue.
5983 + */
5984 +- if (!hctx->dispatch_busy && !e && !run_queue_async)
5985 ++ if (!hctx->dispatch_busy && !e && !run_queue_async) {
5986 + blk_mq_try_issue_list_directly(hctx, list);
5987 +- else
5988 +- blk_mq_insert_requests(hctx, ctx, list);
5989 ++ if (list_empty(list))
5990 ++ return;
5991 ++ }
5992 ++ blk_mq_insert_requests(hctx, ctx, list);
5993 + }
5994 +
5995 + blk_mq_run_hw_queue(hctx, run_queue_async);
5996 +diff --git a/block/blk-mq.c b/block/blk-mq.c
5997 +index 9437a5eb07cf..16f9675c57e6 100644
5998 +--- a/block/blk-mq.c
5999 ++++ b/block/blk-mq.c
6000 +@@ -1076,7 +1076,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
6001 + hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
6002 +
6003 + spin_lock(&hctx->dispatch_wait_lock);
6004 +- list_del_init(&wait->entry);
6005 ++ if (!list_empty(&wait->entry)) {
6006 ++ struct sbitmap_queue *sbq;
6007 ++
6008 ++ list_del_init(&wait->entry);
6009 ++ sbq = &hctx->tags->bitmap_tags;
6010 ++ atomic_dec(&sbq->ws_active);
6011 ++ }
6012 + spin_unlock(&hctx->dispatch_wait_lock);
6013 +
6014 + blk_mq_run_hw_queue(hctx, true);
6015 +@@ -1092,6 +1098,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
6016 + static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
6017 + struct request *rq)
6018 + {
6019 ++ struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
6020 + struct wait_queue_head *wq;
6021 + wait_queue_entry_t *wait;
6022 + bool ret;
6023 +@@ -1115,7 +1122,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
6024 + if (!list_empty_careful(&wait->entry))
6025 + return false;
6026 +
6027 +- wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
6028 ++ wq = &bt_wait_ptr(sbq, hctx)->wait;
6029 +
6030 + spin_lock_irq(&wq->lock);
6031 + spin_lock(&hctx->dispatch_wait_lock);
6032 +@@ -1125,6 +1132,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
6033 + return false;
6034 + }
6035 +
6036 ++ atomic_inc(&sbq->ws_active);
6037 + wait->flags &= ~WQ_FLAG_EXCLUSIVE;
6038 + __add_wait_queue(wq, wait);
6039 +
6040 +@@ -1145,6 +1153,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
6041 + * someone else gets the wakeup.
6042 + */
6043 + list_del_init(&wait->entry);
6044 ++ atomic_dec(&sbq->ws_active);
6045 + spin_unlock(&hctx->dispatch_wait_lock);
6046 + spin_unlock_irq(&wq->lock);
6047 +
6048 +@@ -1796,74 +1805,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
6049 + return ret;
6050 + }
6051 +
6052 +-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
6053 ++static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
6054 + struct request *rq,
6055 + blk_qc_t *cookie,
6056 +- bool bypass, bool last)
6057 ++ bool bypass_insert, bool last)
6058 + {
6059 + struct request_queue *q = rq->q;
6060 + bool run_queue = true;
6061 +- blk_status_t ret = BLK_STS_RESOURCE;
6062 +- int srcu_idx;
6063 +- bool force = false;
6064 +
6065 +- hctx_lock(hctx, &srcu_idx);
6066 + /*
6067 +- * hctx_lock is needed before checking quiesced flag.
6068 ++ * RCU or SRCU read lock is needed before checking quiesced flag.
6069 + *
6070 +- * When queue is stopped or quiesced, ignore 'bypass', insert
6071 +- * and return BLK_STS_OK to caller, and avoid driver to try to
6072 +- * dispatch again.
6073 ++ * When queue is stopped or quiesced, ignore 'bypass_insert' from
6074 ++ * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
6075 ++ * and avoid driver to try to dispatch again.
6076 + */
6077 +- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
6078 ++ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
6079 + run_queue = false;
6080 +- bypass = false;
6081 +- goto out_unlock;
6082 ++ bypass_insert = false;
6083 ++ goto insert;
6084 + }
6085 +
6086 +- if (unlikely(q->elevator && !bypass))
6087 +- goto out_unlock;
6088 ++ if (q->elevator && !bypass_insert)
6089 ++ goto insert;
6090 +
6091 + if (!blk_mq_get_dispatch_budget(hctx))
6092 +- goto out_unlock;
6093 ++ goto insert;
6094 +
6095 + if (!blk_mq_get_driver_tag(rq)) {
6096 + blk_mq_put_dispatch_budget(hctx);
6097 +- goto out_unlock;
6098 ++ goto insert;
6099 + }
6100 +
6101 +- /*
6102 +- * Always add a request that has been through
6103 +- *.queue_rq() to the hardware dispatch list.
6104 +- */
6105 +- force = true;
6106 +- ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
6107 +-out_unlock:
6108 ++ return __blk_mq_issue_directly(hctx, rq, cookie, last);
6109 ++insert:
6110 ++ if (bypass_insert)
6111 ++ return BLK_STS_RESOURCE;
6112 ++
6113 ++ blk_mq_request_bypass_insert(rq, run_queue);
6114 ++ return BLK_STS_OK;
6115 ++}
6116 ++
6117 ++static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
6118 ++ struct request *rq, blk_qc_t *cookie)
6119 ++{
6120 ++ blk_status_t ret;
6121 ++ int srcu_idx;
6122 ++
6123 ++ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
6124 ++
6125 ++ hctx_lock(hctx, &srcu_idx);
6126 ++
6127 ++ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
6128 ++ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
6129 ++ blk_mq_request_bypass_insert(rq, true);
6130 ++ else if (ret != BLK_STS_OK)
6131 ++ blk_mq_end_request(rq, ret);
6132 ++
6133 ++ hctx_unlock(hctx, srcu_idx);
6134 ++}
6135 ++
6136 ++blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
6137 ++{
6138 ++ blk_status_t ret;
6139 ++ int srcu_idx;
6140 ++ blk_qc_t unused_cookie;
6141 ++ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
6142 ++
6143 ++ hctx_lock(hctx, &srcu_idx);
6144 ++ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
6145 + hctx_unlock(hctx, srcu_idx);
6146 +- switch (ret) {
6147 +- case BLK_STS_OK:
6148 +- break;
6149 +- case BLK_STS_DEV_RESOURCE:
6150 +- case BLK_STS_RESOURCE:
6151 +- if (force) {
6152 +- blk_mq_request_bypass_insert(rq, run_queue);
6153 +- /*
6154 +- * We have to return BLK_STS_OK for the DM
6155 +- * to avoid livelock. Otherwise, we return
6156 +- * the real result to indicate whether the
6157 +- * request is direct-issued successfully.
6158 +- */
6159 +- ret = bypass ? BLK_STS_OK : ret;
6160 +- } else if (!bypass) {
6161 +- blk_mq_sched_insert_request(rq, false,
6162 +- run_queue, false);
6163 +- }
6164 +- break;
6165 +- default:
6166 +- if (!bypass)
6167 +- blk_mq_end_request(rq, ret);
6168 +- break;
6169 +- }
6170 +
6171 + return ret;
6172 + }
6173 +@@ -1871,20 +1882,22 @@ out_unlock:
6174 + void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
6175 + struct list_head *list)
6176 + {
6177 +- blk_qc_t unused;
6178 +- blk_status_t ret = BLK_STS_OK;
6179 +-
6180 + while (!list_empty(list)) {
6181 ++ blk_status_t ret;
6182 + struct request *rq = list_first_entry(list, struct request,
6183 + queuelist);
6184 +
6185 + list_del_init(&rq->queuelist);
6186 +- if (ret == BLK_STS_OK)
6187 +- ret = blk_mq_try_issue_directly(hctx, rq, &unused,
6188 +- false,
6189 ++ ret = blk_mq_request_issue_directly(rq, list_empty(list));
6190 ++ if (ret != BLK_STS_OK) {
6191 ++ if (ret == BLK_STS_RESOURCE ||
6192 ++ ret == BLK_STS_DEV_RESOURCE) {
6193 ++ blk_mq_request_bypass_insert(rq,
6194 + list_empty(list));
6195 +- else
6196 +- blk_mq_sched_insert_request(rq, false, true, false);
6197 ++ break;
6198 ++ }
6199 ++ blk_mq_end_request(rq, ret);
6200 ++ }
6201 + }
6202 +
6203 + /*
6204 +@@ -1892,7 +1905,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
6205 + * the driver there was more coming, but that turned out to
6206 + * be a lie.
6207 + */
6208 +- if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
6209 ++ if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
6210 + hctx->queue->mq_ops->commit_rqs(hctx);
6211 + }
6212 +
6213 +@@ -2005,13 +2018,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
6214 + if (same_queue_rq) {
6215 + data.hctx = same_queue_rq->mq_hctx;
6216 + blk_mq_try_issue_directly(data.hctx, same_queue_rq,
6217 +- &cookie, false, true);
6218 ++ &cookie);
6219 + }
6220 + } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
6221 + !data.hctx->dispatch_busy)) {
6222 + blk_mq_put_ctx(data.ctx);
6223 + blk_mq_bio_to_request(rq, bio);
6224 +- blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
6225 ++ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
6226 + } else {
6227 + blk_mq_put_ctx(data.ctx);
6228 + blk_mq_bio_to_request(rq, bio);
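
Two things happen in the blk-mq.c section: the direct-issue logic is split into a status-returning core (__blk_mq_try_issue_directly) with thin wrappers for the bio and cloned-request callers, and the sbitmap wait accounting gains paired atomic_inc()/atomic_dec() calls so ws_active can no longer drift when a wait entry is torn down from two places. A reduced model of the counter rule; the real code's locking is elided and all names are toy ones:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int ws_active;      /* waiters registered on the queue */

    struct waiter { bool queued; };   /* models !list_empty(&wait->entry) */

    static void add_waiter(struct waiter *w)
    {
        if (w->queued)
            return;
        w->queued = true;
        atomic_fetch_add(&ws_active, 1);   /* inc exactly once per enqueue */
    }

    /* Wakeup and dispatch can both try to unlink the entry; only the
     * path that really unlinks it may drop the counter. */
    static void remove_waiter(struct waiter *w)
    {
        if (!w->queued)
            return;
        w->queued = false;
        atomic_fetch_sub(&ws_active, 1);
    }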
6229 +diff --git a/block/blk-mq.h b/block/blk-mq.h
6230 +index d0b3dd54ef8d..a3a684a8c633 100644
6231 +--- a/block/blk-mq.h
6232 ++++ b/block/blk-mq.h
6233 +@@ -67,10 +67,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
6234 + void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
6235 + struct list_head *list);
6236 +
6237 +-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
6238 +- struct request *rq,
6239 +- blk_qc_t *cookie,
6240 +- bool bypass, bool last);
6241 ++/* Used by blk_insert_cloned_request() to issue request directly */
6242 ++blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
6243 + void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
6244 + struct list_head *list);
6245 +
6246 +diff --git a/crypto/aead.c b/crypto/aead.c
6247 +index 189c52d1f63a..4908b5e846f0 100644
6248 +--- a/crypto/aead.c
6249 ++++ b/crypto/aead.c
6250 +@@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
6251 + else
6252 + err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
6253 +
6254 +- if (err)
6255 ++ if (unlikely(err)) {
6256 ++ crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
6257 + return err;
6258 ++ }
6259 +
6260 + crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6261 + return 0;
6262 +diff --git a/crypto/aegis128.c b/crypto/aegis128.c
6263 +index c22f4414856d..789716f92e4c 100644
6264 +--- a/crypto/aegis128.c
6265 ++++ b/crypto/aegis128.c
6266 +@@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
6267 + const struct aegis128_ops *ops)
6268 + {
6269 + struct skcipher_walk walk;
6270 +- u8 *src, *dst;
6271 +- unsigned int chunksize;
6272 +
6273 + ops->skcipher_walk_init(&walk, req, false);
6274 +
6275 + while (walk.nbytes) {
6276 +- src = walk.src.virt.addr;
6277 +- dst = walk.dst.virt.addr;
6278 +- chunksize = walk.nbytes;
6279 ++ unsigned int nbytes = walk.nbytes;
6280 +
6281 +- ops->crypt_chunk(state, dst, src, chunksize);
6282 ++ if (nbytes < walk.total)
6283 ++ nbytes = round_down(nbytes, walk.stride);
6284 +
6285 +- skcipher_walk_done(&walk, 0);
6286 ++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
6287 ++ nbytes);
6288 ++
6289 ++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
6290 + }
6291 + }
6292 +
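
This aegis128 walk loop (the same change repeats below for aegis128l, aegis256, morus1280 and morus640) now feeds only whole stride-sized chunks to crypt_chunk() until the final step; a short tail is handed back through skcipher_walk_done() so it is re-presented together with the following data. The per-step arithmetic, pulled out into a standalone helper with hypothetical names:

    /* One step of the walk loop: decide how much of the currently
     * mapped span to encrypt and how much to hand back to the walk. */
    static void walk_step(unsigned int mapped, unsigned int total,
                          unsigned int stride,
                          unsigned int *consume, unsigned int *give_back)
    {
        unsigned int nbytes = mapped;

        /* A partial chunk is allowed only at the very end of the request. */
        if (nbytes < total)
            nbytes -= nbytes % stride;      /* round_down(nbytes, stride) */

        *consume = nbytes;                  /* fed to ops->crypt_chunk() */
        *give_back = mapped - nbytes;       /* passed to skcipher_walk_done() */
    }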
6293 +diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
6294 +index b6fb21ebdc3e..73811448cb6b 100644
6295 +--- a/crypto/aegis128l.c
6296 ++++ b/crypto/aegis128l.c
6297 +@@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
6298 + const struct aegis128l_ops *ops)
6299 + {
6300 + struct skcipher_walk walk;
6301 +- u8 *src, *dst;
6302 +- unsigned int chunksize;
6303 +
6304 + ops->skcipher_walk_init(&walk, req, false);
6305 +
6306 + while (walk.nbytes) {
6307 +- src = walk.src.virt.addr;
6308 +- dst = walk.dst.virt.addr;
6309 +- chunksize = walk.nbytes;
6310 ++ unsigned int nbytes = walk.nbytes;
6311 +
6312 +- ops->crypt_chunk(state, dst, src, chunksize);
6313 ++ if (nbytes < walk.total)
6314 ++ nbytes = round_down(nbytes, walk.stride);
6315 +
6316 +- skcipher_walk_done(&walk, 0);
6317 ++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
6318 ++ nbytes);
6319 ++
6320 ++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
6321 + }
6322 + }
6323 +
6324 +diff --git a/crypto/aegis256.c b/crypto/aegis256.c
6325 +index 11f0f8ec9c7c..8a71e9c06193 100644
6326 +--- a/crypto/aegis256.c
6327 ++++ b/crypto/aegis256.c
6328 +@@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
6329 + const struct aegis256_ops *ops)
6330 + {
6331 + struct skcipher_walk walk;
6332 +- u8 *src, *dst;
6333 +- unsigned int chunksize;
6334 +
6335 + ops->skcipher_walk_init(&walk, req, false);
6336 +
6337 + while (walk.nbytes) {
6338 +- src = walk.src.virt.addr;
6339 +- dst = walk.dst.virt.addr;
6340 +- chunksize = walk.nbytes;
6341 ++ unsigned int nbytes = walk.nbytes;
6342 +
6343 +- ops->crypt_chunk(state, dst, src, chunksize);
6344 ++ if (nbytes < walk.total)
6345 ++ nbytes = round_down(nbytes, walk.stride);
6346 +
6347 +- skcipher_walk_done(&walk, 0);
6348 ++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
6349 ++ nbytes);
6350 ++
6351 ++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
6352 + }
6353 + }
6354 +
6355 +diff --git a/crypto/ahash.c b/crypto/ahash.c
6356 +index 5d320a811f75..81e2767e2164 100644
6357 +--- a/crypto/ahash.c
6358 ++++ b/crypto/ahash.c
6359 +@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
6360 + int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
6361 + {
6362 + unsigned int alignmask = walk->alignmask;
6363 +- unsigned int nbytes = walk->entrylen;
6364 +
6365 + walk->data -= walk->offset;
6366 +
6367 +- if (nbytes && walk->offset & alignmask && !err) {
6368 +- walk->offset = ALIGN(walk->offset, alignmask + 1);
6369 +- nbytes = min(nbytes,
6370 +- ((unsigned int)(PAGE_SIZE)) - walk->offset);
6371 +- walk->entrylen -= nbytes;
6372 ++ if (walk->entrylen && (walk->offset & alignmask) && !err) {
6373 ++ unsigned int nbytes;
6374 +
6375 ++ walk->offset = ALIGN(walk->offset, alignmask + 1);
6376 ++ nbytes = min(walk->entrylen,
6377 ++ (unsigned int)(PAGE_SIZE - walk->offset));
6378 + if (nbytes) {
6379 ++ walk->entrylen -= nbytes;
6380 + walk->data += walk->offset;
6381 + return nbytes;
6382 + }
6383 +@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
6384 + if (err)
6385 + return err;
6386 +
6387 +- if (nbytes) {
6388 ++ if (walk->entrylen) {
6389 + walk->offset = 0;
6390 + walk->pg++;
6391 + return hash_walk_next(walk);
6392 +@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
6393 + return ret;
6394 + }
6395 +
6396 ++static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
6397 ++ unsigned int keylen)
6398 ++{
6399 ++ return -ENOSYS;
6400 ++}
6401 ++
6402 ++static void ahash_set_needkey(struct crypto_ahash *tfm)
6403 ++{
6404 ++ const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
6405 ++
6406 ++ if (tfm->setkey != ahash_nosetkey &&
6407 ++ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
6408 ++ crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
6409 ++}
6410 ++
6411 + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
6412 + unsigned int keylen)
6413 + {
6414 +@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
6415 + else
6416 + err = tfm->setkey(tfm, key, keylen);
6417 +
6418 +- if (err)
6419 ++ if (unlikely(err)) {
6420 ++ ahash_set_needkey(tfm);
6421 + return err;
6422 ++ }
6423 +
6424 + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6425 + return 0;
6426 + }
6427 + EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
6428 +
6429 +-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
6430 +- unsigned int keylen)
6431 +-{
6432 +- return -ENOSYS;
6433 +-}
6434 +-
6435 + static inline unsigned int ahash_align_buffer_size(unsigned len,
6436 + unsigned long mask)
6437 + {
6438 +@@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
6439 +
6440 + if (alg->setkey) {
6441 + hash->setkey = alg->setkey;
6442 +- if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
6443 +- crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
6444 ++ ahash_set_needkey(hash);
6445 + }
6446 +
6447 + return 0;
6448 +diff --git a/crypto/cfb.c b/crypto/cfb.c
6449 +index e81e45673498..4abfe32ff845 100644
6450 +--- a/crypto/cfb.c
6451 ++++ b/crypto/cfb.c
6452 +@@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
6453 + do {
6454 + crypto_cfb_encrypt_one(tfm, iv, dst);
6455 + crypto_xor(dst, src, bsize);
6456 +- memcpy(iv, dst, bsize);
6457 ++ iv = dst;
6458 +
6459 + src += bsize;
6460 + dst += bsize;
6461 + } while ((nbytes -= bsize) >= bsize);
6462 +
6463 ++ memcpy(walk->iv, iv, bsize);
6464 ++
6465 + return nbytes;
6466 + }
6467 +
6468 +@@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
6469 + const unsigned int bsize = crypto_cfb_bsize(tfm);
6470 + unsigned int nbytes = walk->nbytes;
6471 + u8 *src = walk->src.virt.addr;
6472 +- u8 *iv = walk->iv;
6473 ++ u8 * const iv = walk->iv;
6474 + u8 tmp[MAX_CIPHER_BLOCKSIZE];
6475 +
6476 + do {
6477 +@@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
6478 + src += bsize;
6479 + } while ((nbytes -= bsize) >= bsize);
6480 +
6481 +- memcpy(walk->iv, iv, bsize);
6482 +-
6483 + return nbytes;
6484 + }
6485 +
6486 +@@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
6487 + inst->alg.base.cra_blocksize = 1;
6488 + inst->alg.base.cra_alignmask = alg->cra_alignmask;
6489 +
6490 ++ /*
6491 ++ * To simplify the implementation, configure the skcipher walk to only
6492 ++ * give a partial block at the very end, never earlier.
6493 ++ */
6494 ++ inst->alg.chunksize = alg->cra_blocksize;
6495 ++
6496 + inst->alg.ivsize = alg->cra_blocksize;
6497 + inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
6498 + inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
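
Besides setting chunksize so the walk only ever yields a partial block at the very end, the cfb.c encryption loop drops its per-block memcpy: in CFB the next chaining value simply is the ciphertext block just written, so it is enough to keep a pointer to that block and copy it into walk->iv once. A self-contained sketch of that structure; encrypt_block() is a trivial placeholder, not a real cipher:

    #include <string.h>

    #define BSIZE 16

    /* Placeholder for crypto_cfb_encrypt_one(). */
    static void encrypt_block(unsigned char *dst, const unsigned char *src)
    {
        for (int i = 0; i < BSIZE; i++)
            dst[i] = src[i] ^ 0xAA;
    }

    static void xor_bytes(unsigned char *dst, const unsigned char *src, int n)
    {
        for (int i = 0; i < n; i++)
            dst[i] ^= src[i];
    }

    /* CFB encryption over whole blocks; caller ensures nbytes >= BSIZE. */
    static void cfb_encrypt(unsigned char *dst, const unsigned char *src,
                            unsigned int nbytes, unsigned char *iv)
    {
        const unsigned char *prev = iv;    /* chaining value */

        do {
            encrypt_block(dst, prev);      /* keystream = E(previous block) */
            xor_bytes(dst, src, BSIZE);
            prev = dst;                    /* ciphertext is the next IV */
            src += BSIZE;
            dst += BSIZE;
        } while ((nbytes -= BSIZE) >= BSIZE);

        memcpy(iv, prev, BSIZE);           /* one copy-out, as in the hunk */
    }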
6499 +diff --git a/crypto/morus1280.c b/crypto/morus1280.c
6500 +index 3889c188f266..b83576b4eb55 100644
6501 +--- a/crypto/morus1280.c
6502 ++++ b/crypto/morus1280.c
6503 +@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
6504 + const struct morus1280_ops *ops)
6505 + {
6506 + struct skcipher_walk walk;
6507 +- u8 *dst;
6508 +- const u8 *src;
6509 +
6510 + ops->skcipher_walk_init(&walk, req, false);
6511 +
6512 + while (walk.nbytes) {
6513 +- src = walk.src.virt.addr;
6514 +- dst = walk.dst.virt.addr;
6515 ++ unsigned int nbytes = walk.nbytes;
6516 +
6517 +- ops->crypt_chunk(state, dst, src, walk.nbytes);
6518 ++ if (nbytes < walk.total)
6519 ++ nbytes = round_down(nbytes, walk.stride);
6520 +
6521 +- skcipher_walk_done(&walk, 0);
6522 ++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
6523 ++ nbytes);
6524 ++
6525 ++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
6526 + }
6527 + }
6528 +
6529 +diff --git a/crypto/morus640.c b/crypto/morus640.c
6530 +index da06ec2f6a80..b6a477444f6d 100644
6531 +--- a/crypto/morus640.c
6532 ++++ b/crypto/morus640.c
6533 +@@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
6534 + const struct morus640_ops *ops)
6535 + {
6536 + struct skcipher_walk walk;
6537 +- u8 *dst;
6538 +- const u8 *src;
6539 +
6540 + ops->skcipher_walk_init(&walk, req, false);
6541 +
6542 + while (walk.nbytes) {
6543 +- src = walk.src.virt.addr;
6544 +- dst = walk.dst.virt.addr;
6545 ++ unsigned int nbytes = walk.nbytes;
6546 +
6547 +- ops->crypt_chunk(state, dst, src, walk.nbytes);
6548 ++ if (nbytes < walk.total)
6549 ++ nbytes = round_down(nbytes, walk.stride);
6550 +
6551 +- skcipher_walk_done(&walk, 0);
6552 ++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
6553 ++ nbytes);
6554 ++
6555 ++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
6556 + }
6557 + }
6558 +
6559 +diff --git a/crypto/ofb.c b/crypto/ofb.c
6560 +index 886631708c5e..cab0b80953fe 100644
6561 +--- a/crypto/ofb.c
6562 ++++ b/crypto/ofb.c
6563 +@@ -5,9 +5,6 @@
6564 + *
6565 + * Copyright (C) 2018 ARM Limited or its affiliates.
6566 + * All rights reserved.
6567 +- *
6568 +- * Based loosely on public domain code gleaned from libtomcrypt
6569 +- * (https://github.com/libtom/libtomcrypt).
6570 + */
6571 +
6572 + #include <crypto/algapi.h>
6573 +@@ -21,7 +18,6 @@
6574 +
6575 + struct crypto_ofb_ctx {
6576 + struct crypto_cipher *child;
6577 +- int cnt;
6578 + };
6579 +
6580 +
6581 +@@ -41,58 +37,40 @@ static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
6582 + return err;
6583 + }
6584 +
6585 +-static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
6586 +- struct skcipher_walk *walk,
6587 +- struct crypto_cipher *tfm)
6588 ++static int crypto_ofb_crypt(struct skcipher_request *req)
6589 + {
6590 +- int bsize = crypto_cipher_blocksize(tfm);
6591 +- int nbytes = walk->nbytes;
6592 +-
6593 +- u8 *src = walk->src.virt.addr;
6594 +- u8 *dst = walk->dst.virt.addr;
6595 +- u8 *iv = walk->iv;
6596 +-
6597 +- do {
6598 +- if (ctx->cnt == bsize) {
6599 +- if (nbytes < bsize)
6600 +- break;
6601 +- crypto_cipher_encrypt_one(tfm, iv, iv);
6602 +- ctx->cnt = 0;
6603 +- }
6604 +- *dst = *src ^ iv[ctx->cnt];
6605 +- src++;
6606 +- dst++;
6607 +- ctx->cnt++;
6608 +- } while (--nbytes);
6609 +- return nbytes;
6610 +-}
6611 +-
6612 +-static int crypto_ofb_encrypt(struct skcipher_request *req)
6613 +-{
6614 +- struct skcipher_walk walk;
6615 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
6616 +- unsigned int bsize;
6617 + struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
6618 +- struct crypto_cipher *child = ctx->child;
6619 +- int ret = 0;
6620 ++ struct crypto_cipher *cipher = ctx->child;
6621 ++ const unsigned int bsize = crypto_cipher_blocksize(cipher);
6622 ++ struct skcipher_walk walk;
6623 ++ int err;
6624 +
6625 +- bsize = crypto_cipher_blocksize(child);
6626 +- ctx->cnt = bsize;
6627 ++ err = skcipher_walk_virt(&walk, req, false);
6628 +
6629 +- ret = skcipher_walk_virt(&walk, req, false);
6630 ++ while (walk.nbytes >= bsize) {
6631 ++ const u8 *src = walk.src.virt.addr;
6632 ++ u8 *dst = walk.dst.virt.addr;
6633 ++ u8 * const iv = walk.iv;
6634 ++ unsigned int nbytes = walk.nbytes;
6635 +
6636 +- while (walk.nbytes) {
6637 +- ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
6638 +- ret = skcipher_walk_done(&walk, ret);
6639 +- }
6640 ++ do {
6641 ++ crypto_cipher_encrypt_one(cipher, iv, iv);
6642 ++ crypto_xor_cpy(dst, src, iv, bsize);
6643 ++ dst += bsize;
6644 ++ src += bsize;
6645 ++ } while ((nbytes -= bsize) >= bsize);
6646 +
6647 +- return ret;
6648 +-}
6649 ++ err = skcipher_walk_done(&walk, nbytes);
6650 ++ }
6651 +
6652 +-/* OFB encrypt and decrypt are identical */
6653 +-static int crypto_ofb_decrypt(struct skcipher_request *req)
6654 +-{
6655 +- return crypto_ofb_encrypt(req);
6656 ++ if (walk.nbytes) {
6657 ++ crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
6658 ++ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
6659 ++ walk.nbytes);
6660 ++ err = skcipher_walk_done(&walk, 0);
6661 ++ }
6662 ++ return err;
6663 + }
6664 +
6665 + static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
6666 +@@ -165,13 +143,18 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
6667 + if (err)
6668 + goto err_drop_spawn;
6669 +
6670 ++ /* OFB mode is a stream cipher. */
6671 ++ inst->alg.base.cra_blocksize = 1;
6672 ++
6673 ++ /*
6674 ++ * To simplify the implementation, configure the skcipher walk to only
6675 ++ * give a partial block at the very end, never earlier.
6676 ++ */
6677 ++ inst->alg.chunksize = alg->cra_blocksize;
6678 ++
6679 + inst->alg.base.cra_priority = alg->cra_priority;
6680 +- inst->alg.base.cra_blocksize = alg->cra_blocksize;
6681 + inst->alg.base.cra_alignmask = alg->cra_alignmask;
6682 +
6683 +- /* We access the data as u32s when xoring. */
6684 +- inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
6685 +-
6686 + inst->alg.ivsize = alg->cra_blocksize;
6687 + inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
6688 + inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
6689 +@@ -182,8 +165,8 @@ static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
6690 + inst->alg.exit = crypto_ofb_exit_tfm;
6691 +
6692 + inst->alg.setkey = crypto_ofb_setkey;
6693 +- inst->alg.encrypt = crypto_ofb_encrypt;
6694 +- inst->alg.decrypt = crypto_ofb_decrypt;
6695 ++ inst->alg.encrypt = crypto_ofb_crypt;
6696 ++ inst->alg.decrypt = crypto_ofb_crypt;
6697 +
6698 + inst->free = crypto_ofb_free;
6699 +
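
The rewritten ofb.c treats OFB as the stream cipher it is: blocksize 1, a single crypt routine shared by encryption and decryption, keystream produced by repeatedly encrypting the IV, and a trailing partial block that just consumes part of one keystream block. A compact model with a placeholder block cipher:

    #define BSIZE 16

    /* Placeholder for crypto_cipher_encrypt_one(); encrypts in place. */
    static void keystream_step(unsigned char *iv)
    {
        for (int i = 0; i < BSIZE; i++)
            iv[i] = (unsigned char)(iv[i] * 5 + 1);
    }

    /* OFB encrypt == OFB decrypt, hence a single crypt function. */
    static void ofb_crypt(unsigned char *dst, const unsigned char *src,
                          unsigned int nbytes, unsigned char *iv)
    {
        while (nbytes >= BSIZE) {
            keystream_step(iv);            /* iv now holds keystream */
            for (int i = 0; i < BSIZE; i++)
                dst[i] = src[i] ^ iv[i];
            src += BSIZE;
            dst += BSIZE;
            nbytes -= BSIZE;
        }
        if (nbytes) {                      /* final partial block */
            keystream_step(iv);
            for (unsigned int i = 0; i < nbytes; i++)
                dst[i] = src[i] ^ iv[i];
        }
    }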
6700 +diff --git a/crypto/pcbc.c b/crypto/pcbc.c
6701 +index 8aa10144407c..1b182dfedc94 100644
6702 +--- a/crypto/pcbc.c
6703 ++++ b/crypto/pcbc.c
6704 +@@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
6705 + unsigned int nbytes = walk->nbytes;
6706 + u8 *src = walk->src.virt.addr;
6707 + u8 *dst = walk->dst.virt.addr;
6708 +- u8 *iv = walk->iv;
6709 ++ u8 * const iv = walk->iv;
6710 +
6711 + do {
6712 + crypto_xor(iv, src, bsize);
6713 +@@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
6714 + int bsize = crypto_cipher_blocksize(tfm);
6715 + unsigned int nbytes = walk->nbytes;
6716 + u8 *src = walk->src.virt.addr;
6717 +- u8 *iv = walk->iv;
6718 ++ u8 * const iv = walk->iv;
6719 + u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
6720 +
6721 + do {
6722 +@@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
6723 + src += bsize;
6724 + } while ((nbytes -= bsize) >= bsize);
6725 +
6726 +- memcpy(walk->iv, iv, bsize);
6727 +-
6728 + return nbytes;
6729 + }
6730 +
6731 +@@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
6732 + unsigned int nbytes = walk->nbytes;
6733 + u8 *src = walk->src.virt.addr;
6734 + u8 *dst = walk->dst.virt.addr;
6735 +- u8 *iv = walk->iv;
6736 ++ u8 * const iv = walk->iv;
6737 +
6738 + do {
6739 + crypto_cipher_decrypt_one(tfm, dst, src);
6740 +@@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
6741 + dst += bsize;
6742 + } while ((nbytes -= bsize) >= bsize);
6743 +
6744 +- memcpy(walk->iv, iv, bsize);
6745 +-
6746 + return nbytes;
6747 + }
6748 +
6749 +@@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
6750 + int bsize = crypto_cipher_blocksize(tfm);
6751 + unsigned int nbytes = walk->nbytes;
6752 + u8 *src = walk->src.virt.addr;
6753 +- u8 *iv = walk->iv;
6754 ++ u8 * const iv = walk->iv;
6755 + u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
6756 +
6757 + do {
6758 +@@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
6759 + src += bsize;
6760 + } while ((nbytes -= bsize) >= bsize);
6761 +
6762 +- memcpy(walk->iv, iv, bsize);
6763 +-
6764 + return nbytes;
6765 + }
6766 +
6767 +diff --git a/crypto/shash.c b/crypto/shash.c
6768 +index 44d297b82a8f..40311ccad3fa 100644
6769 +--- a/crypto/shash.c
6770 ++++ b/crypto/shash.c
6771 +@@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
6772 + return err;
6773 + }
6774 +
6775 ++static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
6776 ++{
6777 ++ if (crypto_shash_alg_has_setkey(alg) &&
6778 ++ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
6779 ++ crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
6780 ++}
6781 ++
6782 + int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
6783 + unsigned int keylen)
6784 + {
6785 +@@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
6786 + else
6787 + err = shash->setkey(tfm, key, keylen);
6788 +
6789 +- if (err)
6790 ++ if (unlikely(err)) {
6791 ++ shash_set_needkey(tfm, shash);
6792 + return err;
6793 ++ }
6794 +
6795 + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6796 + return 0;
6797 +@@ -373,7 +382,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
6798 + crt->final = shash_async_final;
6799 + crt->finup = shash_async_finup;
6800 + crt->digest = shash_async_digest;
6801 +- crt->setkey = shash_async_setkey;
6802 ++ if (crypto_shash_alg_has_setkey(alg))
6803 ++ crt->setkey = shash_async_setkey;
6804 +
6805 + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
6806 + CRYPTO_TFM_NEED_KEY);
6807 +@@ -395,9 +405,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
6808 +
6809 + hash->descsize = alg->descsize;
6810 +
6811 +- if (crypto_shash_alg_has_setkey(alg) &&
6812 +- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
6813 +- crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
6814 ++ shash_set_needkey(hash, alg);
6815 +
6816 + return 0;
6817 + }
6818 +diff --git a/crypto/skcipher.c b/crypto/skcipher.c
6819 +index 2a969296bc24..de09ff60991e 100644
6820 +--- a/crypto/skcipher.c
6821 ++++ b/crypto/skcipher.c
6822 +@@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
6823 + return crypto_alg_extsize(alg);
6824 + }
6825 +
6826 ++static void skcipher_set_needkey(struct crypto_skcipher *tfm)
6827 ++{
6828 ++ if (tfm->keysize)
6829 ++ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
6830 ++}
6831 ++
6832 + static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
6833 + const u8 *key, unsigned int keylen)
6834 + {
6835 +@@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
6836 + err = crypto_blkcipher_setkey(blkcipher, key, keylen);
6837 + crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
6838 + CRYPTO_TFM_RES_MASK);
6839 +- if (err)
6840 ++ if (unlikely(err)) {
6841 ++ skcipher_set_needkey(tfm);
6842 + return err;
6843 ++ }
6844 +
6845 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6846 + return 0;
6847 +@@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
6848 + skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
6849 + skcipher->keysize = calg->cra_blkcipher.max_keysize;
6850 +
6851 +- if (skcipher->keysize)
6852 +- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
6853 ++ skcipher_set_needkey(skcipher);
6854 +
6855 + return 0;
6856 + }
6857 +@@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
6858 + crypto_skcipher_set_flags(tfm,
6859 + crypto_ablkcipher_get_flags(ablkcipher) &
6860 + CRYPTO_TFM_RES_MASK);
6861 +- if (err)
6862 ++ if (unlikely(err)) {
6863 ++ skcipher_set_needkey(tfm);
6864 + return err;
6865 ++ }
6866 +
6867 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6868 + return 0;
6869 +@@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
6870 + sizeof(struct ablkcipher_request);
6871 + skcipher->keysize = calg->cra_ablkcipher.max_keysize;
6872 +
6873 +- if (skcipher->keysize)
6874 +- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
6875 ++ skcipher_set_needkey(skcipher);
6876 +
6877 + return 0;
6878 + }
6879 +@@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
6880 + else
6881 + err = cipher->setkey(tfm, key, keylen);
6882 +
6883 +- if (err)
6884 ++ if (unlikely(err)) {
6885 ++ skcipher_set_needkey(tfm);
6886 + return err;
6887 ++ }
6888 +
6889 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
6890 + return 0;
6891 +@@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
6892 + skcipher->ivsize = alg->ivsize;
6893 + skcipher->keysize = alg->max_keysize;
6894 +
6895 +- if (skcipher->keysize)
6896 +- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
6897 ++ skcipher_set_needkey(skcipher);
6898 +
6899 + if (alg->exit)
6900 + skcipher->base.exit = crypto_skcipher_exit_tfm;
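
The aead.c, ahash.c, shash.c and skcipher.c hunks all enforce one invariant: a failed ->setkey() must put the transform back into its 'needs a key' state, so a later encrypt or digest cannot run on a half-installed key. Stripped of the crypto-API plumbing, the state machine looks roughly like this (toy names throughout):

    #include <stdbool.h>

    struct tfm {
        bool need_key;            /* models the CRYPTO_TFM_NEED_KEY flag */
        int (*setkey)(struct tfm *, const unsigned char *, unsigned int);
    };

    static int tfm_setkey(struct tfm *t, const unsigned char *key,
                          unsigned int keylen)
    {
        int err = t->setkey(t, key, keylen);

        if (err) {
            t->need_key = true;   /* the fix: re-arm the flag on failure */
            return err;
        }
        t->need_key = false;      /* only a successful setkey clears it */
        return 0;
    }

    static int tfm_encrypt(struct tfm *t)
    {
        if (t->need_key)
            return -1;            /* models the -ENOKEY rejection */
        /* ... do the work ... */
        return 0;
    }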
6901 +diff --git a/crypto/testmgr.c b/crypto/testmgr.c
6902 +index 0f684a414acb..b8e4a3ccbfe0 100644
6903 +--- a/crypto/testmgr.c
6904 ++++ b/crypto/testmgr.c
6905 +@@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
6906 +
6907 + err = alg_test_hash(desc, driver, type, mask);
6908 + if (err)
6909 +- goto out;
6910 ++ return err;
6911 +
6912 + tfm = crypto_alloc_shash(driver, type, mask);
6913 + if (IS_ERR(tfm)) {
6914 ++ if (PTR_ERR(tfm) == -ENOENT) {
6915 ++ /*
6916 ++ * This crc32c implementation is only available through
6917 ++ * ahash API, not the shash API, so the remaining part
6918 ++ * of the test is not applicable to it.
6919 ++ */
6920 ++ return 0;
6921 ++ }
6922 + printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
6923 + "%ld\n", driver, PTR_ERR(tfm));
6924 +- err = PTR_ERR(tfm);
6925 +- goto out;
6926 ++ return PTR_ERR(tfm);
6927 + }
6928 +
6929 + do {
6930 +@@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
6931 +
6932 + crypto_free_shash(tfm);
6933 +
6934 +-out:
6935 + return err;
6936 + }
6937 +
6938 +diff --git a/crypto/testmgr.h b/crypto/testmgr.h
6939 +index e8f47d7b92cd..ca8e8ebef309 100644
6940 +--- a/crypto/testmgr.h
6941 ++++ b/crypto/testmgr.h
6942 +@@ -12870,6 +12870,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
6943 + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
6944 + "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
6945 + .len = 64,
6946 ++ .also_non_np = 1,
6947 ++ .np = 2,
6948 ++ .tap = { 31, 33 },
6949 ++ }, { /* > 16 bytes, not a multiple of 16 bytes */
6950 ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
6951 ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
6952 ++ .klen = 16,
6953 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
6954 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6955 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
6956 ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
6957 ++ "\xae",
6958 ++ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
6959 ++ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
6960 ++ "\xc8",
6961 ++ .len = 17,
6962 ++ }, { /* < 16 bytes */
6963 ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
6964 ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
6965 ++ .klen = 16,
6966 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
6967 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6968 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
6969 ++ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
6970 ++ .len = 7,
6971 + },
6972 + };
6973 +
6974 +@@ -16656,8 +16681,7 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = {
6975 + };
6976 +
6977 + static const struct cipher_testvec aes_ofb_tv_template[] = {
6978 +- /* From NIST Special Publication 800-38A, Appendix F.5 */
6979 +- {
6980 ++ { /* From NIST Special Publication 800-38A, Appendix F.5 */
6981 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
6982 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
6983 + .klen = 16,
6984 +@@ -16680,6 +16704,31 @@ static const struct cipher_testvec aes_ofb_tv_template[] = {
6985 + "\x30\x4c\x65\x28\xf6\x59\xc7\x78"
6986 + "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e",
6987 + .len = 64,
6988 ++ .also_non_np = 1,
6989 ++ .np = 2,
6990 ++ .tap = { 31, 33 },
6991 ++ }, { /* > 16 bytes, not a multiple of 16 bytes */
6992 ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
6993 ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
6994 ++ .klen = 16,
6995 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
6996 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
6997 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
6998 ++ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
6999 ++ "\xae",
7000 ++ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
7001 ++ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
7002 ++ "\x77",
7003 ++ .len = 17,
7004 ++ }, { /* < 16 bytes */
7005 ++ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
7006 ++ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
7007 ++ .klen = 16,
7008 ++ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
7009 ++ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
7010 ++ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
7011 ++ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
7012 ++ .len = 7,
7013 + }
7014 + };
7015 +
7016 +diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
7017 +index f0b52266b3ac..d73afb562ad9 100644
7018 +--- a/drivers/acpi/acpi_video.c
7019 ++++ b/drivers/acpi/acpi_video.c
7020 +@@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void)
7021 + return opregion;
7022 + }
7023 +
7024 ++/* Check if the chassis-type indicates there is no builtin LCD panel */
7025 + static bool dmi_is_desktop(void)
7026 + {
7027 + const char *chassis_type;
7028 ++ unsigned long type;
7029 +
7030 + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
7031 + if (!chassis_type)
7032 + return false;
7033 +
7034 +- if (!strcmp(chassis_type, "3") || /* 3: Desktop */
7035 +- !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
7036 +- !strcmp(chassis_type, "5") || /* 5: Pizza Box */
7037 +- !strcmp(chassis_type, "6") || /* 6: Mini Tower */
7038 +- !strcmp(chassis_type, "7") || /* 7: Tower */
7039 +- !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
7040 ++ if (kstrtoul(chassis_type, 10, &type) != 0)
7041 ++ return false;
7042 ++
7043 ++ switch (type) {
7044 ++ case 0x03: /* Desktop */
7045 ++ case 0x04: /* Low Profile Desktop */
7046 ++ case 0x05: /* Pizza Box */
7047 ++ case 0x06: /* Mini Tower */
7048 ++ case 0x07: /* Tower */
7049 ++ case 0x10: /* Lunch Box */
7050 ++ case 0x11: /* Main Server Chassis */
7051 + return true;
7052 ++ }
7053 +
7054 + return false;
7055 + }
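
dmi_is_desktop() now parses the chassis-type string once and switches on the numeric value. That also straightens out the decimal-vs-hex mismatch in the old string checks (the DMI chassis-type string is decimal, while Main Server Chassis is type 0x11) and picks up the previously missing Lunch Box type (0x10). In userspace terms, roughly:

    #include <stdbool.h>
    #include <stdlib.h>

    static bool chassis_has_no_builtin_panel(const char *chassis_type)
    {
        char *end;
        unsigned long type;

        if (!chassis_type)
            return false;

        type = strtoul(chassis_type, &end, 10);  /* kstrtoul(.., 10, ..) */
        if (end == chassis_type)
            return false;                        /* not a number */

        switch (type) {
        case 0x03:   /* Desktop */
        case 0x04:   /* Low Profile Desktop */
        case 0x05:   /* Pizza Box */
        case 0x06:   /* Mini Tower */
        case 0x07:   /* Tower */
        case 0x10:   /* Lunch Box */
        case 0x11:   /* Main Server Chassis */
            return true;
        }
        return false;
    }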
7056 +diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
7057 +index e10fec99a182..4424997ecf30 100644
7058 +--- a/drivers/acpi/acpica/evgpe.c
7059 ++++ b/drivers/acpi/acpica/evgpe.c
7060 +@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
7061 +
7062 + ACPI_FUNCTION_TRACE(ev_enable_gpe);
7063 +
7064 +- /* Enable the requested GPE */
7065 ++ /* Clear the GPE status */
7066 ++ status = acpi_hw_clear_gpe(gpe_event_info);
7067 ++ if (ACPI_FAILURE(status))
7068 ++ return_ACPI_STATUS(status);
7069 +
7070 ++ /* Enable the requested GPE */
7071 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
7072 + return_ACPI_STATUS(status);
7073 + }
7074 +diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
7075 +index 8638f43cfc3d..79d86da1c892 100644
7076 +--- a/drivers/acpi/acpica/nsobject.c
7077 ++++ b/drivers/acpi/acpica/nsobject.c
7078 +@@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
7079 + }
7080 + }
7081 +
7082 ++ if (obj_desc->common.type == ACPI_TYPE_REGION) {
7083 ++ acpi_ut_remove_address_range(obj_desc->region.space_id, node);
7084 ++ }
7085 ++
7086 + /* Clear the Node entry in all cases */
7087 +
7088 + node->object = NULL;
7089 +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
7090 +index 217a782c3e55..7aa08884ed48 100644
7091 +--- a/drivers/acpi/cppc_acpi.c
7092 ++++ b/drivers/acpi/cppc_acpi.c
7093 +@@ -1108,8 +1108,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
7094 + cpc_read(cpunum, nominal_reg, &nom);
7095 + perf_caps->nominal_perf = nom;
7096 +
7097 +- cpc_read(cpunum, guaranteed_reg, &guaranteed);
7098 +- perf_caps->guaranteed_perf = guaranteed;
7099 ++ if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
7100 ++ IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
7101 ++ perf_caps->guaranteed_perf = 0;
7102 ++ } else {
7103 ++ cpc_read(cpunum, guaranteed_reg, &guaranteed);
7104 ++ perf_caps->guaranteed_perf = guaranteed;
7105 ++ }
7106 +
7107 + cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
7108 + perf_caps->lowest_nonlinear_perf = min_nonlinear;
7109 +diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
7110 +index 545e91420cde..8940054d6250 100644
7111 +--- a/drivers/acpi/device_sysfs.c
7112 ++++ b/drivers/acpi/device_sysfs.c
7113 +@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
7114 + {
7115 + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
7116 + const union acpi_object *of_compatible, *obj;
7117 ++ acpi_status status;
7118 + int len, count;
7119 + int i, nval;
7120 + char *c;
7121 +
7122 +- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
7123 ++ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
7124 ++ if (ACPI_FAILURE(status))
7125 ++ return -ENODEV;
7126 ++
7127 + /* DT strings are all in lower case */
7128 + for (c = buf.pointer; *c != '\0'; c++)
7129 + *c = tolower(*c);
7130 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
7131 +index e18ade5d74e9..f75f8f870ce3 100644
7132 +--- a/drivers/acpi/nfit/core.c
7133 ++++ b/drivers/acpi/nfit/core.c
7134 +@@ -415,7 +415,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
7135 + if (call_pkg) {
7136 + int i;
7137 +
7138 +- if (nfit_mem->family != call_pkg->nd_family)
7139 ++ if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
7140 + return -ENOTTY;
7141 +
7142 + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
7143 +@@ -424,6 +424,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
7144 + return call_pkg->nd_command;
7145 + }
7146 +
7147 ++ /* In the !call_pkg case, bus commands == bus functions */
7148 ++ if (!nfit_mem)
7149 ++ return cmd;
7150 ++
7151 + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
7152 + if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
7153 + return cmd;
7154 +@@ -454,17 +458,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
7155 + if (cmd_rc)
7156 + *cmd_rc = -EINVAL;
7157 +
7158 ++ if (cmd == ND_CMD_CALL)
7159 ++ call_pkg = buf;
7160 ++ func = cmd_to_func(nfit_mem, cmd, call_pkg);
7161 ++ if (func < 0)
7162 ++ return func;
7163 ++
7164 + if (nvdimm) {
7165 + struct acpi_device *adev = nfit_mem->adev;
7166 +
7167 + if (!adev)
7168 + return -ENOTTY;
7169 +
7170 +- if (cmd == ND_CMD_CALL)
7171 +- call_pkg = buf;
7172 +- func = cmd_to_func(nfit_mem, cmd, call_pkg);
7173 +- if (func < 0)
7174 +- return func;
7175 + dimm_name = nvdimm_name(nvdimm);
7176 + cmd_name = nvdimm_cmd_name(cmd);
7177 + cmd_mask = nvdimm_cmd_mask(nvdimm);
7178 +@@ -475,12 +480,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
7179 + } else {
7180 + struct acpi_device *adev = to_acpi_dev(acpi_desc);
7181 +
7182 +- func = cmd;
7183 + cmd_name = nvdimm_bus_cmd_name(cmd);
7184 + cmd_mask = nd_desc->cmd_mask;
7185 +- dsm_mask = cmd_mask;
7186 +- if (cmd == ND_CMD_CALL)
7187 +- dsm_mask = nd_desc->bus_dsm_mask;
7188 ++ dsm_mask = nd_desc->bus_dsm_mask;
7189 + desc = nd_cmd_bus_desc(cmd);
7190 + guid = to_nfit_uuid(NFIT_DEV_BUS);
7191 + handle = adev->handle;
7192 +@@ -554,6 +556,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
7193 + return -EINVAL;
7194 + }
7195 +
7196 ++ if (out_obj->type != ACPI_TYPE_BUFFER) {
7197 ++ dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
7198 ++ dimm_name, cmd_name, out_obj->type);
7199 ++ rc = -EINVAL;
7200 ++ goto out;
7201 ++ }
7202 ++
7203 + if (call_pkg) {
7204 + call_pkg->nd_fw_size = out_obj->buffer.length;
7205 + memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
7206 +@@ -572,13 +581,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
7207 + return 0;
7208 + }
7209 +
7210 +- if (out_obj->package.type != ACPI_TYPE_BUFFER) {
7211 +- dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
7212 +- dimm_name, cmd_name, out_obj->type);
7213 +- rc = -EINVAL;
7214 +- goto out;
7215 +- }
7216 +-
7217 + dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
7218 + cmd_name, out_obj->buffer.length);
7219 + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
7220 +@@ -1759,14 +1761,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
7221 +
7222 + __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
7223 + {
7224 ++ struct device *dev = &nfit_mem->adev->dev;
7225 + struct nd_intel_smart smart = { 0 };
7226 + union acpi_object in_buf = {
7227 +- .type = ACPI_TYPE_BUFFER,
7228 +- .buffer.pointer = (char *) &smart,
7229 +- .buffer.length = sizeof(smart),
7230 ++ .buffer.type = ACPI_TYPE_BUFFER,
7231 ++ .buffer.length = 0,
7232 + };
7233 + union acpi_object in_obj = {
7234 +- .type = ACPI_TYPE_PACKAGE,
7235 ++ .package.type = ACPI_TYPE_PACKAGE,
7236 + .package.count = 1,
7237 + .package.elements = &in_buf,
7238 + };
7239 +@@ -1781,8 +1783,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
7240 + return;
7241 +
7242 + out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
7243 +- if (!out_obj)
7244 ++ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
7245 ++ || out_obj->buffer.length < sizeof(smart)) {
7246 ++ dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
7247 ++ dev_name(dev));
7248 ++ ACPI_FREE(out_obj);
7249 + return;
7250 ++ }
7251 ++ memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
7252 ++ ACPI_FREE(out_obj);
7253 +
7254 + if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
7255 + if (smart.shutdown_state)
7256 +@@ -1793,7 +1802,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
7257 + set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
7258 + nfit_mem->dirty_shutdown = smart.shutdown_count;
7259 + }
7260 +- ACPI_FREE(out_obj);
7261 + }
7262 +
7263 + static void populate_shutdown_status(struct nfit_mem *nfit_mem)
7264 +@@ -1915,18 +1923,19 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
7265 + | 1 << ND_CMD_SET_CONFIG_DATA;
7266 + if (family == NVDIMM_FAMILY_INTEL
7267 + && (dsm_mask & label_mask) == label_mask)
7268 +- return 0;
7269 +-
7270 +- if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
7271 +- && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
7272 +- dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
7273 +- set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
7274 +- }
7275 ++ /* skip _LS{I,R,W} enabling */;
7276 ++ else {
7277 ++ if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
7278 ++ && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
7279 ++ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
7280 ++ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
7281 ++ }
7282 +
7283 +- if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
7284 +- && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
7285 +- dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
7286 +- set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
7287 ++ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
7288 ++ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
7289 ++ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
7290 ++ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
7291 ++ }
7292 + }
7293 +
7294 + populate_shutdown_status(nfit_mem);
7295 +@@ -3004,14 +3013,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
7296 + {
7297 + int rc;
7298 +
7299 +- if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
7300 ++ if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
7301 + return acpi_nfit_register_region(acpi_desc, nfit_spa);
7302 +
7303 + set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
7304 +- set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
7305 ++ if (!no_init_ars)
7306 ++ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
7307 +
7308 + switch (acpi_nfit_query_poison(acpi_desc)) {
7309 + case 0:
7310 ++ case -ENOSPC:
7311 + case -EAGAIN:
7312 + rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
7313 + /* shouldn't happen, try again later */
7314 +@@ -3036,7 +3047,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
7315 + break;
7316 + case -EBUSY:
7317 + case -ENOMEM:
7318 +- case -ENOSPC:
7319 + /*
7320 + * BIOS was using ARS, wait for it to complete (or
7321 + * resources to become available) and then perform our
7322 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
7323 +index 4d2b2ad1ee0e..01f80cbd2741 100644
7324 +--- a/drivers/android/binder.c
7325 ++++ b/drivers/android/binder.c
7326 +@@ -329,6 +329,8 @@ struct binder_error {
7327 + * (invariant after initialized)
7328 + * @min_priority: minimum scheduling priority
7329 + * (invariant after initialized)
7330 ++ * @txn_security_ctx: require sender's security context
7331 ++ * (invariant after initialized)
7332 + * @async_todo: list of async work items
7333 + * (protected by @proc->inner_lock)
7334 + *
7335 +@@ -365,6 +367,7 @@ struct binder_node {
7336 + * invariant after initialization
7337 + */
7338 + u8 accept_fds:1;
7339 ++ u8 txn_security_ctx:1;
7340 + u8 min_priority;
7341 + };
7342 + bool has_async_transaction;
7343 +@@ -615,6 +618,7 @@ struct binder_transaction {
7344 + long saved_priority;
7345 + kuid_t sender_euid;
7346 + struct list_head fd_fixups;
7347 ++ binder_uintptr_t security_ctx;
7348 + /**
7349 + * @lock: protects @from, @to_proc, and @to_thread
7350 + *
7351 +@@ -1152,6 +1156,7 @@ static struct binder_node *binder_init_node_ilocked(
7352 + node->work.type = BINDER_WORK_NODE;
7353 + node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
7354 + node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
7355 ++ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
7356 + spin_lock_init(&node->lock);
7357 + INIT_LIST_HEAD(&node->work.entry);
7358 + INIT_LIST_HEAD(&node->async_todo);
7359 +@@ -2778,6 +2783,8 @@ static void binder_transaction(struct binder_proc *proc,
7360 + binder_size_t last_fixup_min_off = 0;
7361 + struct binder_context *context = proc->context;
7362 + int t_debug_id = atomic_inc_return(&binder_last_id);
7363 ++ char *secctx = NULL;
7364 ++ u32 secctx_sz = 0;
7365 +
7366 + e = binder_transaction_log_add(&binder_transaction_log);
7367 + e->debug_id = t_debug_id;
7368 +@@ -3020,6 +3027,20 @@ static void binder_transaction(struct binder_proc *proc,
7369 + t->flags = tr->flags;
7370 + t->priority = task_nice(current);
7371 +
7372 ++ if (target_node && target_node->txn_security_ctx) {
7373 ++ u32 secid;
7374 ++
7375 ++ security_task_getsecid(proc->tsk, &secid);
7376 ++ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
7377 ++ if (ret) {
7378 ++ return_error = BR_FAILED_REPLY;
7379 ++ return_error_param = ret;
7380 ++ return_error_line = __LINE__;
7381 ++ goto err_get_secctx_failed;
7382 ++ }
7383 ++ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
7384 ++ }
7385 ++
7386 + trace_binder_transaction(reply, t, target_node);
7387 +
7388 + t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
7389 +@@ -3036,6 +3057,19 @@ static void binder_transaction(struct binder_proc *proc,
7390 + t->buffer = NULL;
7391 + goto err_binder_alloc_buf_failed;
7392 + }
7393 ++ if (secctx) {
7394 ++ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
7395 ++ ALIGN(tr->offsets_size, sizeof(void *)) +
7396 ++ ALIGN(extra_buffers_size, sizeof(void *)) -
7397 ++ ALIGN(secctx_sz, sizeof(u64));
7398 ++ char *kptr = t->buffer->data + buf_offset;
7399 ++
7400 ++ t->security_ctx = (uintptr_t)kptr +
7401 ++ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
7402 ++ memcpy(kptr, secctx, secctx_sz);
7403 ++ security_release_secctx(secctx, secctx_sz);
7404 ++ secctx = NULL;
7405 ++ }
7406 + t->buffer->debug_id = t->debug_id;
7407 + t->buffer->transaction = t;
7408 + t->buffer->target_node = target_node;
7409 +@@ -3305,6 +3339,9 @@ err_copy_data_failed:
7410 + t->buffer->transaction = NULL;
7411 + binder_alloc_free_buf(&target_proc->alloc, t->buffer);
7412 + err_binder_alloc_buf_failed:
7413 ++ if (secctx)
7414 ++ security_release_secctx(secctx, secctx_sz);
7415 ++err_get_secctx_failed:
7416 + kfree(tcomplete);
7417 + binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
7418 + err_alloc_tcomplete_failed:
7419 +@@ -4036,11 +4073,13 @@ retry:
7420 +
7421 + while (1) {
7422 + uint32_t cmd;
7423 +- struct binder_transaction_data tr;
7424 ++ struct binder_transaction_data_secctx tr;
7425 ++ struct binder_transaction_data *trd = &tr.transaction_data;
7426 + struct binder_work *w = NULL;
7427 + struct list_head *list = NULL;
7428 + struct binder_transaction *t = NULL;
7429 + struct binder_thread *t_from;
7430 ++ size_t trsize = sizeof(*trd);
7431 +
7432 + binder_inner_proc_lock(proc);
7433 + if (!binder_worklist_empty_ilocked(&thread->todo))
7434 +@@ -4240,8 +4279,8 @@ retry:
7435 + if (t->buffer->target_node) {
7436 + struct binder_node *target_node = t->buffer->target_node;
7437 +
7438 +- tr.target.ptr = target_node->ptr;
7439 +- tr.cookie = target_node->cookie;
7440 ++ trd->target.ptr = target_node->ptr;
7441 ++ trd->cookie = target_node->cookie;
7442 + t->saved_priority = task_nice(current);
7443 + if (t->priority < target_node->min_priority &&
7444 + !(t->flags & TF_ONE_WAY))
7445 +@@ -4251,22 +4290,23 @@ retry:
7446 + binder_set_nice(target_node->min_priority);
7447 + cmd = BR_TRANSACTION;
7448 + } else {
7449 +- tr.target.ptr = 0;
7450 +- tr.cookie = 0;
7451 ++ trd->target.ptr = 0;
7452 ++ trd->cookie = 0;
7453 + cmd = BR_REPLY;
7454 + }
7455 +- tr.code = t->code;
7456 +- tr.flags = t->flags;
7457 +- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
7458 ++ trd->code = t->code;
7459 ++ trd->flags = t->flags;
7460 ++ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
7461 +
7462 + t_from = binder_get_txn_from(t);
7463 + if (t_from) {
7464 + struct task_struct *sender = t_from->proc->tsk;
7465 +
7466 +- tr.sender_pid = task_tgid_nr_ns(sender,
7467 +- task_active_pid_ns(current));
7468 ++ trd->sender_pid =
7469 ++ task_tgid_nr_ns(sender,
7470 ++ task_active_pid_ns(current));
7471 + } else {
7472 +- tr.sender_pid = 0;
7473 ++ trd->sender_pid = 0;
7474 + }
7475 +
7476 + ret = binder_apply_fd_fixups(t);
7477 +@@ -4297,15 +4337,20 @@ retry:
7478 + }
7479 + continue;
7480 + }
7481 +- tr.data_size = t->buffer->data_size;
7482 +- tr.offsets_size = t->buffer->offsets_size;
7483 +- tr.data.ptr.buffer = (binder_uintptr_t)
7484 ++ trd->data_size = t->buffer->data_size;
7485 ++ trd->offsets_size = t->buffer->offsets_size;
7486 ++ trd->data.ptr.buffer = (binder_uintptr_t)
7487 + ((uintptr_t)t->buffer->data +
7488 + binder_alloc_get_user_buffer_offset(&proc->alloc));
7489 +- tr.data.ptr.offsets = tr.data.ptr.buffer +
7490 ++ trd->data.ptr.offsets = trd->data.ptr.buffer +
7491 + ALIGN(t->buffer->data_size,
7492 + sizeof(void *));
7493 +
7494 ++ tr.secctx = t->security_ctx;
7495 ++ if (t->security_ctx) {
7496 ++ cmd = BR_TRANSACTION_SEC_CTX;
7497 ++ trsize = sizeof(tr);
7498 ++ }
7499 + if (put_user(cmd, (uint32_t __user *)ptr)) {
7500 + if (t_from)
7501 + binder_thread_dec_tmpref(t_from);
7502 +@@ -4316,7 +4361,7 @@ retry:
7503 + return -EFAULT;
7504 + }
7505 + ptr += sizeof(uint32_t);
7506 +- if (copy_to_user(ptr, &tr, sizeof(tr))) {
7507 ++ if (copy_to_user(ptr, &tr, trsize)) {
7508 + if (t_from)
7509 + binder_thread_dec_tmpref(t_from);
7510 +
7511 +@@ -4325,7 +4370,7 @@ retry:
7512 +
7513 + return -EFAULT;
7514 + }
7515 +- ptr += sizeof(tr);
7516 ++ ptr += trsize;
7517 +
7518 + trace_binder_transaction_received(t);
7519 + binder_stat_br(proc, thread, cmd);
7520 +@@ -4333,16 +4378,18 @@ retry:
7521 + "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
7522 + proc->pid, thread->pid,
7523 + (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
7524 +- "BR_REPLY",
7525 ++ (cmd == BR_TRANSACTION_SEC_CTX) ?
7526 ++ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
7527 + t->debug_id, t_from ? t_from->proc->pid : 0,
7528 + t_from ? t_from->pid : 0, cmd,
7529 + t->buffer->data_size, t->buffer->offsets_size,
7530 +- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
7531 ++ (u64)trd->data.ptr.buffer,
7532 ++ (u64)trd->data.ptr.offsets);
7533 +
7534 + if (t_from)
7535 + binder_thread_dec_tmpref(t_from);
7536 + t->buffer->allow_user_free = 1;
7537 +- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
7538 ++ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
7539 + binder_inner_proc_lock(thread->proc);
7540 + t->to_parent = thread->transaction_stack;
7541 + t->to_thread = thread;
7542 +@@ -4690,7 +4737,8 @@ out:
7543 + return ret;
7544 + }
7545 +
7546 +-static int binder_ioctl_set_ctx_mgr(struct file *filp)
7547 ++static int binder_ioctl_set_ctx_mgr(struct file *filp,
7548 ++ struct flat_binder_object *fbo)
7549 + {
7550 + int ret = 0;
7551 + struct binder_proc *proc = filp->private_data;
7552 +@@ -4719,7 +4767,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
7553 + } else {
7554 + context->binder_context_mgr_uid = curr_euid;
7555 + }
7556 +- new_node = binder_new_node(proc, NULL);
7557 ++ new_node = binder_new_node(proc, fbo);
7558 + if (!new_node) {
7559 + ret = -ENOMEM;
7560 + goto out;
7561 +@@ -4842,8 +4890,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
7562 + binder_inner_proc_unlock(proc);
7563 + break;
7564 + }
7565 ++ case BINDER_SET_CONTEXT_MGR_EXT: {
7566 ++ struct flat_binder_object fbo;
7567 ++
7568 ++ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
7569 ++ ret = -EINVAL;
7570 ++ goto err;
7571 ++ }
7572 ++ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
7573 ++ if (ret)
7574 ++ goto err;
7575 ++ break;
7576 ++ }
7577 + case BINDER_SET_CONTEXT_MGR:
7578 +- ret = binder_ioctl_set_ctx_mgr(filp);
7579 ++ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
7580 + if (ret)
7581 + goto err;
7582 + break;
7583 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
7584 +index 8ac10af17c00..d62487d02455 100644
7585 +--- a/drivers/base/dd.c
7586 ++++ b/drivers/base/dd.c
7587 +@@ -968,9 +968,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
7588 + drv->remove(dev);
7589 +
7590 + device_links_driver_cleanup(dev);
7591 +- arch_teardown_dma_ops(dev);
7592 +
7593 + devres_release_all(dev);
7594 ++ arch_teardown_dma_ops(dev);
7595 + dev->driver = NULL;
7596 + dev_set_drvdata(dev, NULL);
7597 + if (dev->pm_domain && dev->pm_domain->dismiss)
7598 +diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
7599 +index 5fa1898755a3..7c84f64c74f7 100644
7600 +--- a/drivers/base/power/wakeup.c
7601 ++++ b/drivers/base/power/wakeup.c
7602 +@@ -118,7 +118,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
7603 + if (!ws)
7604 + return;
7605 +
7606 +- del_timer_sync(&ws->timer);
7607 + __pm_relax(ws);
7608 + }
7609 + EXPORT_SYMBOL_GPL(wakeup_source_drop);
7610 +@@ -205,6 +204,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
7611 + list_del_rcu(&ws->entry);
7612 + raw_spin_unlock_irqrestore(&events_lock, flags);
7613 + synchronize_srcu(&wakeup_srcu);
7614 ++
7615 ++ del_timer_sync(&ws->timer);
7616 ++ /*
7617 ++ * Clear timer.function to make wakeup_source_not_registered() treat
7618 ++ * this wakeup source as not registered.
7619 ++ */
7620 ++ ws->timer.function = NULL;
7621 + }
7622 + EXPORT_SYMBOL_GPL(wakeup_source_remove);
7623 +
7624 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
7625 +index cf5538942834..9a8d83bc1e75 100644
7626 +--- a/drivers/block/loop.c
7627 ++++ b/drivers/block/loop.c
7628 +@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
7629 + return -EBADF;
7630 +
7631 + l = f->f_mapping->host->i_bdev->bd_disk->private_data;
7632 +- if (l->lo_state == Lo_unbound) {
7633 ++ if (l->lo_state != Lo_bound) {
7634 + return -EINVAL;
7635 + }
7636 + f = l->lo_backing_file;
7637 +@@ -1089,16 +1089,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
7638 + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
7639 + }
7640 + mapping_set_gfp_mask(filp->f_mapping, gfp);
7641 +- lo->lo_state = Lo_unbound;
7642 + /* This is safe: open() is still holding a reference. */
7643 + module_put(THIS_MODULE);
7644 + blk_mq_unfreeze_queue(lo->lo_queue);
7645 +
7646 + partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
7647 + lo_number = lo->lo_number;
7648 +- lo->lo_flags = 0;
7649 +- if (!part_shift)
7650 +- lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
7651 + loop_unprepare_queue(lo);
7652 + out_unlock:
7653 + mutex_unlock(&loop_ctl_mutex);
7654 +@@ -1120,6 +1116,23 @@ out_unlock:
7655 + /* Device is gone, no point in returning error */
7656 + err = 0;
7657 + }
7658 ++
7659 ++ /*
7660 ++	 * lo->lo_state is set to Lo_unbound here after the above partscan
7661 ++	 * has finished.
7662 ++ *
7663 ++ * There cannot be anybody else entering __loop_clr_fd() as
7664 ++ * lo->lo_backing_file is already cleared and Lo_rundown state
7665 ++ * protects us from all the other places trying to change the 'lo'
7666 ++ * device.
7667 ++ */
7668 ++ mutex_lock(&loop_ctl_mutex);
7669 ++ lo->lo_flags = 0;
7670 ++ if (!part_shift)
7671 ++ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
7672 ++ lo->lo_state = Lo_unbound;
7673 ++ mutex_unlock(&loop_ctl_mutex);
7674 ++
7675 + /*
7676 + * Need not hold loop_ctl_mutex to fput backing file.
7677 + * Calling fput holding loop_ctl_mutex triggers a circular
7678 +diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
7679 +index 04ca65912638..684854d3b0ad 100644
7680 +--- a/drivers/block/zram/zram_drv.c
7681 ++++ b/drivers/block/zram/zram_drv.c
7682 +@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
7683 + struct zram *zram = dev_to_zram(dev);
7684 + unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
7685 + int index;
7686 +- char mode_buf[8];
7687 +- ssize_t sz;
7688 +
7689 +- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
7690 +- if (sz <= 0)
7691 +- return -EINVAL;
7692 +-
7693 +- /* ignore trailing new line */
7694 +- if (mode_buf[sz - 1] == '\n')
7695 +- mode_buf[sz - 1] = 0x00;
7696 +-
7697 +- if (strcmp(mode_buf, "all"))
7698 ++ if (!sysfs_streq(buf, "all"))
7699 + return -EINVAL;
7700 +
7701 + down_read(&zram->init_lock);
7702 +@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
7703 + struct bio bio;
7704 + struct bio_vec bio_vec;
7705 + struct page *page;
7706 +- ssize_t ret, sz;
7707 +- char mode_buf[8];
7708 +- int mode = -1;
7709 ++ ssize_t ret;
7710 ++ int mode;
7711 + unsigned long blk_idx = 0;
7712 +
7713 +- sz = strscpy(mode_buf, buf, sizeof(mode_buf));
7714 +- if (sz <= 0)
7715 +- return -EINVAL;
7716 +-
7717 +- /* ignore trailing newline */
7718 +- if (mode_buf[sz - 1] == '\n')
7719 +- mode_buf[sz - 1] = 0x00;
7720 +-
7721 +- if (!strcmp(mode_buf, "idle"))
7722 ++ if (sysfs_streq(buf, "idle"))
7723 + mode = IDLE_WRITEBACK;
7724 +- else if (!strcmp(mode_buf, "huge"))
7725 ++ else if (sysfs_streq(buf, "huge"))
7726 + mode = HUGE_WRITEBACK;
7727 +-
7728 +- if (mode == -1)
7729 ++ else
7730 + return -EINVAL;
7731 +
7732 + down_read(&zram->init_lock);
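
A note on the conversion above: sysfs_streq() compares a sysfs buffer against a token while tolerating an optional trailing newline, which is exactly what the removed strscpy()-and-trim code was emulating by hand. A minimal sketch of the resulting store() pattern (the handler name is illustrative; the mode constants come from the driver):

    static ssize_t example_mode_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t len)
    {
            int mode;

            /* sysfs_streq() matches "idle" and "idle\n" alike */
            if (sysfs_streq(buf, "idle"))
                    mode = IDLE_WRITEBACK;
            else if (sysfs_streq(buf, "huge"))
                    mode = HUGE_WRITEBACK;
            else
                    return -EINVAL;

            /* ... act on mode ... */
            return len;
    }
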
7733 +diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
7734 +index 41405de27d66..c91bba00df4e 100644
7735 +--- a/drivers/bluetooth/btrtl.c
7736 ++++ b/drivers/bluetooth/btrtl.c
7737 +@@ -552,10 +552,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
7738 + hdev->bus);
7739 +
7740 + if (!btrtl_dev->ic_info) {
7741 +- rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
7742 ++ rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
7743 + lmp_subver, hci_rev, hci_ver);
7744 +- ret = -EINVAL;
7745 +- goto err_free;
7746 ++ return btrtl_dev;
7747 + }
7748 +
7749 + if (btrtl_dev->ic_info->has_rom_version) {
7750 +@@ -610,6 +609,11 @@ int btrtl_download_firmware(struct hci_dev *hdev,
7751 + * standard btusb. Once that firmware is uploaded, the subver changes
7752 + * to a different value.
7753 + */
7754 ++ if (!btrtl_dev->ic_info) {
7755 ++ rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
7756 ++ return 0;
7757 ++ }
7758 ++
7759 + switch (btrtl_dev->ic_info->lmp_subver) {
7760 + case RTL_ROM_LMP_8723A:
7761 + case RTL_ROM_LMP_3499:
7762 +diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
7763 +index b432651f8236..307d82166f48 100644
7764 +--- a/drivers/bluetooth/h4_recv.h
7765 ++++ b/drivers/bluetooth/h4_recv.h
7766 +@@ -60,6 +60,10 @@ static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
7767 + const struct h4_recv_pkt *pkts,
7768 + int pkts_count)
7769 + {
7770 ++ /* Check for error from previous call */
7771 ++ if (IS_ERR(skb))
7772 ++ skb = NULL;
7773 ++
7774 + while (count) {
7775 + int i, len;
7776 +
7777 +diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
7778 +index fb97a3bf069b..5d97d77627c1 100644
7779 +--- a/drivers/bluetooth/hci_h4.c
7780 ++++ b/drivers/bluetooth/hci_h4.c
7781 +@@ -174,6 +174,10 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
7782 + struct hci_uart *hu = hci_get_drvdata(hdev);
7783 + u8 alignment = hu->alignment ? hu->alignment : 1;
7784 +
7785 ++ /* Check for error from previous call */
7786 ++ if (IS_ERR(skb))
7787 ++ skb = NULL;
7788 ++
7789 + while (count) {
7790 + int i, len;
7791 +
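
Both h4_recv_buf() implementations (h4_recv.h above and hci_h4.c here) now tolerate being handed back the ERR_PTR() they returned on a previous call. The caller-side contract this supports looks roughly like the following sketch (field and table names are illustrative of the hci_uart drivers):

    hu->rx_skb = h4_recv_buf(hu->hdev, hu->rx_skb, data, count,
                             h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
    if (IS_ERR(hu->rx_skb)) {
            int err = PTR_ERR(hu->rx_skb);

            bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
            hu->rx_skb = NULL;
    }

With the check added above, a caller that stores the error pointer instead of NULL no longer trips over it on the next receive.
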
7792 +diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
7793 +index fbf7b4df23ab..9562e72c1ae5 100644
7794 +--- a/drivers/bluetooth/hci_ldisc.c
7795 ++++ b/drivers/bluetooth/hci_ldisc.c
7796 +@@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_struct *work)
7797 + err = hci_register_dev(hu->hdev);
7798 + if (err < 0) {
7799 + BT_ERR("Can't register HCI device");
7800 ++ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
7801 ++ hu->proto->close(hu);
7802 + hdev = hu->hdev;
7803 + hu->hdev = NULL;
7804 + hci_free_dev(hdev);
7805 +- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
7806 +- hu->proto->close(hu);
7807 + return;
7808 + }
7809 +
7810 +@@ -616,6 +616,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
7811 + static int hci_uart_register_dev(struct hci_uart *hu)
7812 + {
7813 + struct hci_dev *hdev;
7814 ++ int err;
7815 +
7816 + BT_DBG("");
7817 +
7818 +@@ -659,11 +660,22 @@ static int hci_uart_register_dev(struct hci_uart *hu)
7819 + else
7820 + hdev->dev_type = HCI_PRIMARY;
7821 +
7822 ++ /* Only call open() for the protocol after hdev is fully initialized as
7823 ++ * open() (or a timer/workqueue it starts) may attempt to reference it.
7824 ++ */
7825 ++ err = hu->proto->open(hu);
7826 ++ if (err) {
7827 ++ hu->hdev = NULL;
7828 ++ hci_free_dev(hdev);
7829 ++ return err;
7830 ++ }
7831 ++
7832 + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
7833 + return 0;
7834 +
7835 + if (hci_register_dev(hdev) < 0) {
7836 + BT_ERR("Can't register HCI device");
7837 ++ hu->proto->close(hu);
7838 + hu->hdev = NULL;
7839 + hci_free_dev(hdev);
7840 + return -ENODEV;
7841 +@@ -683,20 +695,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
7842 + if (!p)
7843 + return -EPROTONOSUPPORT;
7844 +
7845 +- err = p->open(hu);
7846 +- if (err)
7847 +- return err;
7848 +-
7849 + hu->proto = p;
7850 +- set_bit(HCI_UART_PROTO_READY, &hu->flags);
7851 +
7852 + err = hci_uart_register_dev(hu);
7853 + if (err) {
7854 +- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
7855 +- p->close(hu);
7856 + return err;
7857 + }
7858 +
7859 ++ set_bit(HCI_UART_PROTO_READY, &hu->flags);
7860 + return 0;
7861 + }
7862 +
7863 +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
7864 +index 614ecdbb4ab7..933268b8d6a5 100644
7865 +--- a/drivers/cdrom/cdrom.c
7866 ++++ b/drivers/cdrom/cdrom.c
7867 +@@ -265,6 +265,7 @@
7868 + /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
7869 + /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
7870 +
7871 ++#include <linux/atomic.h>
7872 + #include <linux/module.h>
7873 + #include <linux/fs.h>
7874 + #include <linux/major.h>
7875 +@@ -3692,9 +3693,9 @@ static struct ctl_table_header *cdrom_sysctl_header;
7876 +
7877 + static void cdrom_sysctl_register(void)
7878 + {
7879 +- static int initialized;
7880 ++ static atomic_t initialized = ATOMIC_INIT(0);
7881 +
7882 +- if (initialized == 1)
7883 ++ if (!atomic_add_unless(&initialized, 1, 1))
7884 + return;
7885 +
7886 + cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
7887 +@@ -3705,8 +3706,6 @@ static void cdrom_sysctl_register(void)
7888 + cdrom_sysctl_settings.debug = debug;
7889 + cdrom_sysctl_settings.lock = lockdoor;
7890 + cdrom_sysctl_settings.check = check_media_type;
7891 +-
7892 +- initialized = 1;
7893 + }
7894 +
7895 + static void cdrom_sysctl_unregister(void)
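
The atomic_add_unless() conversion closes a race where two callers could both read initialized == 0 and register twice. The run-once idiom in isolation (do_register() stands in for the real work):

    static atomic_t initialized = ATOMIC_INIT(0);

    static void register_once(void)
    {
            /* Adds 1 only if the value is not already 1 and returns
             * non-zero when it did, so exactly one caller proceeds. */
            if (!atomic_add_unless(&initialized, 1, 1))
                    return;

            do_register();
    }
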
7896 +diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
7897 +index 2e2ffe7010aa..51c77f0e47b2 100644
7898 +--- a/drivers/char/Kconfig
7899 ++++ b/drivers/char/Kconfig
7900 +@@ -351,7 +351,7 @@ config XILINX_HWICAP
7901 +
7902 + config R3964
7903 + tristate "Siemens R3964 line discipline"
7904 +- depends on TTY
7905 ++ depends on TTY && BROKEN
7906 + ---help---
7907 + This driver allows synchronous communication with devices using the
7908 + Siemens R3964 packet protocol. Unless you are dealing with special
7909 +diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
7910 +index c0a5b1f3a986..4ccc39e00ced 100644
7911 +--- a/drivers/char/applicom.c
7912 ++++ b/drivers/char/applicom.c
7913 +@@ -32,6 +32,7 @@
7914 + #include <linux/wait.h>
7915 + #include <linux/init.h>
7916 + #include <linux/fs.h>
7917 ++#include <linux/nospec.h>
7918 +
7919 + #include <asm/io.h>
7920 + #include <linux/uaccess.h>
7921 +@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
7922 + TicCard = st_loc.tic_des_from_pc; /* tic number to send */
7923 + IndexCard = NumCard - 1;
7924 +
7925 +- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
7926 ++ if (IndexCard >= MAX_BOARD)
7927 ++ return -EINVAL;
7928 ++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
7929 ++
7930 ++ if (!apbs[IndexCard].RamIO)
7931 + return -EINVAL;
7932 +
7933 + #ifdef DEBUG
7934 +@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7935 + unsigned char IndexCard;
7936 + void __iomem *pmem;
7937 + int ret = 0;
7938 ++ static int warncount = 10;
7939 + volatile unsigned char byte_reset_it;
7940 + struct st_ram_io *adgl;
7941 + void __user *argp = (void __user *)arg;
7942 +@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7943 + mutex_lock(&ac_mutex);
7944 + IndexCard = adgl->num_card-1;
7945 +
7946 +- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
7947 +- static int warncount = 10;
7948 +- if (warncount) {
7949 +- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
7950 +- warncount--;
7951 +- }
7952 +- kfree(adgl);
7953 +- mutex_unlock(&ac_mutex);
7954 +- return -EINVAL;
7955 +- }
7956 ++ if (cmd != 6 && IndexCard >= MAX_BOARD)
7957 ++ goto err;
7958 ++ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
7959 ++
7960 ++ if (cmd != 6 && !apbs[IndexCard].RamIO)
7961 ++ goto err;
7962 +
7963 + switch (cmd) {
7964 +
7965 +@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7966 + kfree(adgl);
7967 + mutex_unlock(&ac_mutex);
7968 + return 0;
7969 ++
7970 ++err:
7971 ++ if (warncount) {
7972 ++ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
7973 ++ (int)IndexCard + 1);
7974 ++ warncount--;
7975 ++ }
7976 ++ kfree(adgl);
7977 ++ mutex_unlock(&ac_mutex);
7978 ++ return -EINVAL;
7979 ++
7980 + }
7981 +
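
Both applicom fixes follow the standard Spectre-v1 hardening recipe: bounds-check first, then clamp the index with array_index_nospec() so a mispredicted branch cannot index out of bounds even speculatively. Reduced to its essentials (the helper is illustrative; apbs and MAX_BOARD are the driver's own):

    #include <linux/nospec.h>

    static void __iomem *example_board_ram(unsigned int index_card)
    {
            if (index_card >= MAX_BOARD)
                    return NULL;
            /* Clamp under speculation as well as architecturally */
            index_card = array_index_nospec(index_card, MAX_BOARD);

            return apbs[index_card].RamIO;
    }
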
7982 +diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
7983 +index 4a22b4b41aef..9bffcd37cc7b 100644
7984 +--- a/drivers/char/hpet.c
7985 ++++ b/drivers/char/hpet.c
7986 +@@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
7987 + pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
7988 + return 1;
7989 + }
7990 +-__setup("hpet_mmap", hpet_mmap_enable);
7991 ++__setup("hpet_mmap=", hpet_mmap_enable);
7992 +
7993 + static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
7994 + {
7995 +diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
7996 +index b89df66ea1ae..7abd604e938c 100644
7997 +--- a/drivers/char/hw_random/virtio-rng.c
7998 ++++ b/drivers/char/hw_random/virtio-rng.c
7999 +@@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
8000 +
8001 + if (!vi->busy) {
8002 + vi->busy = true;
8003 +- init_completion(&vi->have_data);
8004 ++ reinit_completion(&vi->have_data);
8005 + register_buffer(vi, buf, size);
8006 + }
8007 +
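
init_completion() is meant to run once, when the completion object is created; re-arming it for reuse should go through reinit_completion(), which resets the done count without re-initialising the underlying wait queue. The intended split, as a sketch (the container type is illustrative):

    struct example_ctx {
            struct completion have_data;
    };

    static void example_ctx_create(struct example_ctx *ctx)
    {
            init_completion(&ctx->have_data);       /* once, at setup */
    }

    static void example_ctx_rearm(struct example_ctx *ctx)
    {
            reinit_completion(&ctx->have_data);     /* before each reuse */
    }
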
8008 +diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
8009 +index 52f6152d1fcb..7ae52c17618e 100644
8010 +--- a/drivers/char/ipmi/ipmi_si.h
8011 ++++ b/drivers/char/ipmi/ipmi_si.h
8012 +@@ -25,7 +25,9 @@ void ipmi_irq_finish_setup(struct si_sm_io *io);
8013 + int ipmi_si_remove_by_dev(struct device *dev);
8014 + void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
8015 + unsigned long addr);
8016 +-int ipmi_si_hardcode_find_bmc(void);
8017 ++void ipmi_hardcode_init(void);
8018 ++void ipmi_si_hardcode_exit(void);
8019 ++int ipmi_si_hardcode_match(int addr_type, unsigned long addr);
8020 + void ipmi_si_platform_init(void);
8021 + void ipmi_si_platform_shutdown(void);
8022 +
8023 +diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
8024 +index 487642809c58..1e5783961b0d 100644
8025 +--- a/drivers/char/ipmi/ipmi_si_hardcode.c
8026 ++++ b/drivers/char/ipmi/ipmi_si_hardcode.c
8027 +@@ -3,6 +3,7 @@
8028 + #define pr_fmt(fmt) "ipmi_hardcode: " fmt
8029 +
8030 + #include <linux/moduleparam.h>
8031 ++#include <linux/platform_device.h>
8032 + #include "ipmi_si.h"
8033 +
8034 + /*
8035 +@@ -12,23 +13,22 @@
8036 +
8037 + #define SI_MAX_PARMS 4
8038 +
8039 +-static char *si_type[SI_MAX_PARMS];
8040 + #define MAX_SI_TYPE_STR 30
8041 +-static char si_type_str[MAX_SI_TYPE_STR];
8042 ++static char si_type_str[MAX_SI_TYPE_STR] __initdata;
8043 + static unsigned long addrs[SI_MAX_PARMS];
8044 + static unsigned int num_addrs;
8045 + static unsigned int ports[SI_MAX_PARMS];
8046 + static unsigned int num_ports;
8047 +-static int irqs[SI_MAX_PARMS];
8048 +-static unsigned int num_irqs;
8049 +-static int regspacings[SI_MAX_PARMS];
8050 +-static unsigned int num_regspacings;
8051 +-static int regsizes[SI_MAX_PARMS];
8052 +-static unsigned int num_regsizes;
8053 +-static int regshifts[SI_MAX_PARMS];
8054 +-static unsigned int num_regshifts;
8055 +-static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
8056 +-static unsigned int num_slave_addrs;
8057 ++static int irqs[SI_MAX_PARMS] __initdata;
8058 ++static unsigned int num_irqs __initdata;
8059 ++static int regspacings[SI_MAX_PARMS] __initdata;
8060 ++static unsigned int num_regspacings __initdata;
8061 ++static int regsizes[SI_MAX_PARMS] __initdata;
8062 ++static unsigned int num_regsizes __initdata;
8063 ++static int regshifts[SI_MAX_PARMS] __initdata;
8064 ++static unsigned int num_regshifts __initdata;
8065 ++static int slave_addrs[SI_MAX_PARMS] __initdata;
8066 ++static unsigned int num_slave_addrs __initdata;
8067 +
8068 + module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
8069 + MODULE_PARM_DESC(type, "Defines the type of each interface, each"
8070 +@@ -73,12 +73,133 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
8071 + " overridden by this parm. This is an array indexed"
8072 + " by interface number.");
8073 +
8074 +-int ipmi_si_hardcode_find_bmc(void)
8075 ++static struct platform_device *ipmi_hc_pdevs[SI_MAX_PARMS];
8076 ++
8077 ++static void __init ipmi_hardcode_init_one(const char *si_type_str,
8078 ++ unsigned int i,
8079 ++ unsigned long addr,
8080 ++ unsigned int flags)
8081 + {
8082 +- int ret = -ENODEV;
8083 +- int i;
8084 +- struct si_sm_io io;
8085 ++ struct platform_device *pdev;
8086 ++ unsigned int num_r = 1, size;
8087 ++ struct resource r[4];
8088 ++ struct property_entry p[6];
8089 ++ enum si_type si_type;
8090 ++ unsigned int regspacing, regsize;
8091 ++ int rv;
8092 ++
8093 ++ memset(p, 0, sizeof(p));
8094 ++ memset(r, 0, sizeof(r));
8095 ++
8096 ++ if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
8097 ++ size = 2;
8098 ++ si_type = SI_KCS;
8099 ++ } else if (strcmp(si_type_str, "smic") == 0) {
8100 ++ size = 2;
8101 ++ si_type = SI_SMIC;
8102 ++ } else if (strcmp(si_type_str, "bt") == 0) {
8103 ++ size = 3;
8104 ++ si_type = SI_BT;
8105 ++ } else if (strcmp(si_type_str, "invalid") == 0) {
8106 ++ /*
8107 ++ * Allow a firmware-specified interface to be
8108 ++ * disabled.
8109 ++ */
8110 ++ size = 1;
8111 ++ si_type = SI_TYPE_INVALID;
8112 ++ } else {
8113 ++		pr_warn("Interface type specified for interface %d was invalid: %s\n",
8114 ++ i, si_type_str);
8115 ++ return;
8116 ++ }
8117 ++
8118 ++ regsize = regsizes[i];
8119 ++ if (regsize == 0)
8120 ++ regsize = DEFAULT_REGSIZE;
8121 ++
8122 ++ p[0] = PROPERTY_ENTRY_U8("ipmi-type", si_type);
8123 ++ p[1] = PROPERTY_ENTRY_U8("slave-addr", slave_addrs[i]);
8124 ++ p[2] = PROPERTY_ENTRY_U8("addr-source", SI_HARDCODED);
8125 ++ p[3] = PROPERTY_ENTRY_U8("reg-shift", regshifts[i]);
8126 ++ p[4] = PROPERTY_ENTRY_U8("reg-size", regsize);
8127 ++ /* Last entry must be left NULL to terminate it. */
8128 ++
8129 ++ /*
8130 ++ * Register spacing is derived from the resources in
8131 ++ * the IPMI platform code.
8132 ++ */
8133 ++ regspacing = regspacings[i];
8134 ++ if (regspacing == 0)
8135 ++ regspacing = regsize;
8136 ++
8137 ++ r[0].start = addr;
8138 ++ r[0].end = r[0].start + regsize - 1;
8139 ++ r[0].name = "IPMI Address 1";
8140 ++ r[0].flags = flags;
8141 ++
8142 ++ if (size > 1) {
8143 ++ r[1].start = r[0].start + regspacing;
8144 ++ r[1].end = r[1].start + regsize - 1;
8145 ++ r[1].name = "IPMI Address 2";
8146 ++ r[1].flags = flags;
8147 ++ num_r++;
8148 ++ }
8149 ++
8150 ++ if (size > 2) {
8151 ++ r[2].start = r[1].start + regspacing;
8152 ++ r[2].end = r[2].start + regsize - 1;
8153 ++ r[2].name = "IPMI Address 3";
8154 ++ r[2].flags = flags;
8155 ++ num_r++;
8156 ++ }
8157 ++
8158 ++ if (irqs[i]) {
8159 ++ r[num_r].start = irqs[i];
8160 ++ r[num_r].end = irqs[i];
8161 ++ r[num_r].name = "IPMI IRQ";
8162 ++ r[num_r].flags = IORESOURCE_IRQ;
8163 ++ num_r++;
8164 ++ }
8165 ++
8166 ++ pdev = platform_device_alloc("hardcode-ipmi-si", i);
8167 ++ if (!pdev) {
8168 ++ pr_err("Error allocating IPMI platform device %d\n", i);
8169 ++ return;
8170 ++ }
8171 ++
8172 ++ rv = platform_device_add_resources(pdev, r, num_r);
8173 ++ if (rv) {
8174 ++ dev_err(&pdev->dev,
8175 ++ "Unable to add hard-code resources: %d\n", rv);
8176 ++ goto err;
8177 ++ }
8178 ++
8179 ++ rv = platform_device_add_properties(pdev, p);
8180 ++ if (rv) {
8181 ++ dev_err(&pdev->dev,
8182 ++ "Unable to add hard-code properties: %d\n", rv);
8183 ++ goto err;
8184 ++ }
8185 ++
8186 ++ rv = platform_device_add(pdev);
8187 ++ if (rv) {
8188 ++ dev_err(&pdev->dev,
8189 ++ "Unable to add hard-code device: %d\n", rv);
8190 ++ goto err;
8191 ++ }
8192 ++
8193 ++ ipmi_hc_pdevs[i] = pdev;
8194 ++ return;
8195 ++
8196 ++err:
8197 ++ platform_device_put(pdev);
8198 ++}
8199 ++
8200 ++void __init ipmi_hardcode_init(void)
8201 ++{
8202 ++ unsigned int i;
8203 + char *str;
8204 ++ char *si_type[SI_MAX_PARMS];
8205 +
8206 + /* Parse out the si_type string into its components. */
8207 + str = si_type_str;
8208 +@@ -95,54 +216,45 @@ int ipmi_si_hardcode_find_bmc(void)
8209 + }
8210 + }
8211 +
8212 +- memset(&io, 0, sizeof(io));
8213 + for (i = 0; i < SI_MAX_PARMS; i++) {
8214 +- if (!ports[i] && !addrs[i])
8215 +- continue;
8216 +-
8217 +- io.addr_source = SI_HARDCODED;
8218 +- pr_info("probing via hardcoded address\n");
8219 +-
8220 +- if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
8221 +- io.si_type = SI_KCS;
8222 +- } else if (strcmp(si_type[i], "smic") == 0) {
8223 +- io.si_type = SI_SMIC;
8224 +- } else if (strcmp(si_type[i], "bt") == 0) {
8225 +- io.si_type = SI_BT;
8226 +- } else {
8227 +- pr_warn("Interface type specified for interface %d, was invalid: %s\n",
8228 +- i, si_type[i]);
8229 +- continue;
8230 +- }
8231 ++ if (i < num_ports && ports[i])
8232 ++ ipmi_hardcode_init_one(si_type[i], i, ports[i],
8233 ++ IORESOURCE_IO);
8234 ++ if (i < num_addrs && addrs[i])
8235 ++ ipmi_hardcode_init_one(si_type[i], i, addrs[i],
8236 ++ IORESOURCE_MEM);
8237 ++ }
8238 ++}
8239 +
8240 +- if (ports[i]) {
8241 +- /* An I/O port */
8242 +- io.addr_data = ports[i];
8243 +- io.addr_type = IPMI_IO_ADDR_SPACE;
8244 +- } else if (addrs[i]) {
8245 +- /* A memory port */
8246 +- io.addr_data = addrs[i];
8247 +- io.addr_type = IPMI_MEM_ADDR_SPACE;
8248 +- } else {
8249 +- pr_warn("Interface type specified for interface %d, but port and address were not set or set to zero\n",
8250 +- i);
8251 +- continue;
8252 +- }
8253 ++void ipmi_si_hardcode_exit(void)
8254 ++{
8255 ++ unsigned int i;
8256 +
8257 +- io.addr = NULL;
8258 +- io.regspacing = regspacings[i];
8259 +- if (!io.regspacing)
8260 +- io.regspacing = DEFAULT_REGSPACING;
8261 +- io.regsize = regsizes[i];
8262 +- if (!io.regsize)
8263 +- io.regsize = DEFAULT_REGSIZE;
8264 +- io.regshift = regshifts[i];
8265 +- io.irq = irqs[i];
8266 +- if (io.irq)
8267 +- io.irq_setup = ipmi_std_irq_setup;
8268 +- io.slave_addr = slave_addrs[i];
8269 +-
8270 +- ret = ipmi_si_add_smi(&io);
8271 ++ for (i = 0; i < SI_MAX_PARMS; i++) {
8272 ++ if (ipmi_hc_pdevs[i])
8273 ++ platform_device_unregister(ipmi_hc_pdevs[i]);
8274 + }
8275 +- return ret;
8276 ++}
8277 ++
8278 ++/*
8279 ++ * Returns true if the given address exists as a hardcoded address,
8280 ++ * false if not.
8281 ++ */
8282 ++int ipmi_si_hardcode_match(int addr_type, unsigned long addr)
8283 ++{
8284 ++ unsigned int i;
8285 ++
8286 ++ if (addr_type == IPMI_IO_ADDR_SPACE) {
8287 ++ for (i = 0; i < num_ports; i++) {
8288 ++ if (ports[i] == addr)
8289 ++ return 1;
8290 ++ }
8291 ++ } else {
8292 ++ for (i = 0; i < num_addrs; i++) {
8293 ++ if (addrs[i] == addr)
8294 ++ return 1;
8295 ++ }
8296 ++ }
8297 ++
8298 ++ return 0;
8299 + }
8300 +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
8301 +index dc8603d34320..5294abc4c96c 100644
8302 +--- a/drivers/char/ipmi/ipmi_si_intf.c
8303 ++++ b/drivers/char/ipmi/ipmi_si_intf.c
8304 +@@ -1862,6 +1862,18 @@ int ipmi_si_add_smi(struct si_sm_io *io)
8305 + int rv = 0;
8306 + struct smi_info *new_smi, *dup;
8307 +
8308 ++ /*
8309 ++ * If the user gave us a hard-coded device at the same
8310 ++ * address, they presumably want us to use it and not what is
8311 ++ * in the firmware.
8312 ++ */
8313 ++ if (io->addr_source != SI_HARDCODED &&
8314 ++ ipmi_si_hardcode_match(io->addr_type, io->addr_data)) {
8315 ++ dev_info(io->dev,
8316 ++			 "Hard-coded device at this address already exists\n");
8317 ++ return -ENODEV;
8318 ++ }
8319 ++
8320 + if (!io->io_setup) {
8321 + if (io->addr_type == IPMI_IO_ADDR_SPACE) {
8322 + io->io_setup = ipmi_si_port_setup;
8323 +@@ -2085,11 +2097,16 @@ static int try_smi_init(struct smi_info *new_smi)
8324 + WARN_ON(new_smi->io.dev->init_name != NULL);
8325 +
8326 + out_err:
8327 ++ if (rv && new_smi->io.io_cleanup) {
8328 ++ new_smi->io.io_cleanup(&new_smi->io);
8329 ++ new_smi->io.io_cleanup = NULL;
8330 ++ }
8331 ++
8332 + kfree(init_name);
8333 + return rv;
8334 + }
8335 +
8336 +-static int init_ipmi_si(void)
8337 ++static int __init init_ipmi_si(void)
8338 + {
8339 + struct smi_info *e;
8340 + enum ipmi_addr_src type = SI_INVALID;
8341 +@@ -2097,11 +2114,9 @@ static int init_ipmi_si(void)
8342 + if (initialized)
8343 + return 0;
8344 +
8345 +- pr_info("IPMI System Interface driver\n");
8346 ++ ipmi_hardcode_init();
8347 +
8348 +- /* If the user gave us a device, they presumably want us to use it */
8349 +- if (!ipmi_si_hardcode_find_bmc())
8350 +- goto do_scan;
8351 ++ pr_info("IPMI System Interface driver\n");
8352 +
8353 + ipmi_si_platform_init();
8354 +
8355 +@@ -2113,7 +2128,6 @@ static int init_ipmi_si(void)
8356 + with multiple BMCs we assume that there will be several instances
8357 + of a given type so if we succeed in registering a type then also
8358 + try to register everything else of the same type */
8359 +-do_scan:
8360 + mutex_lock(&smi_infos_lock);
8361 + list_for_each_entry(e, &smi_infos, link) {
8362 + /* Try to register a device if it has an IRQ and we either
8363 +@@ -2299,6 +2313,8 @@ static void cleanup_ipmi_si(void)
8364 + list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
8365 + cleanup_one_si(e);
8366 + mutex_unlock(&smi_infos_lock);
8367 ++
8368 ++ ipmi_si_hardcode_exit();
8369 + }
8370 + module_exit(cleanup_ipmi_si);
8371 +
8372 +diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
8373 +index fd0ec8d6bf0e..75583612ab10 100644
8374 +--- a/drivers/char/ipmi/ipmi_si_mem_io.c
8375 ++++ b/drivers/char/ipmi/ipmi_si_mem_io.c
8376 +@@ -81,8 +81,6 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
8377 + if (!addr)
8378 + return -ENODEV;
8379 +
8380 +- io->io_cleanup = mem_cleanup;
8381 +-
8382 + /*
8383 + * Figure out the actual readb/readw/readl/etc routine to use based
8384 + * upon the register size.
8385 +@@ -141,5 +139,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
8386 + mem_region_cleanup(io, io->io_size);
8387 + return -EIO;
8388 + }
8389 ++
8390 ++ io->io_cleanup = mem_cleanup;
8391 ++
8392 + return 0;
8393 + }
8394 +diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
8395 +index 15cf819f884f..8158d03542f4 100644
8396 +--- a/drivers/char/ipmi/ipmi_si_platform.c
8397 ++++ b/drivers/char/ipmi/ipmi_si_platform.c
8398 +@@ -128,8 +128,6 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
8399 + if (res_second->start > io->addr_data)
8400 + io->regspacing = res_second->start - io->addr_data;
8401 + }
8402 +- io->regsize = DEFAULT_REGSIZE;
8403 +- io->regshift = 0;
8404 +
8405 + return res;
8406 + }
8407 +@@ -137,7 +135,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev,
8408 + static int platform_ipmi_probe(struct platform_device *pdev)
8409 + {
8410 + struct si_sm_io io;
8411 +- u8 type, slave_addr, addr_source;
8412 ++ u8 type, slave_addr, addr_source, regsize, regshift;
8413 + int rv;
8414 +
8415 + rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
8416 +@@ -149,7 +147,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
8417 + if (addr_source == SI_SMBIOS) {
8418 + if (!si_trydmi)
8419 + return -ENODEV;
8420 +- } else {
8421 ++ } else if (addr_source != SI_HARDCODED) {
8422 + if (!si_tryplatform)
8423 + return -ENODEV;
8424 + }
8425 +@@ -169,11 +167,23 @@ static int platform_ipmi_probe(struct platform_device *pdev)
8426 + case SI_BT:
8427 + io.si_type = type;
8428 + break;
8429 ++ case SI_TYPE_INVALID: /* User disabled this in hardcode. */
8430 ++ return -ENODEV;
8431 + default:
8432 + dev_err(&pdev->dev, "ipmi-type property is invalid\n");
8433 + return -EINVAL;
8434 + }
8435 +
8436 ++ io.regsize = DEFAULT_REGSIZE;
8437 ++ rv = device_property_read_u8(&pdev->dev, "reg-size", &regsize);
8438 ++ if (!rv)
8439 ++ io.regsize = regsize;
8440 ++
8441 ++ io.regshift = 0;
8442 ++ rv = device_property_read_u8(&pdev->dev, "reg-shift", &regshift);
8443 ++ if (!rv)
8444 ++ io.regshift = regshift;
8445 ++
8446 + if (!ipmi_get_info_from_resources(pdev, &io))
8447 + return -EINVAL;
8448 +
8449 +@@ -193,7 +203,8 @@ static int platform_ipmi_probe(struct platform_device *pdev)
8450 +
8451 + io.dev = &pdev->dev;
8452 +
8453 +- pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
8454 ++ pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n",
8455 ++ ipmi_addr_src_to_str(addr_source),
8456 + (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
8457 + io.addr_data, io.regsize, io.regspacing, io.irq);
8458 +
8459 +@@ -358,6 +369,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
8460 + goto err_free;
8461 + }
8462 +
8463 ++ io.regsize = DEFAULT_REGSIZE;
8464 ++ io.regshift = 0;
8465 ++
8466 + res = ipmi_get_info_from_resources(pdev, &io);
8467 + if (!res) {
8468 + rv = -EINVAL;
8469 +@@ -420,8 +434,9 @@ static int ipmi_remove(struct platform_device *pdev)
8470 + }
8471 +
8472 + static const struct platform_device_id si_plat_ids[] = {
8473 +- { "dmi-ipmi-si", 0 },
8474 +- { }
8475 ++ { "dmi-ipmi-si", 0 },
8476 ++ { "hardcode-ipmi-si", 0 },
8477 ++ { }
8478 + };
8479 +
8480 + struct platform_driver ipmi_platform_driver = {
8481 +diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
8482 +index ef6dffcea9fa..03924c32b6e9 100644
8483 +--- a/drivers/char/ipmi/ipmi_si_port_io.c
8484 ++++ b/drivers/char/ipmi/ipmi_si_port_io.c
8485 +@@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io)
8486 + if (!addr)
8487 + return -ENODEV;
8488 +
8489 +- io->io_cleanup = port_cleanup;
8490 +-
8491 + /*
8492 + * Figure out the actual inb/inw/inl/etc routine to use based
8493 + * upon the register size.
8494 +@@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io)
8495 + return -EIO;
8496 + }
8497 + }
8498 ++
8499 ++ io->io_cleanup = port_cleanup;
8500 ++
8501 + return 0;
8502 + }
8503 +diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
8504 +index 64dc560859f2..13dc614b7ebc 100644
8505 +--- a/drivers/char/tpm/st33zp24/st33zp24.c
8506 ++++ b/drivers/char/tpm/st33zp24/st33zp24.c
8507 +@@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
8508 + goto out_err;
8509 + }
8510 +
8511 +- return len;
8512 ++ return 0;
8513 + out_err:
8514 + st33zp24_cancel(chip);
8515 + release_locality(chip);
8516 +diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
8517 +index d9439f9abe78..88d2e01a651d 100644
8518 +--- a/drivers/char/tpm/tpm-interface.c
8519 ++++ b/drivers/char/tpm/tpm-interface.c
8520 +@@ -230,10 +230,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
8521 + if (rc < 0) {
8522 + if (rc != -EPIPE)
8523 + dev_err(&chip->dev,
8524 +- "%s: tpm_send: error %d\n", __func__, rc);
8525 ++ "%s: send(): error %d\n", __func__, rc);
8526 + goto out;
8527 + }
8528 +
8529 ++	/* A sanity check. send() should just return zero on success, i.e.
8530 ++	 * not the command length.
8531 ++ */
8532 ++ if (rc > 0) {
8533 ++ dev_warn(&chip->dev,
8534 ++ "%s: send(): invalid value %d\n", __func__, rc);
8535 ++ rc = 0;
8536 ++ }
8537 ++
8538 + if (chip->flags & TPM_CHIP_FLAG_IRQ)
8539 + goto out_recv;
8540 +
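
The run of driver changes that follows converts every ->send() implementation to the convention this sanity check enforces: return 0 on success instead of echoing the command length, and a negative errno on failure. A conforming hook, sketched with a hypothetical transport helper:

    static int example_tpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
    {
            int rc;

            rc = example_write_bytes(chip, buf, count); /* hypothetical */
            if (rc < 0)
                    return rc;      /* -errno on transport failure */

            return 0;               /* success: zero, never 'count' */
    }
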
8541 +diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
8542 +index 66a14526aaf4..a290b30a0c35 100644
8543 +--- a/drivers/char/tpm/tpm_atmel.c
8544 ++++ b/drivers/char/tpm/tpm_atmel.c
8545 +@@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
8546 + iowrite8(buf[i], priv->iobase);
8547 + }
8548 +
8549 +- return count;
8550 ++ return 0;
8551 + }
8552 +
8553 + static void tpm_atml_cancel(struct tpm_chip *chip)
8554 +diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
8555 +index 36952ef98f90..763fc7e6c005 100644
8556 +--- a/drivers/char/tpm/tpm_crb.c
8557 ++++ b/drivers/char/tpm/tpm_crb.c
8558 +@@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
8559 + struct crb_priv *priv = dev_get_drvdata(&chip->dev);
8560 + unsigned int expected;
8561 +
8562 +- /* sanity check */
8563 +- if (count < 6)
8564 ++ /* A sanity check that the upper layer wants to get at least the header
8565 ++ * as that is the minimum size for any TPM response.
8566 ++ */
8567 ++ if (count < TPM_HEADER_SIZE)
8568 + return -EIO;
8569 +
8570 ++ /* If this bit is set, according to the spec, the TPM is in
8571 ++	 * an unrecoverable condition.
8572 ++ */
8573 + if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR)
8574 + return -EIO;
8575 +
8576 +- memcpy_fromio(buf, priv->rsp, 6);
8577 +- expected = be32_to_cpup((__be32 *) &buf[2]);
8578 +- if (expected > count || expected < 6)
8579 ++ /* Read the first 8 bytes in order to get the length of the response.
8580 ++ * We read exactly a quad word in order to make sure that the remaining
8581 ++ * reads will be aligned.
8582 ++ */
8583 ++ memcpy_fromio(buf, priv->rsp, 8);
8584 ++
8585 ++ expected = be32_to_cpup((__be32 *)&buf[2]);
8586 ++ if (expected > count || expected < TPM_HEADER_SIZE)
8587 + return -EIO;
8588 +
8589 +- memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
8590 ++ memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8);
8591 +
8592 + return expected;
8593 + }
8594 +diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
8595 +index 95ce2e9ccdc6..32a8e27c5382 100644
8596 +--- a/drivers/char/tpm/tpm_i2c_atmel.c
8597 ++++ b/drivers/char/tpm/tpm_i2c_atmel.c
8598 +@@ -65,7 +65,11 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
8599 + dev_dbg(&chip->dev,
8600 + "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
8601 + (int)min_t(size_t, 64, len), buf, len, status);
8602 +- return status;
8603 ++
8604 ++ if (status < 0)
8605 ++ return status;
8606 ++
8607 ++ return 0;
8608 + }
8609 +
8610 + static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
8611 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
8612 +index 9086edc9066b..977fd42daa1b 100644
8613 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
8614 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
8615 +@@ -587,7 +587,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
8616 + /* go and do it */
8617 + iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
8618 +
8619 +- return len;
8620 ++ return 0;
8621 + out_err:
8622 + tpm_tis_i2c_ready(chip);
8623 + /* The TPM needs some time to clean up here,
8624 +diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
8625 +index 217f7f1cbde8..058220edb8b3 100644
8626 +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
8627 ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
8628 +@@ -467,7 +467,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
8629 + }
8630 +
8631 + dev_dbg(dev, "%s() -> %zd\n", __func__, len);
8632 +- return len;
8633 ++ return 0;
8634 + }
8635 +
8636 + static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
8637 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
8638 +index 07b5a487d0c8..757ca45b39b8 100644
8639 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
8640 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
8641 +@@ -139,14 +139,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
8642 + }
8643 +
8644 + /**
8645 +- * tpm_ibmvtpm_send - Send tpm request
8646 +- *
8647 ++ * tpm_ibmvtpm_send() - Send a TPM command
8648 + * @chip: tpm chip struct
8649 + * @buf: buffer contains data to send
8650 + * @count: size of buffer
8651 + *
8652 + * Return:
8653 +- * Number of bytes sent or < 0 on error.
8654 ++ * 0 on success,
8655 ++ * -errno on error
8656 + */
8657 + static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
8658 + {
8659 +@@ -192,7 +192,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
8660 + rc = 0;
8661 + ibmvtpm->tpm_processing_cmd = false;
8662 + } else
8663 +- rc = count;
8664 ++ rc = 0;
8665 +
8666 + spin_unlock(&ibmvtpm->rtce_lock);
8667 + return rc;
8668 +diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
8669 +index d8f10047fbba..97f6d4fe0aee 100644
8670 +--- a/drivers/char/tpm/tpm_infineon.c
8671 ++++ b/drivers/char/tpm/tpm_infineon.c
8672 +@@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
8673 + for (i = 0; i < count; i++) {
8674 + wait_and_send(chip, buf[i]);
8675 + }
8676 +- return count;
8677 ++ return 0;
8678 + }
8679 +
8680 + static void tpm_inf_cancel(struct tpm_chip *chip)
8681 +diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
8682 +index 5d6cce74cd3f..9bee3c5eb4bf 100644
8683 +--- a/drivers/char/tpm/tpm_nsc.c
8684 ++++ b/drivers/char/tpm/tpm_nsc.c
8685 +@@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
8686 + }
8687 + outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
8688 +
8689 +- return count;
8690 ++ return 0;
8691 + }
8692 +
8693 + static void tpm_nsc_cancel(struct tpm_chip *chip)
8694 +diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
8695 +index bf7e49cfa643..bb0c2e160562 100644
8696 +--- a/drivers/char/tpm/tpm_tis_core.c
8697 ++++ b/drivers/char/tpm/tpm_tis_core.c
8698 +@@ -481,7 +481,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
8699 + goto out_err;
8700 + }
8701 + }
8702 +- return len;
8703 ++ return 0;
8704 + out_err:
8705 + tpm_tis_ready(chip);
8706 + return rc;
8707 +diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
8708 +index 87a0ce47f201..ecbb63f8d231 100644
8709 +--- a/drivers/char/tpm/tpm_vtpm_proxy.c
8710 ++++ b/drivers/char/tpm/tpm_vtpm_proxy.c
8711 +@@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
8712 + static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
8713 + {
8714 + struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
8715 +- int rc = 0;
8716 +
8717 + if (count > sizeof(proxy_dev->buffer)) {
8718 + dev_err(&chip->dev,
8719 +@@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
8720 +
8721 + wake_up_interruptible(&proxy_dev->wq);
8722 +
8723 +- return rc;
8724 ++ return 0;
8725 + }
8726 +
8727 + static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
8728 +diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
8729 +index b150f87f38f5..5a327eb7f63a 100644
8730 +--- a/drivers/char/tpm/xen-tpmfront.c
8731 ++++ b/drivers/char/tpm/xen-tpmfront.c
8732 +@@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
8733 + return -ETIME;
8734 + }
8735 +
8736 +- return count;
8737 ++ return 0;
8738 + }
8739 +
8740 + static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
8741 +diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
8742 +index 545dceec0bbf..fdfe2e423d15 100644
8743 +--- a/drivers/clk/clk-fractional-divider.c
8744 ++++ b/drivers/clk/clk-fractional-divider.c
8745 +@@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
8746 + unsigned long m, n;
8747 + u64 ret;
8748 +
8749 +- if (!rate || rate >= *parent_rate)
8750 ++ if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
8751 + return *parent_rate;
8752 +
8753 + if (fd->approximation)
8754 +diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
8755 +index ea846f77750b..0cad5748bf0e 100644
8756 +--- a/drivers/clk/clk-twl6040.c
8757 ++++ b/drivers/clk/clk-twl6040.c
8758 +@@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw)
8759 + return pdmclk->enabled;
8760 + }
8761 +
8762 ++static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk,
8763 ++ unsigned int reg)
8764 ++{
8765 ++ const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */
8766 ++ int ret;
8767 ++
8768 ++ ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask);
8769 ++ if (ret < 0)
8770 ++ return ret;
8771 ++
8772 ++ ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask);
8773 ++ if (ret < 0)
8774 ++ return ret;
8775 ++
8776 ++ return 0;
8777 ++}
8778 ++
8779 ++/*
8780 ++ * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At
8781 ++ * Cold Temperature". This appears to affect cold boot and deeper
8782 ++ * idle states. The workaround consists of resetting HPPLL and LPPLL.
8783 ++ */
8784 ++static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk)
8785 ++{
8786 ++ int ret;
8787 ++
8788 ++ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL);
8789 ++ if (ret)
8790 ++ return ret;
8791 ++
8792 ++ ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL);
8793 ++ if (ret)
8794 ++ return ret;
8795 ++
8796 ++ return 0;
8797 ++}
8798 ++
8799 + static int twl6040_pdmclk_prepare(struct clk_hw *hw)
8800 + {
8801 + struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk,
8802 +@@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw)
8803 + int ret;
8804 +
8805 + ret = twl6040_power(pdmclk->twl6040, 1);
8806 +- if (!ret)
8807 +- pdmclk->enabled = 1;
8808 ++ if (ret)
8809 ++ return ret;
8810 ++
8811 ++ ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk);
8812 ++ if (ret)
8813 ++ goto out_err;
8814 ++
8815 ++ pdmclk->enabled = 1;
8816 ++
8817 ++ return 0;
8818 ++
8819 ++out_err:
8820 ++ dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret);
8821 ++ twl6040_power(pdmclk->twl6040, 0);
8822 +
8823 + return ret;
8824 + }
8825 +diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
8826 +index 5ef7d9ba2195..b40160eb3372 100644
8827 +--- a/drivers/clk/ingenic/cgu.c
8828 ++++ b/drivers/clk/ingenic/cgu.c
8829 +@@ -426,16 +426,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
8830 + struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
8831 + struct ingenic_cgu *cgu = ingenic_clk->cgu;
8832 + const struct ingenic_cgu_clk_info *clk_info;
8833 +- long rate = *parent_rate;
8834 ++ unsigned int div = 1;
8835 +
8836 + clk_info = &cgu->clock_info[ingenic_clk->idx];
8837 +
8838 + if (clk_info->type & CGU_CLK_DIV)
8839 +- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
8840 ++ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
8841 + else if (clk_info->type & CGU_CLK_FIXDIV)
8842 +- rate /= clk_info->fixdiv.div;
8843 ++ div = clk_info->fixdiv.div;
8844 +
8845 +- return rate;
8846 ++ return DIV_ROUND_UP(*parent_rate, div);
8847 + }
8848 +
8849 + static int
8850 +@@ -455,7 +455,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
8851 +
8852 + if (clk_info->type & CGU_CLK_DIV) {
8853 + div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
8854 +- rate = parent_rate / div;
8855 ++ rate = DIV_ROUND_UP(parent_rate, div);
8856 +
8857 + if (rate != req_rate)
8858 + return -EINVAL;
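
The point of both hunks is that round_rate() and set_rate() must round identically: if the query path reports a rate the commit path computes differently, set_rate() ends up rejecting the very rate it just proposed. The invariant in miniature (the divider-selection policy here is illustrative only):

    static unsigned int example_calc_div(unsigned long parent,
                                         unsigned long req)
    {
            return DIV_ROUND_UP(parent, req);
    }

    static long example_round_rate(unsigned long parent, unsigned long req)
    {
            return DIV_ROUND_UP(parent, example_calc_div(parent, req));
    }

    static int example_set_rate(unsigned long parent, unsigned long req)
    {
            unsigned int div = example_calc_div(parent, req);

            /* Must agree with example_round_rate() exactly */
            if (DIV_ROUND_UP(parent, div) != req)
                    return -EINVAL;

            return 0;
    }
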
8859 +diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
8860 +index 502bcbb61b04..e12716d8ce3c 100644
8861 +--- a/drivers/clk/ingenic/cgu.h
8862 ++++ b/drivers/clk/ingenic/cgu.h
8863 +@@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info {
8864 + * @reg: offset of the divider control register within the CGU
8865 + * @shift: number of bits to left shift the divide value by (i.e. the index of
8866 + * the lowest bit of the divide value within its control register)
8867 +- * @div: number of bits to divide the divider value by (i.e. if the
8868 ++ * @div: number to divide the divider value by (used when the
8869 + * effective divider value is the value written to the register
8870 + * multiplied by some constant)
8871 + * @bits: the size of the divide value in bits
8872 +diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
8873 +index faa94adb2a37..65ab5c2f48b0 100644
8874 +--- a/drivers/clk/rockchip/clk-rk3328.c
8875 ++++ b/drivers/clk/rockchip/clk-rk3328.c
8876 +@@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
8877 +
8878 + static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
8879 + /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
8880 +- RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
8881 ++ RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
8882 + /* vco = 1016064000 */
8883 +- RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
8884 ++ RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
8885 + /* vco = 983040000 */
8886 +- RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
8887 ++ RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
8888 + /* vco = 983040000 */
8889 +- RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
8890 ++ RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
8891 + /* vco = 860156000 */
8892 +- RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
8893 ++ RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
8894 + /* vco = 903168000 */
8895 +- RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
8896 ++ RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
8897 + /* vco = 819200000 */
8898 + { /* sentinel */ },
8899 + };
8900 +diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
8901 +index 93306283d764..8ae44b5db4c2 100644
8902 +--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
8903 ++++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
8904 +@@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent,
8905 + {
8906 + struct of_phandle_args genpdspec = { .np = pd_node };
8907 + struct platform_device *pdev;
8908 ++ int ret;
8909 ++
8910 ++ pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO);
8911 ++ if (!pdev)
8912 ++ return -ENOMEM;
8913 +
8914 +- pdev = platform_device_alloc(info->pd_name, -1);
8915 + pdev->dev.parent = parent;
8916 +- pdev->driver_override = "exynos5-subcmu";
8917 + platform_set_drvdata(pdev, (void *)info);
8918 + of_genpd_add_device(&genpdspec, &pdev->dev);
8919 +- platform_device_add(pdev);
8920 ++ ret = platform_device_add(pdev);
8921 ++ if (ret)
8922 ++ platform_device_put(pdev);
8923 +
8924 +- return 0;
8925 ++ return ret;
8926 + }
8927 +
8928 + static int __init exynos5_clk_probe(struct platform_device *pdev)
8929 +diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
8930 +index 40630eb950fc..85d7f301149b 100644
8931 +--- a/drivers/clk/ti/clkctrl.c
8932 ++++ b/drivers/clk/ti/clkctrl.c
8933 +@@ -530,7 +530,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
8934 + * Create default clkdm name, replace _cm from end of parent
8935 + * node name with _clkdm
8936 + */
8937 +- provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
8938 ++ provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
8939 + } else {
8940 + provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
8941 + if (!provider->clkdm_name) {
8942 +diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c
8943 +index ec11f55594ad..5d2d42b7e182 100644
8944 +--- a/drivers/clk/uniphier/clk-uniphier-cpugear.c
8945 ++++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c
8946 +@@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index)
8947 + return ret;
8948 +
8949 + ret = regmap_write_bits(gear->regmap,
8950 +- gear->regbase + UNIPHIER_CLK_CPUGEAR_SET,
8951 ++ gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD,
8952 + UNIPHIER_CLK_CPUGEAR_UPD_BIT,
8953 + UNIPHIER_CLK_CPUGEAR_UPD_BIT);
8954 + if (ret)
8955 +diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
8956 +index a9e26f6a81a1..8dfd3bc448d0 100644
8957 +--- a/drivers/clocksource/Kconfig
8958 ++++ b/drivers/clocksource/Kconfig
8959 +@@ -360,6 +360,16 @@ config ARM64_ERRATUM_858921
8960 + The workaround will be dynamically enabled when an affected
8961 + core is detected.
8962 +
8963 ++config SUN50I_ERRATUM_UNKNOWN1
8964 ++ bool "Workaround for Allwinner A64 erratum UNKNOWN1"
8965 ++ default y
8966 ++ depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
8967 ++ select ARM_ARCH_TIMER_OOL_WORKAROUND
8968 ++ help
8969 ++ This option enables a workaround for instability in the timer on
8970 ++ the Allwinner A64 SoC. The workaround will only be active if the
8971 ++ allwinner,erratum-unknown1 property is found in the timer node.
8972 ++
8973 + config ARM_GLOBAL_TIMER
8974 + bool "Support for the ARM global timer" if COMPILE_TEST
8975 + select TIMER_OF if OF
8976 +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
8977 +index 9a7d4dc00b6e..a8b20b65bd4b 100644
8978 +--- a/drivers/clocksource/arm_arch_timer.c
8979 ++++ b/drivers/clocksource/arm_arch_timer.c
8980 +@@ -326,6 +326,48 @@ static u64 notrace arm64_1188873_read_cntvct_el0(void)
8981 + }
8982 + #endif
8983 +
8984 ++#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
8985 ++/*
8986 ++ * The low bits of the counter registers are indeterminate while bit 10 or
8987 ++ * greater is rolling over. Since the counter value can jump both backward
8988 ++ * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
8989 ++ * with all ones or all zeros in the low bits. Bound the loop by the maximum
8990 ++ * number of CPU cycles in 3 consecutive 24 MHz counter periods.
8991 ++ */
8992 ++#define __sun50i_a64_read_reg(reg) ({ \
8993 ++ u64 _val; \
8994 ++ int _retries = 150; \
8995 ++ \
8996 ++ do { \
8997 ++ _val = read_sysreg(reg); \
8998 ++ _retries--; \
8999 ++ } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
9000 ++ \
9001 ++ WARN_ON_ONCE(!_retries); \
9002 ++ _val; \
9003 ++})
9004 ++
9005 ++static u64 notrace sun50i_a64_read_cntpct_el0(void)
9006 ++{
9007 ++ return __sun50i_a64_read_reg(cntpct_el0);
9008 ++}
9009 ++
9010 ++static u64 notrace sun50i_a64_read_cntvct_el0(void)
9011 ++{
9012 ++ return __sun50i_a64_read_reg(cntvct_el0);
9013 ++}
9014 ++
9015 ++static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
9016 ++{
9017 ++ return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
9018 ++}
9019 ++
9020 ++static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
9021 ++{
9022 ++ return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
9023 ++}
9024 ++#endif
9025 ++
9026 + #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
9027 + DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
9028 + EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
9029 +@@ -423,6 +465,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
9030 + .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
9031 + },
9032 + #endif
9033 ++#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
9034 ++ {
9035 ++ .match_type = ate_match_dt,
9036 ++ .id = "allwinner,erratum-unknown1",
9037 ++ .desc = "Allwinner erratum UNKNOWN1",
9038 ++ .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
9039 ++ .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
9040 ++ .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
9041 ++ .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
9042 ++ .set_next_event_phys = erratum_set_next_event_tval_phys,
9043 ++ .set_next_event_virt = erratum_set_next_event_tval_virt,
9044 ++ },
9045 ++#endif
9046 + };
9047 +
9048 + typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
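
The __sun50i_a64_read_reg() macro encodes a bounded retry: values whose low ten bits are all zeros or all ones are discarded, since those are the patterns produced while bit 10 and above roll over. The test works because (_val + 1) & GENMASK(9, 0) evaluates to 0 for a 0x3ff tail and to 1 for a 0x000 tail, so "<= 1" rejects exactly those two cases. The same logic as a plain function (read_counter() is a hypothetical raw read):

    static u64 stable_counter_read(void)
    {
            u64 val;
            int retries = 150;      /* ~3 periods of the 24 MHz counter */

            do {
                    val = read_counter();   /* hypothetical raw read */
                    retries--;
            } while (((val + 1) & GENMASK(9, 0)) <= 1 && retries);

            WARN_ON_ONCE(!retries);         /* counter never stabilised */
            return val;
    }
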
9049 +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
9050 +index 7a244b681876..d55c30f6981d 100644
9051 +--- a/drivers/clocksource/exynos_mct.c
9052 ++++ b/drivers/clocksource/exynos_mct.c
9053 +@@ -388,6 +388,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
9054 + exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
9055 + }
9056 +
9057 ++static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
9058 ++{
9059 ++ /* Clear the MCT tick interrupt */
9060 ++ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
9061 ++ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
9062 ++}
9063 ++
9064 + static int exynos4_tick_set_next_event(unsigned long cycles,
9065 + struct clock_event_device *evt)
9066 + {
9067 +@@ -404,6 +411,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
9068 +
9069 + mevt = container_of(evt, struct mct_clock_event_device, evt);
9070 + exynos4_mct_tick_stop(mevt);
9071 ++ exynos4_mct_tick_clear(mevt);
9072 + return 0;
9073 + }
9074 +
9075 +@@ -420,8 +428,11 @@ static int set_state_periodic(struct clock_event_device *evt)
9076 + return 0;
9077 + }
9078 +
9079 +-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
9080 ++static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
9081 + {
9082 ++ struct mct_clock_event_device *mevt = dev_id;
9083 ++ struct clock_event_device *evt = &mevt->evt;
9084 ++
9085 + /*
9086 + * This is for supporting oneshot mode.
9087 +	 * MCT would generate interrupts periodically
9088 +@@ -430,16 +441,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
9089 + if (!clockevent_state_periodic(&mevt->evt))
9090 + exynos4_mct_tick_stop(mevt);
9091 +
9092 +- /* Clear the MCT tick interrupt */
9093 +- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
9094 +- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
9095 +-}
9096 +-
9097 +-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
9098 +-{
9099 +- struct mct_clock_event_device *mevt = dev_id;
9100 +- struct clock_event_device *evt = &mevt->evt;
9101 +-
9102 + exynos4_mct_tick_clear(mevt);
9103 +
9104 + evt->event_handler(evt);
9105 +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
9106 +index 431892200a08..ead71bfac689 100644
9107 +--- a/drivers/clocksource/timer-riscv.c
9108 ++++ b/drivers/clocksource/timer-riscv.c
9109 +@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
9110 + static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
9111 + .name = "riscv_clocksource",
9112 + .rating = 300,
9113 +- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
9114 ++ .mask = CLOCKSOURCE_MASK(64),
9115 + .flags = CLOCK_SOURCE_IS_CONTINUOUS,
9116 + .read = riscv_clocksource_rdtime,
9117 + };
9118 +@@ -103,8 +103,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
9119 + cs = per_cpu_ptr(&riscv_clocksource, cpuid);
9120 + clocksource_register_hz(cs, riscv_timebase);
9121 +
9122 +- sched_clock_register(riscv_sched_clock,
9123 +- BITS_PER_LONG, riscv_timebase);
9124 ++ sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
9125 +
9126 + error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
9127 + "clockevents/riscv/timer:starting",
9128 +diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
9129 +index ed5e42461094..ad48fd52cb53 100644
9130 +--- a/drivers/connector/cn_proc.c
9131 ++++ b/drivers/connector/cn_proc.c
9132 +@@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task)
9133 + {
9134 + struct cn_msg *msg;
9135 + struct proc_event *ev;
9136 ++ struct task_struct *parent;
9137 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
9138 +
9139 + if (atomic_read(&proc_event_num_listeners) < 1)
9140 +@@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task)
9141 + ev->what = PROC_EVENT_COREDUMP;
9142 + ev->event_data.coredump.process_pid = task->pid;
9143 + ev->event_data.coredump.process_tgid = task->tgid;
9144 +- ev->event_data.coredump.parent_pid = task->real_parent->pid;
9145 +- ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
9146 ++
9147 ++ rcu_read_lock();
9148 ++ if (pid_alive(task)) {
9149 ++ parent = rcu_dereference(task->real_parent);
9150 ++ ev->event_data.coredump.parent_pid = parent->pid;
9151 ++ ev->event_data.coredump.parent_tgid = parent->tgid;
9152 ++ }
9153 ++ rcu_read_unlock();
9154 +
9155 + memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
9156 + msg->ack = 0; /* not used */
9157 +@@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
9158 + {
9159 + struct cn_msg *msg;
9160 + struct proc_event *ev;
9161 ++ struct task_struct *parent;
9162 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
9163 +
9164 + if (atomic_read(&proc_event_num_listeners) < 1)
9165 +@@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task)
9166 + ev->event_data.exit.process_tgid = task->tgid;
9167 + ev->event_data.exit.exit_code = task->exit_code;
9168 + ev->event_data.exit.exit_signal = task->exit_signal;
9169 +- ev->event_data.exit.parent_pid = task->real_parent->pid;
9170 +- ev->event_data.exit.parent_tgid = task->real_parent->tgid;
9171 ++
9172 ++ rcu_read_lock();
9173 ++ if (pid_alive(task)) {
9174 ++ parent = rcu_dereference(task->real_parent);
9175 ++ ev->event_data.exit.parent_pid = parent->pid;
9176 ++ ev->event_data.exit.parent_tgid = parent->tgid;
9177 ++ }
9178 ++ rcu_read_unlock();
9179 +
9180 + memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
9181 + msg->ack = 0; /* not used */
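
Both cn_proc hunks above apply the same fix: task->real_parent is an RCU-protected pointer, and by the time a coredump or exit event is reported the parent may already be gone, so it must be read under rcu_read_lock() and only after pid_alive() confirms the task is still hashed. A minimal sketch of the pattern, using a hypothetical helper name (the connector plumbing is omitted):

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    /* Illustrative only -- not a function from the patch. Returns the
     * parent's pid, or 0 if the task is already unhashed. */
    static pid_t safe_parent_pid(struct task_struct *task)
    {
            pid_t ppid = 0;

            rcu_read_lock();
            if (pid_alive(task))
                    ppid = rcu_dereference(task->real_parent)->pid;
            rcu_read_unlock();

            return ppid;
    }
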
9182 +diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
9183 +index d62fd374d5c7..c72258a44ba4 100644
9184 +--- a/drivers/cpufreq/acpi-cpufreq.c
9185 ++++ b/drivers/cpufreq/acpi-cpufreq.c
9186 +@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
9187 + {
9188 + int ret;
9189 +
9190 +- if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
9191 ++ if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
9192 ++ pr_debug("Boost capabilities not present in the processor\n");
9193 + return;
9194 ++ }
9195 +
9196 + acpi_cpufreq_driver.set_boost = set_boost;
9197 + acpi_cpufreq_driver.boost_enabled = boost_state(0);
9198 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
9199 +index e35a886e00bc..ef0e33e21b98 100644
9200 +--- a/drivers/cpufreq/cpufreq.c
9201 ++++ b/drivers/cpufreq/cpufreq.c
9202 +@@ -545,13 +545,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
9203 + * SYSFS INTERFACE *
9204 + *********************************************************************/
9205 + static ssize_t show_boost(struct kobject *kobj,
9206 +- struct attribute *attr, char *buf)
9207 ++ struct kobj_attribute *attr, char *buf)
9208 + {
9209 + return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
9210 + }
9211 +
9212 +-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
9213 +- const char *buf, size_t count)
9214 ++static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
9215 ++ const char *buf, size_t count)
9216 + {
9217 + int ret, enable;
9218 +
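
The cpufreq.c hunk above and the intel_pstate.c hunks that follow are one recurring fix: sysfs files attached to a bare kobject are backed by struct kobj_attribute, so their show/store callbacks must take struct kobj_attribute *, not struct attribute * (the sysfs core recovers the attribute with container_of(), so the mismatched prototypes were type-unsafe). For reference, a minimal attribute of the correct shape, with illustrative names:

    #include <linux/kernel.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t example_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 1);
    }

    static ssize_t example_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count)
    {
            return count;   /* accept and ignore, for illustration */
    }

    /* __ATTR_RW() binds example_show()/example_store() by name */
    static struct kobj_attribute example_attr = __ATTR_RW(example);
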
9219 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
9220 +index dd66decf2087..a579ca4552df 100644
9221 +--- a/drivers/cpufreq/intel_pstate.c
9222 ++++ b/drivers/cpufreq/intel_pstate.c
9223 +@@ -383,7 +383,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
9224 + if (ret)
9225 + return ret;
9226 +
9227 +- return cppc_perf.guaranteed_perf;
9228 ++ if (cppc_perf.guaranteed_perf)
9229 ++ return cppc_perf.guaranteed_perf;
9230 ++
9231 ++ return cppc_perf.nominal_perf;
9232 + }
9233 +
9234 + #else /* CONFIG_ACPI_CPPC_LIB */
9235 +@@ -895,7 +898,7 @@ static void intel_pstate_update_policies(void)
9236 + /************************** sysfs begin ************************/
9237 + #define show_one(file_name, object) \
9238 + static ssize_t show_##file_name \
9239 +- (struct kobject *kobj, struct attribute *attr, char *buf) \
9240 ++ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
9241 + { \
9242 + return sprintf(buf, "%u\n", global.object); \
9243 + }
9244 +@@ -904,7 +907,7 @@ static ssize_t intel_pstate_show_status(char *buf);
9245 + static int intel_pstate_update_status(const char *buf, size_t size);
9246 +
9247 + static ssize_t show_status(struct kobject *kobj,
9248 +- struct attribute *attr, char *buf)
9249 ++ struct kobj_attribute *attr, char *buf)
9250 + {
9251 + ssize_t ret;
9252 +
9253 +@@ -915,7 +918,7 @@ static ssize_t show_status(struct kobject *kobj,
9254 + return ret;
9255 + }
9256 +
9257 +-static ssize_t store_status(struct kobject *a, struct attribute *b,
9258 ++static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
9259 + const char *buf, size_t count)
9260 + {
9261 + char *p = memchr(buf, '\n', count);
9262 +@@ -929,7 +932,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
9263 + }
9264 +
9265 + static ssize_t show_turbo_pct(struct kobject *kobj,
9266 +- struct attribute *attr, char *buf)
9267 ++ struct kobj_attribute *attr, char *buf)
9268 + {
9269 + struct cpudata *cpu;
9270 + int total, no_turbo, turbo_pct;
9271 +@@ -955,7 +958,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
9272 + }
9273 +
9274 + static ssize_t show_num_pstates(struct kobject *kobj,
9275 +- struct attribute *attr, char *buf)
9276 ++ struct kobj_attribute *attr, char *buf)
9277 + {
9278 + struct cpudata *cpu;
9279 + int total;
9280 +@@ -976,7 +979,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
9281 + }
9282 +
9283 + static ssize_t show_no_turbo(struct kobject *kobj,
9284 +- struct attribute *attr, char *buf)
9285 ++ struct kobj_attribute *attr, char *buf)
9286 + {
9287 + ssize_t ret;
9288 +
9289 +@@ -998,7 +1001,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
9290 + return ret;
9291 + }
9292 +
9293 +-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
9294 ++static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
9295 + const char *buf, size_t count)
9296 + {
9297 + unsigned int input;
9298 +@@ -1045,7 +1048,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
9299 + return count;
9300 + }
9301 +
9302 +-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
9303 ++static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
9304 + const char *buf, size_t count)
9305 + {
9306 + unsigned int input;
9307 +@@ -1075,7 +1078,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
9308 + return count;
9309 + }
9310 +
9311 +-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
9312 ++static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
9313 + const char *buf, size_t count)
9314 + {
9315 + unsigned int input;
9316 +@@ -1107,12 +1110,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
9317 + }
9318 +
9319 + static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
9320 +- struct attribute *attr, char *buf)
9321 ++ struct kobj_attribute *attr, char *buf)
9322 + {
9323 + return sprintf(buf, "%u\n", hwp_boost);
9324 + }
9325 +
9326 +-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
9327 ++static ssize_t store_hwp_dynamic_boost(struct kobject *a,
9328 ++ struct kobj_attribute *b,
9329 + const char *buf, size_t count)
9330 + {
9331 + unsigned int input;
9332 +diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
9333 +index 46254e583982..74e0e0c20c46 100644
9334 +--- a/drivers/cpufreq/pxa2xx-cpufreq.c
9335 ++++ b/drivers/cpufreq/pxa2xx-cpufreq.c
9336 +@@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
9337 + return ret;
9338 + }
9339 +
9340 +-static void __init pxa_cpufreq_init_voltages(void)
9341 ++static void pxa_cpufreq_init_voltages(void)
9342 + {
9343 + vcc_core = regulator_get(NULL, "vcc_core");
9344 + if (IS_ERR(vcc_core)) {
9345 +@@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
9346 + return 0;
9347 + }
9348 +
9349 +-static void __init pxa_cpufreq_init_voltages(void) { }
9350 ++static void pxa_cpufreq_init_voltages(void) { }
9351 + #endif
9352 +
9353 + static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
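
Dropping __init from pxa_cpufreq_init_voltages() matters because __init code lives in .init.text, which the kernel frees once boot completes; a function that can still be called afterwards (here, from the cpufreq driver's init path, which may run late) must stay resident. The distinction in miniature:

    #include <linux/init.h>

    /* Freed after boot: only safe to call from other boot-time code. */
    static int __init boot_only_setup(void)
    {
            return 0;
    }

    /* May run at any time (probe, hotplug, sysfs), so no __init. */
    static int runtime_setup(void)
    {
            return 0;
    }
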
9354 +diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
9355 +index 2a3675c24032..a472b814058f 100644
9356 +--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
9357 ++++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
9358 +@@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
9359 +
9360 + static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
9361 + {
9362 +- struct opp_table *opp_tables[NR_CPUS] = {0};
9363 ++ struct opp_table **opp_tables;
9364 + enum _msm8996_version msm8996_version;
9365 + struct nvmem_cell *speedbin_nvmem;
9366 + struct device_node *np;
9367 +@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
9368 + }
9369 + kfree(speedbin);
9370 +
9371 ++ opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL);
9372 ++ if (!opp_tables)
9373 ++ return -ENOMEM;
9374 ++
9375 + for_each_possible_cpu(cpu) {
9376 + cpu_dev = get_cpu_device(cpu);
9377 + if (NULL == cpu_dev) {
9378 +@@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
9379 +
9380 + cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
9381 + NULL, 0);
9382 +- if (!IS_ERR(cpufreq_dt_pdev))
9383 ++ if (!IS_ERR(cpufreq_dt_pdev)) {
9384 ++ platform_set_drvdata(pdev, opp_tables);
9385 + return 0;
9386 ++ }
9387 +
9388 + ret = PTR_ERR(cpufreq_dt_pdev);
9389 + dev_err(cpu_dev, "Failed to register platform device\n");
9390 +@@ -163,13 +169,23 @@ free_opp:
9391 + break;
9392 + dev_pm_opp_put_supported_hw(opp_tables[cpu]);
9393 + }
9394 ++ kfree(opp_tables);
9395 +
9396 + return ret;
9397 + }
9398 +
9399 + static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
9400 + {
9401 ++ struct opp_table **opp_tables = platform_get_drvdata(pdev);
9402 ++ unsigned int cpu;
9403 ++
9404 + platform_device_unregister(cpufreq_dt_pdev);
9405 ++
9406 ++ for_each_possible_cpu(cpu)
9407 ++ dev_pm_opp_put_supported_hw(opp_tables[cpu]);
9408 ++
9409 ++ kfree(opp_tables);
9410 ++
9411 + return 0;
9412 + }
9413 +
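
The Kryo change fixes two things at once: the NR_CPUS-sized pointer array no longer sits on the kernel stack (it is kcalloc'd for the possible CPUs instead), and the array is stored as driver data so remove() can drop the OPP references and free it. A condensed sketch of that lifetime, with the error paths trimmed:

    #include <linux/cpumask.h>
    #include <linux/platform_device.h>
    #include <linux/pm_opp.h>
    #include <linux/slab.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct opp_table **tables;

            tables = kcalloc(num_possible_cpus(), sizeof(*tables),
                             GFP_KERNEL);
            if (!tables)
                    return -ENOMEM;

            /* ... fill tables[cpu] via dev_pm_opp_set_supported_hw() ... */

            platform_set_drvdata(pdev, tables);
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            struct opp_table **tables = platform_get_drvdata(pdev);
            unsigned int cpu;

            for_each_possible_cpu(cpu)
                    dev_pm_opp_put_supported_hw(tables[cpu]);
            kfree(tables);
            return 0;
    }
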
9414 +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
9415 +index 99449738faa4..632ccf82c5d3 100644
9416 +--- a/drivers/cpufreq/scpi-cpufreq.c
9417 ++++ b/drivers/cpufreq/scpi-cpufreq.c
9418 +@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
9419 + cpufreq_cooling_unregister(priv->cdev);
9420 + clk_put(priv->clk);
9421 + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
9422 +- kfree(priv);
9423 + dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
9424 ++ kfree(priv);
9425 +
9426 + return 0;
9427 + }
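
The scpi change is a textbook use-after-free: priv was freed one statement before priv->cpu_dev was read. The rule it restores, shown as a before/after fragment:

    /* broken: dereferences priv after it has been freed */
    kfree(priv);
    dev_pm_opp_remove_all_dynamic(priv->cpu_dev);

    /* fixed: finish every use of the object, then free it last */
    dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
    kfree(priv);
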
9428 +diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
9429 +index 43530254201a..4bb154f6c54c 100644
9430 +--- a/drivers/cpufreq/tegra124-cpufreq.c
9431 ++++ b/drivers/cpufreq/tegra124-cpufreq.c
9432 +@@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
9433 +
9434 + platform_set_drvdata(pdev, priv);
9435 +
9436 ++ of_node_put(np);
9437 ++
9438 + return 0;
9439 +
9440 + out_switch_to_pllx:
9441 +diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
9442 +index bb93e5cf6a4a..9fddf828a76f 100644
9443 +--- a/drivers/cpuidle/governor.c
9444 ++++ b/drivers/cpuidle/governor.c
9445 +@@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
9446 + mutex_lock(&cpuidle_lock);
9447 + if (__cpuidle_find_governor(gov->name) == NULL) {
9448 + ret = 0;
9449 ++ list_add_tail(&gov->governor_list, &cpuidle_governors);
9450 + if (!cpuidle_curr_governor ||
9451 + !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
9452 + (cpuidle_curr_governor->rating < gov->rating &&
9453 +diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
9454 +index 5e63742b0d22..53ab1f140a26 100644
9455 +--- a/drivers/crypto/amcc/crypto4xx_trng.c
9456 ++++ b/drivers/crypto/amcc/crypto4xx_trng.c
9457 +@@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
9458 +
9459 + /* Find the TRNG device node and map it */
9460 + trng = of_find_matching_node(NULL, ppc4xx_trng_match);
9461 +- if (!trng || !of_device_is_available(trng))
9462 ++ if (!trng || !of_device_is_available(trng)) {
9463 ++ of_node_put(trng);
9464 + return;
9465 ++ }
9466 +
9467 + dev->trng_base = of_iomap(trng, 0);
9468 + of_node_put(trng);
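
of_find_matching_node() returns a node with an elevated refcount, so every exit path - including the early bail-out for a missing or disabled node - has to drop it. Because of_node_put(NULL) is a harmless no-op, the fixed code can call it unconditionally; the pattern in isolation (match_table is a placeholder):

    #include <linux/of.h>

    static void example_find_node(const struct of_device_id *match_table)
    {
            struct device_node *np;

            np = of_find_matching_node(NULL, match_table);
            if (!np || !of_device_is_available(np)) {
                    of_node_put(np);        /* no-op when np is NULL */
                    return;
            }
            /* ... of_iomap(np, 0), read properties ... */
            of_node_put(np);                /* done with the node */
    }
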
9469 +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
9470 +index 80ae69f906fb..1c4f3a046dc5 100644
9471 +--- a/drivers/crypto/caam/caamalg.c
9472 ++++ b/drivers/crypto/caam/caamalg.c
9473 +@@ -1040,6 +1040,7 @@ static void init_aead_job(struct aead_request *req,
9474 + if (unlikely(req->src != req->dst)) {
9475 + if (edesc->dst_nents == 1) {
9476 + dst_dma = sg_dma_address(req->dst);
9477 ++ out_options = 0;
9478 + } else {
9479 + dst_dma = edesc->sec4_sg_dma +
9480 + sec4_sg_index *
9481 +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
9482 +index bb1a2cdf1951..0f11811a3585 100644
9483 +--- a/drivers/crypto/caam/caamhash.c
9484 ++++ b/drivers/crypto/caam/caamhash.c
9485 +@@ -113,6 +113,7 @@ struct caam_hash_ctx {
9486 + struct caam_hash_state {
9487 + dma_addr_t buf_dma;
9488 + dma_addr_t ctx_dma;
9489 ++ int ctx_dma_len;
9490 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
9491 + int buflen_0;
9492 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
9493 +@@ -165,6 +166,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
9494 + struct caam_hash_state *state,
9495 + int ctx_len)
9496 + {
9497 ++ state->ctx_dma_len = ctx_len;
9498 + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
9499 + ctx_len, DMA_FROM_DEVICE);
9500 + if (dma_mapping_error(jrdev, state->ctx_dma)) {
9501 +@@ -178,18 +180,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
9502 + return 0;
9503 + }
9504 +
9505 +-/* Map req->result, and append seq_out_ptr command that points to it */
9506 +-static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
9507 +- u8 *result, int digestsize)
9508 +-{
9509 +- dma_addr_t dst_dma;
9510 +-
9511 +- dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
9512 +- append_seq_out_ptr(desc, dst_dma, digestsize, 0);
9513 +-
9514 +- return dst_dma;
9515 +-}
9516 +-
9517 + /* Map current buffer in state (if length > 0) and put it in link table */
9518 + static inline int buf_map_to_sec4_sg(struct device *jrdev,
9519 + struct sec4_sg_entry *sec4_sg,
9520 +@@ -218,6 +208,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
9521 + struct caam_hash_state *state, int ctx_len,
9522 + struct sec4_sg_entry *sec4_sg, u32 flag)
9523 + {
9524 ++ state->ctx_dma_len = ctx_len;
9525 + state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
9526 + if (dma_mapping_error(jrdev, state->ctx_dma)) {
9527 + dev_err(jrdev, "unable to map ctx\n");
9528 +@@ -426,7 +417,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
9529 +
9530 + /*
9531 + * ahash_edesc - s/w-extended ahash descriptor
9532 +- * @dst_dma: physical mapped address of req->result
9533 + * @sec4_sg_dma: physical mapped address of h/w link table
9534 + * @src_nents: number of segments in input scatterlist
9535 + * @sec4_sg_bytes: length of dma mapped sec4_sg space
9536 +@@ -434,7 +424,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
9537 + * @sec4_sg: h/w link table
9538 + */
9539 + struct ahash_edesc {
9540 +- dma_addr_t dst_dma;
9541 + dma_addr_t sec4_sg_dma;
9542 + int src_nents;
9543 + int sec4_sg_bytes;
9544 +@@ -450,8 +439,6 @@ static inline void ahash_unmap(struct device *dev,
9545 +
9546 + if (edesc->src_nents)
9547 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
9548 +- if (edesc->dst_dma)
9549 +- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
9550 +
9551 + if (edesc->sec4_sg_bytes)
9552 + dma_unmap_single(dev, edesc->sec4_sg_dma,
9553 +@@ -468,12 +455,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
9554 + struct ahash_edesc *edesc,
9555 + struct ahash_request *req, int dst_len, u32 flag)
9556 + {
9557 +- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9558 +- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9559 + struct caam_hash_state *state = ahash_request_ctx(req);
9560 +
9561 + if (state->ctx_dma) {
9562 +- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
9563 ++ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
9564 + state->ctx_dma = 0;
9565 + }
9566 + ahash_unmap(dev, edesc, req, dst_len);
9567 +@@ -486,9 +471,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
9568 + struct ahash_edesc *edesc;
9569 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9570 + int digestsize = crypto_ahash_digestsize(ahash);
9571 ++ struct caam_hash_state *state = ahash_request_ctx(req);
9572 + #ifdef DEBUG
9573 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9574 +- struct caam_hash_state *state = ahash_request_ctx(req);
9575 +
9576 + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9577 + #endif
9578 +@@ -497,17 +482,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
9579 + if (err)
9580 + caam_jr_strstatus(jrdev, err);
9581 +
9582 +- ahash_unmap(jrdev, edesc, req, digestsize);
9583 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9584 ++ memcpy(req->result, state->caam_ctx, digestsize);
9585 + kfree(edesc);
9586 +
9587 + #ifdef DEBUG
9588 + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
9589 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
9590 + ctx->ctx_len, 1);
9591 +- if (req->result)
9592 +- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
9593 +- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
9594 +- digestsize, 1);
9595 + #endif
9596 +
9597 + req->base.complete(&req->base, err);
9598 +@@ -555,9 +537,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
9599 + struct ahash_edesc *edesc;
9600 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9601 + int digestsize = crypto_ahash_digestsize(ahash);
9602 ++ struct caam_hash_state *state = ahash_request_ctx(req);
9603 + #ifdef DEBUG
9604 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9605 +- struct caam_hash_state *state = ahash_request_ctx(req);
9606 +
9607 + dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9608 + #endif
9609 +@@ -566,17 +548,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
9610 + if (err)
9611 + caam_jr_strstatus(jrdev, err);
9612 +
9613 +- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
9614 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
9615 ++ memcpy(req->result, state->caam_ctx, digestsize);
9616 + kfree(edesc);
9617 +
9618 + #ifdef DEBUG
9619 + print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
9620 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
9621 + ctx->ctx_len, 1);
9622 +- if (req->result)
9623 +- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
9624 +- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
9625 +- digestsize, 1);
9626 + #endif
9627 +
9628 + req->base.complete(&req->base, err);
9629 +@@ -837,7 +816,7 @@ static int ahash_final_ctx(struct ahash_request *req)
9630 + edesc->sec4_sg_bytes = sec4_sg_bytes;
9631 +
9632 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
9633 +- edesc->sec4_sg, DMA_TO_DEVICE);
9634 ++ edesc->sec4_sg, DMA_BIDIRECTIONAL);
9635 + if (ret)
9636 + goto unmap_ctx;
9637 +
9638 +@@ -857,14 +836,7 @@ static int ahash_final_ctx(struct ahash_request *req)
9639 +
9640 + append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
9641 + LDST_SGF);
9642 +-
9643 +- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
9644 +- digestsize);
9645 +- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
9646 +- dev_err(jrdev, "unable to map dst\n");
9647 +- ret = -ENOMEM;
9648 +- goto unmap_ctx;
9649 +- }
9650 ++ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
9651 +
9652 + #ifdef DEBUG
9653 + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
9654 +@@ -877,7 +849,7 @@ static int ahash_final_ctx(struct ahash_request *req)
9655 +
9656 + return -EINPROGRESS;
9657 + unmap_ctx:
9658 +- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9659 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
9660 + kfree(edesc);
9661 + return ret;
9662 + }
9663 +@@ -931,7 +903,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
9664 + edesc->src_nents = src_nents;
9665 +
9666 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
9667 +- edesc->sec4_sg, DMA_TO_DEVICE);
9668 ++ edesc->sec4_sg, DMA_BIDIRECTIONAL);
9669 + if (ret)
9670 + goto unmap_ctx;
9671 +
9672 +@@ -945,13 +917,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
9673 + if (ret)
9674 + goto unmap_ctx;
9675 +
9676 +- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
9677 +- digestsize);
9678 +- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
9679 +- dev_err(jrdev, "unable to map dst\n");
9680 +- ret = -ENOMEM;
9681 +- goto unmap_ctx;
9682 +- }
9683 ++ append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);
9684 +
9685 + #ifdef DEBUG
9686 + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
9687 +@@ -964,7 +930,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
9688 +
9689 + return -EINPROGRESS;
9690 + unmap_ctx:
9691 +- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9692 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
9693 + kfree(edesc);
9694 + return ret;
9695 + }
9696 +@@ -1023,10 +989,8 @@ static int ahash_digest(struct ahash_request *req)
9697 +
9698 + desc = edesc->hw_desc;
9699 +
9700 +- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
9701 +- digestsize);
9702 +- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
9703 +- dev_err(jrdev, "unable to map dst\n");
9704 ++ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
9705 ++ if (ret) {
9706 + ahash_unmap(jrdev, edesc, req, digestsize);
9707 + kfree(edesc);
9708 + return -ENOMEM;
9709 +@@ -1041,7 +1005,7 @@ static int ahash_digest(struct ahash_request *req)
9710 + if (!ret) {
9711 + ret = -EINPROGRESS;
9712 + } else {
9713 +- ahash_unmap(jrdev, edesc, req, digestsize);
9714 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9715 + kfree(edesc);
9716 + }
9717 +
9718 +@@ -1083,12 +1047,9 @@ static int ahash_final_no_ctx(struct ahash_request *req)
9719 + append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
9720 + }
9721 +
9722 +- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
9723 +- digestsize);
9724 +- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
9725 +- dev_err(jrdev, "unable to map dst\n");
9726 ++ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
9727 ++ if (ret)
9728 + goto unmap;
9729 +- }
9730 +
9731 + #ifdef DEBUG
9732 + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
9733 +@@ -1099,7 +1060,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
9734 + if (!ret) {
9735 + ret = -EINPROGRESS;
9736 + } else {
9737 +- ahash_unmap(jrdev, edesc, req, digestsize);
9738 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9739 + kfree(edesc);
9740 + }
9741 +
9742 +@@ -1298,12 +1259,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
9743 + goto unmap;
9744 + }
9745 +
9746 +- edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
9747 +- digestsize);
9748 +- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
9749 +- dev_err(jrdev, "unable to map dst\n");
9750 ++ ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
9751 ++ if (ret)
9752 + goto unmap;
9753 +- }
9754 +
9755 + #ifdef DEBUG
9756 + print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
9757 +@@ -1314,7 +1272,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
9758 + if (!ret) {
9759 + ret = -EINPROGRESS;
9760 + } else {
9761 +- ahash_unmap(jrdev, edesc, req, digestsize);
9762 ++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
9763 + kfree(edesc);
9764 + }
9765 +
9766 +@@ -1446,6 +1404,7 @@ static int ahash_init(struct ahash_request *req)
9767 + state->final = ahash_final_no_ctx;
9768 +
9769 + state->ctx_dma = 0;
9770 ++ state->ctx_dma_len = 0;
9771 + state->current_buf = 0;
9772 + state->buf_dma = 0;
9773 + state->buflen_0 = 0;
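
The long caamhash rework drops the separately mapped req->result buffer: the hash context buffer itself is mapped DMA_BIDIRECTIONAL, the job descriptor points its output at state->ctx_dma, and the digest is copied to req->result only after the completion callback has torn the mapping down. Reduced to its DMA essentials (a sketch using the field names from the hunks; descriptor construction omitted):

    /* map: the hardware both reads the running context and writes the
     * final digest into the same buffer */
    state->ctx_dma_len = ctx_len;
    state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len,
                                    DMA_BIDIRECTIONAL);
    if (dma_mapping_error(jrdev, state->ctx_dma))
            return -ENOMEM;

    /* ... submit a descriptor whose seq-out pointer is state->ctx_dma ... */

    /* completion: unmap first so the CPU sees coherent data, then copy */
    dma_unmap_single(jrdev, state->ctx_dma, state->ctx_dma_len,
                     DMA_BIDIRECTIONAL);
    memcpy(req->result, state->caam_ctx, digestsize);
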
9774 +diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
9775 +index be055b9547f6..6183f9128a8a 100644
9776 +--- a/drivers/crypto/cavium/zip/zip_main.c
9777 ++++ b/drivers/crypto/cavium/zip/zip_main.c
9778 +@@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
9779 +
9780 + static struct crypto_alg zip_comp_deflate = {
9781 + .cra_name = "deflate",
9782 ++ .cra_driver_name = "deflate-cavium",
9783 + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
9784 + .cra_ctxsize = sizeof(struct zip_kernel_ctx),
9785 + .cra_priority = 300,
9786 +@@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
9787 +
9788 + static struct crypto_alg zip_comp_lzs = {
9789 + .cra_name = "lzs",
9790 ++ .cra_driver_name = "lzs-cavium",
9791 + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
9792 + .cra_ctxsize = sizeof(struct zip_kernel_ctx),
9793 + .cra_priority = 300,
9794 +@@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
9795 + .decompress = zip_scomp_decompress,
9796 + .base = {
9797 + .cra_name = "deflate",
9798 +- .cra_driver_name = "deflate-scomp",
9799 ++ .cra_driver_name = "deflate-scomp-cavium",
9800 + .cra_module = THIS_MODULE,
9801 + .cra_priority = 300,
9802 + }
9803 +@@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
9804 + .decompress = zip_scomp_decompress,
9805 + .base = {
9806 + .cra_name = "lzs",
9807 +- .cra_driver_name = "lzs-scomp",
9808 ++ .cra_driver_name = "lzs-scomp-cavium",
9809 + .cra_module = THIS_MODULE,
9810 + .cra_priority = 300,
9811 + }
9812 +diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
9813 +index dd948e1df9e5..3bcb6bce666e 100644
9814 +--- a/drivers/crypto/ccree/cc_buffer_mgr.c
9815 ++++ b/drivers/crypto/ccree/cc_buffer_mgr.c
9816 +@@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
9817 + hw_iv_size, DMA_BIDIRECTIONAL);
9818 + }
9819 +
9820 +- /*In case a pool was set, a table was
9821 +- *allocated and should be released
9822 +- */
9823 +- if (areq_ctx->mlli_params.curr_pool) {
9824 ++ /* Release pool */
9825 ++ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
9826 ++ areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
9827 ++ (areq_ctx->mlli_params.mlli_virt_addr)) {
9828 + dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
9829 + &areq_ctx->mlli_params.mlli_dma_addr,
9830 + areq_ctx->mlli_params.mlli_virt_addr);
9831 +diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
9832 +index cc92b031fad1..4ec93079daaf 100644
9833 +--- a/drivers/crypto/ccree/cc_cipher.c
9834 ++++ b/drivers/crypto/ccree/cc_cipher.c
9835 +@@ -80,6 +80,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
9836 + default:
9837 + break;
9838 + }
9839 ++ break;
9840 + case S_DIN_to_DES:
9841 + if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
9842 + return 0;
9843 +@@ -652,6 +653,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
9844 + unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
9845 + unsigned int len;
9846 +
9847 ++ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
9848 ++
9849 + switch (ctx_p->cipher_mode) {
9850 + case DRV_CIPHER_CBC:
9851 + /*
9852 +@@ -681,7 +684,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
9853 + break;
9854 + }
9855 +
9856 +- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
9857 + kzfree(req_ctx->iv);
9858 +
9859 + skcipher_request_complete(req, err);
9860 +@@ -799,7 +801,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req)
9861 +
9862 + memset(req_ctx, 0, sizeof(*req_ctx));
9863 +
9864 +- if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
9865 ++ if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
9866 ++ (req->cryptlen >= ivsize)) {
9867 +
9868 + /* Allocate and save the last IV sized bytes of the source,
9869 + * which will be lost in case of in-place decryption.
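
The cc_cipher.c hunks carry three small fixes: moving cc_unmap_cipher_request() ahead of the IV read-back, skipping the IV save when the request is shorter than one block, and - the one sketched below - a missing break that let the S_DIN_to_AES case fall into the DES key-size checks and accept a key size it should have rejected. The shape of the bug, reduced (aes_key_size_ok() is a stand-in for the real per-mode checks):

    switch (flow_mode) {
    case S_DIN_to_AES:
            if (aes_key_size_ok(size))
                    return 0;
            break;          /* without this, control falls into DES */
    case S_DIN_to_DES:
            if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
                    return 0;
            break;
    default:
            break;
    }
    return -EINVAL;
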
9870 +diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
9871 +index c9d622abd90c..0ce4a65b95f5 100644
9872 +--- a/drivers/crypto/rockchip/rk3288_crypto.c
9873 ++++ b/drivers/crypto/rockchip/rk3288_crypto.c
9874 +@@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev,
9875 + count = (dev->left_bytes > PAGE_SIZE) ?
9876 + PAGE_SIZE : dev->left_bytes;
9877 +
9878 +- if (!sg_pcopy_to_buffer(dev->first, dev->nents,
9879 ++ if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
9880 + dev->addr_vir, count,
9881 + dev->total - dev->left_bytes)) {
9882 + dev_err(dev->dev, "[%s:%d] pcopy err\n",
9883 +diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
9884 +index d5fb4013fb42..54ee5b3ed9db 100644
9885 +--- a/drivers/crypto/rockchip/rk3288_crypto.h
9886 ++++ b/drivers/crypto/rockchip/rk3288_crypto.h
9887 +@@ -207,7 +207,8 @@ struct rk_crypto_info {
9888 + void *addr_vir;
9889 + int aligned;
9890 + int align_size;
9891 +- size_t nents;
9892 ++ size_t src_nents;
9893 ++ size_t dst_nents;
9894 + unsigned int total;
9895 + unsigned int count;
9896 + dma_addr_t addr_in;
9897 +@@ -244,6 +245,7 @@ struct rk_cipher_ctx {
9898 + struct rk_crypto_info *dev;
9899 + unsigned int keylen;
9900 + u32 mode;
9901 ++ u8 iv[AES_BLOCK_SIZE];
9902 + };
9903 +
9904 + enum alg_type {
9905 +diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
9906 +index 639c15c5364b..23305f22072f 100644
9907 +--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
9908 ++++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
9909 +@@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev)
9910 + static int rk_set_data_start(struct rk_crypto_info *dev)
9911 + {
9912 + int err;
9913 ++ struct ablkcipher_request *req =
9914 ++ ablkcipher_request_cast(dev->async_req);
9915 ++ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
9916 ++ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
9917 ++ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
9918 ++ u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
9919 ++ dev->sg_src->offset + dev->sg_src->length - ivsize;
9920 ++
9921 ++	/* store the IV that needs to be updated in chain mode */
9922 ++ if (ctx->mode & RK_CRYPTO_DEC)
9923 ++ memcpy(ctx->iv, src_last_blk, ivsize);
9924 +
9925 + err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
9926 + if (!err)
9927 +@@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
9928 + dev->total = req->nbytes;
9929 + dev->sg_src = req->src;
9930 + dev->first = req->src;
9931 +- dev->nents = sg_nents(req->src);
9932 ++ dev->src_nents = sg_nents(req->src);
9933 + dev->sg_dst = req->dst;
9934 ++ dev->dst_nents = sg_nents(req->dst);
9935 + dev->aligned = 1;
9936 +
9937 + spin_lock_irqsave(&dev->lock, flags);
9938 +@@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
9939 + memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
9940 + }
9941 +
9942 ++static void rk_update_iv(struct rk_crypto_info *dev)
9943 ++{
9944 ++ struct ablkcipher_request *req =
9945 ++ ablkcipher_request_cast(dev->async_req);
9946 ++ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
9947 ++ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
9948 ++ u32 ivsize = crypto_ablkcipher_ivsize(tfm);
9949 ++ u8 *new_iv = NULL;
9950 ++
9951 ++ if (ctx->mode & RK_CRYPTO_DEC) {
9952 ++ new_iv = ctx->iv;
9953 ++ } else {
9954 ++ new_iv = page_address(sg_page(dev->sg_dst)) +
9955 ++ dev->sg_dst->offset + dev->sg_dst->length - ivsize;
9956 ++ }
9957 ++
9958 ++ if (ivsize == DES_BLOCK_SIZE)
9959 ++ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
9960 ++ else if (ivsize == AES_BLOCK_SIZE)
9961 ++ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
9962 ++}
9963 ++
9964 + /* return:
9965 + * true an error occurred
9966 + * false no error, continue
9967 +@@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
9968 +
9969 + dev->unload_data(dev);
9970 + if (!dev->aligned) {
9971 +- if (!sg_pcopy_from_buffer(req->dst, dev->nents,
9972 ++ if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
9973 + dev->addr_vir, dev->count,
9974 + dev->total - dev->left_bytes -
9975 + dev->count)) {
9976 +@@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
9977 + }
9978 + }
9979 + if (dev->left_bytes) {
9980 ++ rk_update_iv(dev);
9981 + if (dev->aligned) {
9982 + if (sg_is_last(dev->sg_src)) {
9983 + dev_err(dev->dev, "[%s:%d] Lack of data\n",
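
The rk3288 changes repair CBC across multiple scatterlist chunks: between chunks the engine must be reloaded with the chaining IV, which for encryption is the last ciphertext block just written to the destination, and for decryption is the last ciphertext block of the source - and since the driver can decrypt in place, that block has to be saved before it is overwritten with plaintext. The per-chunk logic, boiled down (src, dst and chunk_len are illustrative):

    /* before a decrypt chunk runs: the ciphertext is about to be
     * destroyed by the in-place decryption */
    if (ctx->mode & RK_CRYPTO_DEC)
            memcpy(ctx->iv, src + chunk_len - ivsize, ivsize);

    /* ... run the engine on this chunk ... */

    /* before the next chunk: program the chaining IV */
    new_iv = (ctx->mode & RK_CRYPTO_DEC) ? ctx->iv
                                         : dst + chunk_len - ivsize;
    memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
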
9984 +diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
9985 +index 821a506b9e17..c336ae75e361 100644
9986 +--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
9987 ++++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
9988 +@@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev)
9989 + dev->sg_dst = NULL;
9990 + dev->sg_src = req->src;
9991 + dev->first = req->src;
9992 +- dev->nents = sg_nents(req->src);
9993 ++ dev->src_nents = sg_nents(req->src);
9994 + rctx = ahash_request_ctx(req);
9995 + rctx->mode = 0;
9996 +
9997 +diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
9998 +index 4a09af3cd546..7b9a7fb28bb9 100644
9999 +--- a/drivers/dma/imx-dma.c
10000 ++++ b/drivers/dma/imx-dma.c
10001 +@@ -285,7 +285,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
10002 + struct scatterlist *sg = d->sg;
10003 + unsigned long now;
10004 +
10005 +- now = min(d->len, sg_dma_len(sg));
10006 ++ now = min_t(size_t, d->len, sg_dma_len(sg));
10007 + if (d->len != IMX_DMA_LENGTH_LOOP)
10008 + d->len -= now;
10009 +
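
The imx-dma one-liner swaps min() for min_t(): d->len is a size_t while sg_dma_len() yields a u32, and the kernel's type-checked min() warns at build time about mixed-type comparisons, so the caller names the comparison type explicitly. In isolation:

    size_t len = d->len;                    /* can exceed 32 bits */
    unsigned int seg = sg_dma_len(sg);      /* u32 */

    /* min(len, seg) trips the built-in type check; min_t() casts
     * both operands to the named type before comparing */
    size_t now = min_t(size_t, len, seg);
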
10010 +diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
10011 +index 43d4b00b8138..411f91fde734 100644
10012 +--- a/drivers/dma/qcom/hidma.c
10013 ++++ b/drivers/dma/qcom/hidma.c
10014 +@@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
10015 + desc = &mdesc->desc;
10016 + last_cookie = desc->cookie;
10017 +
10018 ++ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
10019 ++
10020 + spin_lock_irqsave(&mchan->lock, irqflags);
10021 ++ if (llstat == DMA_COMPLETE) {
10022 ++ mchan->last_success = last_cookie;
10023 ++ result.result = DMA_TRANS_NOERROR;
10024 ++ } else {
10025 ++ result.result = DMA_TRANS_ABORTED;
10026 ++ }
10027 ++
10028 + dma_cookie_complete(desc);
10029 + spin_unlock_irqrestore(&mchan->lock, irqflags);
10030 +
10031 +- llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
10032 + dmaengine_desc_get_callback(desc, &cb);
10033 +
10034 + dma_run_dependencies(desc);
10035 +
10036 + spin_lock_irqsave(&mchan->lock, irqflags);
10037 + list_move(&mdesc->node, &mchan->free);
10038 +-
10039 +- if (llstat == DMA_COMPLETE) {
10040 +- mchan->last_success = last_cookie;
10041 +- result.result = DMA_TRANS_NOERROR;
10042 +- } else
10043 +- result.result = DMA_TRANS_ABORTED;
10044 +-
10045 + spin_unlock_irqrestore(&mchan->lock, irqflags);
10046 +
10047 + dmaengine_desc_callback_invoke(&cb, &result);
10048 +@@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
10049 + if (!mdesc)
10050 + return NULL;
10051 +
10052 ++ mdesc->desc.flags = flags;
10053 + hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
10054 + src, dest, len, flags,
10055 + HIDMA_TRE_MEMCPY);
10056 +@@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
10057 + if (!mdesc)
10058 + return NULL;
10059 +
10060 ++ mdesc->desc.flags = flags;
10061 + hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
10062 + value, dest, len, flags,
10063 + HIDMA_TRE_MEMSET);
10064 +diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
10065 +index 7f7184c3cf95..59403f6d008a 100644
10066 +--- a/drivers/dma/sh/usb-dmac.c
10067 ++++ b/drivers/dma/sh/usb-dmac.c
10068 +@@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
10069 + #endif /* CONFIG_PM */
10070 +
10071 + static const struct dev_pm_ops usb_dmac_pm = {
10072 ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
10073 ++ pm_runtime_force_resume)
10074 + SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
10075 + NULL)
10076 + };
10077 +diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
10078 +index 9a558e30c461..8219ab88a507 100644
10079 +--- a/drivers/dma/tegra20-apb-dma.c
10080 ++++ b/drivers/dma/tegra20-apb-dma.c
10081 +@@ -636,7 +636,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
10082 +
10083 + sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
10084 + dma_desc = sgreq->dma_desc;
10085 +- dma_desc->bytes_transferred += sgreq->req_len;
10086 ++ /* if we dma for long enough the transfer count will wrap */
10087 ++ dma_desc->bytes_transferred =
10088 ++ (dma_desc->bytes_transferred + sgreq->req_len) %
10089 ++ dma_desc->bytes_requested;
10090 +
10091 + /* Callback need to be call */
10092 + if (!dma_desc->cb_count)
10093 +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
10094 +index a7902fccdcfa..6090d25dce85 100644
10095 +--- a/drivers/firmware/efi/cper.c
10096 ++++ b/drivers/firmware/efi/cper.c
10097 +@@ -546,19 +546,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header);
10098 + int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
10099 + {
10100 + struct acpi_hest_generic_data *gdata;
10101 +- unsigned int data_len, gedata_len;
10102 ++ unsigned int data_len, record_size;
10103 + int rc;
10104 +
10105 + rc = cper_estatus_check_header(estatus);
10106 + if (rc)
10107 + return rc;
10108 ++
10109 + data_len = estatus->data_length;
10110 +
10111 + apei_estatus_for_each_section(estatus, gdata) {
10112 +- gedata_len = acpi_hest_get_error_length(gdata);
10113 +- if (gedata_len > data_len - acpi_hest_get_size(gdata))
10114 ++ if (sizeof(struct acpi_hest_generic_data) > data_len)
10115 ++ return -EINVAL;
10116 ++
10117 ++ record_size = acpi_hest_get_record_size(gdata);
10118 ++ if (record_size > data_len)
10119 + return -EINVAL;
10120 +- data_len -= acpi_hest_get_record_size(gdata);
10121 ++
10122 ++ data_len -= record_size;
10123 + }
10124 + if (data_len)
10125 + return -EINVAL;
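
The cper_estatus_check() rewrite hardens a walk over firmware-supplied error records: each iteration now verifies that the remaining data_len can hold a record header and the full record it claims before subtracting, so a corrupt length field cannot underflow the unsigned counter and turn the loop into an out-of-bounds scan. The validated walk in schematic form (next_record() stands in for the real iterator macro):

    unsigned int data_len = estatus->data_length;
    unsigned int record_size;

    while (data_len) {
            if (sizeof(struct acpi_hest_generic_data) > data_len)
                    return -EINVAL;         /* no room for a header */

            record_size = acpi_hest_get_record_size(gdata);
            if (record_size > data_len)
                    return -EINVAL;         /* record overruns buffer */

            data_len -= record_size;        /* now provably safe */
            gdata = next_record(gdata);
    }
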
10126 +diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
10127 +index c037c6c5d0b7..04e6ecd72cd9 100644
10128 +--- a/drivers/firmware/efi/libstub/arm-stub.c
10129 ++++ b/drivers/firmware/efi/libstub/arm-stub.c
10130 +@@ -367,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
10131 + paddr = in->phys_addr;
10132 + size = in->num_pages * EFI_PAGE_SIZE;
10133 +
10134 ++ if (novamap()) {
10135 ++ in->virt_addr = in->phys_addr;
10136 ++ continue;
10137 ++ }
10138 ++
10139 + /*
10140 + * Make the mapping compatible with 64k pages: this allows
10141 + * a 4k page size kernel to kexec a 64k page size kernel and
10142 +diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
10143 +index e94975f4655b..442f51c2a53d 100644
10144 +--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
10145 ++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
10146 +@@ -34,6 +34,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
10147 +
10148 + static int __section(.data) __nokaslr;
10149 + static int __section(.data) __quiet;
10150 ++static int __section(.data) __novamap;
10151 +
10152 + int __pure nokaslr(void)
10153 + {
10154 +@@ -43,6 +44,10 @@ int __pure is_quiet(void)
10155 + {
10156 + return __quiet;
10157 + }
10158 ++int __pure novamap(void)
10159 ++{
10160 ++ return __novamap;
10161 ++}
10162 +
10163 + #define EFI_MMAP_NR_SLACK_SLOTS 8
10164 +
10165 +@@ -482,6 +487,11 @@ efi_status_t efi_parse_options(char const *cmdline)
10166 + __chunk_size = -1UL;
10167 + }
10168 +
10169 ++ if (!strncmp(str, "novamap", 7)) {
10170 ++ str += strlen("novamap");
10171 ++ __novamap = 1;
10172 ++ }
10173 ++
10174 + /* Group words together, delimited by "," */
10175 + while (*str && *str != ' ' && *str != ',')
10176 + str++;
10177 +diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
10178 +index 32799cf039ef..337b52c4702c 100644
10179 +--- a/drivers/firmware/efi/libstub/efistub.h
10180 ++++ b/drivers/firmware/efi/libstub/efistub.h
10181 +@@ -27,6 +27,7 @@
10182 +
10183 + extern int __pure nokaslr(void);
10184 + extern int __pure is_quiet(void);
10185 ++extern int __pure novamap(void);
10186 +
10187 + #define pr_efi(sys_table, msg) do { \
10188 + if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
10189 +diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
10190 +index 0dc7b4987cc2..f8f89f995e9d 100644
10191 +--- a/drivers/firmware/efi/libstub/fdt.c
10192 ++++ b/drivers/firmware/efi/libstub/fdt.c
10193 +@@ -327,6 +327,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
10194 + if (status == EFI_SUCCESS) {
10195 + efi_set_virtual_address_map_t *svam;
10196 +
10197 ++ if (novamap())
10198 ++ return EFI_SUCCESS;
10199 ++
10200 + /* Install the new virtual address map */
10201 + svam = sys_table->runtime->set_virtual_address_map;
10202 + status = svam(runtime_entry_count * desc_size, desc_size,
10203 +diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
10204 +index 8986757eafaf..aac972b056d9 100644
10205 +--- a/drivers/firmware/efi/memattr.c
10206 ++++ b/drivers/firmware/efi/memattr.c
10207 +@@ -94,7 +94,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
10208 +
10209 + if (!(md->attribute & EFI_MEMORY_RUNTIME))
10210 + continue;
10211 +- if (md->virt_addr == 0) {
10212 ++ if (md->virt_addr == 0 && md->phys_addr != 0) {
10213 + /* no virtual mapping has been installed by the stub */
10214 + break;
10215 + }
10216 +diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
10217 +index e2abfdb5cee6..698745c249e8 100644
10218 +--- a/drivers/firmware/efi/runtime-wrappers.c
10219 ++++ b/drivers/firmware/efi/runtime-wrappers.c
10220 +@@ -85,7 +85,7 @@ struct efi_runtime_work efi_rts_work;
10221 + pr_err("Failed to queue work to efi_rts_wq.\n"); \
10222 + \
10223 + exit: \
10224 +- efi_rts_work.efi_rts_id = NONE; \
10225 ++ efi_rts_work.efi_rts_id = EFI_NONE; \
10226 + efi_rts_work.status; \
10227 + })
10228 +
10229 +@@ -175,50 +175,50 @@ static void efi_call_rts(struct work_struct *work)
10230 + arg5 = efi_rts_work.arg5;
10231 +
10232 + switch (efi_rts_work.efi_rts_id) {
10233 +- case GET_TIME:
10234 ++ case EFI_GET_TIME:
10235 + status = efi_call_virt(get_time, (efi_time_t *)arg1,
10236 + (efi_time_cap_t *)arg2);
10237 + break;
10238 +- case SET_TIME:
10239 ++ case EFI_SET_TIME:
10240 + status = efi_call_virt(set_time, (efi_time_t *)arg1);
10241 + break;
10242 +- case GET_WAKEUP_TIME:
10243 ++ case EFI_GET_WAKEUP_TIME:
10244 + status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
10245 + (efi_bool_t *)arg2, (efi_time_t *)arg3);
10246 + break;
10247 +- case SET_WAKEUP_TIME:
10248 ++ case EFI_SET_WAKEUP_TIME:
10249 + status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
10250 + (efi_time_t *)arg2);
10251 + break;
10252 +- case GET_VARIABLE:
10253 ++ case EFI_GET_VARIABLE:
10254 + status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
10255 + (efi_guid_t *)arg2, (u32 *)arg3,
10256 + (unsigned long *)arg4, (void *)arg5);
10257 + break;
10258 +- case GET_NEXT_VARIABLE:
10259 ++ case EFI_GET_NEXT_VARIABLE:
10260 + status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
10261 + (efi_char16_t *)arg2,
10262 + (efi_guid_t *)arg3);
10263 + break;
10264 +- case SET_VARIABLE:
10265 ++ case EFI_SET_VARIABLE:
10266 + status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
10267 + (efi_guid_t *)arg2, *(u32 *)arg3,
10268 + *(unsigned long *)arg4, (void *)arg5);
10269 + break;
10270 +- case QUERY_VARIABLE_INFO:
10271 ++ case EFI_QUERY_VARIABLE_INFO:
10272 + status = efi_call_virt(query_variable_info, *(u32 *)arg1,
10273 + (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
10274 + break;
10275 +- case GET_NEXT_HIGH_MONO_COUNT:
10276 ++ case EFI_GET_NEXT_HIGH_MONO_COUNT:
10277 + status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
10278 + break;
10279 +- case UPDATE_CAPSULE:
10280 ++ case EFI_UPDATE_CAPSULE:
10281 + status = efi_call_virt(update_capsule,
10282 + (efi_capsule_header_t **)arg1,
10283 + *(unsigned long *)arg2,
10284 + *(unsigned long *)arg3);
10285 + break;
10286 +- case QUERY_CAPSULE_CAPS:
10287 ++ case EFI_QUERY_CAPSULE_CAPS:
10288 + status = efi_call_virt(query_capsule_caps,
10289 + (efi_capsule_header_t **)arg1,
10290 + *(unsigned long *)arg2, (u64 *)arg3,
10291 +@@ -242,7 +242,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
10292 +
10293 + if (down_interruptible(&efi_runtime_lock))
10294 + return EFI_ABORTED;
10295 +- status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
10296 ++ status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
10297 + up(&efi_runtime_lock);
10298 + return status;
10299 + }
10300 +@@ -253,7 +253,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
10301 +
10302 + if (down_interruptible(&efi_runtime_lock))
10303 + return EFI_ABORTED;
10304 +- status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
10305 ++ status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
10306 + up(&efi_runtime_lock);
10307 + return status;
10308 + }
10309 +@@ -266,7 +266,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
10310 +
10311 + if (down_interruptible(&efi_runtime_lock))
10312 + return EFI_ABORTED;
10313 +- status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
10314 ++ status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
10315 + NULL);
10316 + up(&efi_runtime_lock);
10317 + return status;
10318 +@@ -278,7 +278,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
10319 +
10320 + if (down_interruptible(&efi_runtime_lock))
10321 + return EFI_ABORTED;
10322 +- status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
10323 ++ status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
10324 + NULL);
10325 + up(&efi_runtime_lock);
10326 + return status;
10327 +@@ -294,7 +294,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
10328 +
10329 + if (down_interruptible(&efi_runtime_lock))
10330 + return EFI_ABORTED;
10331 +- status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
10332 ++ status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
10333 + data);
10334 + up(&efi_runtime_lock);
10335 + return status;
10336 +@@ -308,7 +308,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
10337 +
10338 + if (down_interruptible(&efi_runtime_lock))
10339 + return EFI_ABORTED;
10340 +- status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
10341 ++ status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
10342 + NULL, NULL);
10343 + up(&efi_runtime_lock);
10344 + return status;
10345 +@@ -324,7 +324,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
10346 +
10347 + if (down_interruptible(&efi_runtime_lock))
10348 + return EFI_ABORTED;
10349 +- status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
10350 ++ status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
10351 + data);
10352 + up(&efi_runtime_lock);
10353 + return status;
10354 +@@ -359,7 +359,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
10355 +
10356 + if (down_interruptible(&efi_runtime_lock))
10357 + return EFI_ABORTED;
10358 +- status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
10359 ++ status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
10360 + remaining_space, max_variable_size, NULL);
10361 + up(&efi_runtime_lock);
10362 + return status;
10363 +@@ -391,7 +391,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
10364 +
10365 + if (down_interruptible(&efi_runtime_lock))
10366 + return EFI_ABORTED;
10367 +- status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
10368 ++ status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
10369 + NULL, NULL);
10370 + up(&efi_runtime_lock);
10371 + return status;
10372 +@@ -407,7 +407,7 @@ static void virt_efi_reset_system(int reset_type,
10373 + "could not get exclusive access to the firmware\n");
10374 + return;
10375 + }
10376 +- efi_rts_work.efi_rts_id = RESET_SYSTEM;
10377 ++ efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
10378 + __efi_call_virt(reset_system, reset_type, status, data_size, data);
10379 + up(&efi_runtime_lock);
10380 + }
10381 +@@ -423,7 +423,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
10382 +
10383 + if (down_interruptible(&efi_runtime_lock))
10384 + return EFI_ABORTED;
10385 +- status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
10386 ++ status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
10387 + NULL, NULL);
10388 + up(&efi_runtime_lock);
10389 + return status;
10390 +@@ -441,7 +441,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
10391 +
10392 + if (down_interruptible(&efi_runtime_lock))
10393 + return EFI_ABORTED;
10394 +- status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
10395 ++ status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
10396 + max_size, reset_type, NULL);
10397 + up(&efi_runtime_lock);
10398 + return status;
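
Every hunk in runtime-wrappers.c is the same mechanical rename: the identifiers tagging queued runtime-service calls gain an EFI_ prefix so that names as generic as GET_TIME or NONE stop colliding with other kernel symbols. Judging from the call sites above, the renamed set is presumably declared along these lines (a reconstruction, not the verbatim header change):

    enum efi_rts_ids {
            EFI_NONE,
            EFI_GET_TIME,
            EFI_SET_TIME,
            EFI_GET_WAKEUP_TIME,
            EFI_SET_WAKEUP_TIME,
            EFI_GET_VARIABLE,
            EFI_GET_NEXT_VARIABLE,
            EFI_SET_VARIABLE,
            EFI_QUERY_VARIABLE_INFO,
            EFI_GET_NEXT_HIGH_MONO_COUNT,
            EFI_RESET_SYSTEM,
            EFI_UPDATE_CAPSULE,
            EFI_QUERY_CAPSULE_CAPS,
    };
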
10399 +diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
10400 +index 6bc8e6640d71..c51462f5aa1e 100644
10401 +--- a/drivers/firmware/iscsi_ibft.c
10402 ++++ b/drivers/firmware/iscsi_ibft.c
10403 +@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
10404 + case ISCSI_BOOT_TGT_NIC_ASSOC:
10405 + case ISCSI_BOOT_TGT_CHAP_TYPE:
10406 + rc = S_IRUGO;
10407 ++ break;
10408 + case ISCSI_BOOT_TGT_NAME:
10409 + if (tgt->tgt_name_len)
10410 + rc = S_IRUGO;
10411 +diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
10412 +index 226f6e6fe01b..8e3f6a776e02 100644
10413 +--- a/drivers/gnss/sirf.c
10414 ++++ b/drivers/gnss/sirf.c
10415 +@@ -310,30 +310,26 @@ static int sirf_probe(struct serdev_device *serdev)
10416 + ret = -ENODEV;
10417 + goto err_put_device;
10418 + }
10419 ++
10420 ++ ret = regulator_enable(data->vcc);
10421 ++ if (ret)
10422 ++ goto err_put_device;
10423 ++
10424 ++ /* Wait for chip to boot into hibernate mode. */
10425 ++ msleep(SIRF_BOOT_DELAY);
10426 + }
10427 +
10428 + if (data->wakeup) {
10429 + ret = gpiod_to_irq(data->wakeup);
10430 + if (ret < 0)
10431 +- goto err_put_device;
10432 +-
10433 ++ goto err_disable_vcc;
10434 + data->irq = ret;
10435 +
10436 +- ret = devm_request_threaded_irq(dev, data->irq, NULL,
10437 +- sirf_wakeup_handler,
10438 ++ ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
10439 + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
10440 + "wakeup", data);
10441 + if (ret)
10442 +- goto err_put_device;
10443 +- }
10444 +-
10445 +- if (data->on_off) {
10446 +- ret = regulator_enable(data->vcc);
10447 +- if (ret)
10448 +- goto err_put_device;
10449 +-
10450 +- /* Wait for chip to boot into hibernate mode */
10451 +- msleep(SIRF_BOOT_DELAY);
10452 ++ goto err_disable_vcc;
10453 + }
10454 +
10455 + if (IS_ENABLED(CONFIG_PM)) {
10456 +@@ -342,7 +338,7 @@ static int sirf_probe(struct serdev_device *serdev)
10457 + } else {
10458 + ret = sirf_runtime_resume(dev);
10459 + if (ret < 0)
10460 +- goto err_disable_vcc;
10461 ++ goto err_free_irq;
10462 + }
10463 +
10464 + ret = gnss_register_device(gdev);
10465 +@@ -356,6 +352,9 @@ err_disable_rpm:
10466 + pm_runtime_disable(dev);
10467 + else
10468 + sirf_runtime_suspend(dev);
10469 ++err_free_irq:
10470 ++ if (data->wakeup)
10471 ++ free_irq(data->irq, data);
10472 + err_disable_vcc:
10473 + if (data->on_off)
10474 + regulator_disable(data->vcc);
10475 +@@ -376,6 +375,9 @@ static void sirf_remove(struct serdev_device *serdev)
10476 + else
10477 + sirf_runtime_suspend(&serdev->dev);
10478 +
10479 ++ if (data->wakeup)
10480 ++ free_irq(data->irq, data);
10481 ++
10482 + if (data->on_off)
10483 + regulator_disable(data->vcc);
10484 +
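
The sirf rework pulls on two threads: the regulator is now enabled before the wakeup IRQ is requested, so the handler can never fire against an unpowered chip, and the IRQ moves from devm_request_threaded_irq() to plain request_threaded_irq() with explicit free_irq(). A devm-managed IRQ would only be released after remove() returns - after the regulator is already off - so the teardown order has to be handled by hand:

    /* probe, after regulator_enable(data->vcc) has succeeded: */
    ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
                               IRQF_TRIGGER_RISING |
                               IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
                               "wakeup", data);
    if (ret)
            goto err_disable_vcc;

    /* remove, mirrored in reverse order: */
    if (data->wakeup)
            free_irq(data->irq, data);
    if (data->on_off)
            regulator_disable(data->vcc);
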
10485 +diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
10486 +index 91b90c0cea73..12acdac85820 100644
10487 +--- a/drivers/gpio/gpio-adnp.c
10488 ++++ b/drivers/gpio/gpio-adnp.c
10489 +@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
10490 + if (err < 0)
10491 + goto out;
10492 +
10493 +- if (err & BIT(pos))
10494 +- err = -EACCES;
10495 ++ if (value & BIT(pos)) {
10496 ++ err = -EPERM;
10497 ++ goto out;
10498 ++ }
10499 +
10500 + err = 0;
10501 +
10502 +diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
10503 +index 0ecd2369c2ca..a09d2f9ebacc 100644
10504 +--- a/drivers/gpio/gpio-exar.c
10505 ++++ b/drivers/gpio/gpio-exar.c
10506 +@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
10507 + mutex_init(&exar_gpio->lock);
10508 +
10509 + index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
10510 ++ if (index < 0)
10511 ++ goto err_destroy;
10512 +
10513 + sprintf(exar_gpio->name, "exar_gpio%d", index);
10514 + exar_gpio->gpio_chip.label = exar_gpio->name;
10515 +diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
10516 +index f4e9921fa966..7f33024b6d83 100644
10517 +--- a/drivers/gpio/gpio-omap.c
10518 ++++ b/drivers/gpio/gpio-omap.c
10519 +@@ -883,14 +883,16 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
10520 + if (trigger)
10521 + omap_set_gpio_triggering(bank, offset, trigger);
10522 +
10523 +- /* For level-triggered GPIOs, the clearing must be done after
10524 +- * the HW source is cleared, thus after the handler has run */
10525 +- if (bank->level_mask & BIT(offset)) {
10526 +- omap_set_gpio_irqenable(bank, offset, 0);
10527 ++ omap_set_gpio_irqenable(bank, offset, 1);
10528 ++
10529 ++ /*
10530 ++ * For level-triggered GPIOs, clearing must be done after the source
10531 ++ * is cleared, thus after the handler has run. OMAP4 needs this done
10532 ++	 * after enabling the interrupt to clear the wakeup status.
10533 ++ */
10534 ++ if (bank->level_mask & BIT(offset))
10535 + omap_clear_gpio_irqstatus(bank, offset);
10536 +- }
10537 +
10538 +- omap_set_gpio_irqenable(bank, offset, 1);
10539 + raw_spin_unlock_irqrestore(&bank->lock, flags);
10540 + }
10541 +
10542 +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
10543 +index 0dc96419efe3..d8a985fc6a5d 100644
10544 +--- a/drivers/gpio/gpio-pca953x.c
10545 ++++ b/drivers/gpio/gpio-pca953x.c
10546 +@@ -587,7 +587,8 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
10547 +
10548 + static void pca953x_irq_shutdown(struct irq_data *d)
10549 + {
10550 +- struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
10551 ++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
10552 ++ struct pca953x_chip *chip = gpiochip_get_data(gc);
10553 + u8 mask = 1 << (d->hwirq % BANK_SZ);
10554 +
10555 + chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
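
The pca953x fix hinges on what irq_data actually carries: for gpiolib-managed IRQ chips the chip data is the struct gpio_chip, not the driver context, so the driver struct must be recovered with gpiochip_get_data() rather than by casting the pointer directly. The two-step lookup:

    static void example_irq_shutdown(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
            struct pca953x_chip *chip = gpiochip_get_data(gc);
            u8 mask = 1 << (d->hwirq % BANK_SZ);

            chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask;
    }
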
10556 +diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
10557 +index a6e1891217e2..a1dd2f1c0d02 100644
10558 +--- a/drivers/gpio/gpiolib-of.c
10559 ++++ b/drivers/gpio/gpiolib-of.c
10560 +@@ -86,7 +86,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
10561 + if (IS_ENABLED(CONFIG_REGULATOR) &&
10562 + (of_device_is_compatible(np, "regulator-fixed") ||
10563 + of_device_is_compatible(np, "reg-fixed-voltage") ||
10564 +- of_device_is_compatible(np, "regulator-gpio"))) {
10565 ++ (of_device_is_compatible(np, "regulator-gpio") &&
10566 ++ strcmp(propname, "enable-gpio") == 0))) {
10567 + /*
10568 + * The regulator GPIO handles are specified such that the
10569 + * presence or absence of "enable-active-high" solely controls
10570 +diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
10571 +index bacdaef77b6c..278dd55ff476 100644
10572 +--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
10573 ++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
10574 +@@ -738,7 +738,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
10575 + }
10576 +
10577 + ring->vm_inv_eng = inv_eng - 1;
10578 +- change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
10579 ++ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
10580 +
10581 + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
10582 + ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
10583 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10584 +index 636d14a60952..83c8a0407537 100644
10585 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10586 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
10587 +@@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
10588 + return;
10589 + }
10590 +
10591 ++ /* dc_sink_create returns a new reference */
10592 + link->local_sink = sink;
10593 +
10594 + edid_status = dm_helpers_read_local_edid(
10595 +@@ -952,6 +953,8 @@ static int dm_resume(void *handle)
10596 + if (aconnector->fake_enable && aconnector->dc_link->local_sink)
10597 + aconnector->fake_enable = false;
10598 +
10599 ++ if (aconnector->dc_sink)
10600 ++ dc_sink_release(aconnector->dc_sink);
10601 + aconnector->dc_sink = NULL;
10602 + amdgpu_dm_update_connector_after_detect(aconnector);
10603 + mutex_unlock(&aconnector->hpd_lock);
10604 +@@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10605 +
10606 +
10607 + sink = aconnector->dc_link->local_sink;
10608 ++ if (sink)
10609 ++ dc_sink_retain(sink);
10610 +
10611 + /*
10612 + * Edid mgmt connector gets first update only in mode_valid hook and then
10613 +@@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10614 + * to it anymore after disconnect, so on next crtc to connector
10615 + * reshuffle by UMD we will get into unwanted dc_sink release
10616 + */
10617 +- if (aconnector->dc_sink != aconnector->dc_em_sink)
10618 +- dc_sink_release(aconnector->dc_sink);
10619 ++ dc_sink_release(aconnector->dc_sink);
10620 + }
10621 + aconnector->dc_sink = sink;
10622 ++ dc_sink_retain(aconnector->dc_sink);
10623 + amdgpu_dm_update_freesync_caps(connector,
10624 + aconnector->edid);
10625 + } else {
10626 + amdgpu_dm_update_freesync_caps(connector, NULL);
10627 +- if (!aconnector->dc_sink)
10628 ++ if (!aconnector->dc_sink) {
10629 + aconnector->dc_sink = aconnector->dc_em_sink;
10630 +- else if (aconnector->dc_sink != aconnector->dc_em_sink)
10631 + dc_sink_retain(aconnector->dc_sink);
10632 ++ }
10633 + }
10634 +
10635 + mutex_unlock(&dev->mode_config.mutex);
10636 ++
10637 ++ if (sink)
10638 ++ dc_sink_release(sink);
10639 + return;
10640 + }
10641 +
10642 +@@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10643 + * TODO: temporary guard to look for proper fix
10644 + * if this sink is MST sink, we should not do anything
10645 + */
10646 +- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
10647 ++ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
10648 ++ dc_sink_release(sink);
10649 + return;
10650 ++ }
10651 +
10652 + if (aconnector->dc_sink == sink) {
10653 + /*
10654 +@@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10655 + */
10656 + DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
10657 + aconnector->connector_id);
10658 ++ if (sink)
10659 ++ dc_sink_release(sink);
10660 + return;
10661 + }
10662 +
10663 +@@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10664 + amdgpu_dm_update_freesync_caps(connector, NULL);
10665 +
10666 + aconnector->dc_sink = sink;
10667 ++ dc_sink_retain(aconnector->dc_sink);
10668 + if (sink->dc_edid.length == 0) {
10669 + aconnector->edid = NULL;
10670 + drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
10671 +@@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
10672 + amdgpu_dm_update_freesync_caps(connector, NULL);
10673 + drm_connector_update_edid_property(connector, NULL);
10674 + aconnector->num_modes = 0;
10675 ++ dc_sink_release(aconnector->dc_sink);
10676 + aconnector->dc_sink = NULL;
10677 + aconnector->edid = NULL;
10678 + }
10679 +
10680 + mutex_unlock(&dev->mode_config.mutex);
10681 ++
10682 ++ if (sink)
10683 ++ dc_sink_release(sink);
10684 + }
10685 +
10686 + static void handle_hpd_irq(void *param)
10687 +@@ -2908,6 +2925,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
10688 + }
10689 + } else {
10690 + sink = aconnector->dc_sink;
10691 ++ dc_sink_retain(sink);
10692 + }
10693 +
10694 + stream = dc_create_stream_for_sink(sink);
10695 +@@ -2974,8 +2992,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
10696 + stream->ignore_msa_timing_param = true;
10697 +
10698 + finish:
10699 +- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
10700 +- dc_sink_release(sink);
10701 ++ dc_sink_release(sink);
10702 +
10703 + return stream;
10704 + }
10705 +@@ -3233,6 +3250,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
10706 + dm->backlight_dev = NULL;
10707 + }
10708 + #endif
10709 ++
10710 ++ if (aconnector->dc_em_sink)
10711 ++ dc_sink_release(aconnector->dc_em_sink);
10712 ++ aconnector->dc_em_sink = NULL;
10713 ++ if (aconnector->dc_sink)
10714 ++ dc_sink_release(aconnector->dc_sink);
10715 ++ aconnector->dc_sink = NULL;
10716 ++
10717 + drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
10718 + drm_connector_unregister(connector);
10719 + drm_connector_cleanup(connector);
10720 +@@ -3330,10 +3355,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
10721 + (edid->extensions + 1) * EDID_LENGTH,
10722 + &init_params);
10723 +
10724 +- if (aconnector->base.force == DRM_FORCE_ON)
10725 ++ if (aconnector->base.force == DRM_FORCE_ON) {
10726 + aconnector->dc_sink = aconnector->dc_link->local_sink ?
10727 + aconnector->dc_link->local_sink :
10728 + aconnector->dc_em_sink;
10729 ++ dc_sink_retain(aconnector->dc_sink);
10730 ++ }
10731 + }
10732 +
10733 + static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
10734 +@@ -4948,7 +4975,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
10735 + static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
10736 + struct dc_stream_state *stream_state)
10737 + {
10738 +- stream_state->mode_changed = crtc_state->mode_changed;
10739 ++ stream_state->mode_changed =
10740 ++ crtc_state->mode_changed || crtc_state->active_changed;
10741 + }
10742 +
10743 + static int amdgpu_dm_atomic_commit(struct drm_device *dev,
10744 +@@ -4969,10 +4997,22 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
10745 + */
10746 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10747 + struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10748 ++ struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10749 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10750 +
10751 +- if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
10752 ++ if (drm_atomic_crtc_needs_modeset(new_crtc_state)
10753 ++ && dm_old_crtc_state->stream) {
10754 ++ /*
10755 ++ * CRC capture was enabled but not disabled.
10756 ++ * Release the vblank reference.
10757 ++ */
10758 ++ if (dm_new_crtc_state->crc_enabled) {
10759 ++ drm_crtc_vblank_put(crtc);
10760 ++ dm_new_crtc_state->crc_enabled = false;
10761 ++ }
10762 ++
10763 + manage_dm_interrupts(adev, acrtc, false);
10764 ++ }
10765 + }
10766 + /*
10767 + * Add check here for SoC's that support hardware cursor plane, to
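
Nearly every amdgpu_dm change above enforces one invariant: each pointer that stores a dc_sink owns a reference, taken with dc_sink_retain() (or returned fresh by dc_sink_create()) and dropped with dc_sink_release() before the pointer is overwritten or cleared. A minimal userspace sketch of that discipline, with a hypothetical struct sink standing in for dc_sink:

    #include <stdio.h>
    #include <stdlib.h>

    struct sink { int refcount; };

    static struct sink *sink_create(void)   /* returns a new reference */
    {
        struct sink *s = malloc(sizeof(*s));
        s->refcount = 1;
        return s;
    }

    static void sink_retain(struct sink *s) { s->refcount++; }

    static void sink_release(struct sink *s)
    {
        if (--s->refcount == 0) {
            free(s);
            printf("sink freed\n");
        }
    }

    int main(void)
    {
        struct sink *local = sink_create(); /* e.g. link->local_sink */
        struct sink *conn;                  /* e.g. aconnector->dc_sink */

        sink_retain(local);                 /* a second pointer stores it */
        conn = local;

        sink_release(conn);                 /* connector drops its copy */
        conn = NULL;
        sink_release(local);                /* creator drops the last ref */
        return 0;
    }
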
10768 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
10769 +index f088ac585978..26b651148c67 100644
10770 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
10771 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
10772 +@@ -66,6 +66,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
10773 + {
10774 + struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
10775 + struct dc_stream_state *stream_state = crtc_state->stream;
10776 ++ bool enable;
10777 +
10778 + enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
10779 +
10780 +@@ -80,28 +81,27 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
10781 + return -EINVAL;
10782 + }
10783 +
10784 ++ enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
10785 ++
10786 ++ if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
10787 ++ enable, enable))
10788 ++ return -EINVAL;
10789 ++
10790 + /* When enabling CRC, we should also disable dithering. */
10791 +- if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
10792 +- if (dc_stream_configure_crc(stream_state->ctx->dc,
10793 +- stream_state,
10794 +- true, true)) {
10795 +- crtc_state->crc_enabled = true;
10796 +- dc_stream_set_dither_option(stream_state,
10797 +- DITHER_OPTION_TRUN8);
10798 +- }
10799 +- else
10800 +- return -EINVAL;
10801 +- } else {
10802 +- if (dc_stream_configure_crc(stream_state->ctx->dc,
10803 +- stream_state,
10804 +- false, false)) {
10805 +- crtc_state->crc_enabled = false;
10806 +- dc_stream_set_dither_option(stream_state,
10807 +- DITHER_OPTION_DEFAULT);
10808 +- }
10809 +- else
10810 +- return -EINVAL;
10811 +- }
10812 ++ dc_stream_set_dither_option(stream_state,
10813 ++ enable ? DITHER_OPTION_TRUN8
10814 ++ : DITHER_OPTION_DEFAULT);
10815 ++
10816 ++ /*
10817 ++ * Reading the CRC requires the vblank interrupt handler to be
10818 ++ * enabled. Keep a reference until CRC capture stops.
10819 ++ */
10820 ++ if (!crtc_state->crc_enabled && enable)
10821 ++ drm_crtc_vblank_get(crtc);
10822 ++ else if (crtc_state->crc_enabled && !enable)
10823 ++ drm_crtc_vblank_put(crtc);
10824 ++
10825 ++ crtc_state->crc_enabled = enable;
10826 +
10827 + /* Reset crc_skipped on dm state */
10828 + crtc_state->crc_skip_count = 0;
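
The CRC hunk holds a vblank reference only while capture is active: it takes one on the false-to-true transition and drops it on true-to-false, so repeated enables or disables never unbalance the count. A small sketch of that edge-triggered get/put pattern, with a plain counter standing in for drm_crtc_vblank_get()/drm_crtc_vblank_put():

    #include <stdbool.h>
    #include <stdio.h>

    static int vblank_refs;

    static void set_crc_capture(bool *enabled, bool enable)
    {
        /* Act only on state transitions so get/put stay balanced. */
        if (!*enabled && enable)
            vblank_refs++;          /* drm_crtc_vblank_get() */
        else if (*enabled && !enable)
            vblank_refs--;          /* drm_crtc_vblank_put() */
        *enabled = enable;
    }

    int main(void)
    {
        bool crc_enabled = false;

        set_crc_capture(&crc_enabled, true);
        set_crc_capture(&crc_enabled, true);    /* no double get */
        set_crc_capture(&crc_enabled, false);
        printf("refs held: %d\n", vblank_refs); /* 0 */
        return 0;
    }
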
10829 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
10830 +index 1b0d209d8367..3b95a637b508 100644
10831 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
10832 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
10833 +@@ -239,6 +239,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
10834 + &init_params);
10835 +
10836 + dc_sink->priv = aconnector;
10837 ++ /* dc_link_add_remote_sink returns a new reference */
10838 + aconnector->dc_sink = dc_sink;
10839 +
10840 + if (aconnector->dc_sink)
10841 +diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
10842 +index 43e4a2be0fa6..57cc11d0e9a5 100644
10843 +--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
10844 ++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
10845 +@@ -1355,12 +1355,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
10846 + struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
10847 + bool res;
10848 +
10849 +- kernel_fpu_begin();
10850 +-
10851 + /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
10852 + res = dm_pp_get_clock_levels_by_type_with_voltage(
10853 + ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
10854 +
10855 ++ kernel_fpu_begin();
10856 ++
10857 + if (res)
10858 + res = verify_clock_values(&fclks);
10859 +
10860 +@@ -1379,9 +1379,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
10861 + } else
10862 + BREAK_TO_DEBUGGER();
10863 +
10864 ++ kernel_fpu_end();
10865 ++
10866 + res = dm_pp_get_clock_levels_by_type_with_voltage(
10867 + ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
10868 +
10869 ++ kernel_fpu_begin();
10870 ++
10871 + if (res)
10872 + res = verify_clock_values(&dcfclks);
10873 +
10874 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
10875 +index 5fd52094d459..1f92e7e8e3d3 100644
10876 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
10877 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
10878 +@@ -1078,6 +1078,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
10879 + /* pplib is notified if disp_num changed */
10880 + dc->hwss.optimize_bandwidth(dc, context);
10881 +
10882 ++ for (i = 0; i < context->stream_count; i++)
10883 ++ context->streams[i]->mode_changed = false;
10884 ++
10885 + dc_release_state(dc->current_state);
10886 +
10887 + dc->current_state = context;
10888 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
10889 +index b0265dbebd4c..583eb367850f 100644
10890 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
10891 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
10892 +@@ -792,6 +792,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
10893 + sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
10894 + sink->converter_disable_audio = converter_disable_audio;
10895 +
10896 ++ /* dc_sink_create returns a new reference */
10897 + link->local_sink = sink;
10898 +
10899 + edid_status = dm_helpers_read_local_edid(
10900 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
10901 +index 41883c981789..a684b38332ac 100644
10902 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
10903 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
10904 +@@ -2334,9 +2334,10 @@ static void dcn10_apply_ctx_for_surface(
10905 + }
10906 + }
10907 +
10908 +- if (!pipe_ctx->plane_state &&
10909 +- old_pipe_ctx->plane_state &&
10910 +- old_pipe_ctx->stream_res.tg == tg) {
10911 ++ if ((!pipe_ctx->plane_state ||
10912 ++ pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
10913 ++ old_pipe_ctx->plane_state &&
10914 ++ old_pipe_ctx->stream_res.tg == tg) {
10915 +
10916 + dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
10917 + removed_pipe[i] = true;
10918 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
10919 +index c8f5c00dd1e7..86e3fb27c125 100644
10920 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
10921 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
10922 +@@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
10923 +
10924 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
10925 + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
10926 +- ixSMU_PM_STATUS_94, 0);
10927 ++ ixSMU_PM_STATUS_95, 0);
10928 +
10929 + for (i = 0; i < 10; i++) {
10930 +- mdelay(1);
10931 ++ mdelay(500);
10932 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
10933 + tmp = cgs_read_ind_register(hwmgr->device,
10934 + CGS_IND_REG__SMC,
10935 +- ixSMU_PM_STATUS_94);
10936 ++ ixSMU_PM_STATUS_95);
10937 + if (tmp != 0)
10938 + break;
10939 + }
10940 +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
10941 +index f4290f6b0c38..2323ba9310d9 100644
10942 +--- a/drivers/gpu/drm/drm_atomic_helper.c
10943 ++++ b/drivers/gpu/drm/drm_atomic_helper.c
10944 +@@ -1611,6 +1611,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
10945 + if (old_plane_state->fb != new_plane_state->fb)
10946 + return -EINVAL;
10947 +
10948 ++ /*
10949 ++ * FIXME: Since prepare_fb and cleanup_fb are always called on
10950 ++ * the new_plane_state for async updates, we need to block framebuffer
10951 ++ * changes. This prevents use of a fb that's been cleaned up and
10952 ++ * double cleanups from occurring.
10953 ++ */
10954 ++ if (old_plane_state->fb != new_plane_state->fb)
10955 ++ return -EINVAL;
10956 ++
10957 + funcs = plane->helper_private;
10958 + if (!funcs->atomic_async_update)
10959 + return -EINVAL;
10960 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
10961 +index 529414556962..1a244c53252c 100644
10962 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
10963 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
10964 +@@ -3286,6 +3286,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
10965 + msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
10966 + msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
10967 + msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
10968 ++ msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
10969 + }
10970 + msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
10971 + msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
10972 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
10973 +index d73703a695e8..edd8cb497f3b 100644
10974 +--- a/drivers/gpu/drm/drm_fb_helper.c
10975 ++++ b/drivers/gpu/drm/drm_fb_helper.c
10976 +@@ -2891,7 +2891,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
10977 + return 0;
10978 +
10979 + err_drm_fb_helper_fini:
10980 +- drm_fb_helper_fini(fb_helper);
10981 ++ drm_fb_helper_fbdev_teardown(dev);
10982 +
10983 + return ret;
10984 + }
10985 +@@ -3170,9 +3170,7 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
10986 +
10987 + static int drm_fbdev_client_restore(struct drm_client_dev *client)
10988 + {
10989 +- struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
10990 +-
10991 +- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
10992 ++ drm_fb_helper_lastclose(client->dev);
10993 +
10994 + return 0;
10995 + }
10996 +diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
10997 +index 004191d01772..15b919f90c5a 100644
10998 +--- a/drivers/gpu/drm/drm_mode_object.c
10999 ++++ b/drivers/gpu/drm/drm_mode_object.c
11000 +@@ -465,6 +465,7 @@ static int set_property_atomic(struct drm_mode_object *obj,
11001 +
11002 + drm_modeset_acquire_init(&ctx, 0);
11003 + state->acquire_ctx = &ctx;
11004 ++
11005 + retry:
11006 + if (prop == state->dev->mode_config.dpms_property) {
11007 + if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
11008 +diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
11009 +index 5f650d8fc66b..4cfb56893b7f 100644
11010 +--- a/drivers/gpu/drm/drm_plane.c
11011 ++++ b/drivers/gpu/drm/drm_plane.c
11012 +@@ -220,6 +220,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
11013 + format_modifier_count++;
11014 + }
11015 +
11016 ++ if (format_modifier_count)
11017 ++ config->allow_fb_modifiers = true;
11018 ++
11019 + plane->modifier_count = format_modifier_count;
11020 + plane->modifiers = kmalloc_array(format_modifier_count,
11021 + sizeof(format_modifiers[0]),
11022 +diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
11023 +index 77ae634eb11c..bd95fd6b4ac8 100644
11024 +--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
11025 ++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
11026 +@@ -1446,7 +1446,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
11027 + }
11028 +
11029 + if (index_mode) {
11030 +- if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
11031 ++ if (guest_gma >= I915_GTT_PAGE_SIZE) {
11032 + ret = -EFAULT;
11033 + goto err;
11034 + }
11035 +diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
11036 +index c7103dd2d8d5..563ab8590061 100644
11037 +--- a/drivers/gpu/drm/i915/gvt/gtt.c
11038 ++++ b/drivers/gpu/drm/i915/gvt/gtt.c
11039 +@@ -1942,7 +1942,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
11040 + */
11041 + void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
11042 + {
11043 +- atomic_dec(&mm->pincount);
11044 ++ atomic_dec_if_positive(&mm->pincount);
11045 + }
11046 +
11047 + /**
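
intel_vgpu_unpin_mm() now uses atomic_dec_if_positive() so an unbalanced unpin cannot drive the pin count negative. A userspace sketch of dec-if-positive built on C11 atomics (loosely modeled on the kernel helper, which returns the would-be decremented value):

    #include <stdatomic.h>
    #include <stdio.h>

    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        /* Retry until we observe a non-positive value or win the CAS;
         * a failed CAS reloads `old` for the next attempt. */
        while (old > 0 &&
               !atomic_compare_exchange_weak(v, &old, old - 1))
            ;
        return old - 1; /* negative result means no decrement happened */
    }

    int main(void)
    {
        atomic_int pincount = 1;

        printf("%d\n", dec_if_positive(&pincount)); /* 0 */
        printf("%d\n", dec_if_positive(&pincount)); /* -1, no underflow */
        return 0;
    }
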
11048 +diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
11049 +index 55bb7885e228..8fff49affc11 100644
11050 +--- a/drivers/gpu/drm/i915/gvt/scheduler.c
11051 ++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
11052 +@@ -1475,8 +1475,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
11053 + intel_runtime_pm_put(dev_priv);
11054 + }
11055 +
11056 +- if (ret && (vgpu_is_vm_unhealthy(ret))) {
11057 +- enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
11058 ++ if (ret) {
11059 ++ if (vgpu_is_vm_unhealthy(ret))
11060 ++ enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
11061 + intel_vgpu_destroy_workload(workload);
11062 + return ERR_PTR(ret);
11063 + }
11064 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
11065 +index b1c31967194b..489c1e656ff6 100644
11066 +--- a/drivers/gpu/drm/i915/i915_drv.h
11067 ++++ b/drivers/gpu/drm/i915/i915_drv.h
11068 +@@ -2293,7 +2293,8 @@ intel_info(const struct drm_i915_private *dev_priv)
11069 + INTEL_DEVID(dev_priv) == 0x5915 || \
11070 + INTEL_DEVID(dev_priv) == 0x591E)
11071 + #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
11072 +- INTEL_DEVID(dev_priv) == 0x87C0)
11073 ++ INTEL_DEVID(dev_priv) == 0x87C0 || \
11074 ++ INTEL_DEVID(dev_priv) == 0x87CA)
11075 + #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
11076 + (dev_priv)->info.gt == 2)
11077 + #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
11078 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
11079 +index 067054cf4a86..60bed3f27775 100644
11080 +--- a/drivers/gpu/drm/i915/i915_reg.h
11081 ++++ b/drivers/gpu/drm/i915/i915_reg.h
11082 +@@ -9205,7 +9205,7 @@ enum skl_power_gate {
11083 + #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
11084 + _TRANS_DDI_FUNC_CTL2_A)
11085 + #define PORT_SYNC_MODE_ENABLE (1 << 4)
11086 +-#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
11087 ++#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
11088 + #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
11089 + #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
11090 +
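
The i915_reg.h fix replaces a stray comparison with the intended shift: ((x) < 0) evaluates to 0 or 1, so every master-select value collapsed to 0. A two-line demonstration (macro names shortened from the hunk; the << 0 is kept for symmetry with other field macros):

    #include <stdio.h>

    #define MASTER_SELECT_BROKEN(x) ((x) < 0)  /* boolean: always 0 here */
    #define MASTER_SELECT_FIXED(x)  ((x) << 0) /* value at bit position 0 */

    int main(void)
    {
        printf("broken: %d\n", MASTER_SELECT_BROKEN(5)); /* 0 */
        printf("fixed:  %d\n", MASTER_SELECT_FIXED(5));  /* 5 */
        return 0;
    }
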
11091 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
11092 +index 22a74608c6e4..dcd1df5322e8 100644
11093 +--- a/drivers/gpu/drm/i915/intel_dp.c
11094 ++++ b/drivers/gpu/drm/i915/intel_dp.c
11095 +@@ -1845,42 +1845,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
11096 + return false;
11097 + }
11098 +
11099 +-/* Optimize link config in order: max bpp, min lanes, min clock */
11100 +-static bool
11101 +-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
11102 +- struct intel_crtc_state *pipe_config,
11103 +- const struct link_config_limits *limits)
11104 +-{
11105 +- struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
11106 +- int bpp, clock, lane_count;
11107 +- int mode_rate, link_clock, link_avail;
11108 +-
11109 +- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
11110 +- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
11111 +- bpp);
11112 +-
11113 +- for (lane_count = limits->min_lane_count;
11114 +- lane_count <= limits->max_lane_count;
11115 +- lane_count <<= 1) {
11116 +- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
11117 +- link_clock = intel_dp->common_rates[clock];
11118 +- link_avail = intel_dp_max_data_rate(link_clock,
11119 +- lane_count);
11120 +-
11121 +- if (mode_rate <= link_avail) {
11122 +- pipe_config->lane_count = lane_count;
11123 +- pipe_config->pipe_bpp = bpp;
11124 +- pipe_config->port_clock = link_clock;
11125 +-
11126 +- return true;
11127 +- }
11128 +- }
11129 +- }
11130 +- }
11131 +-
11132 +- return false;
11133 +-}
11134 +-
11135 + static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
11136 + {
11137 + int i, num_bpc;
11138 +@@ -2013,15 +1977,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
11139 + limits.min_bpp = 6 * 3;
11140 + limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
11141 +
11142 +- if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
11143 ++ if (intel_dp_is_edp(intel_dp)) {
11144 + /*
11145 + * Use the maximum clock and number of lanes the eDP panel
11146 +- * advertizes being capable of. The eDP 1.3 and earlier panels
11147 +- * are generally designed to support only a single clock and
11148 +- * lane configuration, and typically these values correspond to
11149 +- * the native resolution of the panel. With eDP 1.4 rate select
11150 +- * and DSC, this is decreasingly the case, and we need to be
11151 +- * able to select less than maximum link config.
11152 ++ * advertises being capable of. The panels are generally
11153 ++ * designed to support only a single clock and lane
11154 ++ * configuration, and typically these values correspond to the
11155 ++ * native resolution of the panel.
11156 + */
11157 + limits.min_lane_count = limits.max_lane_count;
11158 + limits.min_clock = limits.max_clock;
11159 +@@ -2035,22 +1997,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
11160 + intel_dp->common_rates[limits.max_clock],
11161 + limits.max_bpp, adjusted_mode->crtc_clock);
11162 +
11163 +- if (intel_dp_is_edp(intel_dp))
11164 +- /*
11165 +- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
11166 +- * section A.1: "It is recommended that the minimum number of
11167 +- * lanes be used, using the minimum link rate allowed for that
11168 +- * lane configuration."
11169 +- *
11170 +- * Note that we use the max clock and lane count for eDP 1.3 and
11171 +- * earlier, and fast vs. wide is irrelevant.
11172 +- */
11173 +- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
11174 +- &limits);
11175 +- else
11176 +- /* Optimize for slow and wide. */
11177 +- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
11178 +- &limits);
11179 ++ /*
11180 ++ * Optimize for slow and wide. This is the place to add an alternative
11181 ++ * optimization policy.
11182 ++ */
11183 ++ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
11184 +
11185 + /* enable compression if the mode doesn't fit available BW */
11186 + if (!ret) {
11187 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
11188 +index cb307a2abf06..7316b4ab1b85 100644
11189 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
11190 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
11191 +@@ -23,11 +23,14 @@ struct dpu_mdss {
11192 + struct dpu_irq_controller irq_controller;
11193 + };
11194 +
11195 +-static irqreturn_t dpu_mdss_irq(int irq, void *arg)
11196 ++static void dpu_mdss_irq(struct irq_desc *desc)
11197 + {
11198 +- struct dpu_mdss *dpu_mdss = arg;
11199 ++ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
11200 ++ struct irq_chip *chip = irq_desc_get_chip(desc);
11201 + u32 interrupts;
11202 +
11203 ++ chained_irq_enter(chip, desc);
11204 ++
11205 + interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
11206 +
11207 + while (interrupts) {
11208 +@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
11209 + hwirq);
11210 + if (mapping == 0) {
11211 + DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
11212 +- return IRQ_NONE;
11213 ++ break;
11214 + }
11215 +
11216 + rc = generic_handle_irq(mapping);
11217 + if (rc < 0) {
11218 + DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
11219 + hwirq, mapping, rc);
11220 +- return IRQ_NONE;
11221 ++ break;
11222 + }
11223 +
11224 + interrupts &= ~(1 << hwirq);
11225 + }
11226 +
11227 +- return IRQ_HANDLED;
11228 ++ chained_irq_exit(chip, desc);
11229 + }
11230 +
11231 + static void dpu_mdss_irq_mask(struct irq_data *irqd)
11232 +@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
11233 + .irq_unmask = dpu_mdss_irq_unmask,
11234 + };
11235 +
11236 ++static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
11237 ++
11238 + static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
11239 + unsigned int irq, irq_hw_number_t hwirq)
11240 + {
11241 + struct dpu_mdss *dpu_mdss = domain->host_data;
11242 +- int ret;
11243 +
11244 ++ irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
11245 + irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
11246 +- ret = irq_set_chip_data(irq, dpu_mdss);
11247 +-
11248 +- return ret;
11249 ++ return irq_set_chip_data(irq, dpu_mdss);
11250 + }
11251 +
11252 + static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
11253 +@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
11254 + struct msm_drm_private *priv = dev->dev_private;
11255 + struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
11256 + struct dss_module_power *mp = &dpu_mdss->mp;
11257 ++ int irq;
11258 +
11259 + pm_runtime_suspend(dev->dev);
11260 + pm_runtime_disable(dev->dev);
11261 + _dpu_mdss_irq_domain_fini(dpu_mdss);
11262 +- free_irq(platform_get_irq(pdev, 0), dpu_mdss);
11263 ++ irq = platform_get_irq(pdev, 0);
11264 ++ irq_set_chained_handler_and_data(irq, NULL, NULL);
11265 + msm_dss_put_clk(mp->clk_config, mp->num_clk);
11266 + devm_kfree(&pdev->dev, mp->clk_config);
11267 +
11268 +@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
11269 + struct dpu_mdss *dpu_mdss;
11270 + struct dss_module_power *mp;
11271 + int ret = 0;
11272 ++ int irq;
11273 +
11274 + dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
11275 + if (!dpu_mdss)
11276 +@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
11277 + if (ret)
11278 + goto irq_domain_error;
11279 +
11280 +- ret = request_irq(platform_get_irq(pdev, 0),
11281 +- dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
11282 +- if (ret) {
11283 +- DPU_ERROR("failed to init irq: %d\n", ret);
11284 ++ irq = platform_get_irq(pdev, 0);
11285 ++ if (irq < 0)
11286 + goto irq_error;
11287 +- }
11288 ++
11289 ++ irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
11290 ++ dpu_mdss);
11291 +
11292 + pm_runtime_enable(dev->dev);
11293 +
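
The dpu_mdss rework turns a request_irq() handler into a chained handler that walks the HW_INTR_STATUS word and forwards each pending bit to its mapped virtual interrupt, breaking out of the loop on error rather than returning IRQ_NONE. A userspace sketch of that dispatch loop, with a direct handler call standing in for the IRQ-domain lookup and generic_handle_irq():

    #include <stdint.h>
    #include <stdio.h>

    static void intf_handler(unsigned long hwirq)
    {
        printf("handled hwirq %lu\n", hwirq);
    }

    /* Forward every set bit in the status word to its handler, mirroring
     * the while (interrupts) loop in dpu_mdss_irq(). */
    static void dispatch(uint32_t status)
    {
        while (status) {
            unsigned long hwirq = __builtin_ctz(status); /* lowest set bit */

            intf_handler(hwirq);       /* generic_handle_irq(mapping) */
            status &= ~(1u << hwirq);
        }
    }

    int main(void)
    {
        dispatch(0x0a); /* bits 1 and 3 pending */
        return 0;
    }
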
11294 +diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
11295 +index 6a4ca139cf5d..8fd8124d72ba 100644
11296 +--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
11297 ++++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
11298 +@@ -750,7 +750,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
11299 + /* Disable the crtc to ensure a full modeset is
11300 + * performed whenever it's turned on again. */
11301 + if (crtc)
11302 +- drm_crtc_force_disable(crtc);
11303 ++ drm_crtc_helper_set_mode(crtc, &crtc->mode,
11304 ++ crtc->x, crtc->y,
11305 ++ crtc->primary->fb);
11306 + }
11307 +
11308 + return 0;
11309 +diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
11310 +index f471537c852f..1e14c6921454 100644
11311 +--- a/drivers/gpu/drm/radeon/evergreen_cs.c
11312 ++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
11313 +@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
11314 + return -EINVAL;
11315 + }
11316 + ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
11317 ++ break;
11318 + case CB_TARGET_MASK:
11319 + track->cb_target_mask = radeon_get_ib_value(p, idx);
11320 + track->cb_dirty = true;
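
The evergreen_cs change adds a missing break, so the register case above it no longer falls through into the CB_TARGET_MASK handling. A minimal reproduction of the fall-through hazard:

    #include <stdio.h>

    static void handle(int reg)
    {
        switch (reg) {
        case 1:
            printf("case 1\n");
            break;      /* without this, case 2's code also runs */
        case 2:
            printf("case 2\n");
            break;
        }
    }

    int main(void)
    {
        handle(1); /* prints only "case 1" once the break is present */
        return 0;
    }
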
11321 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
11322 +index 9c7007d45408..f9a90ff24e6d 100644
11323 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
11324 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
11325 +@@ -331,6 +331,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
11326 + dev_dbg(rcdu->dev,
11327 + "connected entity %pOF is disabled, skipping\n",
11328 + entity);
11329 ++ of_node_put(entity);
11330 + return -ENODEV;
11331 + }
11332 +
11333 +@@ -366,6 +367,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
11334 + dev_warn(rcdu->dev,
11335 + "no encoder found for endpoint %pOF, skipping\n",
11336 + ep->local_node);
11337 ++ of_node_put(entity);
11338 + return -ENODEV;
11339 + }
11340 +
11341 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
11342 +index fb70fb486fbf..cdbb47566cac 100644
11343 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
11344 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
11345 +@@ -511,6 +511,18 @@ static void vop_core_clks_disable(struct vop *vop)
11346 + clk_disable(vop->hclk);
11347 + }
11348 +
11349 ++static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
11350 ++{
11351 ++ if (win->phy->scl && win->phy->scl->ext) {
11352 ++ VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
11353 ++ VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
11354 ++ VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
11355 ++ VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
11356 ++ }
11357 ++
11358 ++ VOP_WIN_SET(vop, win, enable, 0);
11359 ++}
11360 ++
11361 + static int vop_enable(struct drm_crtc *crtc)
11362 + {
11363 + struct vop *vop = to_vop(crtc);
11364 +@@ -556,7 +568,7 @@ static int vop_enable(struct drm_crtc *crtc)
11365 + struct vop_win *vop_win = &vop->win[i];
11366 + const struct vop_win_data *win = vop_win->data;
11367 +
11368 +- VOP_WIN_SET(vop, win, enable, 0);
11369 ++ vop_win_disable(vop, win);
11370 + }
11371 + spin_unlock(&vop->reg_lock);
11372 +
11373 +@@ -700,7 +712,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
11374 +
11375 + spin_lock(&vop->reg_lock);
11376 +
11377 +- VOP_WIN_SET(vop, win, enable, 0);
11378 ++ vop_win_disable(vop, win);
11379 +
11380 + spin_unlock(&vop->reg_lock);
11381 + }
11382 +@@ -1476,7 +1488,7 @@ static int vop_initial(struct vop *vop)
11383 + int channel = i * 2 + 1;
11384 +
11385 + VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
11386 +- VOP_WIN_SET(vop, win, enable, 0);
11387 ++ vop_win_disable(vop, win);
11388 + VOP_WIN_SET(vop, win, gate, 1);
11389 + }
11390 +
11391 +diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
11392 +index e2942c9a11a7..35ddbec1375a 100644
11393 +--- a/drivers/gpu/drm/scheduler/sched_entity.c
11394 ++++ b/drivers/gpu/drm/scheduler/sched_entity.c
11395 +@@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
11396 + {
11397 + int i;
11398 +
11399 +- if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
11400 ++ if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
11401 + return -EINVAL;
11402 +
11403 + memset(entity, 0, sizeof(struct drm_sched_entity));
11404 + INIT_LIST_HEAD(&entity->list);
11405 +- entity->rq = rq_list[0];
11406 ++ entity->rq = NULL;
11407 + entity->guilty = guilty;
11408 + entity->num_rq_list = num_rq_list;
11409 + entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
11410 +@@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
11411 +
11412 + for (i = 0; i < num_rq_list; ++i)
11413 + entity->rq_list[i] = rq_list[i];
11414 ++
11415 ++ if (num_rq_list)
11416 ++ entity->rq = rq_list[0];
11417 ++
11418 + entity->last_scheduled = NULL;
11419 +
11420 + spin_lock_init(&entity->rq_lock);
11421 +@@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
11422 + struct task_struct *last_user;
11423 + long ret = timeout;
11424 +
11425 ++ if (!entity->rq)
11426 ++ return 0;
11427 ++
11428 + sched = entity->rq->sched;
11429 + /**
11430 + * The client will not queue more IBs during this fini, consume existing
11431 +@@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
11432 + */
11433 + void drm_sched_entity_fini(struct drm_sched_entity *entity)
11434 + {
11435 +- struct drm_gpu_scheduler *sched;
11436 ++ struct drm_gpu_scheduler *sched = NULL;
11437 +
11438 +- sched = entity->rq->sched;
11439 +- drm_sched_rq_remove_entity(entity->rq, entity);
11440 ++ if (entity->rq) {
11441 ++ sched = entity->rq->sched;
11442 ++ drm_sched_rq_remove_entity(entity->rq, entity);
11443 ++ }
11444 +
11445 + /* Consumption of existing IBs wasn't completed. Forcefully
11446 + * remove them here.
11447 + */
11448 + if (spsc_queue_peek(&entity->job_queue)) {
11449 +- /* Park the kernel for a moment to make sure it isn't processing
11450 +- * our enity.
11451 +- */
11452 +- kthread_park(sched->thread);
11453 +- kthread_unpark(sched->thread);
11454 ++ if (sched) {
11455 ++ /* Park the kernel for a moment to make sure it isn't processing
11456 ++ * our entity.
11457 ++ */
11458 ++ kthread_park(sched->thread);
11459 ++ kthread_unpark(sched->thread);
11460 ++ }
11461 + if (entity->dependency) {
11462 + dma_fence_remove_callback(entity->dependency,
11463 + &entity->cb);
11464 +@@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
11465 + for (i = 0; i < entity->num_rq_list; ++i)
11466 + drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
11467 +
11468 +- drm_sched_rq_remove_entity(entity->rq, entity);
11469 +- drm_sched_entity_set_rq_priority(&entity->rq, priority);
11470 +- drm_sched_rq_add_entity(entity->rq, entity);
11471 ++ if (entity->rq) {
11472 ++ drm_sched_rq_remove_entity(entity->rq, entity);
11473 ++ drm_sched_entity_set_rq_priority(&entity->rq, priority);
11474 ++ drm_sched_rq_add_entity(entity->rq, entity);
11475 ++ }
11476 +
11477 + spin_unlock(&entity->rq_lock);
11478 + }
11479 +diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
11480 +index dc47720c99ba..39d8509d96a0 100644
11481 +--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
11482 ++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
11483 +@@ -48,8 +48,13 @@ static enum drm_mode_status
11484 + sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
11485 + const struct drm_display_mode *mode)
11486 + {
11487 +- /* This is max for HDMI 2.0b (4K@60Hz) */
11488 +- if (mode->clock > 594000)
11489 ++ /*
11490 ++ * The controller supports a maximum of 594 MHz, which corresponds
11491 ++ * to 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
11492 ++ * 340 MHz, scrambling has to be enabled. Because scrambling is
11493 ++ * not yet implemented, just limit to 340 MHz for now.
11494 ++ */
11495 ++ if (mode->clock > 340000)
11496 + return MODE_CLOCK_HIGH;
11497 +
11498 + return MODE_OK;
11499 +diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
11500 +index a63e3011e971..bd4f0b88bbd7 100644
11501 +--- a/drivers/gpu/drm/udl/udl_drv.c
11502 ++++ b/drivers/gpu/drm/udl/udl_drv.c
11503 +@@ -51,6 +51,7 @@ static struct drm_driver driver = {
11504 + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
11505 + .load = udl_driver_load,
11506 + .unload = udl_driver_unload,
11507 ++ .release = udl_driver_release,
11508 +
11509 + /* gem hooks */
11510 + .gem_free_object_unlocked = udl_gem_free_object,
11511 +diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
11512 +index e9e9b1ff678e..4ae67d882eae 100644
11513 +--- a/drivers/gpu/drm/udl/udl_drv.h
11514 ++++ b/drivers/gpu/drm/udl/udl_drv.h
11515 +@@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
11516 +
11517 + int udl_driver_load(struct drm_device *dev, unsigned long flags);
11518 + void udl_driver_unload(struct drm_device *dev);
11519 ++void udl_driver_release(struct drm_device *dev);
11520 +
11521 + int udl_fbdev_init(struct drm_device *dev);
11522 + void udl_fbdev_cleanup(struct drm_device *dev);
11523 +diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
11524 +index 1b014d92855b..19055dda3140 100644
11525 +--- a/drivers/gpu/drm/udl/udl_main.c
11526 ++++ b/drivers/gpu/drm/udl/udl_main.c
11527 +@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
11528 + udl_free_urb_list(dev);
11529 +
11530 + udl_fbdev_cleanup(dev);
11531 +- udl_modeset_cleanup(dev);
11532 + kfree(udl);
11533 + }
11534 ++
11535 ++void udl_driver_release(struct drm_device *dev)
11536 ++{
11537 ++ udl_modeset_cleanup(dev);
11538 ++ drm_dev_fini(dev);
11539 ++ kfree(dev);
11540 ++}
11541 +diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
11542 +index 5930facd6d2d..11a8f99ba18c 100644
11543 +--- a/drivers/gpu/drm/vgem/vgem_drv.c
11544 ++++ b/drivers/gpu/drm/vgem/vgem_drv.c
11545 +@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
11546 + ret = drm_gem_handle_create(file, &obj->base, handle);
11547 + drm_gem_object_put_unlocked(&obj->base);
11548 + if (ret)
11549 +- goto err;
11550 ++ return ERR_PTR(ret);
11551 +
11552 + return &obj->base;
11553 +-
11554 +-err:
11555 +- __vgem_gem_destroy(obj);
11556 +- return ERR_PTR(ret);
11557 + }
11558 +
11559 + static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
11560 +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
11561 +index f39a183d59c2..e7e946035027 100644
11562 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c
11563 ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
11564 +@@ -28,10 +28,21 @@
11565 + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
11566 + uint32_t *resid)
11567 + {
11568 ++#if 0
11569 + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
11570 +
11571 + if (handle < 0)
11572 + return handle;
11573 ++#else
11574 ++ static int handle;
11575 ++
11576 ++ /*
11577 ++ * FIXME: dirty hack to avoid re-using IDs, since virglrenderer
11578 ++ * can't deal with that. Needs fixing in virglrenderer; we should
11579 ++ * also figure out a better way to handle that in the guest.
11580 ++ */
11581 ++ handle++;
11582 ++#endif
11583 +
11584 + *resid = handle + 1;
11585 + return 0;
11586 +@@ -39,7 +50,9 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
11587 +
11588 + static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
11589 + {
11590 ++#if 0
11591 + ida_free(&vgdev->resource_ida, id - 1);
11592 ++#endif
11593 + }
11594 +
11595 + static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
11596 +diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
11597 +index eb56ee893761..1054f535178a 100644
11598 +--- a/drivers/gpu/drm/vkms/vkms_crtc.c
11599 ++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
11600 +@@ -4,13 +4,17 @@
11601 + #include <drm/drm_atomic_helper.h>
11602 + #include <drm/drm_crtc_helper.h>
11603 +
11604 +-static void _vblank_handle(struct vkms_output *output)
11605 ++static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
11606 + {
11607 ++ struct vkms_output *output = container_of(timer, struct vkms_output,
11608 ++ vblank_hrtimer);
11609 + struct drm_crtc *crtc = &output->crtc;
11610 + struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
11611 ++ int ret_overrun;
11612 + bool ret;
11613 +
11614 + spin_lock(&output->lock);
11615 ++
11616 + ret = drm_crtc_handle_vblank(crtc);
11617 + if (!ret)
11618 + DRM_ERROR("vkms failure on handling vblank");
11619 +@@ -31,19 +35,9 @@ static void _vblank_handle(struct vkms_output *output)
11620 + DRM_WARN("failed to queue vkms_crc_work_handle");
11621 + }
11622 +
11623 +- spin_unlock(&output->lock);
11624 +-}
11625 +-
11626 +-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
11627 +-{
11628 +- struct vkms_output *output = container_of(timer, struct vkms_output,
11629 +- vblank_hrtimer);
11630 +- int ret_overrun;
11631 +-
11632 +- _vblank_handle(output);
11633 +-
11634 + ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
11635 + output->period_ns);
11636 ++ spin_unlock(&output->lock);
11637 +
11638 + return HRTIMER_RESTART;
11639 + }
11640 +@@ -81,6 +75,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
11641 +
11642 + *vblank_time = output->vblank_hrtimer.node.expires;
11643 +
11644 ++ if (!in_vblank_irq)
11645 ++ *vblank_time -= output->period_ns;
11646 ++
11647 + return true;
11648 + }
11649 +
11650 +@@ -98,6 +95,7 @@ static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
11651 + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
11652 + if (!vkms_state)
11653 + return;
11654 ++ INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
11655 +
11656 + crtc->state = &vkms_state->base;
11657 + crtc->state->crtc = crtc;
11658 +diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
11659 +index 138b0bb325cf..69048e73377d 100644
11660 +--- a/drivers/gpu/drm/vkms/vkms_gem.c
11661 ++++ b/drivers/gpu/drm/vkms/vkms_gem.c
11662 +@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
11663 +
11664 + ret = drm_gem_handle_create(file, &obj->gem, handle);
11665 + drm_gem_object_put_unlocked(&obj->gem);
11666 +- if (ret) {
11667 +- drm_gem_object_release(&obj->gem);
11668 +- kfree(obj);
11669 ++ if (ret)
11670 + return ERR_PTR(ret);
11671 +- }
11672 +
11673 + return &obj->gem;
11674 + }
11675 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
11676 +index b913a56f3426..2a9112515f46 100644
11677 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
11678 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
11679 +@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
11680 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
11681 + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
11682 + };
11683 +- struct drm_display_mode *old_mode;
11684 + struct drm_display_mode *mode;
11685 + int ret;
11686 +
11687 +- old_mode = par->set_mode;
11688 + mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
11689 + if (!mode) {
11690 + DRM_ERROR("Could not create new fb mode.\n");
11691 +@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
11692 + mode->vdisplay = var->yres;
11693 + vmw_guess_mode_timing(mode);
11694 +
11695 +- if (old_mode && drm_mode_equal(old_mode, mode)) {
11696 +- drm_mode_destroy(vmw_priv->dev, mode);
11697 +- mode = old_mode;
11698 +- old_mode = NULL;
11699 +- } else if (!vmw_kms_validate_mode_vram(vmw_priv,
11700 ++ if (!vmw_kms_validate_mode_vram(vmw_priv,
11701 + mode->hdisplay *
11702 + DIV_ROUND_UP(var->bits_per_pixel, 8),
11703 + mode->vdisplay)) {
11704 +@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
11705 + schedule_delayed_work(&par->local_work, 0);
11706 +
11707 + out_unlock:
11708 +- if (old_mode)
11709 +- drm_mode_destroy(vmw_priv->dev, old_mode);
11710 ++ if (par->set_mode)
11711 ++ drm_mode_destroy(vmw_priv->dev, par->set_mode);
11712 + par->set_mode = mode;
11713 +
11714 + mutex_unlock(&par->bo_mutex);
11715 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
11716 +index b93c558dd86e..7da752ca1c34 100644
11717 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
11718 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
11719 +@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
11720 +
11721 + id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
11722 + if (id < 0)
11723 +- return id;
11724 ++ return (id != -ENOMEM ? 0 : id);
11725 +
11726 + spin_lock(&gman->lock);
11727 +
11728 +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
11729 +index 15ed6177a7a3..f040c8a7f9a9 100644
11730 +--- a/drivers/hid/hid-logitech-hidpp.c
11731 ++++ b/drivers/hid/hid-logitech-hidpp.c
11732 +@@ -2608,8 +2608,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
11733 + input_report_rel(mydata->input, REL_Y, v);
11734 +
11735 + v = hid_snto32(data[6], 8);
11736 +- hidpp_scroll_counter_handle_scroll(
11737 +- &hidpp->vertical_wheel_counter, v);
11738 ++ if (v != 0)
11739 ++ hidpp_scroll_counter_handle_scroll(
11740 ++ &hidpp->vertical_wheel_counter, v);
11741 +
11742 + input_sync(mydata->input);
11743 + }
11744 +diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
11745 +index 742191bb24c6..45e33c7ba9a6 100644
11746 +--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
11747 ++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
11748 +@@ -91,7 +91,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
11749 + IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
11750 + } else {
11751 + pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
11752 +- interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
11753 ++ interrupt_generated = !!pisr_val;
11754 ++ /* only busy-clear bit is RW, others are RO */
11755 ++ if (pisr_val)
11756 ++ ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
11757 + }
11758 +
11759 + return interrupt_generated;
11760 +@@ -839,11 +842,11 @@ int ish_hw_start(struct ishtp_device *dev)
11761 + {
11762 + ish_set_host_rdy(dev);
11763 +
11764 ++ set_host_ready(dev);
11765 ++
11766 + /* After that we can enable ISH DMA operation and wakeup ISHFW */
11767 + ish_wakeup(dev);
11768 +
11769 +- set_host_ready(dev);
11770 +-
11771 + /* wait for FW-initiated reset flow */
11772 + if (!dev->recvd_hw_ready)
11773 + wait_event_interruptible_timeout(dev->wait_hw_ready,
11774 +diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
11775 +index 728dc6d4561a..a271d6d169b1 100644
11776 +--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
11777 ++++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
11778 +@@ -675,7 +675,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl)
11779 + spin_lock_irqsave(&cl->dev->device_list_lock, flags);
11780 + list_for_each_entry(cl_device, &cl->dev->device_list,
11781 + device_link) {
11782 +- if (cl_device->fw_client->client_id == cl->fw_client_id) {
11783 ++ if (cl_device->fw_client &&
11784 ++ cl_device->fw_client->client_id == cl->fw_client_id) {
11785 + cl->device = cl_device;
11786 + rv = 0;
11787 + break;
11788 +@@ -735,6 +736,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
11789 + spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
11790 + list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
11791 + device_link) {
11792 ++ cl_device->fw_client = NULL;
11793 + if (warm_reset && cl_device->reference_count)
11794 + continue;
11795 +
11796 +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
11797 +index 6f929bfa9fcd..d0f1dfe2bcbb 100644
11798 +--- a/drivers/hwmon/Kconfig
11799 ++++ b/drivers/hwmon/Kconfig
11800 +@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
11801 + config SENSORS_W83773G
11802 + tristate "Nuvoton W83773G"
11803 + depends on I2C
11804 ++ select REGMAP_I2C
11805 + help
11806 + If you say yes here you get support for the Nuvoton W83773G hardware
11807 + monitoring chip.
11808 +diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
11809 +index 391118c8aae8..c888f4aca45c 100644
11810 +--- a/drivers/hwmon/occ/common.c
11811 ++++ b/drivers/hwmon/occ/common.c
11812 +@@ -889,6 +889,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
11813 + s++;
11814 + }
11815 + }
11816 ++
11817 ++ s = (sensors->power.num_sensors * 4) + 1;
11818 + } else {
11819 + for (i = 0; i < sensors->power.num_sensors; ++i) {
11820 + s = i + 1;
11821 +@@ -917,11 +919,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
11822 + show_power, NULL, 3, i);
11823 + attr++;
11824 + }
11825 +- }
11826 +
11827 +- if (sensors->caps.num_sensors >= 1) {
11828 + s = sensors->power.num_sensors + 1;
11829 ++ }
11830 +
11831 ++ if (sensors->caps.num_sensors >= 1) {
11832 + snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
11833 + attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
11834 + 0, 0);
11835 +diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
11836 +index abe8249b893b..f21eb28b6782 100644
11837 +--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
11838 ++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
11839 +@@ -177,15 +177,15 @@ static void etm_free_aux(void *data)
11840 + schedule_work(&event_data->work);
11841 + }
11842 +
11843 +-static void *etm_setup_aux(int event_cpu, void **pages,
11844 ++static void *etm_setup_aux(struct perf_event *event, void **pages,
11845 + int nr_pages, bool overwrite)
11846 + {
11847 +- int cpu;
11848 ++ int cpu = event->cpu;
11849 + cpumask_t *mask;
11850 + struct coresight_device *sink;
11851 + struct etm_event_data *event_data = NULL;
11852 +
11853 +- event_data = alloc_event_data(event_cpu);
11854 ++ event_data = alloc_event_data(cpu);
11855 + if (!event_data)
11856 + return NULL;
11857 + INIT_WORK(&event_data->work, free_event_data);
11858 +diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
11859 +index 53e2fb6e86f6..fe76b176974a 100644
11860 +--- a/drivers/hwtracing/coresight/coresight-etm4x.c
11861 ++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
11862 +@@ -55,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
11863 +
11864 + static bool etm4_arch_supported(u8 arch)
11865 + {
11866 +- switch (arch) {
11867 ++ /* Mask out the minor version number */
11868 ++ switch (arch & 0xf0) {
11869 + case ETM_ARCH_V4:
11870 + break;
11871 + default:
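
The coresight-etm4x check now masks off the low nibble, so any ETMv4.x minor revision matches. A tiny sketch of the major/minor packing this assumes (the 0x40 constant is illustrative, mirroring ETM_ARCH_V4):

    #include <stdio.h>

    #define ARCH_V4 0x40 /* major version 4 in the high nibble */

    static int arch_supported(unsigned char arch)
    {
        /* Compare only the major version; 0x42 (v4.2) still matches. */
        return (arch & 0xf0) == ARCH_V4;
    }

    int main(void)
    {
        printf("v4.0: %d\n", arch_supported(0x40)); /* 1 */
        printf("v4.2: %d\n", arch_supported(0x42)); /* 1 */
        printf("v3.5: %d\n", arch_supported(0x35)); /* 0 */
        return 0;
    }
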
11872 +diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
11873 +index 8426b7970c14..cc287cf6eb29 100644
11874 +--- a/drivers/hwtracing/intel_th/gth.c
11875 ++++ b/drivers/hwtracing/intel_th/gth.c
11876 +@@ -607,6 +607,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
11877 + {
11878 + struct gth_device *gth = dev_get_drvdata(&thdev->dev);
11879 + int port = othdev->output.port;
11880 ++ int master;
11881 +
11882 + if (thdev->host_mode)
11883 + return;
11884 +@@ -615,6 +616,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
11885 + othdev->output.port = -1;
11886 + othdev->output.active = false;
11887 + gth->output[port].output = NULL;
11888 ++ for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
11889 ++ if (gth->master[master] == port)
11890 ++ gth->master[master] = -1;
11891 + spin_unlock(&gth->gth_lock);
11892 + }
11893 +
11894 +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
11895 +index 93ce3aa740a9..c7ba8acfd4d5 100644
11896 +--- a/drivers/hwtracing/stm/core.c
11897 ++++ b/drivers/hwtracing/stm/core.c
11898 +@@ -244,6 +244,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
11899 + ;
11900 + if (i == width)
11901 + return pos;
11902 ++
11903 ++ /* step over [pos..pos+i) to continue search */
11904 ++ pos += i;
11905 + }
11906 +
11907 + return -1;
11908 +@@ -732,7 +735,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
11909 + struct stm_device *stm = stmf->stm;
11910 + struct stp_policy_id *id;
11911 + char *ids[] = { NULL, NULL };
11912 +- int ret = -EINVAL;
11913 ++ int ret = -EINVAL, wlimit = 1;
11914 + u32 size;
11915 +
11916 + if (stmf->output.nr_chans)
11917 +@@ -760,8 +763,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
11918 + if (id->__reserved_0 || id->__reserved_1)
11919 + goto err_free;
11920 +
11921 +- if (id->width < 1 ||
11922 +- id->width > PAGE_SIZE / stm->data->sw_mmiosz)
11923 ++ if (stm->data->sw_mmiosz)
11924 ++ wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
11925 ++
11926 ++ if (id->width < 1 || id->width > wlimit)
11927 + goto err_free;
11928 +
11929 + ids[0] = id->id;
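
The stm/core.c fix makes the free-channel scan step past a failed window (pos += i) instead of re-testing overlapping positions. A compact sketch of searching for `width` consecutive clear bits, using a single word where the driver scans a multi-word bitmap:

    #include <stdio.h>

    /* Find the first run of `width` clear bits in `bitmap` within bit
     * positions [start, end); returns the run's start or -1. */
    static int find_free_channels(unsigned long bitmap, int start, int end,
                                  int width)
    {
        for (int pos = start; pos < end - width + 1; pos++) {
            int i;

            for (i = 0; i < width; i++)
                if (bitmap & (1ul << (pos + i)))
                    break;
            if (i == width)
                return pos;

            pos += i; /* bit pos+i is busy: skip [pos..pos+i) entirely */
        }
        return -1;
    }

    int main(void)
    {
        /* bits 0-2 and 5 busy, so the first free pair starts at bit 3 */
        printf("%d\n", find_free_channels(0x27ul, 0, 16, 2)); /* 3 */
        return 0;
    }
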
11930 +diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
11931 +index b4a0b2b99a78..6b4ef1d38fb2 100644
11932 +--- a/drivers/i2c/busses/i2c-designware-core.h
11933 ++++ b/drivers/i2c/busses/i2c-designware-core.h
11934 +@@ -215,6 +215,7 @@
11935 + * @disable_int: function to disable all interrupts
11936 + * @init: function to initialize the I2C hardware
11937 + * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
11938 ++ * @suspended: set to true if the controller is suspended
11939 + *
11940 + * HCNT and LCNT parameters can be used if the platform knows more accurate
11941 + * values than the one computed based only on the input clock frequency.
11942 +@@ -270,6 +271,7 @@ struct dw_i2c_dev {
11943 + int (*set_sda_hold_time)(struct dw_i2c_dev *dev);
11944 + int mode;
11945 + struct i2c_bus_recovery_info rinfo;
11946 ++ bool suspended;
11947 + };
11948 +
11949 + #define ACCESS_SWAP 0x00000001
11950 +diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
11951 +index 8d1bc44d2530..bb8e3f149979 100644
11952 +--- a/drivers/i2c/busses/i2c-designware-master.c
11953 ++++ b/drivers/i2c/busses/i2c-designware-master.c
11954 +@@ -426,6 +426,12 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
11955 +
11956 + pm_runtime_get_sync(dev->dev);
11957 +
11958 ++ if (dev->suspended) {
11959 ++ dev_err(dev->dev, "Error %s call while suspended\n", __func__);
11960 ++ ret = -ESHUTDOWN;
11961 ++ goto done_nolock;
11962 ++ }
11963 ++
11964 + reinit_completion(&dev->cmd_complete);
11965 + dev->msgs = msgs;
11966 + dev->msgs_num = num;
11967 +diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
11968 +index d50f80487214..76810deb2de6 100644
11969 +--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
11970 ++++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
11971 +@@ -176,6 +176,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
11972 + struct pci_dev *pdev = to_pci_dev(dev);
11973 + struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
11974 +
11975 ++ i_dev->suspended = true;
11976 + i_dev->disable(i_dev);
11977 +
11978 + return 0;
11979 +@@ -185,8 +186,12 @@ static int i2c_dw_pci_resume(struct device *dev)
11980 + {
11981 + struct pci_dev *pdev = to_pci_dev(dev);
11982 + struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
11983 ++ int ret;
11984 +
11985 +- return i_dev->init(i_dev);
11986 ++ ret = i_dev->init(i_dev);
11987 ++ i_dev->suspended = false;
11988 ++
11989 ++ return ret;
11990 + }
11991 + #endif
11992 +
11993 +diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
11994 +index 9eaac3be1f63..ead5e7de3e4d 100644
11995 +--- a/drivers/i2c/busses/i2c-designware-platdrv.c
11996 ++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
11997 +@@ -454,6 +454,8 @@ static int dw_i2c_plat_suspend(struct device *dev)
11998 + {
11999 + struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
12000 +
12001 ++ i_dev->suspended = true;
12002 ++
12003 + if (i_dev->shared_with_punit)
12004 + return 0;
12005 +
12006 +@@ -471,6 +473,7 @@ static int dw_i2c_plat_resume(struct device *dev)
12007 + i2c_dw_prepare_clk(i_dev, true);
12008 +
12009 + i_dev->init(i_dev);
12010 ++ i_dev->suspended = false;
12011 +
12012 + return 0;
12013 + }
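
The i2c-designware hunks above (core header, master, PCI and platform drivers) are all one pattern: mark the controller suspended before powering it down, clear the flag only after re-initializing it on resume, and fail any transfer submitted in between with -ESHUTDOWN rather than touching a powered-down device. A hedged userspace model of the ordering (the struct and stub functions are stand-ins, not the driver's API):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct i2c_dev_model {
        bool suspended;
    };

    static int xfer(struct i2c_dev_model *dev)
    {
        if (dev->suspended)              /* transfer raced with suspend */
            return -ESHUTDOWN;           /* -108 on Linux */
        /* ... program the controller and run the transfer ... */
        return 0;
    }

    static void do_suspend(struct i2c_dev_model *dev)
    {
        dev->suspended = true;           /* set before the hardware goes down */
    }

    static void do_resume(struct i2c_dev_model *dev)
    {
        /* ... re-initialize the controller ... */
        dev->suspended = false;          /* clear only after re-init */
    }

    int main(void)
    {
        struct i2c_dev_model dev = { .suspended = false };

        do_suspend(&dev);
        printf("%d\n", xfer(&dev));      /* -ESHUTDOWN */
        do_resume(&dev);
        printf("%d\n", xfer(&dev));      /* 0 */
        return 0;
    }
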
12014 +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
12015 +index c77adbbea0c7..e85dc8583896 100644
12016 +--- a/drivers/i2c/busses/i2c-tegra.c
12017 ++++ b/drivers/i2c/busses/i2c-tegra.c
12018 +@@ -118,6 +118,9 @@
12019 + #define I2C_MST_FIFO_STATUS_TX_MASK 0xff0000
12020 + #define I2C_MST_FIFO_STATUS_TX_SHIFT 16
12021 +
12022 ++/* Packet header size in bytes */
12023 ++#define I2C_PACKET_HEADER_SIZE 12
12024 ++
12025 + /*
12026 + * msg_end_type: The bus control which needs to be sent at the end of a transfer.
12027 + * @MSG_END_STOP: Send stop pulse at end of transfer.
12028 +@@ -836,12 +839,13 @@ static const struct i2c_algorithm tegra_i2c_algo = {
12029 + /* payload size is only 12 bits */
12030 + static const struct i2c_adapter_quirks tegra_i2c_quirks = {
12031 + .flags = I2C_AQ_NO_ZERO_LEN,
12032 +- .max_read_len = 4096,
12033 +- .max_write_len = 4096,
12034 ++ .max_read_len = SZ_4K,
12035 ++ .max_write_len = SZ_4K - I2C_PACKET_HEADER_SIZE,
12036 + };
12037 +
12038 + static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
12039 + .flags = I2C_AQ_NO_ZERO_LEN,
12040 ++ .max_write_len = SZ_64K - I2C_PACKET_HEADER_SIZE,
12041 + };
12042 +
12043 + static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
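
The Tegra quirk change above is arithmetic on the visible constants: the 12-bit payload field caps a packet at SZ_4K bytes, and the 12-byte packet header must fit inside the same buffer budget on writes, so the write limit becomes 4096 - 12 = 4084 (and 65536 - 12 = 65524 for the Tegra194 variant). A quick check, assuming only the constants shown in the hunk:

    #include <stdio.h>

    #define SZ_4K                  4096u
    #define SZ_64K                 65536u
    #define I2C_PACKET_HEADER_SIZE 12u

    int main(void)
    {
        printf("max_read_len:           %u\n", SZ_4K);
        printf("max_write_len:          %u\n", SZ_4K - I2C_PACKET_HEADER_SIZE);
        printf("tegra194 max_write_len: %u\n", SZ_64K - I2C_PACKET_HEADER_SIZE);
        return 0;
    }
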
12044 +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
12045 +index 28460f6a60cc..af87a16ac3a5 100644
12046 +--- a/drivers/i2c/i2c-core-base.c
12047 ++++ b/drivers/i2c/i2c-core-base.c
12048 +@@ -430,7 +430,7 @@ static int i2c_device_remove(struct device *dev)
12049 + dev_pm_clear_wake_irq(&client->dev);
12050 + device_init_wakeup(&client->dev, false);
12051 +
12052 +- client->irq = 0;
12053 ++ client->irq = client->init_irq;
12054 +
12055 + return status;
12056 + }
12057 +@@ -741,10 +741,11 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
12058 + client->flags = info->flags;
12059 + client->addr = info->addr;
12060 +
12061 +- client->irq = info->irq;
12062 +- if (!client->irq)
12063 +- client->irq = i2c_dev_irq_from_resources(info->resources,
12064 ++ client->init_irq = info->irq;
12065 ++ if (!client->init_irq)
12066 ++ client->init_irq = i2c_dev_irq_from_resources(info->resources,
12067 + info->num_resources);
12068 ++ client->irq = client->init_irq;
12069 +
12070 + strlcpy(client->name, info->type, sizeof(client->name));
12071 +
12072 +diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
12073 +index 6cb7ad608bcd..0f01cdba9d2c 100644
12074 +--- a/drivers/i2c/i2c-core-of.c
12075 ++++ b/drivers/i2c/i2c-core-of.c
12076 +@@ -121,6 +121,17 @@ static int of_dev_node_match(struct device *dev, void *data)
12077 + return dev->of_node == data;
12078 + }
12079 +
12080 ++static int of_dev_or_parent_node_match(struct device *dev, void *data)
12081 ++{
12082 ++ if (dev->of_node == data)
12083 ++ return 1;
12084 ++
12085 ++ if (dev->parent)
12086 ++ return dev->parent->of_node == data;
12087 ++
12088 ++ return 0;
12089 ++}
12090 ++
12091 + /* must call put_device() when done with returned i2c_client device */
12092 + struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
12093 + {
12094 +@@ -145,7 +156,8 @@ struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
12095 + struct device *dev;
12096 + struct i2c_adapter *adapter;
12097 +
12098 +- dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
12099 ++ dev = bus_find_device(&i2c_bus_type, NULL, node,
12100 ++ of_dev_or_parent_node_match);
12101 + if (!dev)
12102 + return NULL;
12103 +
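
of_find_i2c_adapter_by_node() above now also matches a device whose parent owns the OF node, which covers adapters (muxed channels and the like) registered under a parent device that carries the node. A small sketch of the predicate with stand-in types (device_node is modeled by a dummy struct):

    #include <stdio.h>

    struct node { int unused; };     /* stand-in for struct device_node */

    struct device {
        struct device *parent;
        const struct node *of_node;
    };

    /* Match when the device's own node, or its parent's node, is 'data'. */
    static int of_dev_or_parent_node_match(const struct device *dev,
                                           const void *data)
    {
        if (dev->of_node == data)
            return 1;
        if (dev->parent)
            return dev->parent->of_node == data;
        return 0;
    }

    int main(void)
    {
        struct node n;
        struct device parent = { .parent = NULL, .of_node = &n };
        struct device child  = { .parent = &parent, .of_node = NULL };

        printf("%d\n", of_dev_or_parent_node_match(&child, &n)); /* 1 */
        return 0;
    }
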
12104 +diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
12105 +index fa2d2b5767f3..1ca2c4d39f87 100644
12106 +--- a/drivers/iio/adc/exynos_adc.c
12107 ++++ b/drivers/iio/adc/exynos_adc.c
12108 +@@ -115,6 +115,7 @@
12109 + #define MAX_ADC_V2_CHANNELS 10
12110 + #define MAX_ADC_V1_CHANNELS 8
12111 + #define MAX_EXYNOS3250_ADC_CHANNELS 2
12112 ++#define MAX_EXYNOS4212_ADC_CHANNELS 4
12113 + #define MAX_S5PV210_ADC_CHANNELS 10
12114 +
12115 + /* Bit definitions common for ADC_V1 and ADC_V2 */
12116 +@@ -271,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
12117 + writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
12118 + }
12119 +
12120 ++/* Exynos4212 and 4412 are like ADCv1 but with only four channels */
12121 ++static const struct exynos_adc_data exynos4212_adc_data = {
12122 ++ .num_channels = MAX_EXYNOS4212_ADC_CHANNELS,
12123 ++ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
12124 ++ .needs_adc_phy = true,
12125 ++ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
12126 ++
12127 ++ .init_hw = exynos_adc_v1_init_hw,
12128 ++ .exit_hw = exynos_adc_v1_exit_hw,
12129 ++ .clear_irq = exynos_adc_v1_clear_irq,
12130 ++ .start_conv = exynos_adc_v1_start_conv,
12131 ++};
12132 ++
12133 + static const struct exynos_adc_data exynos_adc_v1_data = {
12134 + .num_channels = MAX_ADC_V1_CHANNELS,
12135 + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
12136 +@@ -492,6 +506,9 @@ static const struct of_device_id exynos_adc_match[] = {
12137 + }, {
12138 + .compatible = "samsung,s5pv210-adc",
12139 + .data = &exynos_adc_s5pv210_data,
12140 ++ }, {
12141 ++ .compatible = "samsung,exynos4212-adc",
12142 ++ .data = &exynos4212_adc_data,
12143 + }, {
12144 + .compatible = "samsung,exynos-adc-v1",
12145 + .data = &exynos_adc_v1_data,
12146 +@@ -929,7 +946,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
12147 + struct iio_dev *indio_dev = platform_get_drvdata(pdev);
12148 + struct exynos_adc *info = iio_priv(indio_dev);
12149 +
12150 +- if (IS_REACHABLE(CONFIG_INPUT)) {
12151 ++ if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
12152 + free_irq(info->tsirq, info);
12153 + input_unregister_device(info->input);
12154 + }
12155 +diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
12156 +index c30c002f1fef..4735f8a1ca9d 100644
12157 +--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
12158 ++++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
12159 +@@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
12160 + static struct pm8xxx_chan_info *
12161 + pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
12162 + {
12163 +- struct pm8xxx_chan_info *ch;
12164 + int i;
12165 +
12166 + for (i = 0; i < adc->nchans; i++) {
12167 +- ch = &adc->chans[i];
12168 ++ struct pm8xxx_chan_info *ch = &adc->chans[i];
12169 + if (ch->hwchan->amux_channel == chan)
12170 +- break;
12171 ++ return ch;
12172 + }
12173 +- if (i == adc->nchans)
12174 +- return NULL;
12175 +-
12176 +- return ch;
12177 ++ return NULL;
12178 + }
12179 +
12180 + static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
12181 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
12182 +index 84f077b2b90a..81bded0d37d1 100644
12183 +--- a/drivers/infiniband/core/cma.c
12184 ++++ b/drivers/infiniband/core/cma.c
12185 +@@ -2966,13 +2966,22 @@ static void addr_handler(int status, struct sockaddr *src_addr,
12186 + {
12187 + struct rdma_id_private *id_priv = context;
12188 + struct rdma_cm_event event = {};
12189 ++ struct sockaddr *addr;
12190 ++ struct sockaddr_storage old_addr;
12191 +
12192 + mutex_lock(&id_priv->handler_mutex);
12193 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
12194 + RDMA_CM_ADDR_RESOLVED))
12195 + goto out;
12196 +
12197 +- memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
12198 ++ /*
12199 ++ * Store the previous src address, so that if we fail to acquire a
12200 ++ * matching rdma device, the old address can be restored, which helps
12201 ++ * to cancel the cma listen operation correctly.
12202 ++ */
12203 ++ addr = cma_src_addr(id_priv);
12204 ++ memcpy(&old_addr, addr, rdma_addr_size(addr));
12205 ++ memcpy(addr, src_addr, rdma_addr_size(src_addr));
12206 + if (!status && !id_priv->cma_dev) {
12207 + status = cma_acquire_dev_by_src_ip(id_priv);
12208 + if (status)
12209 +@@ -2983,6 +2992,8 @@ static void addr_handler(int status, struct sockaddr *src_addr,
12210 + }
12211 +
12212 + if (status) {
12213 ++ memcpy(addr, &old_addr,
12214 ++ rdma_addr_size((struct sockaddr *)&old_addr));
12215 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
12216 + RDMA_CM_ADDR_BOUND))
12217 + goto out;
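
The addr_handler() change above is a save/restore transaction: copy the current source address into a sockaddr_storage, install the freshly resolved one, and roll back if no matching rdma device can be acquired, so a later cancel of the cma listen still sees the address it was bound with. A minimal model of the rollback, with locking and the real acquire call elided (the stub below always fails, purely for illustration):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Stand-in for cma_acquire_dev_by_src_ip(); fails for illustration. */
    static int acquire_dev(void) { return -1; }

    static void addr_handler_model(struct sockaddr_in *cur,
                                   const struct sockaddr_in *resolved)
    {
        struct sockaddr_storage old_addr;

        memcpy(&old_addr, cur, sizeof(*cur));     /* save previous address */
        memcpy(cur, resolved, sizeof(*resolved));

        if (acquire_dev())                        /* failed: roll back */
            memcpy(cur, &old_addr, sizeof(*cur));
    }

    int main(void)
    {
        struct sockaddr_in cur = { .sin_family = AF_INET,
                                   .sin_port = htons(1234) };
        struct sockaddr_in res = { .sin_family = AF_INET,
                                   .sin_port = htons(4321) };

        addr_handler_model(&cur, &res);
        printf("port %u\n", ntohs(cur.sin_port)); /* 1234: restored */
        return 0;
    }
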
12218 +diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
12219 +index 8221813219e5..25a81fbb0d4d 100644
12220 +--- a/drivers/infiniband/hw/cxgb4/cm.c
12221 ++++ b/drivers/infiniband/hw/cxgb4/cm.c
12222 +@@ -1903,8 +1903,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
12223 + }
12224 + mutex_unlock(&ep->com.mutex);
12225 +
12226 +- if (release)
12227 ++ if (release) {
12228 ++ close_complete_upcall(ep, -ECONNRESET);
12229 + release_ep_resources(ep);
12230 ++ }
12231 + c4iw_put_ep(&ep->com);
12232 + return 0;
12233 + }
12234 +@@ -3606,7 +3608,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
12235 + if (close) {
12236 + if (abrupt) {
12237 + set_bit(EP_DISC_ABORT, &ep->com.history);
12238 +- close_complete_upcall(ep, -ECONNRESET);
12239 + ret = send_abort(ep);
12240 + } else {
12241 + set_bit(EP_DISC_CLOSE, &ep->com.history);
12242 +diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
12243 +index 6db2276f5c13..15ec3e1feb09 100644
12244 +--- a/drivers/infiniband/hw/hfi1/hfi.h
12245 ++++ b/drivers/infiniband/hw/hfi1/hfi.h
12246 +@@ -1435,7 +1435,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
12247 + struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
12248 + void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
12249 + int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
12250 +-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
12251 ++int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
12252 + struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
12253 + u16 ctxt);
12254 + struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
12255 +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
12256 +index 7835eb52e7c5..c532ceb0bb9a 100644
12257 +--- a/drivers/infiniband/hw/hfi1/init.c
12258 ++++ b/drivers/infiniband/hw/hfi1/init.c
12259 +@@ -215,12 +215,12 @@ static void hfi1_rcd_free(struct kref *kref)
12260 + struct hfi1_ctxtdata *rcd =
12261 + container_of(kref, struct hfi1_ctxtdata, kref);
12262 +
12263 +- hfi1_free_ctxtdata(rcd->dd, rcd);
12264 +-
12265 + spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
12266 + rcd->dd->rcd[rcd->ctxt] = NULL;
12267 + spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
12268 +
12269 ++ hfi1_free_ctxtdata(rcd->dd, rcd);
12270 ++
12271 + kfree(rcd);
12272 + }
12273 +
12274 +@@ -243,10 +243,13 @@ int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
12275 + * @rcd: pointer to an initialized rcd data structure
12276 + *
12277 + * Use this to get a reference after the init.
12278 ++ *
12279 ++ * Return: the result of kref_get_unless_zero(), which is non-zero
12280 ++ * when the reference count was incremented, otherwise 0.
12281 + */
12282 +-void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
12283 ++int hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
12284 + {
12285 +- kref_get(&rcd->kref);
12286 ++ return kref_get_unless_zero(&rcd->kref);
12287 + }
12288 +
12289 + /**
12290 +@@ -326,7 +329,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
12291 + spin_lock_irqsave(&dd->uctxt_lock, flags);
12292 + if (dd->rcd[ctxt]) {
12293 + rcd = dd->rcd[ctxt];
12294 +- hfi1_rcd_get(rcd);
12295 ++ if (!hfi1_rcd_get(rcd))
12296 ++ rcd = NULL;
12297 + }
12298 + spin_unlock_irqrestore(&dd->uctxt_lock, flags);
12299 +
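
Returning a value from hfi1_rcd_get() lets lookups race safely with the final put: kref_get_unless_zero() refuses to resurrect a context whose reference count has already dropped to zero, and hfi1_rcd_get_by_index() now treats that as "context gone" and returns NULL. The same idiom, modeled with C11 atomics instead of the kernel's kref:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Take a reference only while the object is still alive: returns
     * non-zero on increment, 0 if the count had already reached zero. */
    static int get_unless_zero(atomic_uint *refcount)
    {
        unsigned int v = atomic_load(refcount);

        while (v != 0) {
            /* on failure the CAS reloads v, so just retry */
            if (atomic_compare_exchange_weak(refcount, &v, v + 1))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        atomic_uint live = 1, dead = 0;

        printf("%d %d\n", get_unless_zero(&live),  /* 1 */
                          get_unless_zero(&dead)); /* 0 */
        return 0;
    }
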
12300 +diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
12301 +index fedaf8260105..8c79a480f2b7 100644
12302 +--- a/drivers/infiniband/hw/mlx4/cm.c
12303 ++++ b/drivers/infiniband/hw/mlx4/cm.c
12304 +@@ -39,7 +39,7 @@
12305 +
12306 + #include "mlx4_ib.h"
12307 +
12308 +-#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
12309 ++#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
12310 +
12311 + struct id_map_entry {
12312 + struct rb_node node;
12313 +diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
12314 +index 4ee32964e1dd..948eb6e25219 100644
12315 +--- a/drivers/infiniband/hw/mlx5/odp.c
12316 ++++ b/drivers/infiniband/hw/mlx5/odp.c
12317 +@@ -560,7 +560,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
12318 + struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
12319 + bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
12320 + bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
12321 +- u64 access_mask = ODP_READ_ALLOWED_BIT;
12322 ++ u64 access_mask;
12323 + u64 start_idx, page_mask;
12324 + struct ib_umem_odp *odp;
12325 + size_t size;
12326 +@@ -582,6 +582,7 @@ next_mr:
12327 + page_shift = mr->umem->page_shift;
12328 + page_mask = ~(BIT(page_shift) - 1);
12329 + start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
12330 ++ access_mask = ODP_READ_ALLOWED_BIT;
12331 +
12332 + if (prefetch && !downgrade && !mr->umem->writable) {
12333 + /* prefetch with write-access must
12334 +diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
12335 +index c6cc3e4ab71d..c45b8359b389 100644
12336 +--- a/drivers/infiniband/sw/rdmavt/qp.c
12337 ++++ b/drivers/infiniband/sw/rdmavt/qp.c
12338 +@@ -2785,6 +2785,18 @@ again:
12339 + }
12340 + EXPORT_SYMBOL(rvt_copy_sge);
12341 +
12342 ++static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
12343 ++ struct rvt_qp *sqp)
12344 ++{
12345 ++ rvp->n_pkt_drops++;
12346 ++ /*
12347 ++ * For RC, the requester would time out and retry, so
12348 ++ * shortcut the timeouts and just signal too many retries.
12349 ++ */
12350 ++ return sqp->ibqp.qp_type == IB_QPT_RC ?
12351 ++ IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
12352 ++}
12353 ++
12354 + /**
12355 + * ruc_loopback - handle UC and RC loopback requests
12356 + * @sqp: the sending QP
12357 +@@ -2857,17 +2869,14 @@ again:
12358 + }
12359 + spin_unlock_irqrestore(&sqp->s_lock, flags);
12360 +
12361 +- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
12362 ++ if (!qp) {
12363 ++ send_status = loopback_qp_drop(rvp, sqp);
12364 ++ goto serr_no_r_lock;
12365 ++ }
12366 ++ spin_lock_irqsave(&qp->r_lock, flags);
12367 ++ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
12368 + qp->ibqp.qp_type != sqp->ibqp.qp_type) {
12369 +- rvp->n_pkt_drops++;
12370 +- /*
12371 +- * For RC, the requester would timeout and retry so
12372 +- * shortcut the timeouts and just signal too many retries.
12373 +- */
12374 +- if (sqp->ibqp.qp_type == IB_QPT_RC)
12375 +- send_status = IB_WC_RETRY_EXC_ERR;
12376 +- else
12377 +- send_status = IB_WC_SUCCESS;
12378 ++ send_status = loopback_qp_drop(rvp, sqp);
12379 + goto serr;
12380 + }
12381 +
12382 +@@ -2893,18 +2902,8 @@ again:
12383 + goto send_comp;
12384 +
12385 + case IB_WR_SEND_WITH_INV:
12386 +- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
12387 +- wc.wc_flags = IB_WC_WITH_INVALIDATE;
12388 +- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
12389 +- }
12390 +- goto send;
12391 +-
12392 + case IB_WR_SEND_WITH_IMM:
12393 +- wc.wc_flags = IB_WC_WITH_IMM;
12394 +- wc.ex.imm_data = wqe->wr.ex.imm_data;
12395 +- /* FALLTHROUGH */
12396 + case IB_WR_SEND:
12397 +-send:
12398 + ret = rvt_get_rwqe(qp, false);
12399 + if (ret < 0)
12400 + goto op_err;
12401 +@@ -2912,6 +2911,22 @@ send:
12402 + goto rnr_nak;
12403 + if (wqe->length > qp->r_len)
12404 + goto inv_err;
12405 ++ switch (wqe->wr.opcode) {
12406 ++ case IB_WR_SEND_WITH_INV:
12407 ++ if (!rvt_invalidate_rkey(qp,
12408 ++ wqe->wr.ex.invalidate_rkey)) {
12409 ++ wc.wc_flags = IB_WC_WITH_INVALIDATE;
12410 ++ wc.ex.invalidate_rkey =
12411 ++ wqe->wr.ex.invalidate_rkey;
12412 ++ }
12413 ++ break;
12414 ++ case IB_WR_SEND_WITH_IMM:
12415 ++ wc.wc_flags = IB_WC_WITH_IMM;
12416 ++ wc.ex.imm_data = wqe->wr.ex.imm_data;
12417 ++ break;
12418 ++ default:
12419 ++ break;
12420 ++ }
12421 + break;
12422 +
12423 + case IB_WR_RDMA_WRITE_WITH_IMM:
12424 +@@ -3041,6 +3056,7 @@ do_write:
12425 + wqe->wr.send_flags & IB_SEND_SOLICITED);
12426 +
12427 + send_comp:
12428 ++ spin_unlock_irqrestore(&qp->r_lock, flags);
12429 + spin_lock_irqsave(&sqp->s_lock, flags);
12430 + rvp->n_loop_pkts++;
12431 + flush_send:
12432 +@@ -3067,6 +3083,7 @@ rnr_nak:
12433 + }
12434 + if (sqp->s_rnr_retry_cnt < 7)
12435 + sqp->s_rnr_retry--;
12436 ++ spin_unlock_irqrestore(&qp->r_lock, flags);
12437 + spin_lock_irqsave(&sqp->s_lock, flags);
12438 + if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
12439 + goto clr_busy;
12440 +@@ -3095,6 +3112,8 @@ err:
12441 + rvt_rc_error(qp, wc.status);
12442 +
12443 + serr:
12444 ++ spin_unlock_irqrestore(&qp->r_lock, flags);
12445 ++serr_no_r_lock:
12446 + spin_lock_irqsave(&sqp->s_lock, flags);
12447 + rvt_send_complete(sqp, wqe, send_status);
12448 + if (sqp->ibqp.qp_type == IB_QPT_RC) {
12449 +diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
12450 +index 23520df7650f..55cd6e0b409c 100644
12451 +--- a/drivers/input/misc/soc_button_array.c
12452 ++++ b/drivers/input/misc/soc_button_array.c
12453 +@@ -373,7 +373,7 @@ static struct soc_button_info soc_button_PNP0C40[] = {
12454 + { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
12455 + { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
12456 + { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
12457 +- { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
12458 ++ { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
12459 + { }
12460 + };
12461 +
12462 +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
12463 +index 225ae6980182..628ef617bb2f 100644
12464 +--- a/drivers/input/mouse/elan_i2c_core.c
12465 ++++ b/drivers/input/mouse/elan_i2c_core.c
12466 +@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
12467 + { "ELAN0000", 0 },
12468 + { "ELAN0100", 0 },
12469 + { "ELAN0600", 0 },
12470 ++ { "ELAN0601", 0 },
12471 + { "ELAN0602", 0 },
12472 + { "ELAN0605", 0 },
12473 + { "ELAN0608", 0 },
12474 +diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
12475 +index 38bfaca48eab..150f9eecaca7 100644
12476 +--- a/drivers/input/tablet/wacom_serial4.c
12477 ++++ b/drivers/input/tablet/wacom_serial4.c
12478 +@@ -187,6 +187,7 @@ enum {
12479 + MODEL_DIGITIZER_II = 0x5544, /* UD */
12480 + MODEL_GRAPHIRE = 0x4554, /* ET */
12481 + MODEL_PENPARTNER = 0x4354, /* CT */
12482 ++ MODEL_ARTPAD_II = 0x4B54, /* KT */
12483 + };
12484 +
12485 + static void wacom_handle_model_response(struct wacom *wacom)
12486 +@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
12487 + wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
12488 + break;
12489 +
12490 ++ case MODEL_ARTPAD_II:
12491 + case MODEL_DIGITIZER_II:
12492 + wacom->dev->name = "Wacom Digitizer II";
12493 + wacom->dev->id.version = MODEL_DIGITIZER_II;
12494 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
12495 +index 2a7b78bb98b4..e628ef23418f 100644
12496 +--- a/drivers/iommu/amd_iommu.c
12497 ++++ b/drivers/iommu/amd_iommu.c
12498 +@@ -2605,7 +2605,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
12499 +
12500 + /* Everything is mapped - write the right values into s->dma_address */
12501 + for_each_sg(sglist, s, nelems, i) {
12502 +- s->dma_address += address + s->offset;
12503 ++ /*
12504 ++ * Add in the remaining piece of the scatter-gather offset that
12505 ++ * was masked out when we were determining the physical address
12506 ++ * via (sg_phys(s) & PAGE_MASK) earlier.
12507 ++ */
12508 ++ s->dma_address += address + (s->offset & ~PAGE_MASK);
12509 + s->dma_length = s->length;
12510 + }
12511 +
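
The map_sg() fix above is mask arithmetic: the pages were mapped starting from sg_phys(s) & PAGE_MASK, so the page-aligned part of s->offset is already contained in the mapped run, and only the sub-page remainder, s->offset & ~PAGE_MASK, may be added back; adding the whole offset counted the aligned part twice. A worked check with illustrative numbers:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long address = 0x100000; /* start of the mapped page run */
        unsigned long offset  = 0x1234;   /* sg offset: one page + 0x234 */

        /* Buggy: the page-aligned 0x1000 is already in the mapped run. */
        unsigned long buggy = address + offset;

        /* Fixed: add back only the sub-page remainder. */
        unsigned long fixed = address + (offset & ~PAGE_MASK);

        printf("buggy 0x%lx, fixed 0x%lx\n", buggy, fixed); /* 0x101234, 0x100234 */
        return 0;
    }
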
12512 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
12513 +index 78188bf7e90d..dbd6824dfffa 100644
12514 +--- a/drivers/iommu/intel-iommu.c
12515 ++++ b/drivers/iommu/intel-iommu.c
12516 +@@ -2485,7 +2485,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
12517 + if (dev && dev_is_pci(dev)) {
12518 + struct pci_dev *pdev = to_pci_dev(info->dev);
12519 +
12520 +- if (!pci_ats_disabled() &&
12521 ++ if (!pdev->untrusted &&
12522 ++ !pci_ats_disabled() &&
12523 + ecap_dev_iotlb_support(iommu->ecap) &&
12524 + pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
12525 + dmar_find_matched_atsr_unit(pdev))
12526 +diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
12527 +index cec29bf45c9b..18a8330e1882 100644
12528 +--- a/drivers/iommu/io-pgtable-arm-v7s.c
12529 ++++ b/drivers/iommu/io-pgtable-arm-v7s.c
12530 +@@ -161,6 +161,14 @@
12531 +
12532 + #define ARM_V7S_TCR_PD1 BIT(5)
12533 +
12534 ++#ifdef CONFIG_ZONE_DMA32
12535 ++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
12536 ++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
12537 ++#else
12538 ++#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
12539 ++#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
12540 ++#endif
12541 ++
12542 + typedef u32 arm_v7s_iopte;
12543 +
12544 + static bool selftest_running;
12545 +@@ -198,13 +206,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
12546 + void *table = NULL;
12547 +
12548 + if (lvl == 1)
12549 +- table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
12550 ++ table = (void *)__get_free_pages(
12551 ++ __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
12552 + else if (lvl == 2)
12553 +- table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
12554 ++ table = kmem_cache_zalloc(data->l2_tables, gfp);
12555 + phys = virt_to_phys(table);
12556 +- if (phys != (arm_v7s_iopte)phys)
12557 ++ if (phys != (arm_v7s_iopte)phys) {
12558 + /* Doesn't fit in PTE */
12559 ++ dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
12560 + goto out_free;
12561 ++ }
12562 + if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
12563 + dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
12564 + if (dma_mapping_error(dev, dma))
12565 +@@ -217,7 +228,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
12566 + if (dma != phys)
12567 + goto out_unmap;
12568 + }
12569 +- kmemleak_ignore(table);
12570 ++ if (lvl == 2)
12571 ++ kmemleak_ignore(table);
12572 + return table;
12573 +
12574 + out_unmap:
12575 +@@ -733,7 +745,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
12576 + data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
12577 + ARM_V7S_TABLE_SIZE(2),
12578 + ARM_V7S_TABLE_SIZE(2),
12579 +- SLAB_CACHE_DMA, NULL);
12580 ++ ARM_V7S_TABLE_SLAB_FLAGS, NULL);
12581 + if (!data->l2_tables)
12582 + goto out_free_data;
12583 +
12584 +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
12585 +index f8d3ba247523..2de8122e218f 100644
12586 +--- a/drivers/iommu/iova.c
12587 ++++ b/drivers/iommu/iova.c
12588 +@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
12589 + curr_iova = rb_entry(curr, struct iova, node);
12590 + } while (curr && new_pfn <= curr_iova->pfn_hi);
12591 +
12592 +- if (limit_pfn < size || new_pfn < iovad->start_pfn)
12593 ++ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
12594 ++ iovad->max32_alloc_size = size;
12595 + goto iova32_full;
12596 ++ }
12597 +
12598 + /* pfn_lo will point to size aligned address if size_aligned is set */
12599 + new->pfn_lo = new_pfn;
12600 +@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
12601 + return 0;
12602 +
12603 + iova32_full:
12604 +- iovad->max32_alloc_size = size;
12605 + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
12606 + return -ENOMEM;
12607 + }
12608 +diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
12609 +index 0e65f609352e..83364fedbf0a 100644
12610 +--- a/drivers/irqchip/irq-brcmstb-l2.c
12611 ++++ b/drivers/irqchip/irq-brcmstb-l2.c
12612 +@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
12613 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
12614 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
12615 + struct brcmstb_l2_intc_data *b = gc->private;
12616 ++ unsigned long flags;
12617 +
12618 +- irq_gc_lock(gc);
12619 ++ irq_gc_lock_irqsave(gc, flags);
12620 + /* Save the current mask */
12621 + b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
12622 +
12623 +@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
12624 + irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
12625 + irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
12626 + }
12627 +- irq_gc_unlock(gc);
12628 ++ irq_gc_unlock_irqrestore(gc, flags);
12629 + }
12630 +
12631 + static void brcmstb_l2_intc_resume(struct irq_data *d)
12632 +@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
12633 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
12634 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
12635 + struct brcmstb_l2_intc_data *b = gc->private;
12636 ++ unsigned long flags;
12637 +
12638 +- irq_gc_lock(gc);
12639 ++ irq_gc_lock_irqsave(gc, flags);
12640 + if (ct->chip.irq_ack) {
12641 + /* Clear unmasked non-wakeup interrupts */
12642 + irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
12643 +@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
12644 + /* Restore the saved mask */
12645 + irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
12646 + irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
12647 +- irq_gc_unlock(gc);
12648 ++ irq_gc_unlock_irqrestore(gc, flags);
12649 + }
12650 +
12651 + static int __init brcmstb_l2_intc_of_init(struct device_node *np,
12652 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
12653 +index c3aba3fc818d..93e32a59640c 100644
12654 +--- a/drivers/irqchip/irq-gic-v3-its.c
12655 ++++ b/drivers/irqchip/irq-gic-v3-its.c
12656 +@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
12657 + ra = container_of(a, struct lpi_range, entry);
12658 + rb = container_of(b, struct lpi_range, entry);
12659 +
12660 +- return rb->base_id - ra->base_id;
12661 ++ return ra->base_id - rb->base_id;
12662 + }
12663 +
12664 + static void merge_lpi_ranges(void)
12665 +@@ -1955,6 +1955,8 @@ static int its_alloc_tables(struct its_node *its)
12666 + indirect = its_parse_indirect_baser(its, baser,
12667 + psz, &order,
12668 + its->device_ids);
12669 ++ break;
12670 ++
12671 + case GITS_BASER_TYPE_VCPU:
12672 + indirect = its_parse_indirect_baser(its, baser,
12673 + psz, &order,
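
Flipping the operands in lpi_range_cmp() above makes list_sort() order LPI ranges ascending by base_id, which is what the subsequent merge of adjacent ranges assumes. The comparator contract is the usual one (negative when a sorts before b); the sketch below demonstrates it with qsort() and plain integers, using the overflow-safe (a > b) - (a < b) idiom where the kernel code subtracts directly:

    #include <stdio.h>
    #include <stdlib.h>

    /* Ascending comparator: negative when a sorts before b. The old
     * kernel code returned rb - ra, i.e. a descending order. */
    static int base_id_cmp(const void *a, const void *b)
    {
        unsigned int ra = *(const unsigned int *)a;
        unsigned int rb = *(const unsigned int *)b;

        return (ra > rb) - (ra < rb);
    }

    int main(void)
    {
        unsigned int base_ids[] = { 8192, 0, 4096 };
        size_t i;

        qsort(base_ids, 3, sizeof(base_ids[0]), base_id_cmp);
        for (i = 0; i < 3; i++)
            printf("%u ", base_ids[i]);  /* 0 4096 8192 */
        printf("\n");
        return 0;
    }
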
12674 +diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
12675 +index 4d85645c87f7..0928fd1f0e0c 100644
12676 +--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
12677 ++++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
12678 +@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
12679 + if (m->clock2)
12680 + test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
12681 +
12682 +- if (ent->device == 0xB410) {
12683 ++ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
12684 ++ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
12685 + test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
12686 + test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
12687 + test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
12688 +diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
12689 +index 3d79a6380761..723f2f17497a 100644
12690 +--- a/drivers/leds/leds-lp55xx-common.c
12691 ++++ b/drivers/leds/leds-lp55xx-common.c
12692 +@@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
12693 +
12694 + if (!fw) {
12695 + dev_err(dev, "firmware request failed\n");
12696 +- goto out;
12697 ++ return;
12698 + }
12699 +
12700 + /* handling firmware data is chip dependent */
12701 +@@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
12702 +
12703 + mutex_unlock(&chip->lock);
12704 +
12705 +-out:
12706 + /* firmware should be released for other channel use */
12707 + release_firmware(chip->fw);
12708 ++ chip->fw = NULL;
12709 + }
12710 +
12711 + static int lp55xx_request_firmware(struct lp55xx_chip *chip)
12712 +diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
12713 +index 956004366699..886710043025 100644
12714 +--- a/drivers/md/bcache/extents.c
12715 ++++ b/drivers/md/bcache/extents.c
12716 +@@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
12717 + {
12718 + struct btree *b = container_of(bk, struct btree, keys);
12719 + unsigned int i, stale;
12720 ++ char buf[80];
12721 +
12722 + if (!KEY_PTRS(k) ||
12723 + bch_extent_invalid(bk, k))
12724 +@@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
12725 + if (!ptr_available(b->c, k, i))
12726 + return true;
12727 +
12728 +- if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
12729 +- return false;
12730 +-
12731 + for (i = 0; i < KEY_PTRS(k); i++) {
12732 + stale = ptr_stale(b->c, k, i);
12733 +
12734 ++ if (stale && KEY_DIRTY(k)) {
12735 ++ bch_extent_to_text(buf, sizeof(buf), k);
12736 ++ pr_info("stale dirty pointer, stale %u, key: %s",
12737 ++ stale, buf);
12738 ++ }
12739 ++
12740 + btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
12741 + "key too stale: %i, need_gc %u",
12742 + stale, b->c->need_gc);
12743 +
12744 +- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
12745 +- b, "stale dirty pointer");
12746 +-
12747 + if (stale)
12748 + return true;
12749 +
12750 +diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
12751 +index 15070412a32e..f101bfe8657a 100644
12752 +--- a/drivers/md/bcache/request.c
12753 ++++ b/drivers/md/bcache/request.c
12754 +@@ -392,10 +392,11 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
12755 +
12756 + /*
12757 + * Flag for bypass if the IO is for read-ahead or background,
12758 +- * unless the read-ahead request is for metadata (eg, for gfs2).
12759 ++ * unless the read-ahead request is for metadata
12760 ++ * (eg, for gfs2 or xfs).
12761 + */
12762 + if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
12763 +- !(bio->bi_opf & REQ_PRIO))
12764 ++ !(bio->bi_opf & (REQ_META|REQ_PRIO)))
12765 + goto skip;
12766 +
12767 + if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
12768 +@@ -877,7 +878,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
12769 + }
12770 +
12771 + if (!(bio->bi_opf & REQ_RAHEAD) &&
12772 +- !(bio->bi_opf & REQ_PRIO) &&
12773 ++ !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
12774 + s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
12775 + reada = min_t(sector_t, dc->readahead >> 9,
12776 + get_capacity(bio->bi_disk) - bio_end_sector(bio));
12777 +diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
12778 +index 557a8a3270a1..e5daf91310f6 100644
12779 +--- a/drivers/md/bcache/sysfs.c
12780 ++++ b/drivers/md/bcache/sysfs.c
12781 +@@ -287,8 +287,12 @@ STORE(__cached_dev)
12782 + sysfs_strtoul_clamp(writeback_rate_update_seconds,
12783 + dc->writeback_rate_update_seconds,
12784 + 1, WRITEBACK_RATE_UPDATE_SECS_MAX);
12785 +- d_strtoul(writeback_rate_i_term_inverse);
12786 +- d_strtoul_nonzero(writeback_rate_p_term_inverse);
12787 ++ sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
12788 ++ dc->writeback_rate_i_term_inverse,
12789 ++ 1, UINT_MAX);
12790 ++ sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
12791 ++ dc->writeback_rate_p_term_inverse,
12792 ++ 1, UINT_MAX);
12793 + d_strtoul_nonzero(writeback_rate_minimum);
12794 +
12795 + sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
12796 +@@ -299,7 +303,9 @@ STORE(__cached_dev)
12797 + dc->io_disable = v ? 1 : 0;
12798 + }
12799 +
12800 +- d_strtoi_h(sequential_cutoff);
12801 ++ sysfs_strtoul_clamp(sequential_cutoff,
12802 ++ dc->sequential_cutoff,
12803 ++ 0, UINT_MAX);
12804 + d_strtoi_h(readahead);
12805 +
12806 + if (attr == &sysfs_clear_stats)
12807 +@@ -778,8 +784,17 @@ STORE(__bch_cache_set)
12808 + c->error_limit = strtoul_or_return(buf);
12809 +
12810 + /* See count_io_errors() for why 88 */
12811 +- if (attr == &sysfs_io_error_halflife)
12812 +- c->error_decay = strtoul_or_return(buf) / 88;
12813 ++ if (attr == &sysfs_io_error_halflife) {
12814 ++ unsigned long v = 0;
12815 ++ ssize_t ret;
12816 ++
12817 ++ ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
12818 ++ if (!ret) {
12819 ++ c->error_decay = v / 88;
12820 ++ return size;
12821 ++ }
12822 ++ return ret;
12823 ++ }
12824 +
12825 + if (attr == &sysfs_io_disable) {
12826 + v = strtoul_or_return(buf);
12827 +diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
12828 +index 3fe82425859c..0ad2715a884e 100644
12829 +--- a/drivers/md/bcache/sysfs.h
12830 ++++ b/drivers/md/bcache/sysfs.h
12831 +@@ -81,9 +81,16 @@ do { \
12832 +
12833 + #define sysfs_strtoul_clamp(file, var, min, max) \
12834 + do { \
12835 +- if (attr == &sysfs_ ## file) \
12836 +- return strtoul_safe_clamp(buf, var, min, max) \
12837 +- ?: (ssize_t) size; \
12838 ++ if (attr == &sysfs_ ## file) { \
12839 ++ unsigned long v = 0; \
12840 ++ ssize_t ret; \
12841 ++ ret = strtoul_safe_clamp(buf, v, min, max); \
12842 ++ if (!ret) { \
12843 ++ var = v; \
12844 ++ return size; \
12845 ++ } \
12846 ++ return ret; \
12847 ++ } \
12848 + } while (0)
12849 +
12850 + #define strtoul_or_return(cp) \
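
The rewritten sysfs_strtoul_clamp() above closes a type hole: strtoul_safe_clamp() writes an unsigned long, so clamping straight into a narrower field (several of these are unsigned int) could truncate out-of-range values. Parsing into a local unsigned long and committing to the field only on success is the validate-then-commit pattern the other hunks in this file (sequential_cutoff, io_error_halflife) adopt as well. A userspace model, with a simple strtoul()-based stand-in for the kernel helper:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for strtoul_safe_clamp(): parse, clamp, 0 on success. */
    static int parse_clamp(const char *buf, unsigned long *v,
                           unsigned long min, unsigned long max)
    {
        char *end;
        unsigned long t;

        errno = 0;
        t = strtoul(buf, &end, 10);
        if (errno || end == buf)
            return -EINVAL;
        *v = t < min ? min : (t > max ? max : t);
        return 0;
    }

    /* Validate-then-commit: the (narrower) field is written only after
     * the whole parse succeeded, via a correctly typed temporary. */
    static int store_field(const char *buf, unsigned int *field)
    {
        unsigned long v = 0;
        int ret = parse_clamp(buf, &v, 1, UINT_MAX);

        if (ret)
            return ret;
        *field = (unsigned int)v;
        return 0;
    }

    int main(void)
    {
        unsigned int field = 7;

        printf("%d %u\n", store_field("42", &field), field);   /* 0 42 */
        printf("%d %u\n", store_field("oops", &field), field); /* -22 7 */
        return 0;
    }
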
12851 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
12852 +index 6a743d3bb338..4e4c6810dc3c 100644
12853 +--- a/drivers/md/bcache/writeback.h
12854 ++++ b/drivers/md/bcache/writeback.h
12855 +@@ -71,6 +71,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
12856 + in_use > bch_cutoff_writeback_sync)
12857 + return false;
12858 +
12859 ++ if (bio_op(bio) == REQ_OP_DISCARD)
12860 ++ return false;
12861 ++
12862 + if (dc->partial_stripes_expensive &&
12863 + bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
12864 + bio_sectors(bio)))
12865 +diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
12866 +index 95c6d86ab5e8..c4ef1fceead6 100644
12867 +--- a/drivers/md/dm-core.h
12868 ++++ b/drivers/md/dm-core.h
12869 +@@ -115,6 +115,7 @@ struct mapped_device {
12870 + struct srcu_struct io_barrier;
12871 + };
12872 +
12873 ++void disable_discard(struct mapped_device *md);
12874 + void disable_write_same(struct mapped_device *md);
12875 + void disable_write_zeroes(struct mapped_device *md);
12876 +
12877 +diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
12878 +index 457200ca6287..f535fd8ac82d 100644
12879 +--- a/drivers/md/dm-integrity.c
12880 ++++ b/drivers/md/dm-integrity.c
12881 +@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
12882 + static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
12883 + {
12884 + return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
12885 +- range2->logical_sector + range2->n_sectors > range2->logical_sector;
12886 ++ range1->logical_sector + range1->n_sectors > range2->logical_sector;
12887 + }
12888 +
12889 + static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
12890 +@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
12891 + struct dm_integrity_range *last_range =
12892 + list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
12893 + struct task_struct *last_range_task;
12894 +- if (!ranges_overlap(range, last_range))
12895 +- break;
12896 + last_range_task = last_range->task;
12897 + list_del(&last_range->wait_entry);
12898 + if (!add_new_range(ic, last_range, false)) {
12899 +@@ -1368,8 +1366,8 @@ again:
12900 + checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
12901 + if (unlikely(r)) {
12902 + if (r > 0) {
12903 +- DMERR("Checksum failed at sector 0x%llx",
12904 +- (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
12905 ++ DMERR_LIMIT("Checksum failed at sector 0x%llx",
12906 ++ (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
12907 + r = -EILSEQ;
12908 + atomic64_inc(&ic->number_of_mismatches);
12909 + }
12910 +@@ -1561,8 +1559,8 @@ retry_kmap:
12911 +
12912 + integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
12913 + if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
12914 +- DMERR("Checksum failed when reading from journal, at sector 0x%llx",
12915 +- (unsigned long long)logical_sector);
12916 ++ DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
12917 ++ (unsigned long long)logical_sector);
12918 + }
12919 + }
12920 + #endif
12921 +@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
12922 + journal_watermark = val;
12923 + else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
12924 + sync_msec = val;
12925 +- else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
12926 ++ else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
12927 + if (ic->meta_dev) {
12928 + dm_put_device(ti, ic->meta_dev);
12929 + ic->meta_dev = NULL;
12930 +@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
12931 + goto bad;
12932 + }
12933 + ic->sectors_per_block = val >> SECTOR_SHIFT;
12934 +- } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
12935 ++ } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
12936 + r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
12937 + "Invalid internal_hash argument");
12938 + if (r)
12939 + goto bad;
12940 +- } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
12941 ++ } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
12942 + r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
12943 + "Invalid journal_crypt argument");
12944 + if (r)
12945 + goto bad;
12946 +- } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
12947 ++ } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
12948 + r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
12949 + "Invalid journal_mac argument");
12950 + if (r)
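
The one-character ranges_overlap() fix above restores the textbook half-open interval test: [a, a+n) and [b, b+m) intersect iff a < b+m and b < a+n. The buggy version compared range2 against itself, so its second clause held whenever n_sectors was non-zero and disjoint ranges were reported as overlapping. (The memcmp() to strncmp() swaps in the same file keep a short option string from being read past its terminator.) A worked check of the predicate:

    #include <stdio.h>

    struct range {
        unsigned long long start; /* logical_sector */
        unsigned long long n;     /* n_sectors */
    };

    /* Half-open intervals [start, start + n) overlap iff each starts
     * before the other ends; the bug tested range2 against itself. */
    static int ranges_overlap(const struct range *r1, const struct range *r2)
    {
        return r1->start < r2->start + r2->n &&
               r2->start < r1->start + r1->n;
    }

    int main(void)
    {
        struct range a = { 0, 8 }, b = { 8, 8 }, c = { 4, 8 };

        printf("%d\n", ranges_overlap(&a, &b)); /* 0: adjacent, disjoint */
        printf("%d\n", ranges_overlap(&a, &c)); /* 1: [0,8) meets [4,12) */
        return 0;
    }
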
12951 +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
12952 +index a20531e5f3b4..582265e043a6 100644
12953 +--- a/drivers/md/dm-rq.c
12954 ++++ b/drivers/md/dm-rq.c
12955 +@@ -206,11 +206,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
12956 + }
12957 +
12958 + if (unlikely(error == BLK_STS_TARGET)) {
12959 +- if (req_op(clone) == REQ_OP_WRITE_SAME &&
12960 +- !clone->q->limits.max_write_same_sectors)
12961 ++ if (req_op(clone) == REQ_OP_DISCARD &&
12962 ++ !clone->q->limits.max_discard_sectors)
12963 ++ disable_discard(tio->md);
12964 ++ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
12965 ++ !clone->q->limits.max_write_same_sectors)
12966 + disable_write_same(tio->md);
12967 +- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
12968 +- !clone->q->limits.max_write_zeroes_sectors)
12969 ++ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
12970 ++ !clone->q->limits.max_write_zeroes_sectors)
12971 + disable_write_zeroes(tio->md);
12972 + }
12973 +
12974 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
12975 +index 4b1be754cc41..eb257e4dcb1c 100644
12976 +--- a/drivers/md/dm-table.c
12977 ++++ b/drivers/md/dm-table.c
12978 +@@ -1852,6 +1852,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
12979 + return true;
12980 + }
12981 +
12982 ++static int device_requires_stable_pages(struct dm_target *ti,
12983 ++ struct dm_dev *dev, sector_t start,
12984 ++ sector_t len, void *data)
12985 ++{
12986 ++ struct request_queue *q = bdev_get_queue(dev->bdev);
12987 ++
12988 ++ return q && bdi_cap_stable_pages_required(q->backing_dev_info);
12989 ++}
12990 ++
12991 ++/*
12992 ++ * If any underlying device requires stable pages, a table must require
12993 ++ * them as well. Only targets that support iterate_devices are considered:
12994 ++ * we don't want error, zero, etc. to require stable pages.
12995 ++ */
12996 ++static bool dm_table_requires_stable_pages(struct dm_table *t)
12997 ++{
12998 ++ struct dm_target *ti;
12999 ++ unsigned i;
13000 ++
13001 ++ for (i = 0; i < dm_table_get_num_targets(t); i++) {
13002 ++ ti = dm_table_get_target(t, i);
13003 ++
13004 ++ if (ti->type->iterate_devices &&
13005 ++ ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
13006 ++ return true;
13007 ++ }
13008 ++
13009 ++ return false;
13010 ++}
13011 ++
13012 + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
13013 + struct queue_limits *limits)
13014 + {
13015 +@@ -1909,6 +1939,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
13016 +
13017 + dm_table_verify_integrity(t);
13018 +
13019 ++ /*
13020 ++ * Some devices don't use blk_integrity but still want stable pages
13021 ++ * because they do their own checksumming.
13022 ++ */
13023 ++ if (dm_table_requires_stable_pages(t))
13024 ++ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
13025 ++ else
13026 ++ q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
13027 ++
13028 + /*
13029 + * to the entropy pool. Only request-based targets use this.
13030 + * to the entropy pool, Only request-based targets use this.
13031 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
13032 +index e83b63608262..254c26eb963a 100644
13033 +--- a/drivers/md/dm-thin.c
13034 ++++ b/drivers/md/dm-thin.c
13035 +@@ -3283,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
13036 + as.argc = argc;
13037 + as.argv = argv;
13038 +
13039 ++ /* make sure metadata and data are different devices */
13040 ++ if (!strcmp(argv[0], argv[1])) {
13041 ++ ti->error = "Error setting metadata or data device";
13042 ++ r = -EINVAL;
13043 ++ goto out_unlock;
13044 ++ }
13045 ++
13046 + /*
13047 + * Set default pool features.
13048 + */
13049 +@@ -4167,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
13050 + tc->sort_bio_list = RB_ROOT;
13051 +
13052 + if (argc == 3) {
13053 ++ if (!strcmp(argv[0], argv[2])) {
13054 ++ ti->error = "Error setting origin device";
13055 ++ r = -EINVAL;
13056 ++ goto bad_origin_dev;
13057 ++ }
13058 ++
13059 + r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
13060 + if (r) {
13061 + ti->error = "Error opening origin device";
13062 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
13063 +index 515e6af9bed2..4986eea520b6 100644
13064 +--- a/drivers/md/dm.c
13065 ++++ b/drivers/md/dm.c
13066 +@@ -963,6 +963,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
13067 + }
13068 + }
13069 +
13070 ++void disable_discard(struct mapped_device *md)
13071 ++{
13072 ++ struct queue_limits *limits = dm_get_queue_limits(md);
13073 ++
13074 ++ /* device doesn't really support DISCARD, disable it */
13075 ++ limits->max_discard_sectors = 0;
13076 ++ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
13077 ++}
13078 ++
13079 + void disable_write_same(struct mapped_device *md)
13080 + {
13081 + struct queue_limits *limits = dm_get_queue_limits(md);
13082 +@@ -988,11 +997,14 @@ static void clone_endio(struct bio *bio)
13083 + dm_endio_fn endio = tio->ti->type->end_io;
13084 +
13085 + if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
13086 +- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
13087 +- !bio->bi_disk->queue->limits.max_write_same_sectors)
13088 ++ if (bio_op(bio) == REQ_OP_DISCARD &&
13089 ++ !bio->bi_disk->queue->limits.max_discard_sectors)
13090 ++ disable_discard(md);
13091 ++ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
13092 ++ !bio->bi_disk->queue->limits.max_write_same_sectors)
13093 + disable_write_same(md);
13094 +- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
13095 +- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
13096 ++ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
13097 ++ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
13098 + disable_write_zeroes(md);
13099 + }
13100 +
13101 +@@ -1060,15 +1072,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
13102 + return -EINVAL;
13103 + }
13104 +
13105 +- /*
13106 +- * BIO based queue uses its own splitting. When multipage bvecs
13107 +- * is switched on, size of the incoming bio may be too big to
13108 +- * be handled in some targets, such as crypt.
13109 +- *
13110 +- * When these targets are ready for the big bio, we can remove
13111 +- * the limit.
13112 +- */
13113 +- ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
13114 ++ ti->max_io_len = (uint32_t) len;
13115 +
13116 + return 0;
13117 + }
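
The dm-rq and dm.c hunks above extend one error-handling idiom: when a clone fails with BLK_STS_TARGET and the underlying queue advertises a zero limit for the request's operation, the device evidently does not support that operation, so it is disabled for the whole mapped device; the fix adds DISCARD to the chain and makes the branches else-if, since a request carries exactly one operation. A compact model of the dispatch (the enum and struct are stand-ins, not the block layer's types):

    #include <stdio.h>

    enum req_op { OP_DISCARD, OP_WRITE_SAME, OP_WRITE_ZEROES, OP_OTHER };

    struct limits {
        unsigned int max_discard_sectors;
        unsigned int max_write_same_sectors;
        unsigned int max_write_zeroes_sectors;
    };

    /* On a BLK_STS_TARGET-style error, report which op to disable; the
     * branches are exclusive because a request has exactly one op. */
    static const char *op_to_disable(enum req_op op, const struct limits *l)
    {
        if (op == OP_DISCARD && !l->max_discard_sectors)
            return "discard";
        else if (op == OP_WRITE_SAME && !l->max_write_same_sectors)
            return "write_same";
        else if (op == OP_WRITE_ZEROES && !l->max_write_zeroes_sectors)
            return "write_zeroes";
        return "nothing";
    }

    int main(void)
    {
        struct limits l = { 0, 0, 0 };

        printf("%s\n", op_to_disable(OP_DISCARD, &l)); /* discard */
        return 0;
    }
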
13118 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
13119 +index abb5d382f64d..3b6880dd648d 100644
13120 +--- a/drivers/md/raid10.c
13121 ++++ b/drivers/md/raid10.c
13122 +@@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mddev)
13123 + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
13124 + mddev->sync_thread = md_register_thread(md_do_sync, mddev,
13125 + "reshape");
13126 ++ if (!mddev->sync_thread)
13127 ++ goto out_free_conf;
13128 + }
13129 +
13130 + return 0;
13131 +@@ -4670,7 +4672,6 @@ read_more:
13132 + atomic_inc(&r10_bio->remaining);
13133 + read_bio->bi_next = NULL;
13134 + generic_make_request(read_bio);
13135 +- sector_nr += nr_sectors;
13136 + sectors_done += nr_sectors;
13137 + if (sector_nr <= last)
13138 + goto read_more;
13139 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
13140 +index cecea901ab8c..5b68f2d0da60 100644
13141 +--- a/drivers/md/raid5.c
13142 ++++ b/drivers/md/raid5.c
13143 +@@ -7402,6 +7402,8 @@ static int raid5_run(struct mddev *mddev)
13144 + set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
13145 + mddev->sync_thread = md_register_thread(md_do_sync, mddev,
13146 + "reshape");
13147 ++ if (!mddev->sync_thread)
13148 ++ goto abort;
13149 + }
13150 +
13151 + /* Ok, everything is just fine now */
13152 +diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
13153 +index 96807e134886..8abb1a510a81 100644
13154 +--- a/drivers/media/dvb-frontends/lgdt330x.c
13155 ++++ b/drivers/media/dvb-frontends/lgdt330x.c
13156 +@@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe,
13157 +
13158 + if ((buf[0] & 0x02) == 0x00)
13159 + *status |= FE_HAS_SYNC;
13160 +- if ((buf[0] & 0xfd) == 0x01)
13161 ++ if ((buf[0] & 0x01) == 0x01)
13162 + *status |= FE_HAS_VITERBI | FE_HAS_LOCK;
13163 + break;
13164 + default:
13165 +diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
13166 +index b168bf3635b6..8b0b8b5aa531 100644
13167 +--- a/drivers/media/i2c/cx25840/cx25840-core.c
13168 ++++ b/drivers/media/i2c/cx25840/cx25840-core.c
13169 +@@ -5216,8 +5216,9 @@ static int cx25840_probe(struct i2c_client *client,
13170 + * those extra inputs. So, let's add it only when needed.
13171 + */
13172 + state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
13173 ++ state->pads[CX25840_PAD_INPUT].sig_type = PAD_SIGNAL_ANALOG;
13174 + state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
13175 +- state->pads[CX25840_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
13176 ++ state->pads[CX25840_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV;
13177 + sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
13178 +
13179 + ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
13180 +diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h
13181 +index c323b1af1f83..9efefa15d090 100644
13182 +--- a/drivers/media/i2c/cx25840/cx25840-core.h
13183 ++++ b/drivers/media/i2c/cx25840/cx25840-core.h
13184 +@@ -40,7 +40,6 @@ enum cx25840_model {
13185 + enum cx25840_media_pads {
13186 + CX25840_PAD_INPUT,
13187 + CX25840_PAD_VID_OUT,
13188 +- CX25840_PAD_VBI_OUT,
13189 +
13190 + CX25840_NUM_PADS
13191 + };
13192 +diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
13193 +index d639b9bcf64a..7a759b4b88cf 100644
13194 +--- a/drivers/media/i2c/mt9m111.c
13195 ++++ b/drivers/media/i2c/mt9m111.c
13196 +@@ -1273,6 +1273,8 @@ static int mt9m111_probe(struct i2c_client *client,
13197 + mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
13198 + mt9m111->rect.width = MT9M111_MAX_WIDTH;
13199 + mt9m111->rect.height = MT9M111_MAX_HEIGHT;
13200 ++ mt9m111->width = mt9m111->rect.width;
13201 ++ mt9m111->height = mt9m111->rect.height;
13202 + mt9m111->fmt = &mt9m111_colour_fmts[0];
13203 + mt9m111->lastpage = -1;
13204 + mutex_init(&mt9m111->power_lock);
13205 +diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
13206 +index bef3f3aae0ed..9f8fc1ad9b1a 100644
13207 +--- a/drivers/media/i2c/ov5640.c
13208 ++++ b/drivers/media/i2c/ov5640.c
13209 +@@ -1893,7 +1893,7 @@ static void ov5640_reset(struct ov5640_dev *sensor)
13210 + usleep_range(1000, 2000);
13211 +
13212 + gpiod_set_value_cansleep(sensor->reset_gpio, 0);
13213 +- usleep_range(5000, 10000);
13214 ++ usleep_range(20000, 25000);
13215 + }
13216 +
13217 + static int ov5640_set_power_on(struct ov5640_dev *sensor)
13218 +diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
13219 +index 177688afd9a6..8835b831cdc0 100644
13220 +--- a/drivers/media/i2c/ov7740.c
13221 ++++ b/drivers/media/i2c/ov7740.c
13222 +@@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client,
13223 + if (ret)
13224 + return ret;
13225 +
13226 ++ pm_runtime_set_active(&client->dev);
13227 ++ pm_runtime_enable(&client->dev);
13228 ++
13229 + ret = ov7740_detect(ov7740);
13230 + if (ret)
13231 + goto error_detect;
13232 +@@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client,
13233 + if (ret)
13234 + goto error_async_register;
13235 +
13236 +- pm_runtime_set_active(&client->dev);
13237 +- pm_runtime_enable(&client->dev);
13238 + pm_runtime_idle(&client->dev);
13239 +
13240 + return 0;
13241 +@@ -1134,6 +1135,8 @@ error_async_register:
13242 + error_init_controls:
13243 + ov7740_free_controls(ov7740);
13244 + error_detect:
13245 ++ pm_runtime_disable(&client->dev);
13246 ++ pm_runtime_set_suspended(&client->dev);
13247 + ov7740_set_power(ov7740, 0);
13248 + media_entity_cleanup(&ov7740->subdev.entity);
13249 +
13250 +diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
13251 +index 2a5d5002c27e..f761e4d8bf2a 100644
13252 +--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
13253 ++++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
13254 +@@ -702,7 +702,7 @@ end:
13255 + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
13256 + }
13257 +
13258 +-static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
13259 ++static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
13260 + enum v4l2_buf_type type)
13261 + {
13262 + if (V4L2_TYPE_IS_OUTPUT(type))
13263 +@@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
13264 + static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
13265 + {
13266 + struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
13267 +- struct vb2_buffer *vb;
13268 ++ struct vb2_v4l2_buffer *vb;
13269 + int ret = 0;
13270 +
13271 + ret = pm_runtime_get_sync(ctx->jpeg->dev);
13272 +@@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
13273 + return 0;
13274 + err:
13275 + while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
13276 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
13277 ++ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
13278 + return ret;
13279 + }
13280 +
13281 + static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
13282 + {
13283 + struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
13284 +- struct vb2_buffer *vb;
13285 ++ struct vb2_v4l2_buffer *vb;
13286 +
13287 + /*
13288 + * STREAMOFF is an acknowledgment for source change event.
13289 +@@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
13290 + struct mtk_jpeg_src_buf *src_buf;
13291 +
13292 + vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13293 +- src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
13294 ++ src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
13295 + mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
13296 + ctx->state = MTK_JPEG_RUNNING;
13297 + } else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
13298 +@@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
13299 + }
13300 +
13301 + while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
13302 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
13303 ++ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
13304 +
13305 + pm_runtime_put_sync(ctx->jpeg->dev);
13306 + }
13307 +@@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv)
13308 + {
13309 + struct mtk_jpeg_ctx *ctx = priv;
13310 + struct mtk_jpeg_dev *jpeg = ctx->jpeg;
13311 +- struct vb2_buffer *src_buf, *dst_buf;
13312 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
13313 + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
13314 + unsigned long flags;
13315 + struct mtk_jpeg_src_buf *jpeg_src_buf;
13316 +@@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv)
13317 +
13318 + src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13319 + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
13320 +- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
13321 ++ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
13322 +
13323 + if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
13324 +- for (i = 0; i < dst_buf->num_planes; i++)
13325 +- vb2_set_plane_payload(dst_buf, i, 0);
13326 ++ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
13327 ++ vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
13328 + buf_state = VB2_BUF_STATE_DONE;
13329 + goto dec_end;
13330 + }
13331 +@@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv)
13332 + return;
13333 + }
13334 +
13335 +- mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
13336 +- if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
13337 ++ mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
13338 ++ if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
13339 + goto dec_end;
13340 +
13341 + spin_lock_irqsave(&jpeg->hw_lock, flags);
13342 +@@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv)
13343 + dec_end:
13344 + v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
13345 + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
13346 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
13347 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
13348 ++ v4l2_m2m_buf_done(src_buf, buf_state);
13349 ++ v4l2_m2m_buf_done(dst_buf, buf_state);
13350 + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
13351 + }
13352 +
13353 +@@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
13354 + {
13355 + struct mtk_jpeg_dev *jpeg = priv;
13356 + struct mtk_jpeg_ctx *ctx;
13357 +- struct vb2_buffer *src_buf, *dst_buf;
13358 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
13359 + struct mtk_jpeg_src_buf *jpeg_src_buf;
13360 + enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
13361 + u32 dec_irq_ret;
13362 +@@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
13363 +
13364 + src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
13365 + dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
13366 +- jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
13367 ++ jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
13368 +
13369 + if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
13370 + mtk_jpeg_dec_reset(jpeg->dec_reg_base);
13371 +@@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
13372 + goto dec_end;
13373 + }
13374 +
13375 +- for (i = 0; i < dst_buf->num_planes; i++)
13376 +- vb2_set_plane_payload(dst_buf, i,
13377 ++ for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
13378 ++ vb2_set_plane_payload(&dst_buf->vb2_buf, i,
13379 + jpeg_src_buf->dec_param.comp_size[i]);
13380 +
13381 + buf_state = VB2_BUF_STATE_DONE;
13382 +
13383 + dec_end:
13384 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
13385 +- v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
13386 ++ v4l2_m2m_buf_done(src_buf, buf_state);
13387 ++ v4l2_m2m_buf_done(dst_buf, buf_state);
13388 + v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
13389 + return IRQ_HANDLED;
13390 + }
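The mtk-jpeg hunks above, and the matching ones in the drivers that follow (mx2_emmaprp, rga, s5p-g2d, s5p-jpeg, sh_veu), all apply one mechanical conversion: the buffers on the m2m queues are really struct vb2_v4l2_buffer, so the drivers now keep that wrapper type throughout and only reach into its embedded vb2_buf member where the videobuf2 core helpers want a struct vb2_buffer, instead of casting back with to_vb2_v4l2_buffer(). A minimal sketch of the converted shape (example_device_run and its completion flow are invented for illustration, not taken from any one driver):

    #include <media/v4l2-mem2mem.h>
    #include <media/videobuf2-v4l2.h>

    static void example_device_run(struct v4l2_m2m_ctx *m2m_ctx)
    {
            struct vb2_v4l2_buffer *src, *dst;

            src = v4l2_m2m_src_buf_remove(m2m_ctx);
            dst = v4l2_m2m_dst_buf_remove(m2m_ctx);

            /* videobuf2 core helpers still take the embedded vb2_buffer */
            vb2_set_plane_payload(&dst->vb2_buf, 0,
                                  vb2_get_plane_payload(&src->vb2_buf, 0));

            /* the m2m helpers take the vb2_v4l2_buffer directly */
            v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
            v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
    }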
13391 +diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
13392 +index 27b078cf98e3..f60f499c596b 100644
13393 +--- a/drivers/media/platform/mx2_emmaprp.c
13394 ++++ b/drivers/media/platform/mx2_emmaprp.c
13395 +@@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv)
13396 + {
13397 + struct emmaprp_ctx *ctx = priv;
13398 + struct emmaprp_q_data *s_q_data, *d_q_data;
13399 +- struct vb2_buffer *src_buf, *dst_buf;
13400 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
13401 + struct emmaprp_dev *pcdev = ctx->dev;
13402 + unsigned int s_width, s_height;
13403 + unsigned int d_width, d_height;
13404 +@@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv)
13405 + d_height = d_q_data->height;
13406 + d_size = d_width * d_height;
13407 +
13408 +- p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
13409 +- p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
13410 ++ p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
13411 ++ p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
13412 + if (!p_in || !p_out) {
13413 + v4l2_err(&pcdev->v4l2_dev,
13414 + "Acquiring kernel pointers to buffers failed\n");
13415 +diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
13416 +index f0719ce24b97..aef8d8dab6ab 100644
13417 +--- a/drivers/media/platform/rcar-vin/rcar-core.c
13418 ++++ b/drivers/media/platform/rcar-vin/rcar-core.c
13419 +@@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
13420 + !is_media_entity_v4l2_video_device(link->sink->entity))
13421 + return 0;
13422 +
13423 +- /* If any entity is in use don't allow link changes. */
13424 ++ /*
13425 ++ * Don't allow link changes if any entity in the graph is
13426 ++ * streaming, as modifying the CHSEL register fields can disrupt
13427 ++ * running streams.
13428 ++ */
13429 + media_device_for_each_entity(entity, &group->mdev)
13430 +- if (entity->use_count)
13431 ++ if (entity->stream_count)
13432 + return -EBUSY;
13433 +
13434 + mutex_lock(&group->lock);
13435 +diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
13436 +index 5c653287185f..b096227a9722 100644
13437 +--- a/drivers/media/platform/rockchip/rga/rga.c
13438 ++++ b/drivers/media/platform/rockchip/rga/rga.c
13439 +@@ -43,7 +43,7 @@ static void device_run(void *prv)
13440 + {
13441 + struct rga_ctx *ctx = prv;
13442 + struct rockchip_rga *rga = ctx->rga;
13443 +- struct vb2_buffer *src, *dst;
13444 ++ struct vb2_v4l2_buffer *src, *dst;
13445 + unsigned long flags;
13446 +
13447 + spin_lock_irqsave(&rga->ctrl_lock, flags);
13448 +@@ -53,8 +53,8 @@ static void device_run(void *prv)
13449 + src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13450 + dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
13451 +
13452 +- rga_buf_map(src);
13453 +- rga_buf_map(dst);
13454 ++ rga_buf_map(&src->vb2_buf);
13455 ++ rga_buf_map(&dst->vb2_buf);
13456 +
13457 + rga_hw_start(rga);
13458 +
13459 +diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
13460 +index 57ab1d1085d1..971c47165010 100644
13461 +--- a/drivers/media/platform/s5p-g2d/g2d.c
13462 ++++ b/drivers/media/platform/s5p-g2d/g2d.c
13463 +@@ -513,7 +513,7 @@ static void device_run(void *prv)
13464 + {
13465 + struct g2d_ctx *ctx = prv;
13466 + struct g2d_dev *dev = ctx->dev;
13467 +- struct vb2_buffer *src, *dst;
13468 ++ struct vb2_v4l2_buffer *src, *dst;
13469 + unsigned long flags;
13470 + u32 cmd = 0;
13471 +
13472 +@@ -528,10 +528,10 @@ static void device_run(void *prv)
13473 + spin_lock_irqsave(&dev->ctrl_lock, flags);
13474 +
13475 + g2d_set_src_size(dev, &ctx->in);
13476 +- g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
13477 ++ g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
13478 +
13479 + g2d_set_dst_size(dev, &ctx->out);
13480 +- g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
13481 ++ g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
13482 +
13483 + g2d_set_rop4(dev, ctx->rop);
13484 + g2d_set_flip(dev, ctx->flip);
13485 +diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
13486 +index 3f9000b70385..370942b67d86 100644
13487 +--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
13488 ++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
13489 +@@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
13490 + static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
13491 + {
13492 + struct s5p_jpeg *jpeg = ctx->jpeg;
13493 +- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13494 ++ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13495 + struct s5p_jpeg_buffer jpeg_buffer;
13496 + unsigned int word;
13497 + int c, x, components;
13498 +
13499 + jpeg_buffer.size = 2; /* Ls */
13500 + jpeg_buffer.data =
13501 +- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
13502 ++ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
13503 + jpeg_buffer.curr = 0;
13504 +
13505 + word = 0;
13506 +@@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
13507 + static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
13508 + {
13509 + struct s5p_jpeg *jpeg = ctx->jpeg;
13510 +- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13511 ++ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13512 + struct s5p_jpeg_buffer jpeg_buffer;
13513 + unsigned int word;
13514 + int c, i, n, j;
13515 +
13516 + for (j = 0; j < ctx->out_q.dht.n; ++j) {
13517 + jpeg_buffer.size = ctx->out_q.dht.len[j];
13518 +- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
13519 ++ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
13520 + ctx->out_q.dht.marker[j];
13521 + jpeg_buffer.curr = 0;
13522 +
13523 +@@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
13524 + static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
13525 + {
13526 + struct s5p_jpeg *jpeg = ctx->jpeg;
13527 +- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13528 ++ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13529 + struct s5p_jpeg_buffer jpeg_buffer;
13530 + int c, x, components;
13531 +
13532 + jpeg_buffer.size = ctx->out_q.sof_len;
13533 + jpeg_buffer.data =
13534 +- (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
13535 ++ (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
13536 + jpeg_buffer.curr = 0;
13537 +
13538 + skip(&jpeg_buffer, 5); /* P, Y, X */
13539 +@@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
13540 + static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
13541 + {
13542 + struct s5p_jpeg *jpeg = ctx->jpeg;
13543 +- struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13544 ++ struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13545 + struct s5p_jpeg_buffer jpeg_buffer;
13546 + unsigned int word;
13547 + int c, i, j;
13548 +
13549 + for (j = 0; j < ctx->out_q.dqt.n; ++j) {
13550 + jpeg_buffer.size = ctx->out_q.dqt.len[j];
13551 +- jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
13552 ++ jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
13553 + ctx->out_q.dqt.marker[j];
13554 + jpeg_buffer.curr = 0;
13555 +
13556 +@@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
13557 + return 0;
13558 + }
13559 +
13560 +-static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
13561 ++static int enum_fmt(struct s5p_jpeg_ctx *ctx,
13562 ++ struct s5p_jpeg_fmt *sjpeg_formats, int n,
13563 + struct v4l2_fmtdesc *f, u32 type)
13564 + {
13565 + int i, num = 0;
13566 ++ unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
13567 +
13568 + for (i = 0; i < n; ++i) {
13569 +- if (sjpeg_formats[i].flags & type) {
13570 ++ if (sjpeg_formats[i].flags & type &&
13571 ++ sjpeg_formats[i].flags & fmt_ver_flag) {
13572 + /* index-th format of type type found ? */
13573 + if (num == f->index)
13574 + break;
13575 +@@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
13576 + struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
13577 +
13578 + if (ctx->mode == S5P_JPEG_ENCODE)
13579 +- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
13580 ++ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
13581 + SJPEG_FMT_FLAG_ENC_CAPTURE);
13582 +
13583 +- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
13584 +- SJPEG_FMT_FLAG_DEC_CAPTURE);
13585 ++ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
13586 ++ SJPEG_FMT_FLAG_DEC_CAPTURE);
13587 + }
13588 +
13589 + static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
13590 +@@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
13591 + struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
13592 +
13593 + if (ctx->mode == S5P_JPEG_ENCODE)
13594 +- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
13595 ++ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
13596 + SJPEG_FMT_FLAG_ENC_OUTPUT);
13597 +
13598 +- return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
13599 +- SJPEG_FMT_FLAG_DEC_OUTPUT);
13600 ++ return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
13601 ++ SJPEG_FMT_FLAG_DEC_OUTPUT);
13602 + }
13603 +
13604 + static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
13605 +@@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv)
13606 + {
13607 + struct s5p_jpeg_ctx *ctx = priv;
13608 + struct s5p_jpeg *jpeg = ctx->jpeg;
13609 +- struct vb2_buffer *src_buf, *dst_buf;
13610 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
13611 + unsigned long src_addr, dst_addr, flags;
13612 +
13613 + spin_lock_irqsave(&ctx->jpeg->slock, flags);
13614 +
13615 + src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13616 + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
13617 +- src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
13618 +- dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
13619 ++ src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
13620 ++ dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
13621 +
13622 + s5p_jpeg_reset(jpeg->regs);
13623 + s5p_jpeg_poweron(jpeg->regs);
13624 +@@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13625 + {
13626 + struct s5p_jpeg *jpeg = ctx->jpeg;
13627 + struct s5p_jpeg_fmt *fmt;
13628 +- struct vb2_buffer *vb;
13629 ++ struct vb2_v4l2_buffer *vb;
13630 + struct s5p_jpeg_addr jpeg_addr = {};
13631 + u32 pix_size, padding_bytes = 0;
13632 +
13633 +@@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13634 + vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
13635 + }
13636 +
13637 +- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
13638 ++ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
13639 +
13640 + if (fmt->colplanes == 2) {
13641 + jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
13642 +@@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13643 + static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
13644 + {
13645 + struct s5p_jpeg *jpeg = ctx->jpeg;
13646 +- struct vb2_buffer *vb;
13647 ++ struct vb2_v4l2_buffer *vb;
13648 + unsigned int jpeg_addr = 0;
13649 +
13650 + if (ctx->mode == S5P_JPEG_ENCODE)
13651 +@@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
13652 + else
13653 + vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13654 +
13655 +- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
13656 ++ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
13657 + if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
13658 + ctx->mode == S5P_JPEG_DECODE)
13659 + jpeg_addr += ctx->out_q.sos;
13660 +@@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13661 + {
13662 + struct s5p_jpeg *jpeg = ctx->jpeg;
13663 + struct s5p_jpeg_fmt *fmt;
13664 +- struct vb2_buffer *vb;
13665 ++ struct vb2_v4l2_buffer *vb;
13666 + struct s5p_jpeg_addr jpeg_addr = {};
13667 + u32 pix_size;
13668 +
13669 +@@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13670 + fmt = ctx->cap_q.fmt;
13671 + }
13672 +
13673 +- jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
13674 ++ jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
13675 +
13676 + if (fmt->colplanes == 2) {
13677 + jpeg_addr.cb = jpeg_addr.y + pix_size;
13678 +@@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
13679 + static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
13680 + {
13681 + struct s5p_jpeg *jpeg = ctx->jpeg;
13682 +- struct vb2_buffer *vb;
13683 ++ struct vb2_v4l2_buffer *vb;
13684 + unsigned int jpeg_addr = 0;
13685 +
13686 + if (ctx->mode == S5P_JPEG_ENCODE)
13687 +@@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
13688 + else
13689 + vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
13690 +
13691 +- jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
13692 ++ jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
13693 + exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
13694 + }
13695 +
13696 +diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
13697 +index 09ae64a0004c..d277cc674349 100644
13698 +--- a/drivers/media/platform/sh_veu.c
13699 ++++ b/drivers/media/platform/sh_veu.c
13700 +@@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
13701 + static void sh_veu_device_run(void *priv)
13702 + {
13703 + struct sh_veu_dev *veu = priv;
13704 +- struct vb2_buffer *src_buf, *dst_buf;
13705 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
13706 +
13707 + src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
13708 + dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
13709 +
13710 + if (src_buf && dst_buf)
13711 +- sh_veu_process(veu, src_buf, dst_buf);
13712 ++ sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
13713 + }
13714 +
13715 + /* ========== video ioctls ========== */
13716 +diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
13717 +index 6950585edb5a..d16f54cdc3b0 100644
13718 +--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
13719 ++++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
13720 +@@ -793,7 +793,7 @@ static const struct regmap_config sun6i_csi_regmap_config = {
13721 + .reg_bits = 32,
13722 + .reg_stride = 4,
13723 + .val_bits = 32,
13724 +- .max_register = 0x1000,
13725 ++ .max_register = 0x9c,
13726 + };
13727 +
13728 + static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
13729 +diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
13730 +index 4b2e3de7856e..c4fc8e7d365a 100644
13731 +--- a/drivers/media/platform/vimc/Makefile
13732 ++++ b/drivers/media/platform/vimc/Makefile
13733 +@@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o
13734 + vimc_debayer-objs := vimc-debayer.o
13735 + vimc_scaler-objs := vimc-scaler.o
13736 + vimc_sensor-objs := vimc-sensor.o
13737 ++vimc_streamer-objs := vimc-streamer.o
13738 +
13739 + obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \
13740 +- vimc_scaler.o vimc_sensor.o
13741 ++ vimc_scaler.o vimc_sensor.o vimc_streamer.o
13742 +diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
13743 +index 3f7e9ed56633..80d7515ec420 100644
13744 +--- a/drivers/media/platform/vimc/vimc-capture.c
13745 ++++ b/drivers/media/platform/vimc/vimc-capture.c
13746 +@@ -24,6 +24,7 @@
13747 + #include <media/videobuf2-vmalloc.h>
13748 +
13749 + #include "vimc-common.h"
13750 ++#include "vimc-streamer.h"
13751 +
13752 + #define VIMC_CAP_DRV_NAME "vimc-capture"
13753 +
13754 +@@ -44,7 +45,7 @@ struct vimc_cap_device {
13755 + spinlock_t qlock;
13756 + struct mutex lock;
13757 + u32 sequence;
13758 +- struct media_pipeline pipe;
13759 ++ struct vimc_stream stream;
13760 + };
13761 +
13762 + static const struct v4l2_pix_format fmt_default = {
13763 +@@ -248,14 +249,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
13764 + vcap->sequence = 0;
13765 +
13766 + /* Start the media pipeline */
13767 +- ret = media_pipeline_start(entity, &vcap->pipe);
13768 ++ ret = media_pipeline_start(entity, &vcap->stream.pipe);
13769 + if (ret) {
13770 + vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
13771 + return ret;
13772 + }
13773 +
13774 +- /* Enable streaming from the pipe */
13775 +- ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
13776 ++ ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1);
13777 + if (ret) {
13778 + media_pipeline_stop(entity);
13779 + vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
13780 +@@ -273,8 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
13781 + {
13782 + struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
13783 +
13784 +- /* Disable streaming from the pipe */
13785 +- vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
13786 ++ vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0);
13787 +
13788 + /* Stop the media pipeline */
13789 + media_pipeline_stop(&vcap->vdev.entity);
13790 +@@ -355,8 +354,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
13791 + kfree(vcap);
13792 + }
13793 +
13794 +-static void vimc_cap_process_frame(struct vimc_ent_device *ved,
13795 +- struct media_pad *sink, const void *frame)
13796 ++static void *vimc_cap_process_frame(struct vimc_ent_device *ved,
13797 ++ const void *frame)
13798 + {
13799 + struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
13800 + ved);
13801 +@@ -370,7 +369,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
13802 + typeof(*vimc_buf), list);
13803 + if (!vimc_buf) {
13804 + spin_unlock(&vcap->qlock);
13805 +- return;
13806 ++ return ERR_PTR(-EAGAIN);
13807 + }
13808 +
13809 + /* Remove this entry from the list */
13810 +@@ -391,6 +390,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
13811 + vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0,
13812 + vcap->format.sizeimage);
13813 + vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
13814 ++ return NULL;
13815 + }
13816 +
13817 + static int vimc_cap_comp_bind(struct device *comp, struct device *master,
13818 +diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
13819 +index 867e24dbd6b5..c1a74bb2df58 100644
13820 +--- a/drivers/media/platform/vimc/vimc-common.c
13821 ++++ b/drivers/media/platform/vimc/vimc-common.c
13822 +@@ -207,41 +207,6 @@ const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
13823 + }
13824 + EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
13825 +
13826 +-int vimc_propagate_frame(struct media_pad *src, const void *frame)
13827 +-{
13828 +- struct media_link *link;
13829 +-
13830 +- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
13831 +- return -EINVAL;
13832 +-
13833 +- /* Send this frame to all sink pads that are direct linked */
13834 +- list_for_each_entry(link, &src->entity->links, list) {
13835 +- if (link->source == src &&
13836 +- (link->flags & MEDIA_LNK_FL_ENABLED)) {
13837 +- struct vimc_ent_device *ved = NULL;
13838 +- struct media_entity *entity = link->sink->entity;
13839 +-
13840 +- if (is_media_entity_v4l2_subdev(entity)) {
13841 +- struct v4l2_subdev *sd =
13842 +- container_of(entity, struct v4l2_subdev,
13843 +- entity);
13844 +- ved = v4l2_get_subdevdata(sd);
13845 +- } else if (is_media_entity_v4l2_video_device(entity)) {
13846 +- struct video_device *vdev =
13847 +- container_of(entity,
13848 +- struct video_device,
13849 +- entity);
13850 +- ved = video_get_drvdata(vdev);
13851 +- }
13852 +- if (ved && ved->process_frame)
13853 +- ved->process_frame(ved, link->sink, frame);
13854 +- }
13855 +- }
13856 +-
13857 +- return 0;
13858 +-}
13859 +-EXPORT_SYMBOL_GPL(vimc_propagate_frame);
13860 +-
13861 + /* Helper function to allocate and initialize pads */
13862 + struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
13863 + {
13864 +diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
13865 +index 2e9981b18166..6ed969d9efbb 100644
13866 +--- a/drivers/media/platform/vimc/vimc-common.h
13867 ++++ b/drivers/media/platform/vimc/vimc-common.h
13868 +@@ -113,23 +113,12 @@ struct vimc_pix_map {
13869 + struct vimc_ent_device {
13870 + struct media_entity *ent;
13871 + struct media_pad *pads;
13872 +- void (*process_frame)(struct vimc_ent_device *ved,
13873 +- struct media_pad *sink, const void *frame);
13874 ++ void * (*process_frame)(struct vimc_ent_device *ved,
13875 ++ const void *frame);
13876 + void (*vdev_get_format)(struct vimc_ent_device *ved,
13877 + struct v4l2_pix_format *fmt);
13878 + };
13879 +
13880 +-/**
13881 +- * vimc_propagate_frame - propagate a frame through the topology
13882 +- *
13883 +- * @src: the source pad where the frame is being originated
13884 +- * @frame: the frame to be propagated
13885 +- *
13886 +- * This function will call the process_frame callback from the vimc_ent_device
13887 +- * struct of the nodes directly connected to the @src pad
13888 +- */
13889 +-int vimc_propagate_frame(struct media_pad *src, const void *frame);
13890 +-
13891 + /**
13892 + * vimc_pads_init - initialize pads
13893 + *
13894 +diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
13895 +index 77887f66f323..7d77c63b99d2 100644
13896 +--- a/drivers/media/platform/vimc/vimc-debayer.c
13897 ++++ b/drivers/media/platform/vimc/vimc-debayer.c
13898 +@@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
13899 + static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
13900 + {
13901 + struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
13902 +- int ret;
13903 +
13904 + if (enable) {
13905 + const struct vimc_pix_map *vpix;
13906 +@@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
13907 + if (!vdeb->src_frame)
13908 + return -ENOMEM;
13909 +
13910 +- /* Turn the stream on in the subdevices directly connected */
13911 +- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
13912 +- if (ret) {
13913 +- vfree(vdeb->src_frame);
13914 +- vdeb->src_frame = NULL;
13915 +- return ret;
13916 +- }
13917 + } else {
13918 + if (!vdeb->src_frame)
13919 + return 0;
13920 +
13921 +- /* Disable streaming from the pipe */
13922 +- ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
13923 +- if (ret)
13924 +- return ret;
13925 +-
13926 + vfree(vdeb->src_frame);
13927 + vdeb->src_frame = NULL;
13928 + }
13929 +@@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
13930 + }
13931 + }
13932 +
13933 +-static void vimc_deb_process_frame(struct vimc_ent_device *ved,
13934 +- struct media_pad *sink,
13935 +- const void *sink_frame)
13936 ++static void *vimc_deb_process_frame(struct vimc_ent_device *ved,
13937 ++ const void *sink_frame)
13938 + {
13939 + struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
13940 + ved);
13941 +@@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
13942 +
13943 + /* If the stream in this node is not active, just return */
13944 + if (!vdeb->src_frame)
13945 +- return;
13946 ++ return ERR_PTR(-EINVAL);
13947 +
13948 + for (i = 0; i < vdeb->sink_fmt.height; i++)
13949 + for (j = 0; j < vdeb->sink_fmt.width; j++) {
13950 +@@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved,
13951 + vdeb->set_rgb_src(vdeb, i, j, rgb);
13952 + }
13953 +
13954 +- /* Propagate the frame through all source pads */
13955 +- for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
13956 +- struct media_pad *pad = &vdeb->sd.entity.pads[i];
13957 ++ return vdeb->src_frame;
13958 +
13959 +- vimc_propagate_frame(pad, vdeb->src_frame);
13960 +- }
13961 + }
13962 +
13963 + static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
13964 +diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
13965 +index b0952ee86296..39b2a73dfcc1 100644
13966 +--- a/drivers/media/platform/vimc/vimc-scaler.c
13967 ++++ b/drivers/media/platform/vimc/vimc-scaler.c
13968 +@@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
13969 + static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
13970 + {
13971 + struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
13972 +- int ret;
13973 +
13974 + if (enable) {
13975 + const struct vimc_pix_map *vpix;
13976 +@@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
13977 + if (!vsca->src_frame)
13978 + return -ENOMEM;
13979 +
13980 +- /* Turn the stream on in the subdevices directly connected */
13981 +- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
13982 +- if (ret) {
13983 +- vfree(vsca->src_frame);
13984 +- vsca->src_frame = NULL;
13985 +- return ret;
13986 +- }
13987 + } else {
13988 + if (!vsca->src_frame)
13989 + return 0;
13990 +
13991 +- /* Disable streaming from the pipe */
13992 +- ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
13993 +- if (ret)
13994 +- return ret;
13995 +-
13996 + vfree(vsca->src_frame);
13997 + vsca->src_frame = NULL;
13998 + }
13999 +@@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
14000 + vimc_sca_scale_pix(vsca, i, j, sink_frame);
14001 + }
14002 +
14003 +-static void vimc_sca_process_frame(struct vimc_ent_device *ved,
14004 +- struct media_pad *sink,
14005 +- const void *sink_frame)
14006 ++static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
14007 ++ const void *sink_frame)
14008 + {
14009 + struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
14010 + ved);
14011 +- unsigned int i;
14012 +
14013 + /* If the stream in this node is not active, just return */
14014 + if (!vsca->src_frame)
14015 +- return;
14016 ++ return ERR_PTR(-EINVAL);
14017 +
14018 + vimc_sca_fill_src_frame(vsca, sink_frame);
14019 +
14020 +- /* Propagate the frame through all source pads */
14021 +- for (i = 1; i < vsca->sd.entity.num_pads; i++) {
14022 +- struct media_pad *pad = &vsca->sd.entity.pads[i];
14023 +-
14024 +- vimc_propagate_frame(pad, vsca->src_frame);
14025 +- }
14026 ++ return vsca->src_frame;
14027 + };
14028 +
14029 + static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
14030 +diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
14031 +index 32ca9c6172b1..93961a1e694f 100644
14032 +--- a/drivers/media/platform/vimc/vimc-sensor.c
14033 ++++ b/drivers/media/platform/vimc/vimc-sensor.c
14034 +@@ -16,8 +16,6 @@
14035 + */
14036 +
14037 + #include <linux/component.h>
14038 +-#include <linux/freezer.h>
14039 +-#include <linux/kthread.h>
14040 + #include <linux/module.h>
14041 + #include <linux/mod_devicetable.h>
14042 + #include <linux/platform_device.h>
14043 +@@ -201,38 +199,27 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
14044 + .set_fmt = vimc_sen_set_fmt,
14045 + };
14046 +
14047 +-static int vimc_sen_tpg_thread(void *data)
14048 ++static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
14049 ++ const void *sink_frame)
14050 + {
14051 +- struct vimc_sen_device *vsen = data;
14052 +- unsigned int i;
14053 +-
14054 +- set_freezable();
14055 +- set_current_state(TASK_UNINTERRUPTIBLE);
14056 +-
14057 +- for (;;) {
14058 +- try_to_freeze();
14059 +- if (kthread_should_stop())
14060 +- break;
14061 +-
14062 +- tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
14063 ++ struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
14064 ++ ved);
14065 ++ const struct vimc_pix_map *vpix;
14066 ++ unsigned int frame_size;
14067 +
14068 +- /* Send the frame to all source pads */
14069 +- for (i = 0; i < vsen->sd.entity.num_pads; i++)
14070 +- vimc_propagate_frame(&vsen->sd.entity.pads[i],
14071 +- vsen->frame);
14072 ++ /* Calculate the frame size */
14073 ++ vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
14074 ++ frame_size = vsen->mbus_format.width * vpix->bpp *
14075 ++ vsen->mbus_format.height;
14076 +
14077 +- /* 60 frames per second */
14078 +- schedule_timeout(HZ/60);
14079 +- }
14080 +-
14081 +- return 0;
14082 ++ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
14083 ++ return vsen->frame;
14084 + }
14085 +
14086 + static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
14087 + {
14088 + struct vimc_sen_device *vsen =
14089 + container_of(sd, struct vimc_sen_device, sd);
14090 +- int ret;
14091 +
14092 + if (enable) {
14093 + const struct vimc_pix_map *vpix;
14094 +@@ -258,26 +245,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
14095 + /* configure the test pattern generator */
14096 + vimc_sen_tpg_s_format(vsen);
14097 +
14098 +- /* Initialize the image generator thread */
14099 +- vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
14100 +- "%s-sen", vsen->sd.v4l2_dev->name);
14101 +- if (IS_ERR(vsen->kthread_sen)) {
14102 +- dev_err(vsen->dev, "%s: kernel_thread() failed\n",
14103 +- vsen->sd.name);
14104 +- vfree(vsen->frame);
14105 +- vsen->frame = NULL;
14106 +- return PTR_ERR(vsen->kthread_sen);
14107 +- }
14108 + } else {
14109 +- if (!vsen->kthread_sen)
14110 +- return 0;
14111 +-
14112 +- /* Stop image generator */
14113 +- ret = kthread_stop(vsen->kthread_sen);
14114 +- if (ret)
14115 +- return ret;
14116 +
14117 +- vsen->kthread_sen = NULL;
14118 + vfree(vsen->frame);
14119 + vsen->frame = NULL;
14120 + return 0;
14121 +@@ -413,6 +382,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master,
14122 + if (ret)
14123 + goto err_free_hdl;
14124 +
14125 ++ vsen->ved.process_frame = vimc_sen_process_frame;
14126 + dev_set_drvdata(comp, &vsen->ved);
14127 + vsen->dev = comp;
14128 +
14129 +diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
14130 +new file mode 100644
14131 +index 000000000000..fcc897fb247b
14132 +--- /dev/null
14133 ++++ b/drivers/media/platform/vimc/vimc-streamer.c
14134 +@@ -0,0 +1,188 @@
14135 ++// SPDX-License-Identifier: GPL-2.0+
14136 ++/*
14137 ++ * vimc-streamer.c Virtual Media Controller Driver
14138 ++ *
14139 ++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@×××××.com>
14140 ++ *
14141 ++ */
14142 ++
14143 ++#include <linux/init.h>
14144 ++#include <linux/module.h>
14145 ++#include <linux/freezer.h>
14146 ++#include <linux/kthread.h>
14147 ++
14148 ++#include "vimc-streamer.h"
14149 ++
14150 ++/**
14151 ++ * vimc_get_source_entity - get the entity connected with the first sink pad
14152 ++ *
14153 ++ * @ent: reference media_entity
14154 ++ *
14155 ++ * Helper function that returns the media entity containing the source pad
14156 ++ * linked with the first sink pad from the given media entity pad list.
14157 ++ */
14158 ++static struct media_entity *vimc_get_source_entity(struct media_entity *ent)
14159 ++{
14160 ++ struct media_pad *pad;
14161 ++ int i;
14162 ++
14163 ++ for (i = 0; i < ent->num_pads; i++) {
14164 ++ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
14165 ++ continue;
14166 ++ pad = media_entity_remote_pad(&ent->pads[i]);
14167 ++ return pad ? pad->entity : NULL;
14168 ++ }
14169 ++ return NULL;
14170 ++}
14171 ++
14172 ++/*
14173 ++ * vimc_streamer_pipeline_terminate - Disable the stream in all entities of the stream
14174 ++ *
14175 ++ * @stream: the pointer to the stream structure with the pipeline to be
14176 ++ * disabled.
14177 ++ *
14178 ++ * Calls s_stream to disable the stream in each entity of the pipeline
14179 ++ *
14180 ++ */
14181 ++static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
14182 ++{
14183 ++ struct media_entity *entity;
14184 ++ struct v4l2_subdev *sd;
14185 ++
14186 ++ while (stream->pipe_size) {
14187 ++ stream->pipe_size--;
14188 ++ entity = stream->ved_pipeline[stream->pipe_size]->ent;
14189 ++ entity = vimc_get_source_entity(entity);
14190 ++ stream->ved_pipeline[stream->pipe_size] = NULL;
14191 ++
14192 ++ if (!is_media_entity_v4l2_subdev(entity))
14193 ++ continue;
14194 ++
14195 ++ sd = media_entity_to_v4l2_subdev(entity);
14196 ++ v4l2_subdev_call(sd, video, s_stream, 0);
14197 ++ }
14198 ++}
14199 ++
14200 ++/*
14201 ++ * vimc_streamer_pipeline_init - initializes the stream structure
14202 ++ *
14203 ++ * @stream: the pointer to the stream structure to be initialized
14204 ++ * @ved: the pointer to the vimc entity initializing the stream
14205 ++ *
14206 ++ * Initializes the stream structure. Walks through the entity graph to
14207 ++ * construct the pipeline used later on the streamer thread.
14208 ++ * Calls s_stream to enable stream in all entities of the pipeline.
14209 ++ */
14210 ++static int vimc_streamer_pipeline_init(struct vimc_stream *stream,
14211 ++ struct vimc_ent_device *ved)
14212 ++{
14213 ++ struct media_entity *entity;
14214 ++ struct video_device *vdev;
14215 ++ struct v4l2_subdev *sd;
14216 ++ int ret = 0;
14217 ++
14218 ++ stream->pipe_size = 0;
14219 ++ while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) {
14220 ++ if (!ved) {
14221 ++ vimc_streamer_pipeline_terminate(stream);
14222 ++ return -EINVAL;
14223 ++ }
14224 ++ stream->ved_pipeline[stream->pipe_size++] = ved;
14225 ++
14226 ++ entity = vimc_get_source_entity(ved->ent);
14227 ++ /* Check if the end of the pipeline was reached */
14228 ++ if (!entity)
14229 ++ return 0;
14230 ++
14231 ++ if (is_media_entity_v4l2_subdev(entity)) {
14232 ++ sd = media_entity_to_v4l2_subdev(entity);
14233 ++ ret = v4l2_subdev_call(sd, video, s_stream, 1);
14234 ++ if (ret && ret != -ENOIOCTLCMD) {
14235 ++ vimc_streamer_pipeline_terminate(stream);
14236 ++ return ret;
14237 ++ }
14238 ++ ved = v4l2_get_subdevdata(sd);
14239 ++ } else {
14240 ++ vdev = container_of(entity,
14241 ++ struct video_device,
14242 ++ entity);
14243 ++ ved = video_get_drvdata(vdev);
14244 ++ }
14245 ++ }
14246 ++
14247 ++ vimc_streamer_pipeline_terminate(stream);
14248 ++ return -EINVAL;
14249 ++}
14250 ++
14251 ++static int vimc_streamer_thread(void *data)
14252 ++{
14253 ++ struct vimc_stream *stream = data;
14254 ++ int i;
14255 ++
14256 ++ set_freezable();
14257 ++ set_current_state(TASK_UNINTERRUPTIBLE);
14258 ++
14259 ++ for (;;) {
14260 ++ try_to_freeze();
14261 ++ if (kthread_should_stop())
14262 ++ break;
14263 ++
14264 ++ for (i = stream->pipe_size - 1; i >= 0; i--) {
14265 ++ stream->frame = stream->ved_pipeline[i]->process_frame(
14266 ++ stream->ved_pipeline[i],
14267 ++ stream->frame);
14268 ++ if (!stream->frame)
14269 ++ break;
14270 ++ if (IS_ERR(stream->frame))
14271 ++ break;
14272 ++ }
14273 ++ // pace the loop at roughly 60 frames per second
14274 ++ schedule_timeout(HZ / 60);
14275 ++ }
14276 ++
14277 ++ return 0;
14278 ++}
14279 ++
14280 ++int vimc_streamer_s_stream(struct vimc_stream *stream,
14281 ++ struct vimc_ent_device *ved,
14282 ++ int enable)
14283 ++{
14284 ++ int ret;
14285 ++
14286 ++ if (!stream || !ved)
14287 ++ return -EINVAL;
14288 ++
14289 ++ if (enable) {
14290 ++ if (stream->kthread)
14291 ++ return 0;
14292 ++
14293 ++ ret = vimc_streamer_pipeline_init(stream, ved);
14294 ++ if (ret)
14295 ++ return ret;
14296 ++
14297 ++ stream->kthread = kthread_run(vimc_streamer_thread, stream,
14298 ++ "vimc-streamer thread");
14299 ++
14300 ++ if (IS_ERR(stream->kthread))
14301 ++ return PTR_ERR(stream->kthread);
14302 ++
14303 ++ } else {
14304 ++ if (!stream->kthread)
14305 ++ return 0;
14306 ++
14307 ++ ret = kthread_stop(stream->kthread);
14308 ++ if (ret)
14309 ++ return ret;
14310 ++
14311 ++ stream->kthread = NULL;
14312 ++
14313 ++ vimc_streamer_pipeline_terminate(stream);
14314 ++ }
14315 ++
14316 ++ return 0;
14317 ++}
14318 ++EXPORT_SYMBOL_GPL(vimc_streamer_s_stream);
14319 ++
14320 ++MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer");
14321 ++MODULE_AUTHOR("Lucas A. M. Magalhães <lucmaga@×××××.com>");
14322 ++MODULE_LICENSE("GPL");
14323 +diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h
14324 +new file mode 100644
14325 +index 000000000000..752af2e2d5a2
14326 +--- /dev/null
14327 ++++ b/drivers/media/platform/vimc/vimc-streamer.h
14328 +@@ -0,0 +1,38 @@
14329 ++/* SPDX-License-Identifier: GPL-2.0+ */
14330 ++/*
14331 ++ * vimc-streamer.h Virtual Media Controller Driver
14332 ++ *
14333 ++ * Copyright (C) 2018 Lucas A. M. Magalhães <lucmaga@×××××.com>
14334 ++ *
14335 ++ */
14336 ++
14337 ++#ifndef _VIMC_STREAMER_H_
14338 ++#define _VIMC_STREAMER_H_
14339 ++
14340 ++#include <media/media-device.h>
14341 ++
14342 ++#include "vimc-common.h"
14343 ++
14344 ++#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16
14345 ++
14346 ++struct vimc_stream {
14347 ++ struct media_pipeline pipe;
14348 ++ struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE];
14349 ++ unsigned int pipe_size;
14350 ++ u8 *frame;
14351 ++ struct task_struct *kthread;
14352 ++};
14353 ++
14354 ++/**
14355 ++ * vimc_streamer_s_stream - start/stop the stream
14356 ++ *
14357 ++ * @stream: the pointer to the stream to start or stop
14358 ++ * @ved: the last entity of the streamer pipeline
14359 ++ * @enable: any non-zero value starts the stream, zero stops it
14360 ++ *
14361 ++ */
14362 ++int vimc_streamer_s_stream(struct vimc_stream *stream,
14363 ++ struct vimc_ent_device *ved,
14364 ++ int enable);
14365 ++
14366 ++#endif // _VIMC_STREAMER_H_
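Taken together, the vimc hunks replace the per-sensor kthread and the vimc_propagate_frame() fan-out with a single streamer thread that walks ved_pipeline[] and chains the process_frame() callbacks. A sketch of a handler for a hypothetical entity under the new contract (the example_* names are invented; the NULL/ERR_PTR() return convention is the one the streamer thread above checks):

    struct example_device {
            struct vimc_ent_device ved;
            u8 *src_frame;  /* allocated in s_stream(1), freed in s_stream(0) */
    };

    static void *example_process_frame(struct vimc_ent_device *ved,
                                       const void *sink_frame)
    {
            struct example_device *edev =
                    container_of(ved, struct example_device, ved);

            /* not streaming: make the streamer thread skip this cycle */
            if (!edev->src_frame)
                    return ERR_PTR(-EINVAL);

            /* transform sink_frame into edev->src_frame here ... */

            /* the return value becomes the next entity's sink_frame */
            return edev->src_frame;
    }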
14367 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
14368 +index 66a174979b3c..81745644f720 100644
14369 +--- a/drivers/media/rc/rc-main.c
14370 ++++ b/drivers/media/rc/rc-main.c
14371 +@@ -274,6 +274,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
14372 + unsigned int new_keycode)
14373 + {
14374 + int old_keycode = rc_map->scan[index].keycode;
14375 ++ int i;
14376 +
14377 + /* Did the user wish to remove the mapping? */
14378 + if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
14379 +@@ -288,9 +289,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
14380 + old_keycode == KEY_RESERVED ? "New" : "Replacing",
14381 + rc_map->scan[index].scancode, new_keycode);
14382 + rc_map->scan[index].keycode = new_keycode;
14383 ++ __set_bit(new_keycode, dev->input_dev->keybit);
14384 + }
14385 +
14386 + if (old_keycode != KEY_RESERVED) {
14387 ++ /* A previous mapping was updated... */
14388 ++ __clear_bit(old_keycode, dev->input_dev->keybit);
14389 ++ /* ... but another scancode might use the same keycode */
14390 ++ for (i = 0; i < rc_map->len; i++) {
14391 ++ if (rc_map->scan[i].keycode == old_keycode) {
14392 ++ __set_bit(old_keycode, dev->input_dev->keybit);
14393 ++ break;
14394 ++ }
14395 ++ }
14396 ++
14397 + /* Possibly shrink the keytable, failure is not a problem */
14398 + ir_resize_table(dev, rc_map, GFP_ATOMIC);
14399 + }
14400 +@@ -1750,7 +1762,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
14401 + set_bit(EV_REP, dev->input_dev->evbit);
14402 + set_bit(EV_MSC, dev->input_dev->evbit);
14403 + set_bit(MSC_SCAN, dev->input_dev->mscbit);
14404 +- bitmap_fill(dev->input_dev->keybit, KEY_CNT);
14405 +
14406 + /* Pointer/mouse events */
14407 + set_bit(EV_REL, dev->input_dev->evbit);
14408 +diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
14409 +index d45415cbe6e7..14cff91b7aea 100644
14410 +--- a/drivers/media/usb/uvc/uvc_ctrl.c
14411 ++++ b/drivers/media/usb/uvc/uvc_ctrl.c
14412 +@@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
14413 +
14414 + __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
14415 +
14416 +- memset(ev->reserved, 0, sizeof(ev->reserved));
14417 ++ memset(ev, 0, sizeof(*ev));
14418 + ev->type = V4L2_EVENT_CTRL;
14419 + ev->id = v4l2_ctrl.id;
14420 + ev->u.ctrl.value = value;
14421 +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
14422 +index b62cbd800111..33a22c016456 100644
14423 +--- a/drivers/media/usb/uvc/uvc_driver.c
14424 ++++ b/drivers/media/usb/uvc/uvc_driver.c
14425 +@@ -1106,11 +1106,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
14426 + return -EINVAL;
14427 + }
14428 +
14429 +- /* Make sure the terminal type MSB is not null, otherwise it
14430 +- * could be confused with a unit.
14431 ++ /*
14432 ++ * Reject invalid terminal types that would cause issues:
14433 ++ *
14434 ++ * - The high byte must be non-zero, otherwise it would be
14435 ++ * confused with a unit.
14436 ++ *
14437 ++ * - Bit 15 must be 0, as we use it internally as a terminal
14438 ++ * direction flag.
14439 ++ *
14440 ++ * Other unknown types are accepted.
14441 + */
14442 + type = get_unaligned_le16(&buffer[4]);
14443 +- if ((type & 0xff00) == 0) {
14444 ++ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
14445 + uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
14446 + "interface %d INPUT_TERMINAL %d has invalid "
14447 + "type 0x%04x, skipping\n", udev->devnum,
14448 +diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
14449 +index 84525ff04745..e314657a1843 100644
14450 +--- a/drivers/media/usb/uvc/uvc_video.c
14451 ++++ b/drivers/media/usb/uvc/uvc_video.c
14452 +@@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
14453 + if (!uvc_hw_timestamps_param)
14454 + return;
14455 +
14456 ++ /*
14457 ++ * We will get called from __vb2_queue_cancel() if there are buffers
14458 ++ * done but not dequeued by the user, but the sample array has already
14459 ++ * been released at that time. Just bail out in that case.
14460 ++ */
14461 ++ if (!clock->samples)
14462 ++ return;
14463 ++
14464 + spin_lock_irqsave(&clock->lock, flags);
14465 +
14466 + if (clock->count < clock->size)
14467 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
14468 +index 5e3806feb5d7..8a82427c4d54 100644
14469 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c
14470 ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
14471 +@@ -1387,7 +1387,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl)
14472 +
14473 + static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
14474 + {
14475 +- memset(ev->reserved, 0, sizeof(ev->reserved));
14476 ++ memset(ev, 0, sizeof(*ev));
14477 + ev->type = V4L2_EVENT_CTRL;
14478 + ev->id = ctrl->id;
14479 + ev->u.ctrl.changes = changes;
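Both event hunks (here and in uvc_ctrl.c above) widen the memset() from just ev->reserved to the whole struct v4l2_event, so any field the function does not explicitly fill reaches userspace as zero rather than as leftover stack contents. The shape of the fix, reduced to a sketch (the function name is invented):

    #include <linux/videodev2.h>
    #include <linux/string.h>

    static void fill_event_sketch(struct v4l2_event *ev, u32 id, s32 value)
    {
            memset(ev, 0, sizeof(*ev));  /* was: memset(ev->reserved, ...) */
            ev->type = V4L2_EVENT_CTRL;
            ev->id = id;
            ev->u.ctrl.value = value;
    }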
14480 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
14481 +index a530972c5a7e..e0173bf4b0dc 100644
14482 +--- a/drivers/mfd/sm501.c
14483 ++++ b/drivers/mfd/sm501.c
14484 +@@ -1145,6 +1145,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
14485 + lookup = devm_kzalloc(&pdev->dev,
14486 + sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
14487 + GFP_KERNEL);
14488 ++ if (!lookup)
14489 ++ return -ENOMEM;
14490 ++
14491 + lookup->dev_id = "i2c-gpio";
14492 + if (iic->pin_sda < 32)
14493 + lookup->table[0].chip_label = "SM501-LOW";
14494 +diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
14495 +index 5d28d9e454f5..08f4a512afad 100644
14496 +--- a/drivers/misc/cxl/guest.c
14497 ++++ b/drivers/misc/cxl/guest.c
14498 +@@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter)
14499 + int i, rc;
14500 +
14501 + pr_devel("Adapter reset request\n");
14502 ++ spin_lock(&adapter->afu_list_lock);
14503 + for (i = 0; i < adapter->slices; i++) {
14504 + if ((afu = adapter->afu[i])) {
14505 + pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
14506 +@@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter)
14507 + pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
14508 + }
14509 + }
14510 ++ spin_unlock(&adapter->afu_list_lock);
14511 + return rc;
14512 + }
14513 +
14514 +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
14515 +index c79ba1c699ad..300531d6136f 100644
14516 +--- a/drivers/misc/cxl/pci.c
14517 ++++ b/drivers/misc/cxl/pci.c
14518 +@@ -1805,7 +1805,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
14519 + /* There should only be one entry, but go through the list
14520 + * anyway
14521 + */
14522 +- if (afu->phb == NULL)
14523 ++ if (afu == NULL || afu->phb == NULL)
14524 + return result;
14525 +
14526 + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
14527 +@@ -1832,7 +1832,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
14528 + {
14529 + struct cxl *adapter = pci_get_drvdata(pdev);
14530 + struct cxl_afu *afu;
14531 +- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
14532 ++ pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
14533 ++ pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
14534 + int i;
14535 +
14536 + /* At this point, we could still have an interrupt pending.
14537 +@@ -1843,6 +1844,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
14538 +
14539 + /* If we're permanently dead, give up. */
14540 + if (state == pci_channel_io_perm_failure) {
14541 ++ spin_lock(&adapter->afu_list_lock);
14542 + for (i = 0; i < adapter->slices; i++) {
14543 + afu = adapter->afu[i];
14544 + /*
14545 +@@ -1851,6 +1853,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
14546 + */
14547 + cxl_vphb_error_detected(afu, state);
14548 + }
14549 ++ spin_unlock(&adapter->afu_list_lock);
14550 + return PCI_ERS_RESULT_DISCONNECT;
14551 + }
14552 +
14553 +@@ -1932,11 +1935,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
14554 + * * In slot_reset, free the old resources and allocate new ones.
14555 + * * In resume, clear the flag to allow things to start.
14556 + */
14557 ++
14558 ++ /* Make sure no one else changes the afu list */
14559 ++ spin_lock(&adapter->afu_list_lock);
14560 ++
14561 + for (i = 0; i < adapter->slices; i++) {
14562 + afu = adapter->afu[i];
14563 +
14564 +- afu_result = cxl_vphb_error_detected(afu, state);
14565 ++ if (afu == NULL)
14566 ++ continue;
14567 +
14568 ++ afu_result = cxl_vphb_error_detected(afu, state);
14569 + cxl_context_detach_all(afu);
14570 + cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
14571 + pci_deconfigure_afu(afu);
14572 +@@ -1948,6 +1957,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
14573 + (result == PCI_ERS_RESULT_NEED_RESET))
14574 + result = PCI_ERS_RESULT_NONE;
14575 + }
14576 ++ spin_unlock(&adapter->afu_list_lock);
14577 +
14578 + /* should take the context lock here */
14579 + if (cxl_adapter_context_lock(adapter) != 0)
14580 +@@ -1980,14 +1990,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
14581 + */
14582 + cxl_adapter_context_unlock(adapter);
14583 +
14584 ++ spin_lock(&adapter->afu_list_lock);
14585 + for (i = 0; i < adapter->slices; i++) {
14586 + afu = adapter->afu[i];
14587 +
14588 ++ if (afu == NULL)
14589 ++ continue;
14590 ++
14591 + if (pci_configure_afu(afu, adapter, pdev))
14592 +- goto err;
14593 ++ goto err_unlock;
14594 +
14595 + if (cxl_afu_select_best_mode(afu))
14596 +- goto err;
14597 ++ goto err_unlock;
14598 +
14599 + if (afu->phb == NULL)
14600 + continue;
14601 +@@ -1999,16 +2013,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
14602 + ctx = cxl_get_context(afu_dev);
14603 +
14604 + if (ctx && cxl_release_context(ctx))
14605 +- goto err;
14606 ++ goto err_unlock;
14607 +
14608 + ctx = cxl_dev_context_init(afu_dev);
14609 + if (IS_ERR(ctx))
14610 +- goto err;
14611 ++ goto err_unlock;
14612 +
14613 + afu_dev->dev.archdata.cxl_ctx = ctx;
14614 +
14615 + if (cxl_ops->afu_check_and_enable(afu))
14616 +- goto err;
14617 ++ goto err_unlock;
14618 +
14619 + afu_dev->error_state = pci_channel_io_normal;
14620 +
14621 +@@ -2029,8 +2043,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
14622 + result = PCI_ERS_RESULT_DISCONNECT;
14623 + }
14624 + }
14625 ++
14626 ++ spin_unlock(&adapter->afu_list_lock);
14627 + return result;
14628 +
14629 ++err_unlock:
14630 ++ spin_unlock(&adapter->afu_list_lock);
14631 ++
14632 + err:
14633 + /* All the bits that happen in both error_detected and cxl_remove
14634 + * should be idempotent, so we don't need to worry about leaving a mix
14635 +@@ -2051,10 +2070,11 @@ static void cxl_pci_resume(struct pci_dev *pdev)
14636 + * This is not the place to be checking if everything came back up
14637 + * properly, because there's no return value: do that in slot_reset.
14638 + */
14639 ++ spin_lock(&adapter->afu_list_lock);
14640 + for (i = 0; i < adapter->slices; i++) {
14641 + afu = adapter->afu[i];
14642 +
14643 +- if (afu->phb == NULL)
14644 ++ if (afu == NULL || afu->phb == NULL)
14645 + continue;
14646 +
14647 + list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
14648 +@@ -2063,6 +2083,7 @@ static void cxl_pci_resume(struct pci_dev *pdev)
14649 + afu_dev->driver->err_handler->resume(afu_dev);
14650 + }
14651 + }
14652 ++ spin_unlock(&adapter->afu_list_lock);
14653 + }
14654 +
14655 + static const struct pci_error_handlers cxl_err_handler = {
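The cxl hunks above all close the same race: the error-handling paths walk adapter->afu[] while the AFU list can change, and individual slots may legitimately be NULL. Distilled to one idiom (example_for_each_afu is invented; not a complete handler):

    static void example_for_each_afu(struct cxl *adapter)
    {
            int i;

            spin_lock(&adapter->afu_list_lock);
            for (i = 0; i < adapter->slices; i++) {
                    struct cxl_afu *afu = adapter->afu[i];

                    if (afu == NULL)        /* slots can be empty */
                            continue;
                    /* ... operate on afu under the lock ... */
            }
            spin_unlock(&adapter->afu_list_lock);
    }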
14656 +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
14657 +index fc3872fe7b25..c383322ec2ba 100644
14658 +--- a/drivers/misc/mei/bus.c
14659 ++++ b/drivers/misc/mei/bus.c
14660 +@@ -541,17 +541,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
14661 + goto out;
14662 + }
14663 +
14664 +- if (!mei_cl_bus_module_get(cldev)) {
14665 +- dev_err(&cldev->dev, "get hw module failed");
14666 +- ret = -ENODEV;
14667 +- goto out;
14668 +- }
14669 +-
14670 + ret = mei_cl_connect(cl, cldev->me_cl, NULL);
14671 +- if (ret < 0) {
14672 ++ if (ret < 0)
14673 + dev_err(&cldev->dev, "cannot connect\n");
14674 +- mei_cl_bus_module_put(cldev);
14675 +- }
14676 +
14677 + out:
14678 + mutex_unlock(&bus->device_lock);
14679 +@@ -614,7 +606,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
14680 + if (err < 0)
14681 + dev_err(bus->dev, "Could not disconnect from the ME client\n");
14682 +
14683 +- mei_cl_bus_module_put(cldev);
14684 + out:
14685 + /* Flush queues and remove any pending read */
14686 + mei_cl_flush_queues(cl, NULL);
14687 +@@ -725,9 +716,16 @@ static int mei_cl_device_probe(struct device *dev)
14688 + if (!id)
14689 + return -ENODEV;
14690 +
14691 ++ if (!mei_cl_bus_module_get(cldev)) {
14692 ++ dev_err(&cldev->dev, "get hw module failed");
14693 ++ return -ENODEV;
14694 ++ }
14695 ++
14696 + ret = cldrv->probe(cldev, id);
14697 +- if (ret)
14698 ++ if (ret) {
14699 ++ mei_cl_bus_module_put(cldev);
14700 + return ret;
14701 ++ }
14702 +
14703 + __module_get(THIS_MODULE);
14704 + return 0;
14705 +@@ -755,6 +753,7 @@ static int mei_cl_device_remove(struct device *dev)
14706 +
14707 + mei_cldev_unregister_callbacks(cldev);
14708 +
14709 ++ mei_cl_bus_module_put(cldev);
14710 + module_put(THIS_MODULE);
14711 + dev->driver = NULL;
14712 + return ret;
14713 +diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
14714 +index 8f7616557c97..e6207f614816 100644
14715 +--- a/drivers/misc/mei/hbm.c
14716 ++++ b/drivers/misc/mei/hbm.c
14717 +@@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev)
14718 + dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
14719 + dev->hbm_f_pg_supported = 1;
14720 +
14721 ++ dev->hbm_f_dc_supported = 0;
14722 + if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
14723 + dev->hbm_f_dc_supported = 1;
14724 +
14725 ++ dev->hbm_f_ie_supported = 0;
14726 + if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
14727 + dev->hbm_f_ie_supported = 1;
14728 +
14729 + /* disconnect on connect timeout instead of link reset */
14730 ++ dev->hbm_f_dot_supported = 0;
14731 + if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
14732 + dev->hbm_f_dot_supported = 1;
14733 +
14734 + /* Notification Event Support */
14735 ++ dev->hbm_f_ev_supported = 0;
14736 + if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
14737 + dev->hbm_f_ev_supported = 1;
14738 +
14739 + /* Fixed Address Client Support */
14740 ++ dev->hbm_f_fa_supported = 0;
14741 + if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
14742 + dev->hbm_f_fa_supported = 1;
14743 +
14744 + /* OS ver message Support */
14745 ++ dev->hbm_f_os_supported = 0;
14746 + if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
14747 + dev->hbm_f_os_supported = 1;
14748 +
14749 + /* DMA Ring Support */
14750 ++ dev->hbm_f_dr_supported = 0;
14751 + if (dev->version.major_version > HBM_MAJOR_VERSION_DR ||
14752 + (dev->version.major_version == HBM_MAJOR_VERSION_DR &&
14753 + dev->version.minor_version >= HBM_MINOR_VERSION_DR))
14754 +diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
14755 +index f8240b87df22..f69acb5d4a50 100644
14756 +--- a/drivers/misc/vmw_balloon.c
14757 ++++ b/drivers/misc/vmw_balloon.c
14758 +@@ -1287,7 +1287,7 @@ static void vmballoon_reset(struct vmballoon *b)
14759 + vmballoon_pop(b);
14760 +
14761 + if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
14762 +- return;
14763 ++ goto unlock;
14764 +
14765 + if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
14766 + if (vmballoon_init_batching(b)) {
14767 +@@ -1298,7 +1298,7 @@ static void vmballoon_reset(struct vmballoon *b)
14768 + * The guest will retry in one second.
14769 + */
14770 + vmballoon_send_start(b, 0);
14771 +- return;
14772 ++ goto unlock;
14773 + }
14774 + } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
14775 + vmballoon_deinit_batching(b);
14776 +@@ -1314,6 +1314,7 @@ static void vmballoon_reset(struct vmballoon *b)
14777 + if (vmballoon_send_guest_id(b))
14778 + pr_err("failed to send guest ID to the host\n");
14779 +
14780 ++unlock:
14781 + up_write(&b->conf_sem);
14782 + }
14783 +
14784 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
14785 +index b27a1e620233..1e6b07c176dc 100644
14786 +--- a/drivers/mmc/core/core.c
14787 ++++ b/drivers/mmc/core/core.c
14788 +@@ -2381,9 +2381,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
14789 + return card->pref_erase;
14790 +
14791 + max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
14792 +- if (max_discard && mmc_can_trim(card)) {
14793 ++ if (mmc_can_trim(card)) {
14794 + max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
14795 +- if (max_trim < max_discard)
14796 ++ if (max_trim < max_discard || max_discard == 0)
14797 + max_discard = max_trim;
14798 + } else if (max_discard < card->erase_size) {
14799 + max_discard = 0;
14800 +diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
14801 +index c712b7deb3a9..7c8f203f9a24 100644
14802 +--- a/drivers/mmc/host/alcor.c
14803 ++++ b/drivers/mmc/host/alcor.c
14804 +@@ -48,7 +48,6 @@ struct alcor_sdmmc_host {
14805 + struct mmc_command *cmd;
14806 + struct mmc_data *data;
14807 + unsigned int dma_on:1;
14808 +- unsigned int early_data:1;
14809 +
14810 + struct mutex cmd_mutex;
14811 +
14812 +@@ -144,8 +143,7 @@ static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
14813 + host->sg_count--;
14814 + }
14815 +
14816 +-static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
14817 +- bool early)
14818 ++static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
14819 + {
14820 + struct alcor_pci_priv *priv = host->alcor_pci;
14821 + struct mmc_data *data = host->data;
14822 +@@ -155,13 +153,6 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host,
14823 + ctrl |= AU6601_DATA_WRITE;
14824 +
14825 + if (data->host_cookie == COOKIE_MAPPED) {
14826 +- if (host->early_data) {
14827 +- host->early_data = false;
14828 +- return;
14829 +- }
14830 +-
14831 +- host->early_data = early;
14832 +-
14833 + alcor_data_set_dma(host);
14834 + ctrl |= AU6601_DATA_DMA_MODE;
14835 + host->dma_on = 1;
14836 +@@ -231,6 +222,7 @@ static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
14837 + static void alcor_prepare_data(struct alcor_sdmmc_host *host,
14838 + struct mmc_command *cmd)
14839 + {
14840 ++ struct alcor_pci_priv *priv = host->alcor_pci;
14841 + struct mmc_data *data = cmd->data;
14842 +
14843 + if (!data)
14844 +@@ -248,7 +240,7 @@ static void alcor_prepare_data(struct alcor_sdmmc_host *host,
14845 + if (data->host_cookie != COOKIE_MAPPED)
14846 + alcor_prepare_sg_miter(host);
14847 +
14848 +- alcor_trigger_data_transfer(host, true);
14849 ++ alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
14850 + }
14851 +
14852 + static void alcor_send_cmd(struct alcor_sdmmc_host *host,
14853 +@@ -435,7 +427,7 @@ static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
14854 + if (!host->data)
14855 + return false;
14856 +
14857 +- alcor_trigger_data_transfer(host, false);
14858 ++ alcor_trigger_data_transfer(host);
14859 + host->cmd = NULL;
14860 + return true;
14861 + }
14862 +@@ -456,7 +448,7 @@ static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
14863 + if (!host->data)
14864 + alcor_request_complete(host, 1);
14865 + else
14866 +- alcor_trigger_data_transfer(host, false);
14867 ++ alcor_trigger_data_transfer(host);
14868 + host->cmd = NULL;
14869 + }
14870 +
14871 +@@ -487,15 +479,9 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
14872 + break;
14873 + case AU6601_INT_READ_BUF_RDY:
14874 + alcor_trf_block_pio(host, true);
14875 +- if (!host->blocks)
14876 +- break;
14877 +- alcor_trigger_data_transfer(host, false);
14878 + return 1;
14879 + case AU6601_INT_WRITE_BUF_RDY:
14880 + alcor_trf_block_pio(host, false);
14881 +- if (!host->blocks)
14882 +- break;
14883 +- alcor_trigger_data_transfer(host, false);
14884 + return 1;
14885 + case AU6601_INT_DMA_END:
14886 + if (!host->sg_count)
14887 +@@ -508,8 +494,14 @@ static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
14888 + break;
14889 + }
14890 +
14891 +- if (intmask & AU6601_INT_DATA_END)
14892 +- return 0;
14893 ++ if (intmask & AU6601_INT_DATA_END) {
14894 ++ if (!host->dma_on && host->blocks) {
14895 ++ alcor_trigger_data_transfer(host);
14896 ++ return 1;
14897 ++ } else {
14898 ++ return 0;
14899 ++ }
14900 ++ }
14901 +
14902 + return 1;
14903 + }
14904 +@@ -1044,14 +1036,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
14905 + mmc->caps2 = MMC_CAP2_NO_SDIO;
14906 + mmc->ops = &alcor_sdc_ops;
14907 +
14908 +- /* Hardware cannot do scatter lists */
14909 ++ /* The hardware does DMA data transfers of 4096 bytes to/from a single
14910 ++ * buffer address. Scatterlists are not supported, but upon DMA
14911 ++ * completion (signalled via IRQ), the original vendor driver then
14912 ++ * immediately sets up another DMA transfer of the next 4096
14913 ++ * bytes.
14914 ++ *
14915 ++ * This means that we need to handle the I/O in 4096 byte chunks.
14916 ++ * Lacking a way to limit the sglist entries to 4096 bytes, we instead
14917 ++ * impose that only one segment is provided, with maximum size 4096,
14918 ++ * which also happens to be the minimum size. This means that the
14919 ++ * single-entry sglist handled by this driver can be handed directly
14920 ++ * to the hardware, nice and simple.
14921 ++ *
14922 ++ * Unfortunately though, that means we only do 4096 bytes of I/O per
14923 ++ * MMC command. A future improvement would be to make the driver
14924 ++ * accept sg lists and entries of any size, and simply iterate
14925 ++ * through them 4096 bytes at a time.
14926 ++ */
14927 + mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
14928 + mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
14929 +-
14930 +- mmc->max_blk_size = mmc->max_seg_size;
14931 +- mmc->max_blk_count = mmc->max_segs;
14932 +-
14933 +- mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
14934 ++ mmc->max_req_size = mmc->max_seg_size;
14935 + }
14936 +
14937 + static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
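Given the comment above, the advertised limits collapse to one 4096-byte segment per request. A quick arithmetic check of what the MMC core can then queue per command, assuming AU6601_MAX_DMA_SEGMENTS is 1 and AU6601_MAX_DMA_BLOCK_SIZE is 4096 (both values are assumptions here):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_segs = 1;          /* AU6601_MAX_DMA_SEGMENTS, assumed */
            unsigned int max_seg_size = 4096;   /* AU6601_MAX_DMA_BLOCK_SIZE, assumed */
            unsigned int max_req_size = max_seg_size * max_segs; /* = max_seg_size */
            unsigned int blk_size = 512;

            /* each MMC command can move at most eight 512-byte blocks */
            printf("blocks per request: %u\n", max_req_size / blk_size);
            return 0;
    }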
14938 +diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
14939 +index 4d17032d15ee..7b530e5a86da 100644
14940 +--- a/drivers/mmc/host/mxcmmc.c
14941 ++++ b/drivers/mmc/host/mxcmmc.c
14942 +@@ -292,11 +292,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
14943 + struct scatterlist *sg;
14944 + int i;
14945 +
14946 +- for_each_sg(data->sg, sg, data->sg_len, i) {
14947 +- void *buf = kmap_atomic(sg_page(sg) + sg->offset);
14948 +- buffer_swap32(buf, sg->length);
14949 +- kunmap_atomic(buf);
14950 +- }
14951 ++ for_each_sg(data->sg, sg, data->sg_len, i)
14952 ++ buffer_swap32(sg_virt(sg), sg->length);
14953 + }
14954 + #else
14955 + static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
14956 +@@ -613,7 +610,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
14957 + {
14958 + struct mmc_data *data = host->req->data;
14959 + struct scatterlist *sg;
14960 +- void *buf;
14961 + int stat, i;
14962 +
14963 + host->data = data;
14964 +@@ -621,18 +617,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
14965 +
14966 + if (data->flags & MMC_DATA_READ) {
14967 + for_each_sg(data->sg, sg, data->sg_len, i) {
14968 +- buf = kmap_atomic(sg_page(sg) + sg->offset);
14969 +- stat = mxcmci_pull(host, buf, sg->length);
14970 +- kunmap(buf);
14971 ++ stat = mxcmci_pull(host, sg_virt(sg), sg->length);
14972 + if (stat)
14973 + return stat;
14974 + host->datasize += sg->length;
14975 + }
14976 + } else {
14977 + for_each_sg(data->sg, sg, data->sg_len, i) {
14978 +- buf = kmap_atomic(sg_page(sg) + sg->offset);
14979 +- stat = mxcmci_push(host, buf, sg->length);
14980 +- kunmap(buf);
14981 ++ stat = mxcmci_push(host, sg_virt(sg), sg->length);
14982 + if (stat)
14983 + return stat;
14984 + host->datasize += sg->length;
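sg_virt() is shorthand for page_address(sg_page(sg)) + sg->offset and is only valid when the scatterlist pages live in lowmem, which this driver can rely on; note that the removed read/write loops even paired kmap_atomic() with plain kunmap(). A minimal statement of the equivalence being relied on:

    #include <linux/scatterlist.h>

    /*
     * Sketch: for a lowmem page these two expressions name the same memory;
     * kmap_atomic() is only needed when the page may come from highmem.
     */
    static void *sg_buffer(struct scatterlist *sg)
    {
            return sg_virt(sg); /* == page_address(sg_page(sg)) + sg->offset */
    }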
14985 +diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
14986 +index c60a7625b1fa..b2873a2432b6 100644
14987 +--- a/drivers/mmc/host/omap.c
14988 ++++ b/drivers/mmc/host/omap.c
14989 +@@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
14990 + reg &= ~(1 << 5);
14991 + OMAP_MMC_WRITE(host, SDIO, reg);
14992 + /* Set maximum timeout */
14993 +- OMAP_MMC_WRITE(host, CTO, 0xff);
14994 ++ OMAP_MMC_WRITE(host, CTO, 0xfd);
14995 + }
14996 +
14997 + static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
14998 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
14999 +index 8779bbaa6b69..194a81888792 100644
15000 +--- a/drivers/mmc/host/pxamci.c
15001 ++++ b/drivers/mmc/host/pxamci.c
15002 +@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
15003 + static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
15004 + {
15005 + struct dma_async_tx_descriptor *tx;
15006 +- enum dma_data_direction direction;
15007 ++ enum dma_transfer_direction direction;
15008 + struct dma_slave_config config;
15009 + struct dma_chan *chan;
15010 + unsigned int nob = data->blocks;
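The type fix matters because dma_data_direction (DMA_TO_DEVICE/DMA_FROM_DEVICE) describes buffer mappings, while dma_slave_config.direction takes the separate enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM), and the two sets of constants have different values. A hedged sketch of a slave-channel configuration for a card read; the FIFO address parameter and burst size are made up:

    #include <linux/dmaengine.h>

    /* Sketch: configure a slave channel for an MMC read (device -> memory). */
    static int config_read_channel(struct dma_chan *chan, dma_addr_t fifo)
    {
            struct dma_slave_config config = {
                    .direction      = DMA_DEV_TO_MEM, /* dma_transfer_direction */
                    .src_addr       = fifo,
                    .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .src_maxburst   = 32,
            };

            return dmaengine_slave_config(chan, &config);
    }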
15011 +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
15012 +index 31a351a20dc0..d9be22b310e6 100644
15013 +--- a/drivers/mmc/host/renesas_sdhi_core.c
15014 ++++ b/drivers/mmc/host/renesas_sdhi_core.c
15015 +@@ -634,6 +634,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
15016 + struct renesas_sdhi *priv;
15017 + struct resource *res;
15018 + int irq, ret, i;
15019 ++ u16 ver;
15020 +
15021 + of_data = of_device_get_match_data(&pdev->dev);
15022 +
15023 +@@ -723,6 +724,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
15024 + host->ops.start_signal_voltage_switch =
15025 + renesas_sdhi_start_signal_voltage_switch;
15026 + host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
15027 ++
15028 ++ /* SDR and HS200/400 registers require HW reset */
15029 ++ if (of_data && of_data->scc_offset) {
15030 ++ priv->scc_ctl = host->ctl + of_data->scc_offset;
15031 ++ host->mmc->caps |= MMC_CAP_HW_RESET;
15032 ++ host->hw_reset = renesas_sdhi_hw_reset;
15033 ++ }
15034 + }
15035 +
15036 + /* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
15037 +@@ -759,12 +767,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
15038 + if (ret)
15039 + goto efree;
15040 +
15041 ++ ver = sd_ctrl_read16(host, CTL_VERSION);
15042 ++ /* GEN2_SDR104 is first known SDHI to use 32bit block count */
15043 ++ if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
15044 ++ mmc_data->max_blk_count = U16_MAX;
15045 ++
15046 + ret = tmio_mmc_host_probe(host);
15047 + if (ret < 0)
15048 + goto edisclk;
15049 +
15050 + /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
15051 +- if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
15052 ++ if (ver == SDHI_VER_GEN2_SDR50)
15053 + mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
15054 +
15055 + /* Enable tuning iff we have an SCC and a supported mode */
15056 +@@ -775,8 +788,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
15057 + const struct renesas_sdhi_scc *taps = of_data->taps;
15058 + bool hit = false;
15059 +
15060 +- host->mmc->caps |= MMC_CAP_HW_RESET;
15061 +-
15062 + for (i = 0; i < of_data->taps_num; i++) {
15063 + if (taps[i].clk_rate == 0 ||
15064 + taps[i].clk_rate == host->mmc->f_max) {
15065 +@@ -789,12 +800,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
15066 + if (!hit)
15067 + dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
15068 +
15069 +- priv->scc_ctl = host->ctl + of_data->scc_offset;
15070 + host->init_tuning = renesas_sdhi_init_tuning;
15071 + host->prepare_tuning = renesas_sdhi_prepare_tuning;
15072 + host->select_tuning = renesas_sdhi_select_tuning;
15073 + host->check_scc_error = renesas_sdhi_check_scc_error;
15074 +- host->hw_reset = renesas_sdhi_hw_reset;
15075 + host->prepare_hs400_tuning =
15076 + renesas_sdhi_prepare_hs400_tuning;
15077 + host->hs400_downgrade = renesas_sdhi_disable_scc;
15078 +diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
15079 +index 00d41b312c79..a6f25c796aed 100644
15080 +--- a/drivers/mmc/host/sdhci-esdhc-imx.c
15081 ++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
15082 +@@ -979,6 +979,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
15083 + case MMC_TIMING_UHS_SDR25:
15084 + case MMC_TIMING_UHS_SDR50:
15085 + case MMC_TIMING_UHS_SDR104:
15086 ++ case MMC_TIMING_MMC_HS:
15087 + case MMC_TIMING_MMC_HS200:
15088 + writel(m, host->ioaddr + ESDHC_MIX_CTRL);
15089 + break;
15090 +diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
15091 +index c11c18a9aacb..9ec300ec94ba 100644
15092 +--- a/drivers/mmc/host/sdhci-omap.c
15093 ++++ b/drivers/mmc/host/sdhci-omap.c
15094 +@@ -797,6 +797,43 @@ void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
15095 + sdhci_reset(host, mask);
15096 + }
15097 +
15098 ++#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
15099 ++ SDHCI_INT_TIMEOUT)
15100 ++#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
15101 ++
15102 ++static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
15103 ++{
15104 ++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
15105 ++ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
15106 ++
15107 ++ if (omap_host->is_tuning && host->cmd && !host->data_early &&
15108 ++ (intmask & CMD_ERR_MASK)) {
15109 ++
15110 ++ /*
15111 ++ * Since we are not resetting data lines during tuning
15112 ++ * operation, data error or data complete interrupts
15113 ++ * might still arrive. Mark this request as a failure
15114 ++ * but still wait for the data interrupt.
15115 ++ */
15116 ++ if (intmask & SDHCI_INT_TIMEOUT)
15117 ++ host->cmd->error = -ETIMEDOUT;
15118 ++ else
15119 ++ host->cmd->error = -EILSEQ;
15120 ++
15121 ++ host->cmd = NULL;
15122 ++
15123 ++ /*
15124 ++ * Sometimes command error interrupts and command complete
15125 ++ * interrupt will arrive together. Clear all command related
15126 ++ * interrupts here.
15127 ++ */
15128 ++ sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
15129 ++ intmask &= ~CMD_MASK;
15130 ++ }
15131 ++
15132 ++ return intmask;
15133 ++}
15134 ++
15135 + static struct sdhci_ops sdhci_omap_ops = {
15136 + .set_clock = sdhci_omap_set_clock,
15137 + .set_power = sdhci_omap_set_power,
15138 +@@ -807,6 +844,7 @@ static struct sdhci_ops sdhci_omap_ops = {
15139 + .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
15140 + .reset = sdhci_omap_reset,
15141 + .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
15142 ++ .irq = sdhci_omap_irq,
15143 + };
15144 +
15145 + static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
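The new .irq hook runs before the generic sdhci interrupt handler; whatever bits it clears from intmask are treated as already handled, and the remainder is processed normally. A minimal sketch of that contract, with a made-up HYPOTHETICAL_IRQ_BIT (the real hook above filters the command-error bits instead):

    /* Hedged sketch of the sdhci_ops.irq contract. */
    static u32 example_sdhci_irq(struct sdhci_host *host, u32 intmask)
    {
            if (intmask & HYPOTHETICAL_IRQ_BIT) {
                    /* handle and acknowledge the condition privately */
                    sdhci_writel(host, HYPOTHETICAL_IRQ_BIT, SDHCI_INT_STATUS);
                    intmask &= ~HYPOTHETICAL_IRQ_BIT; /* hide it from the core */
            }

            return intmask; /* remaining bits go to the generic handler */
    }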
15146 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
15147 +index 21bf8ac78380..390e896dadc7 100644
15148 +--- a/drivers/net/Kconfig
15149 ++++ b/drivers/net/Kconfig
15150 +@@ -213,8 +213,8 @@ config GENEVE
15151 +
15152 + config GTP
15153 + tristate "GPRS Tunneling Protocol datapath (GTP-U)"
15154 +- depends on INET && NET_UDP_TUNNEL
15155 +- select NET_IP_TUNNEL
15156 ++ depends on INET
15157 ++ select NET_UDP_TUNNEL
15158 + ---help---
15159 + This allows one to create gtp virtual interfaces that provide
15160 + the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
15161 +diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
15162 +index ddc1f9ca8ebc..4543ac97f077 100644
15163 +--- a/drivers/net/dsa/lantiq_gswip.c
15164 ++++ b/drivers/net/dsa/lantiq_gswip.c
15165 +@@ -1069,10 +1069,10 @@ static int gswip_probe(struct platform_device *pdev)
15166 + version = gswip_switch_r(priv, GSWIP_VERSION);
15167 +
15168 + /* bring up the mdio bus */
15169 +- gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL,
15170 +- "lantiq,gphy-fw");
15171 ++ gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw");
15172 + if (gphy_fw_np) {
15173 + err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
15174 ++ of_node_put(gphy_fw_np);
15175 + if (err) {
15176 + dev_err(dev, "gphy fw probe failed\n");
15177 + return err;
15178 +@@ -1080,13 +1080,12 @@ static int gswip_probe(struct platform_device *pdev)
15179 + }
15180 +
15181 + /* bring up the mdio bus */
15182 +- mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
15183 +- "lantiq,xrx200-mdio");
15184 ++ mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio");
15185 + if (mdio_np) {
15186 + err = gswip_mdio(priv, mdio_np);
15187 + if (err) {
15188 + dev_err(dev, "mdio probe failed\n");
15189 +- goto gphy_fw;
15190 ++ goto put_mdio_node;
15191 + }
15192 + }
15193 +
15194 +@@ -1099,7 +1098,7 @@ static int gswip_probe(struct platform_device *pdev)
15195 + dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
15196 + priv->hw_info->cpu_port);
15197 + err = -EINVAL;
15198 +- goto mdio_bus;
15199 ++ goto disable_switch;
15200 + }
15201 +
15202 + platform_set_drvdata(pdev, priv);
15203 +@@ -1109,10 +1108,14 @@ static int gswip_probe(struct platform_device *pdev)
15204 + (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
15205 + return 0;
15206 +
15207 ++disable_switch:
15208 ++ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
15209 ++ dsa_unregister_switch(priv->ds);
15210 + mdio_bus:
15211 + if (mdio_np)
15212 + mdiobus_unregister(priv->ds->slave_mii_bus);
15213 +-gphy_fw:
15214 ++put_mdio_node:
15215 ++ of_node_put(mdio_np);
15216 + for (i = 0; i < priv->num_gphy_fw; i++)
15217 + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
15218 + return err;
15219 +@@ -1131,8 +1134,10 @@ static int gswip_remove(struct platform_device *pdev)
15220 +
15221 + dsa_unregister_switch(priv->ds);
15222 +
15223 +- if (priv->ds->slave_mii_bus)
15224 ++ if (priv->ds->slave_mii_bus) {
15225 + mdiobus_unregister(priv->ds->slave_mii_bus);
15226 ++ of_node_put(priv->ds->slave_mii_bus->dev.of_node);
15227 ++ }
15228 +
15229 + for (i = 0; i < priv->num_gphy_fw; i++)
15230 + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
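of_get_compatible_child() returns the node with its refcount elevated, just like the of_find_compatible_node() call it replaces (which additionally walks the whole tree instead of only direct children), so every successful lookup needs a matching of_node_put(); that is what the relabelled error path above arranges. The balanced pattern, sketched with a made-up compatible string:

    #include <linux/of.h>

    /* Sketch: balanced refcounting for an optional child node. */
    static int probe_optional_child(struct device_node *parent)
    {
            struct device_node *np;
            int err = 0;

            np = of_get_compatible_child(parent, "vendor,example-child");
            if (np) {
                    /* ... set the child up here ... */
                    of_node_put(np); /* drop the reference from the lookup */
            }

            return err;
    }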
15231 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
15232 +index 7e3c00bd9532..6cba05a80892 100644
15233 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
15234 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
15235 +@@ -442,12 +442,20 @@ out_mapping:
15236 +
15237 + static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
15238 + {
15239 ++ static struct lock_class_key lock_key;
15240 ++ static struct lock_class_key request_key;
15241 + int err;
15242 +
15243 + err = mv88e6xxx_g1_irq_setup_common(chip);
15244 + if (err)
15245 + return err;
15246 +
15247 ++ /* These lock classes tell lockdep that global 1 irqs are in
15248 ++ * a different category than their parent GPIO, so it won't
15249 ++ * report false recursion.
15250 ++ */
15251 ++ irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
15252 ++
15253 + err = request_threaded_irq(chip->irq, NULL,
15254 + mv88e6xxx_g1_irq_thread_fn,
15255 + IRQF_ONESHOT | IRQF_SHARED,
15256 +@@ -559,6 +567,9 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
15257 + goto restore_link;
15258 + }
15259 +
15260 ++ if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
15261 ++ mode = chip->info->ops->port_max_speed_mode(port);
15262 ++
15263 + if (chip->info->ops->port_set_pause) {
15264 + err = chip->info->ops->port_set_pause(chip, port, pause);
15265 + if (err)
15266 +@@ -3042,6 +3053,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
15267 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15268 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15269 + .port_set_speed = mv88e6341_port_set_speed,
15270 ++ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
15271 + .port_tag_remap = mv88e6095_port_tag_remap,
15272 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15273 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15274 +@@ -3360,6 +3372,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
15275 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15276 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15277 + .port_set_speed = mv88e6390_port_set_speed,
15278 ++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
15279 + .port_tag_remap = mv88e6390_port_tag_remap,
15280 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15281 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15282 +@@ -3404,6 +3417,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
15283 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15284 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15285 + .port_set_speed = mv88e6390x_port_set_speed,
15286 ++ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
15287 + .port_tag_remap = mv88e6390_port_tag_remap,
15288 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15289 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15290 +@@ -3448,6 +3462,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
15291 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15292 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15293 + .port_set_speed = mv88e6390_port_set_speed,
15294 ++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
15295 + .port_tag_remap = mv88e6390_port_tag_remap,
15296 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15297 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15298 +@@ -3541,6 +3556,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
15299 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15300 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15301 + .port_set_speed = mv88e6390_port_set_speed,
15302 ++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
15303 + .port_tag_remap = mv88e6390_port_tag_remap,
15304 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15305 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15306 +@@ -3672,6 +3688,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
15307 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15308 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15309 + .port_set_speed = mv88e6341_port_set_speed,
15310 ++ .port_max_speed_mode = mv88e6341_port_max_speed_mode,
15311 + .port_tag_remap = mv88e6095_port_tag_remap,
15312 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15313 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15314 +@@ -3847,6 +3864,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
15315 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15316 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15317 + .port_set_speed = mv88e6390_port_set_speed,
15318 ++ .port_max_speed_mode = mv88e6390_port_max_speed_mode,
15319 + .port_tag_remap = mv88e6390_port_tag_remap,
15320 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15321 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15322 +@@ -3895,6 +3913,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
15323 + .port_set_duplex = mv88e6xxx_port_set_duplex,
15324 + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
15325 + .port_set_speed = mv88e6390x_port_set_speed,
15326 ++ .port_max_speed_mode = mv88e6390x_port_max_speed_mode,
15327 + .port_tag_remap = mv88e6390_port_tag_remap,
15328 + .port_set_frame_mode = mv88e6351_port_set_frame_mode,
15329 + .port_set_egress_floods = mv88e6352_port_set_egress_floods,
15330 +@@ -4222,7 +4241,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15331 + .name = "Marvell 88E6190",
15332 + .num_databases = 4096,
15333 + .num_ports = 11, /* 10 + Z80 */
15334 +- .num_internal_phys = 11,
15335 ++ .num_internal_phys = 9,
15336 + .num_gpio = 16,
15337 + .max_vid = 8191,
15338 + .port_base_addr = 0x0,
15339 +@@ -4245,7 +4264,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15340 + .name = "Marvell 88E6190X",
15341 + .num_databases = 4096,
15342 + .num_ports = 11, /* 10 + Z80 */
15343 +- .num_internal_phys = 11,
15344 ++ .num_internal_phys = 9,
15345 + .num_gpio = 16,
15346 + .max_vid = 8191,
15347 + .port_base_addr = 0x0,
15348 +@@ -4268,7 +4287,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15349 + .name = "Marvell 88E6191",
15350 + .num_databases = 4096,
15351 + .num_ports = 11, /* 10 + Z80 */
15352 +- .num_internal_phys = 11,
15353 ++ .num_internal_phys = 9,
15354 + .max_vid = 8191,
15355 + .port_base_addr = 0x0,
15356 + .phy_base_addr = 0x0,
15357 +@@ -4315,7 +4334,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15358 + .name = "Marvell 88E6290",
15359 + .num_databases = 4096,
15360 + .num_ports = 11, /* 10 + Z80 */
15361 +- .num_internal_phys = 11,
15362 ++ .num_internal_phys = 9,
15363 + .num_gpio = 16,
15364 + .max_vid = 8191,
15365 + .port_base_addr = 0x0,
15366 +@@ -4477,7 +4496,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15367 + .name = "Marvell 88E6390",
15368 + .num_databases = 4096,
15369 + .num_ports = 11, /* 10 + Z80 */
15370 +- .num_internal_phys = 11,
15371 ++ .num_internal_phys = 9,
15372 + .num_gpio = 16,
15373 + .max_vid = 8191,
15374 + .port_base_addr = 0x0,
15375 +@@ -4500,7 +4519,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
15376 + .name = "Marvell 88E6390X",
15377 + .num_databases = 4096,
15378 + .num_ports = 11, /* 10 + Z80 */
15379 +- .num_internal_phys = 11,
15380 ++ .num_internal_phys = 9,
15381 + .num_gpio = 16,
15382 + .max_vid = 8191,
15383 + .port_base_addr = 0x0,
15384 +@@ -4847,6 +4866,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
15385 + if (err)
15386 + goto out;
15387 +
15388 ++ mv88e6xxx_ports_cmode_init(chip);
15389 + mv88e6xxx_phy_init(chip);
15390 +
15391 + if (chip->info->ops->get_eeprom) {
15392 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
15393 +index 546651d8c3e1..dfb1af65c205 100644
15394 +--- a/drivers/net/dsa/mv88e6xxx/chip.h
15395 ++++ b/drivers/net/dsa/mv88e6xxx/chip.h
15396 +@@ -377,6 +377,9 @@ struct mv88e6xxx_ops {
15397 + */
15398 + int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed);
15399 +
15400 ++ /* What interface mode should be used for maximum speed? */
15401 ++ phy_interface_t (*port_max_speed_mode)(int port);
15402 ++
15403 + int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port);
15404 +
15405 + int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port,
15406 +diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
15407 +index 79ab51e69aee..c44b2822e4dd 100644
15408 +--- a/drivers/net/dsa/mv88e6xxx/port.c
15409 ++++ b/drivers/net/dsa/mv88e6xxx/port.c
15410 +@@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup)
15411 + /* normal duplex detection */
15412 + break;
15413 + default:
15414 +- return -EINVAL;
15415 ++ return -EOPNOTSUPP;
15416 + }
15417 +
15418 + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
15419 +@@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
15420 + return mv88e6xxx_port_set_speed(chip, port, speed, !port, true);
15421 + }
15422 +
15423 ++phy_interface_t mv88e6341_port_max_speed_mode(int port)
15424 ++{
15425 ++ if (port == 5)
15426 ++ return PHY_INTERFACE_MODE_2500BASEX;
15427 ++
15428 ++ return PHY_INTERFACE_MODE_NA;
15429 ++}
15430 ++
15431 + /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */
15432 + int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
15433 + {
15434 +@@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
15435 + return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
15436 + }
15437 +
15438 ++phy_interface_t mv88e6390_port_max_speed_mode(int port)
15439 ++{
15440 ++ if (port == 9 || port == 10)
15441 ++ return PHY_INTERFACE_MODE_2500BASEX;
15442 ++
15443 ++ return PHY_INTERFACE_MODE_NA;
15444 ++}
15445 ++
15446 + /* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 88E6190X) */
15447 + int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
15448 + {
15449 +@@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
15450 + return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
15451 + }
15452 +
15453 ++phy_interface_t mv88e6390x_port_max_speed_mode(int port)
15454 ++{
15455 ++ if (port == 9 || port == 10)
15456 ++ return PHY_INTERFACE_MODE_XAUI;
15457 ++
15458 ++ return PHY_INTERFACE_MODE_NA;
15459 ++}
15460 ++
15461 + int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
15462 + phy_interface_t mode)
15463 + {
15464 +@@ -403,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
15465 + return 0;
15466 +
15467 + lane = mv88e6390x_serdes_get_lane(chip, port);
15468 +- if (lane < 0)
15469 ++ if (lane < 0 && lane != -ENODEV)
15470 + return lane;
15471 +
15472 +- if (chip->ports[port].serdes_irq) {
15473 +- err = mv88e6390_serdes_irq_disable(chip, port, lane);
15474 ++ if (lane >= 0) {
15475 ++ if (chip->ports[port].serdes_irq) {
15476 ++ err = mv88e6390_serdes_irq_disable(chip, port, lane);
15477 ++ if (err)
15478 ++ return err;
15479 ++ }
15480 ++
15481 ++ err = mv88e6390x_serdes_power(chip, port, false);
15482 + if (err)
15483 + return err;
15484 + }
15485 +
15486 +- err = mv88e6390x_serdes_power(chip, port, false);
15487 +- if (err)
15488 +- return err;
15489 ++ chip->ports[port].cmode = 0;
15490 +
15491 + if (cmode) {
15492 + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
15493 +@@ -428,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
15494 + if (err)
15495 + return err;
15496 +
15497 ++ chip->ports[port].cmode = cmode;
15498 ++
15499 ++ lane = mv88e6390x_serdes_get_lane(chip, port);
15500 ++ if (lane < 0)
15501 ++ return lane;
15502 ++
15503 + err = mv88e6390x_serdes_power(chip, port, true);
15504 + if (err)
15505 + return err;
15506 +@@ -439,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
15507 + }
15508 + }
15509 +
15510 +- chip->ports[port].cmode = cmode;
15511 +-
15512 + return 0;
15513 + }
15514 +
15515 +@@ -448,6 +480,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
15516 + phy_interface_t mode)
15517 + {
15518 + switch (mode) {
15519 ++ case PHY_INTERFACE_MODE_NA:
15520 ++ return 0;
15521 + case PHY_INTERFACE_MODE_XGMII:
15522 + case PHY_INTERFACE_MODE_XAUI:
15523 + case PHY_INTERFACE_MODE_RXAUI:
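The new port_max_speed_mode callback reports which PHY interface mode a port must use at its maximum speed, with PHY_INTERFACE_MODE_NA meaning no special mode is needed; mv88e6xxx_port_setup_mac() consults it when speed == SPEED_MAX, as the chip.c hunk above shows. A sketch of an implementation for a hypothetical chip whose port 9 needs 2500base-X:

    #include <linux/phy.h>

    /* Sketch: op for a made-up chip; only port 9 has a special max-speed mode. */
    static phy_interface_t mychip_port_max_speed_mode(int port)
    {
            if (port == 9)
                    return PHY_INTERFACE_MODE_2500BASEX;

            return PHY_INTERFACE_MODE_NA; /* other ports: leave cmode alone */
    }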
15524 +diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
15525 +index 4aadf321edb7..c7bed263a0f4 100644
15526 +--- a/drivers/net/dsa/mv88e6xxx/port.h
15527 ++++ b/drivers/net/dsa/mv88e6xxx/port.h
15528 +@@ -285,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
15529 + int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
15530 + int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed);
15531 +
15532 ++phy_interface_t mv88e6341_port_max_speed_mode(int port);
15533 ++phy_interface_t mv88e6390_port_max_speed_mode(int port);
15534 ++phy_interface_t mv88e6390x_port_max_speed_mode(int port);
15535 ++
15536 + int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state);
15537 +
15538 + int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map);
15539 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
15540 +index 7e97e620bd44..a26850c888cf 100644
15541 +--- a/drivers/net/dsa/qca8k.c
15542 ++++ b/drivers/net/dsa/qca8k.c
15543 +@@ -620,22 +620,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
15544 + qca8k_port_set_status(priv, port, 1);
15545 + }
15546 +
15547 +-static int
15548 +-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
15549 +-{
15550 +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
15551 +-
15552 +- return mdiobus_read(priv->bus, phy, regnum);
15553 +-}
15554 +-
15555 +-static int
15556 +-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
15557 +-{
15558 +- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
15559 +-
15560 +- return mdiobus_write(priv->bus, phy, regnum, val);
15561 +-}
15562 +-
15563 + static void
15564 + qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
15565 + {
15566 +@@ -876,8 +860,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
15567 + .setup = qca8k_setup,
15568 + .adjust_link = qca8k_adjust_link,
15569 + .get_strings = qca8k_get_strings,
15570 +- .phy_read = qca8k_phy_read,
15571 +- .phy_write = qca8k_phy_write,
15572 + .get_ethtool_stats = qca8k_get_ethtool_stats,
15573 + .get_sset_count = qca8k_get_sset_count,
15574 + .get_mac_eee = qca8k_get_mac_eee,
15575 +diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
15576 +index 342ae08ec3c2..d60a86aa8aa8 100644
15577 +--- a/drivers/net/ethernet/8390/mac8390.c
15578 ++++ b/drivers/net/ethernet/8390/mac8390.c
15579 +@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
15580 + static void dayna_block_output(struct net_device *dev, int count,
15581 + const unsigned char *buf, int start_page);
15582 +
15583 +-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
15584 +-
15585 + /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
15586 + static void slow_sane_get_8390_hdr(struct net_device *dev,
15587 + struct e8390_pkt_hdr *hdr, int ring_page);
15588 +@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
15589 +
15590 + static enum mac8390_access mac8390_testio(unsigned long membase)
15591 + {
15592 +- unsigned long outdata = 0xA5A0B5B0;
15593 +- unsigned long indata = 0x00000000;
15594 ++ u32 outdata = 0xA5A0B5B0;
15595 ++ u32 indata = 0;
15596 ++
15597 + /* Try writing 32 bits */
15598 +- memcpy_toio((void __iomem *)membase, &outdata, 4);
15599 +- /* Now compare them */
15600 +- if (memcmp_withio(&outdata, membase, 4) == 0)
15601 ++ nubus_writel(outdata, membase);
15602 ++ /* Now read it back */
15603 ++ indata = nubus_readl(membase);
15604 ++ if (outdata == indata)
15605 + return ACCESS_32;
15606 ++
15607 ++ outdata = 0xC5C0D5D0;
15608 ++ indata = 0;
15609 ++
15610 + /* Write 16 bit output */
15611 + word_memcpy_tocard(membase, &outdata, 4);
15612 + /* Now read it back */
15613 + word_memcpy_fromcard(&indata, membase, 4);
15614 + if (outdata == indata)
15615 + return ACCESS_16;
15616 ++
15617 + return ACCESS_UNKNOWN;
15618 + }
15619 +
15620 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
15621 +index 74550ccc7a20..e2ffb159cbe2 100644
15622 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
15623 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
15624 +@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
15625 + }
15626 + if (buff->is_ip_cso) {
15627 + __skb_incr_checksum_unnecessary(skb);
15628 +- if (buff->is_udp_cso || buff->is_tcp_cso)
15629 +- __skb_incr_checksum_unnecessary(skb);
15630 + } else {
15631 + skb->ip_summed = CHECKSUM_NONE;
15632 + }
15633 ++
15634 ++ if (buff->is_udp_cso || buff->is_tcp_cso)
15635 ++ __skb_incr_checksum_unnecessary(skb);
15636 + }
15637 +
15638 + #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
15639 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
15640 +index 803f7990d32b..40ca339ec3df 100644
15641 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
15642 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
15643 +@@ -1129,6 +1129,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
15644 + tpa_info = &rxr->rx_tpa[agg_id];
15645 +
15646 + if (unlikely(cons != rxr->rx_next_cons)) {
15647 ++ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
15648 ++ cons, rxr->rx_next_cons);
15649 + bnxt_sched_reset(bp, rxr);
15650 + return;
15651 + }
15652 +@@ -1581,15 +1583,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
15653 + }
15654 +
15655 + cons = rxcmp->rx_cmp_opaque;
15656 +- rx_buf = &rxr->rx_buf_ring[cons];
15657 +- data = rx_buf->data;
15658 +- data_ptr = rx_buf->data_ptr;
15659 + if (unlikely(cons != rxr->rx_next_cons)) {
15660 + int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
15661 +
15662 ++ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
15663 ++ cons, rxr->rx_next_cons);
15664 + bnxt_sched_reset(bp, rxr);
15665 + return rc1;
15666 + }
15667 ++ rx_buf = &rxr->rx_buf_ring[cons];
15668 ++ data = rx_buf->data;
15669 ++ data_ptr = rx_buf->data_ptr;
15670 + prefetch(data_ptr);
15671 +
15672 + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
15673 +@@ -1606,11 +1610,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
15674 +
15675 + rx_buf->data = NULL;
15676 + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
15677 ++ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
15678 ++
15679 + bnxt_reuse_rx_data(rxr, cons, data);
15680 + if (agg_bufs)
15681 + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
15682 +
15683 + rc = -EIO;
15684 ++ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
15685 ++ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
15686 ++ bnxt_sched_reset(bp, rxr);
15687 ++ }
15688 + goto next_rx;
15689 + }
15690 +
15691 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
15692 +index 503cfadff4ac..d4ee9f9c8c34 100644
15693 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
15694 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
15695 +@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
15696 + struct nicvf_cq_poll *cq_poll = NULL;
15697 + union nic_mbx mbx = {};
15698 +
15699 +- cancel_delayed_work_sync(&nic->link_change_work);
15700 +-
15701 + /* wait till all queued set_rx_mode tasks completes */
15702 +- drain_workqueue(nic->nicvf_rx_mode_wq);
15703 ++ if (nic->nicvf_rx_mode_wq) {
15704 ++ cancel_delayed_work_sync(&nic->link_change_work);
15705 ++ drain_workqueue(nic->nicvf_rx_mode_wq);
15706 ++ }
15707 +
15708 + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
15709 + nicvf_send_msg_to_pf(nic, &mbx);
15710 +@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
15711 + struct nicvf_cq_poll *cq_poll = NULL;
15712 +
15713 + /* wait till all queued set_rx_mode tasks completes if any */
15714 +- drain_workqueue(nic->nicvf_rx_mode_wq);
15715 ++ if (nic->nicvf_rx_mode_wq)
15716 ++ drain_workqueue(nic->nicvf_rx_mode_wq);
15717 +
15718 + netif_carrier_off(netdev);
15719 +
15720 +@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
15721 + /* Send VF config done msg to PF */
15722 + nicvf_send_cfg_done(nic);
15723 +
15724 +- INIT_DELAYED_WORK(&nic->link_change_work,
15725 +- nicvf_link_status_check_task);
15726 +- queue_delayed_work(nic->nicvf_rx_mode_wq,
15727 +- &nic->link_change_work, 0);
15728 ++ if (nic->nicvf_rx_mode_wq) {
15729 ++ INIT_DELAYED_WORK(&nic->link_change_work,
15730 ++ nicvf_link_status_check_task);
15731 ++ queue_delayed_work(nic->nicvf_rx_mode_wq,
15732 ++ &nic->link_change_work, 0);
15733 ++ }
15734 +
15735 + return 0;
15736 + cleanup:
15737 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
15738 +index 5b4d3badcb73..e246f9733bb8 100644
15739 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
15740 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
15741 +@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
15742 + /* Check if page can be recycled */
15743 + if (page) {
15744 + ref_count = page_ref_count(page);
15745 +- /* Check if this page has been used once i.e 'put_page'
15746 +- * called after packet transmission i.e internal ref_count
15747 +- * and page's ref_count are equal i.e page can be recycled.
15748 ++ /* This page can be recycled if internal ref_count and page's
15749 ++ * ref_count are equal, indicating that the page has been used
15750 ++ * once for packet transmission. For non-XDP mode, internal
15751 ++ * ref_count is always '1'.
15752 + */
15753 +- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
15754 +- pgcache->ref_count--;
15755 +- else
15756 +- page = NULL;
15757 +-
15758 +- /* In non-XDP mode, page's ref_count needs to be '1' for it
15759 +- * to be recycled.
15760 +- */
15761 +- if (!rbdr->is_xdp && (ref_count != 1))
15762 ++ if (rbdr->is_xdp) {
15763 ++ if (ref_count == pgcache->ref_count)
15764 ++ pgcache->ref_count--;
15765 ++ else
15766 ++ page = NULL;
15767 ++ } else if (ref_count != 1) {
15768 + page = NULL;
15769 ++ }
15770 + }
15771 +
15772 + if (!page) {
15773 +@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
15774 + while (head < rbdr->pgcnt) {
15775 + pgcache = &rbdr->pgcache[head];
15776 + if (pgcache->page && page_ref_count(pgcache->page) != 0) {
15777 +- if (!rbdr->is_xdp) {
15778 +- put_page(pgcache->page);
15779 +- continue;
15780 ++ if (rbdr->is_xdp) {
15781 ++ page_ref_sub(pgcache->page,
15782 ++ pgcache->ref_count - 1);
15783 + }
15784 +- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
15785 + put_page(pgcache->page);
15786 + }
15787 + head++;
15788 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
15789 +index 9a7f70db20c7..733d9172425b 100644
15790 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
15791 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
15792 +@@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic)
15793 +
15794 + for (i = 0; i < enic->intr_count; i++) {
15795 + if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
15796 +- (enic->msix[i].affinity_mask &&
15797 ++ (cpumask_available(enic->msix[i].affinity_mask) &&
15798 + !cpumask_empty(enic->msix[i].affinity_mask)))
15799 + continue;
15800 + if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
15801 +@@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic)
15802 + for (i = 0; i < enic->intr_count; i++) {
15803 + if (enic_is_err_intr(enic, i) ||
15804 + enic_is_notify_intr(enic, i) ||
15805 +- !enic->msix[i].affinity_mask ||
15806 ++ !cpumask_available(enic->msix[i].affinity_mask) ||
15807 + cpumask_empty(enic->msix[i].affinity_mask))
15808 + continue;
15809 + err = irq_set_affinity_hint(enic->msix_entry[i].vector,
15810 +@@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic)
15811 + for (i = 0; i < enic->wq_count; i++) {
15812 + int wq_intr = enic_msix_wq_intr(enic, i);
15813 +
15814 +- if (enic->msix[wq_intr].affinity_mask &&
15815 ++ if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
15816 + !cpumask_empty(enic->msix[wq_intr].affinity_mask))
15817 + netif_set_xps_queue(enic->netdev,
15818 + enic->msix[wq_intr].affinity_mask,
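With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer that may be NULL until zalloc_cpumask_var() runs, while without it the type is an on-stack array and a plain truth test is meaningless; cpumask_available() hides that difference, which is what the hunk substitutes for the raw tests. Sketched:

    #include <linux/cpumask.h>

    /* Sketch: portable "is this mask allocated and populated?" test. */
    static bool mask_ready(cpumask_var_t mask)
    {
            return cpumask_available(mask) && !cpumask_empty(mask);
    }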
15819 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
15820 +index 36eab37d8a40..09c774fe8853 100644
15821 +--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
15822 ++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
15823 +@@ -192,6 +192,7 @@ struct hnae3_ae_dev {
15824 + const struct hnae3_ae_ops *ops;
15825 + struct list_head node;
15826 + u32 flag;
15827 ++ u8 override_pci_need_reset; /* fix to stop multiple reset happening */
15828 + enum hnae3_dev_type dev_type;
15829 + enum hnae3_reset_type reset_type;
15830 + void *priv;
15831 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
15832 +index 1bf7a5f116a0..d84c50068f66 100644
15833 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
15834 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
15835 +@@ -1852,7 +1852,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
15836 +
15837 + /* request the reset */
15838 + if (ae_dev->ops->reset_event) {
15839 +- ae_dev->ops->reset_event(pdev, NULL);
15840 ++ if (!ae_dev->override_pci_need_reset)
15841 ++ ae_dev->ops->reset_event(pdev, NULL);
15842 ++
15843 + return PCI_ERS_RESULT_RECOVERED;
15844 + }
15845 +
15846 +@@ -2476,6 +2478,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
15847 + desc = &ring->desc[ring->next_to_clean];
15848 + desc_cb = &ring->desc_cb[ring->next_to_clean];
15849 + bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
15850 ++ /* make sure the HW descriptor write is complete before we read it */
15851 ++ dma_rmb();
15852 + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
15853 + return -ENXIO;
15854 +
15855 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
15856 +index d0f654123b9b..3ea72e4d9dc4 100644
15857 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
15858 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
15859 +@@ -1094,10 +1094,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
15860 + return 0;
15861 + }
15862 +
15863 +-static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
15864 ++static enum hnae3_reset_type
15865 ++hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
15866 + {
15867 +- enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
15868 +- struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
15869 ++ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
15870 + struct device *dev = &hdev->pdev->dev;
15871 + struct hclge_desc desc[2];
15872 + unsigned int status;
15873 +@@ -1110,17 +1110,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
15874 + if (ret) {
15875 + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
15876 + /* reset everything for now */
15877 +- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
15878 +- return ret;
15879 ++ return HNAE3_GLOBAL_RESET;
15880 + }
15881 +
15882 + status = le32_to_cpu(desc[0].data[0]);
15883 +
15884 +- if (status & HCLGE_ROCEE_RERR_INT_MASK)
15885 ++ if (status & HCLGE_ROCEE_RERR_INT_MASK) {
15886 + dev_warn(dev, "ROCEE RAS AXI rresp error\n");
15887 ++ reset_type = HNAE3_FUNC_RESET;
15888 ++ }
15889 +
15890 +- if (status & HCLGE_ROCEE_BERR_INT_MASK)
15891 ++ if (status & HCLGE_ROCEE_BERR_INT_MASK) {
15892 + dev_warn(dev, "ROCEE RAS AXI bresp error\n");
15893 ++ reset_type = HNAE3_FUNC_RESET;
15894 ++ }
15895 +
15896 + if (status & HCLGE_ROCEE_ECC_INT_MASK) {
15897 + dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
15898 +@@ -1132,9 +1135,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
15899 + if (ret) {
15900 + dev_err(dev, "failed(%d) to process ovf error\n", ret);
15901 + /* reset everything for now */
15902 +- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
15903 +- return ret;
15904 ++ return HNAE3_GLOBAL_RESET;
15905 + }
15906 ++ reset_type = HNAE3_FUNC_RESET;
15907 + }
15908 +
15909 + /* clear error status */
15910 +@@ -1143,12 +1146,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
15911 + if (ret) {
15912 + dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
15913 + /* reset everything for now */
15914 +- reset_type = HNAE3_GLOBAL_RESET;
15915 ++ return HNAE3_GLOBAL_RESET;
15916 + }
15917 +
15918 +- HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
15919 +-
15920 +- return ret;
15921 ++ return reset_type;
15922 + }
15923 +
15924 + static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
15925 +@@ -1178,15 +1179,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
15926 + return ret;
15927 + }
15928 +
15929 +-static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
15930 ++static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
15931 + {
15932 ++ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
15933 + struct hclge_dev *hdev = ae_dev->priv;
15934 +
15935 + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
15936 + hdev->pdev->revision < 0x21)
15937 +- return HNAE3_NONE_RESET;
15938 ++ return;
15939 +
15940 +- return hclge_log_and_clear_rocee_ras_error(hdev);
15941 ++ reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
15942 ++ if (reset_type != HNAE3_NONE_RESET)
15943 ++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
15944 + }
15945 +
15946 + static const struct hclge_hw_blk hw_blk[] = {
15947 +@@ -1259,8 +1263,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
15948 + hclge_handle_all_ras_errors(hdev);
15949 + } else {
15950 + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
15951 +- hdev->pdev->revision < 0x21)
15952 ++ hdev->pdev->revision < 0x21) {
15953 ++ ae_dev->override_pci_need_reset = 1;
15954 + return PCI_ERS_RESULT_RECOVERED;
15955 ++ }
15956 + }
15957 +
15958 + if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
15959 +@@ -1269,8 +1275,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
15960 + }
15961 +
15962 + if (status & HCLGE_RAS_REG_NFE_MASK ||
15963 +- status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
15964 ++ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
15965 ++ ae_dev->override_pci_need_reset = 0;
15966 + return PCI_ERS_RESULT_NEED_RESET;
15967 ++ }
15968 ++ ae_dev->override_pci_need_reset = 1;
15969 +
15970 + return PCI_ERS_RESULT_RECOVERED;
15971 + }
15972 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
15973 +index 5ecbb1adcf3b..51cfe95f3e24 100644
15974 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
15975 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
15976 +@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
15977 + */
15978 + adapter->state = VNIC_PROBED;
15979 +
15980 ++ reinit_completion(&adapter->init_done);
15981 + rc = init_crq_queue(adapter);
15982 + if (rc) {
15983 + netdev_err(adapter->netdev,
15984 +@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
15985 + old_num_rx_queues = adapter->req_rx_queues;
15986 + old_num_tx_queues = adapter->req_tx_queues;
15987 +
15988 +- init_completion(&adapter->init_done);
15989 ++ reinit_completion(&adapter->init_done);
15990 + adapter->init_done_rc = 0;
15991 + ibmvnic_send_crq_init(adapter);
15992 + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
15993 +@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
15994 +
15995 + adapter->from_passive_init = false;
15996 +
15997 +- init_completion(&adapter->init_done);
15998 + adapter->init_done_rc = 0;
15999 + ibmvnic_send_crq_init(adapter);
16000 + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
16001 +@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
16002 + INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
16003 + INIT_LIST_HEAD(&adapter->rwi_list);
16004 + spin_lock_init(&adapter->rwi_lock);
16005 ++ init_completion(&adapter->init_done);
16006 + adapter->resetting = false;
16007 +
16008 + adapter->mac_change_pending = false;
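The ibmvnic hunks initialise the completion once in probe and use reinit_completion() for every later cycle: init_completion() also reinitialises the internal wait queue, which is unsafe while a waiter might exist, whereas reinit_completion() only resets the done counter. The usual shape of that pattern, as a sketch:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct my_adapter {
            struct completion init_done;
    };

    /* probe(): one-time setup, before any waiter can exist */
    static void my_probe(struct my_adapter *a)
    {
            init_completion(&a->init_done);
    }

    /* every (re)init cycle: reset the counter, then wait */
    static int my_reinit(struct my_adapter *a, unsigned long timeout)
    {
            reinit_completion(&a->init_done); /* safe on a live completion */
            /* ... kick off the asynchronous init here ... */
            if (!wait_for_completion_timeout(&a->init_done, timeout))
                    return -ETIMEDOUT;
            return 0;
    }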
16009 +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
16010 +index 189f231075c2..7acc61e4f645 100644
16011 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c
16012 ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
16013 +@@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
16014 + if (strlen(netdev->name) < (IFNAMSIZ - 5))
16015 + snprintf(adapter->rx_ring->name,
16016 + sizeof(adapter->rx_ring->name) - 1,
16017 +- "%s-rx-0", netdev->name);
16018 ++ "%.14s-rx-0", netdev->name);
16019 + else
16020 + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
16021 + err = request_irq(adapter->msix_entries[vector].vector,
16022 +@@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
16023 + if (strlen(netdev->name) < (IFNAMSIZ - 5))
16024 + snprintf(adapter->tx_ring->name,
16025 + sizeof(adapter->tx_ring->name) - 1,
16026 +- "%s-tx-0", netdev->name);
16027 ++ "%.14s-tx-0", netdev->name);
16028 + else
16029 + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
16030 + err = request_irq(adapter->msix_entries[vector].vector,
16031 +@@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work)
16032 + /* 8000ES2LAN requires a Rx packet buffer work-around
16033 + * on link down event; reset the controller to flush
16034 + * the Rx packet buffer.
16035 ++ *
16036 ++ * If the link is lost the controller stops DMA, but
16037 ++ * if there is queued Tx work it cannot be done. So
16038 ++ * reset the controller to flush the Tx packet buffers.
16039 + */
16040 +- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
16041 ++ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
16042 ++ e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
16043 + adapter->flags |= FLAG_RESTART_NOW;
16044 + else
16045 + pm_schedule_suspend(netdev->dev.parent,
16046 +@@ -5333,14 +5338,6 @@ link_up:
16047 + adapter->gotc_old = adapter->stats.gotc;
16048 + spin_unlock(&adapter->stats64_lock);
16049 +
16050 +- /* If the link is lost the controller stops DMA, but
16051 +- * if there is queued Tx work it cannot be done. So
16052 +- * reset the controller to flush the Tx packet buffers.
16053 +- */
16054 +- if (!netif_carrier_ok(netdev) &&
16055 +- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
16056 +- adapter->flags |= FLAG_RESTART_NOW;
16057 +-
16058 + /* If reset is necessary, do it outside of interrupt context. */
16059 + if (adapter->flags & FLAG_RESTART_NOW) {
16060 + schedule_work(&adapter->reset_task);
16061 +@@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
16062 +
16063 + e1000_print_device_info(adapter);
16064 +
16065 ++ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
16066 ++
16067 + if (pci_dev_run_wake(pdev))
16068 + pm_runtime_put_noidle(&pdev->dev);
16069 +
16070 +diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
16071 +index 2e5693107fa4..8d602247eb44 100644
16072 +--- a/drivers/net/ethernet/intel/ice/ice_switch.c
16073 ++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
16074 +@@ -1538,9 +1538,20 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
16075 + } else if (!list_elem->vsi_list_info) {
16076 + status = ICE_ERR_DOES_NOT_EXIST;
16077 + goto exit;
16078 ++ } else if (list_elem->vsi_list_info->ref_cnt > 1) {
16079 ++ /* a ref_cnt > 1 indicates that the vsi_list is being
16080 ++ * shared by multiple rules. Decrement the ref_cnt and
16081 ++ * remove this rule, but do not modify the list, as it
16082 ++ * is in-use by other rules.
16083 ++ * is in use by other rules.
16084 ++ list_elem->vsi_list_info->ref_cnt--;
16085 ++ remove_rule = true;
16086 + } else {
16087 +- if (list_elem->vsi_list_info->ref_cnt > 1)
16088 +- list_elem->vsi_list_info->ref_cnt--;
16089 ++ /* a ref_cnt of 1 indicates the vsi_list is only used
16090 ++ * by one rule. However, the original removal request is only
16091 ++ * for a single VSI. Update the vsi_list first, and only
16092 ++ * remove the rule if there are no further VSIs in this list.
16093 ++ */
16094 + vsi_handle = f_entry->fltr_info.vsi_handle;
16095 + status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
16096 + if (status)
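The rewritten branches make the vsi_list reference-count decision explicit. Condensed, with remove_vsi_from_list() as a placeholder for the ice_rem_update_vsi_list() call shown above:

    if (list_elem->vsi_list_info->ref_cnt > 1) {
            /* list shared by other rules: detach this rule only */
            list_elem->vsi_list_info->ref_cnt--;
            remove_rule = true;
    } else {
            /* sole user: shrink the VSI list first; the helper marks the
             * rule for removal only once no VSIs remain on the list
             */
            status = remove_vsi_from_list(hw, vsi_handle, list_elem);
    }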
16097 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
16098 +index 16066c2d5b3a..931beac3359d 100644
16099 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
16100 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
16101 +@@ -1380,13 +1380,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port)
16102 + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
16103 + mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
16104 +
16105 +- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
16106 +- ~MVPP2_GMAC_PORT_RESET_MASK;
16107 ++ val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
16108 ++ MVPP2_GMAC_PORT_RESET_MASK;
16109 + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
16110 +-
16111 +- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
16112 +- MVPP2_GMAC_PORT_RESET_MASK)
16113 +- continue;
16114 + }
16115 +
16116 + /* Change maximum receive size of the port */
16117 +@@ -4543,12 +4539,15 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
16118 + const struct phylink_link_state *state)
16119 + {
16120 + u32 an, ctrl0, ctrl2, ctrl4;
16121 ++ u32 old_ctrl2;
16122 +
16123 + an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
16124 + ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
16125 + ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
16126 + ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
16127 +
16128 ++ old_ctrl2 = ctrl2;
16129 ++
16130 + /* Force link down */
16131 + an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
16132 + an |= MVPP2_GMAC_FORCE_LINK_DOWN;
16133 +@@ -4621,6 +4620,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
16134 + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
16135 + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
16136 + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
16137 ++
16138 ++ if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
16139 ++ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
16140 ++ MVPP2_GMAC_PORT_RESET_MASK)
16141 ++ continue;
16142 ++ }
16143 + }
16144 +
16145 + static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
16146 +diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
16147 +index 57727fe1501e..8b3495ee2b6e 100644
16148 +--- a/drivers/net/ethernet/marvell/sky2.c
16149 ++++ b/drivers/net/ethernet/marvell/sky2.c
16150 +@@ -46,6 +46,7 @@
16151 + #include <linux/mii.h>
16152 + #include <linux/of_device.h>
16153 + #include <linux/of_net.h>
16154 ++#include <linux/dmi.h>
16155 +
16156 + #include <asm/irq.h>
16157 +
16158 +@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
16159 + module_param(copybreak, int, 0);
16160 + MODULE_PARM_DESC(copybreak, "Receive copy threshold");
16161 +
16162 +-static int disable_msi = 0;
16163 ++static int disable_msi = -1;
16164 + module_param(disable_msi, int, 0);
16165 + MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
16166 +
16167 +@@ -4917,6 +4918,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
16168 + return buf;
16169 + }
16170 +
16171 ++static const struct dmi_system_id msi_blacklist[] = {
16172 ++ {
16173 ++ .ident = "Dell Inspiron 1545",
16174 ++ .matches = {
16175 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
16176 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
16177 ++ },
16178 ++ },
16179 ++ {
16180 ++ .ident = "Gateway P-79",
16181 ++ .matches = {
16182 ++ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
16183 ++ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
16184 ++ },
16185 ++ },
16186 ++ {}
16187 ++};
16188 ++
16189 + static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
16190 + {
16191 + struct net_device *dev, *dev1;
16192 +@@ -5028,6 +5047,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
16193 + goto err_out_free_pci;
16194 + }
16195 +
16196 ++ if (disable_msi == -1)
16197 ++ disable_msi = !!dmi_check_system(msi_blacklist);
16198 ++
16199 + if (!disable_msi && pci_enable_msi(pdev) == 0) {
16200 + err = sky2_test_msi(hw);
16201 + if (err) {
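Changing the default from 0 to -1 turns disable_msi into a tri-state: forced on, forced off, or decided from the DMI table at probe time. The pattern, sketched with made-up DMI match entries:

    #include <linux/dmi.h>
    #include <linux/moduleparam.h>

    static int disable_msi = -1; /* -1: auto, 0: use MSI, 1: disable MSI */
    module_param(disable_msi, int, 0);

    static const struct dmi_system_id msi_blacklist[] = {
            {
                    .ident = "Hypothetical broken platform",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBoard"),
                    },
            },
            {}
    };

    /* at probe: resolve 'auto' from the DMI table, keep explicit choices */
    static void resolve_msi_policy(void)
    {
            if (disable_msi == -1)
                    disable_msi = !!dmi_check_system(msi_blacklist);
    }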
16202 +diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
16203 +index e65bc3c95630..857588e2488d 100644
16204 +--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
16205 ++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
16206 +@@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
16207 + if (!priv->cmd.context)
16208 + return -ENOMEM;
16209 +
16210 ++ if (mlx4_is_mfunc(dev))
16211 ++ mutex_lock(&priv->cmd.slave_cmd_mutex);
16212 + down_write(&priv->cmd.switch_sem);
16213 + for (i = 0; i < priv->cmd.max_cmds; ++i) {
16214 + priv->cmd.context[i].token = i;
16215 +@@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
16216 + down(&priv->cmd.poll_sem);
16217 + priv->cmd.use_events = 1;
16218 + up_write(&priv->cmd.switch_sem);
16219 ++ if (mlx4_is_mfunc(dev))
16220 ++ mutex_unlock(&priv->cmd.slave_cmd_mutex);
16221 +
16222 + return err;
16223 + }
16224 +@@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
16225 + struct mlx4_priv *priv = mlx4_priv(dev);
16226 + int i;
16227 +
16228 ++ if (mlx4_is_mfunc(dev))
16229 ++ mutex_lock(&priv->cmd.slave_cmd_mutex);
16230 + down_write(&priv->cmd.switch_sem);
16231 + priv->cmd.use_events = 0;
16232 +
16233 +@@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
16234 + down(&priv->cmd.event_sem);
16235 +
16236 + kfree(priv->cmd.context);
16237 ++ priv->cmd.context = NULL;
16238 +
16239 + up(&priv->cmd.poll_sem);
16240 + up_write(&priv->cmd.switch_sem);
16241 ++ if (mlx4_is_mfunc(dev))
16242 ++ mutex_unlock(&priv->cmd.slave_cmd_mutex);
16243 + }
16244 +
16245 + struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
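
[Note: the cmd.c hunks above make two defensive moves on multifunction devices: slave_cmd_mutex is held across the whole event/poll mode switch, and the freed context array is NULLed so a later mode switch cannot touch stale memory. A tiny standalone sketch of the free-then-NULL half; names here are placeholders:]

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd_state { void *context; };

    static void use_polling(struct cmd_state *c)
    {
            free(c->context);
            c->context = NULL;      /* a later mode switch sees a clean state */
    }

    int main(void)
    {
            struct cmd_state c = { .context = malloc(64) };

            use_polling(&c);
            use_polling(&c);        /* safe: free(NULL) is a no-op */
            printf("context=%p\n", c.context);
            return 0;
    }
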
16246 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
16247 +index eb13d3618162..4356f3a58002 100644
16248 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
16249 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
16250 +@@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
16251 + int total_pages;
16252 + int total_mem;
16253 + int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
16254 ++ int tot;
16255 +
16256 + sq_size = 1 << (log_sq_size + log_sq_sride + 4);
16257 + rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
16258 + total_mem = sq_size + rq_size;
16259 +- total_pages =
16260 +- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
16261 +- page_shift);
16262 ++ tot = (total_mem + (page_offset << 6)) >> page_shift;
16263 ++ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
16264 +
16265 + return total_pages;
16266 + }
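
[Note: the resource_tracker.c change guards against rounding zero up to a power of two, which is undefined for the kernel's roundup_pow_of_two(); a QP backed by SRQ/RSS/XRC can legitimately compute a zero page count, now clamped to one. A standalone rendition of the fixed computation, with a plain-C substitute for the kernel macro:]

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_u32(unsigned int x)
    {
            unsigned int p = 1;

            while (p < x)   /* caller guarantees x >= 1 and x <= 2^31 */
                    p <<= 1;
            return p;
    }

    static int qp_mtt_pages(int total_mem, int page_offset, int page_shift)
    {
            int tot = (total_mem + (page_offset << 6)) >> page_shift;

            return !tot ? 1 : roundup_pow_of_two_u32(tot);
    }

    int main(void)
    {
            printf("%d\n", qp_mtt_pages(0, 0, 12));      /* 1, not undefined */
            printf("%d\n", qp_mtt_pages(12289, 0, 12));  /* 3 pages -> 4 */
            return 0;
    }
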
16267 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
16268 +index eac245a93f91..4ab0d030b544 100644
16269 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
16270 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
16271 +@@ -122,7 +122,9 @@ out:
16272 + return err;
16273 + }
16274 +
16275 +-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
16276 ++/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
16277 ++ * minimum speed value is 40Gbps
16278 ++ */
16279 + static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
16280 + {
16281 + u32 speed;
16282 +@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
16283 + int err;
16284 +
16285 + err = mlx5e_port_linkspeed(priv->mdev, &speed);
16286 +- if (err) {
16287 +- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
16288 +- return 0;
16289 +- }
16290 ++ if (err)
16291 ++ speed = SPEED_40000;
16292 ++ speed = max_t(u32, speed, SPEED_40000);
16293 +
16294 + xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
16295 +
16296 +@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
16297 + }
16298 +
16299 + static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
16300 +- u32 xoff, unsigned int mtu)
16301 ++ u32 xoff, unsigned int max_mtu)
16302 + {
16303 + int i;
16304 +
16305 +@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
16306 + }
16307 +
16308 + if (port_buffer->buffer[i].size <
16309 +- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
16310 ++ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
16311 + return -ENOMEM;
16312 +
16313 + port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
16314 +- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
16315 ++ port_buffer->buffer[i].xon =
16316 ++ port_buffer->buffer[i].xoff - max_mtu;
16317 + }
16318 +
16319 + return 0;
16320 +@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
16321 +
16322 + /**
16323 + * update_buffer_lossy()
16324 +- * mtu: device's MTU
16325 ++ * max_mtu: netdev's max_mtu
16326 + * pfc_en: <input> current pfc configuration
16327 + * buffer: <input> current prio to buffer mapping
16328 + * xoff: <input> xoff value
16329 +@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
16330 + * Return 0 if no error.
16331 + * Set change to true if buffer configuration is modified.
16332 + */
16333 +-static int update_buffer_lossy(unsigned int mtu,
16334 ++static int update_buffer_lossy(unsigned int max_mtu,
16335 + u8 pfc_en, u8 *buffer, u32 xoff,
16336 + struct mlx5e_port_buffer *port_buffer,
16337 + bool *change)
16338 +@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
16339 + }
16340 +
16341 + if (changed) {
16342 +- err = update_xoff_threshold(port_buffer, xoff, mtu);
16343 ++ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
16344 + if (err)
16345 + return err;
16346 +
16347 +@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
16348 + return 0;
16349 + }
16350 +
16351 ++#define MINIMUM_MAX_MTU 9216
16352 + int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16353 + u32 change, unsigned int mtu,
16354 + struct ieee_pfc *pfc,
16355 +@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16356 + bool update_prio2buffer = false;
16357 + u8 buffer[MLX5E_MAX_PRIORITY];
16358 + bool update_buffer = false;
16359 ++ unsigned int max_mtu;
16360 + u32 total_used = 0;
16361 + u8 curr_pfc_en;
16362 + int err;
16363 + int i;
16364 +
16365 + mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
16366 ++ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
16367 +
16368 + err = mlx5e_port_query_buffer(priv, &port_buffer);
16369 + if (err)
16370 +@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16371 +
16372 + if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
16373 + update_buffer = true;
16374 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
16375 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
16376 + if (err)
16377 + return err;
16378 + }
16379 +@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16380 + if (err)
16381 + return err;
16382 +
16383 +- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
16384 ++ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
16385 + &port_buffer, &update_buffer);
16386 + if (err)
16387 + return err;
16388 +@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16389 + if (err)
16390 + return err;
16391 +
16392 +- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
16393 +- &port_buffer, &update_buffer);
16394 ++ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
16395 ++ xoff, &port_buffer, &update_buffer);
16396 + if (err)
16397 + return err;
16398 + }
16399 +@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16400 + return -EINVAL;
16401 +
16402 + update_buffer = true;
16403 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
16404 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
16405 + if (err)
16406 + return err;
16407 + }
16408 +@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
16409 + /* Need to update buffer configuration if xoff value is changed */
16410 + if (!update_buffer && xoff != priv->dcbx.xoff) {
16411 + update_buffer = true;
16412 +- err = update_xoff_threshold(&port_buffer, xoff, mtu);
16413 ++ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
16414 + if (err)
16415 + return err;
16416 + }
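
[Note: the port_buffer.c hunks above clamp the link speed used in the xoff computation to a 40 Gbps floor (falling back to it when the speed query fails) and size thresholds against the netdev's max_mtu rather than the current MTU. The integer form of the xoff formula works out as in this standalone sketch; speed is in Mbps as mlx5e_port_linkspeed() reports it, and the example values are illustrative:]

    #include <stdio.h>

    #define SPEED_40000 40000u      /* Mbps */

    /* xoff = ((301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B]),
     * written with the same integer scaling the driver uses
     */
    static unsigned int calculate_xoff(unsigned int speed, unsigned int cable_len,
                                       unsigned int mtu)
    {
            if (speed < SPEED_40000)
                    speed = SPEED_40000;    /* the new floor */

            return (301 + 216 * cable_len / 100) * speed / 1000 + 272 * mtu / 100;
    }

    int main(void)
    {
            /* 100 Gbps link, 7 m cable, 1500-byte MTU -> 35680 bytes */
            printf("xoff = %u\n", calculate_xoff(100000, 7, 1500));

            /* failed/low-speed query is computed as if the port ran at 40G */
            printf("xoff = %u\n", calculate_xoff(10000, 7, 1500));
            return 0;
    }
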
16417 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
16418 +index 3078491cc0d0..1539cf3de5dc 100644
16419 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
16420 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
16421 +@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
16422 + if (err)
16423 + return err;
16424 +
16425 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
16426 + list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
16427 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
16428 +
16429 + return 0;
16430 + }
16431 +@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
16432 + void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
16433 + struct mlx5e_tir *tir)
16434 + {
16435 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
16436 + mlx5_core_destroy_tir(mdev, tir->tirn);
16437 + list_del(&tir->list);
16438 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
16439 + }
16440 +
16441 + static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
16442 +@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
16443 + }
16444 +
16445 + INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
16446 ++ mutex_init(&mdev->mlx5e_res.td.list_lock);
16447 +
16448 + return 0;
16449 +
16450 +@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
16451 + {
16452 + struct mlx5_core_dev *mdev = priv->mdev;
16453 + struct mlx5e_tir *tir;
16454 +- int err = -ENOMEM;
16455 ++ int err = 0;
16456 + u32 tirn = 0;
16457 + int inlen;
16458 + void *in;
16459 +
16460 + inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
16461 + in = kvzalloc(inlen, GFP_KERNEL);
16462 +- if (!in)
16463 ++ if (!in) {
16464 ++ err = -ENOMEM;
16465 + goto out;
16466 ++ }
16467 +
16468 + if (enable_uc_lb)
16469 + MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
16470 +@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
16471 +
16472 + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
16473 +
16474 ++ mutex_lock(&mdev->mlx5e_res.td.list_lock);
16475 + list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
16476 + tirn = tir->tirn;
16477 + err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
16478 +@@ -168,6 +176,7 @@ out:
16479 + kvfree(in);
16480 + if (err)
16481 + netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
16482 ++ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
16483 +
16484 + return err;
16485 + }
16486 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
16487 +index 47233b9a4f81..e6099f51d25f 100644
16488 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
16489 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
16490 +@@ -357,6 +357,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
16491 +
16492 + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
16493 + priv->channels.params = new_channels.params;
16494 ++ if (!netif_is_rxfh_configured(priv->netdev))
16495 ++ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
16496 ++ MLX5E_INDIR_RQT_SIZE, count);
16497 + goto out;
16498 + }
16499 +
16500 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
16501 +index 5b492b67f4e1..13c48883ed61 100644
16502 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
16503 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
16504 +@@ -1812,7 +1812,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
16505 + u64 node_guid;
16506 + int err = 0;
16507 +
16508 +- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
16509 ++ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
16510 + return -EPERM;
16511 + if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
16512 + return -EINVAL;
16513 +@@ -1886,7 +1886,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
16514 + {
16515 + struct mlx5_vport *evport;
16516 +
16517 +- if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
16518 ++ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
16519 + return -EPERM;
16520 + if (!LEGAL_VPORT(esw, vport))
16521 + return -EINVAL;
16522 +@@ -2059,19 +2059,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
16523 + int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
16524 + u32 max_rate, u32 min_rate)
16525 + {
16526 +- u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
16527 +- bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
16528 +- fw_max_bw_share >= MLX5_MIN_BW_SHARE;
16529 +- bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
16530 + struct mlx5_vport *evport;
16531 ++ u32 fw_max_bw_share;
16532 + u32 previous_min_rate;
16533 + u32 divider;
16534 ++ bool min_rate_supported;
16535 ++ bool max_rate_supported;
16536 + int err = 0;
16537 +
16538 + if (!ESW_ALLOWED(esw))
16539 + return -EPERM;
16540 + if (!LEGAL_VPORT(esw, vport))
16541 + return -EINVAL;
16542 ++
16543 ++ fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
16544 ++ min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
16545 ++ fw_max_bw_share >= MLX5_MIN_BW_SHARE;
16546 ++ max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
16547 ++
16548 + if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
16549 + return -EOPNOTSUPP;
16550 +
16551 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
16552 +index 5cf5f2a9d51f..8de64e88c670 100644
16553 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
16554 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
16555 +@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
16556 + void *cmd;
16557 + int ret;
16558 +
16559 ++ rcu_read_lock();
16560 ++ flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
16561 ++ rcu_read_unlock();
16562 ++
16563 ++ if (!flow) {
16564 ++ WARN_ONCE(1, "Received NULL pointer for handle\n");
16565 ++ return -EINVAL;
16566 ++ }
16567 ++
16568 + buf = kzalloc(size, GFP_ATOMIC);
16569 + if (!buf)
16570 + return -ENOMEM;
16571 +
16572 + cmd = (buf + 1);
16573 +
16574 +- rcu_read_lock();
16575 +- flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
16576 +- rcu_read_unlock();
16577 + mlx5_fpga_tls_flow_to_cmd(flow, cmd);
16578 +
16579 + MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
16580 +@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
16581 + buf->complete = mlx_tls_kfree_complete;
16582 +
16583 + ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
16584 ++ if (ret < 0)
16585 ++ kfree(buf);
16586 +
16587 + return ret;
16588 + }
16589 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
16590 +index be81b319b0dc..694edd899322 100644
16591 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
16592 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
16593 +@@ -163,26 +163,6 @@ static struct mlx5_profile profile[] = {
16594 + .size = 8,
16595 + .limit = 4
16596 + },
16597 +- .mr_cache[16] = {
16598 +- .size = 8,
16599 +- .limit = 4
16600 +- },
16601 +- .mr_cache[17] = {
16602 +- .size = 8,
16603 +- .limit = 4
16604 +- },
16605 +- .mr_cache[18] = {
16606 +- .size = 8,
16607 +- .limit = 4
16608 +- },
16609 +- .mr_cache[19] = {
16610 +- .size = 4,
16611 +- .limit = 2
16612 +- },
16613 +- .mr_cache[20] = {
16614 +- .size = 4,
16615 +- .limit = 2
16616 +- },
16617 + },
16618 + };
16619 +
16620 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
16621 +index 370ca94b6775..c7c2920c05c4 100644
16622 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
16623 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
16624 +@@ -40,6 +40,9 @@
16625 + #include "mlx5_core.h"
16626 + #include "lib/eq.h"
16627 +
16628 ++static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
16629 ++ struct mlx5_core_dct *dct);
16630 ++
16631 + static struct mlx5_core_rsc_common *
16632 + mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
16633 + {
16634 +@@ -227,13 +230,42 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
16635 + wait_for_completion(&qp->common.free);
16636 + }
16637 +
16638 ++static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
16639 ++ struct mlx5_core_dct *dct, bool need_cleanup)
16640 ++{
16641 ++ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
16642 ++ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
16643 ++ struct mlx5_core_qp *qp = &dct->mqp;
16644 ++ int err;
16645 ++
16646 ++ err = mlx5_core_drain_dct(dev, dct);
16647 ++ if (err) {
16648 ++ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
16649 ++ goto destroy;
16650 ++ } else {
16651 ++ mlx5_core_warn(
16652 ++ dev, "failed drain DCT 0x%x with error 0x%x\n",
16653 ++ qp->qpn, err);
16654 ++ return err;
16655 ++ }
16656 ++ }
16657 ++ wait_for_completion(&dct->drained);
16658 ++destroy:
16659 ++ if (need_cleanup)
16660 ++ destroy_resource_common(dev, &dct->mqp);
16661 ++ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
16662 ++ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
16663 ++ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
16664 ++ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
16665 ++ (void *)&out, sizeof(out));
16666 ++ return err;
16667 ++}
16668 ++
16669 + int mlx5_core_create_dct(struct mlx5_core_dev *dev,
16670 + struct mlx5_core_dct *dct,
16671 + u32 *in, int inlen)
16672 + {
16673 + u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
16674 +- u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
16675 +- u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
16676 + struct mlx5_core_qp *qp = &dct->mqp;
16677 + int err;
16678 +
16679 +@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
16680 +
16681 + return 0;
16682 + err_cmd:
16683 +- MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
16684 +- MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
16685 +- MLX5_SET(destroy_dct_in, din, uid, qp->uid);
16686 +- mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
16687 +- (void *)&out, sizeof(dout));
16688 ++ _mlx5_core_destroy_dct(dev, dct, false);
16689 + return err;
16690 + }
16691 + EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
16692 +@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
16693 + int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
16694 + struct mlx5_core_dct *dct)
16695 + {
16696 +- u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
16697 +- u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
16698 +- struct mlx5_core_qp *qp = &dct->mqp;
16699 +- int err;
16700 +-
16701 +- err = mlx5_core_drain_dct(dev, dct);
16702 +- if (err) {
16703 +- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
16704 +- goto destroy;
16705 +- } else {
16706 +- mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
16707 +- return err;
16708 +- }
16709 +- }
16710 +- wait_for_completion(&dct->drained);
16711 +-destroy:
16712 +- destroy_resource_common(dev, &dct->mqp);
16713 +- MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
16714 +- MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
16715 +- MLX5_SET(destroy_dct_in, in, uid, qp->uid);
16716 +- err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
16717 +- (void *)&out, sizeof(out));
16718 +- return err;
16719 ++ return _mlx5_core_destroy_dct(dev, dct, true);
16720 + }
16721 + EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
16722 +
16723 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
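
[Note: the qp.c refactor above folds the create-error path and the normal destroy path into _mlx5_core_destroy_dct(), with need_cleanup distinguishing a DCT that was never registered in the resource table from one that was. A compact sketch of that one-helper-two-paths shape; all names below are placeholders:]

    #include <stdbool.h>
    #include <stdio.h>

    struct dct { int qpn; bool tracked; };

    static int _destroy_dct(struct dct *d, bool need_cleanup)
    {
            if (need_cleanup)
                    d->tracked = false;     /* only if create fully succeeded */
            printf("DESTROY_DCT cmd for qpn %d\n", d->qpn);
            return 0;
    }

    static int create_dct(struct dct *d, bool fw_accepts)
    {
            if (!fw_accepts) {
                    _destroy_dct(d, false); /* no resource entry to remove */
                    return -1;
            }
            d->tracked = true;
            return 0;
    }

    int main(void)
    {
            struct dct a = { .qpn = 1 }, b = { .qpn = 2 };

            create_dct(&a, true);
            create_dct(&b, false);          /* error path */
            return _destroy_dct(&a, true);  /* normal teardown */
    }
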
16724 +index b65e274b02e9..cbdee5164be7 100644
16725 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
16726 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
16727 +@@ -2105,7 +2105,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
16728 + int i;
16729 +
16730 + for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
16731 +- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
16732 ++ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
16733 + mlxsw_sp_port_hw_prio_stats[i].str, prio);
16734 + *p += ETH_GSTRING_LEN;
16735 + }
16736 +@@ -2116,7 +2116,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
16737 + int i;
16738 +
16739 + for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
16740 +- snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
16741 ++ snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
16742 + mlxsw_sp_port_hw_tc_stats[i].str, tc);
16743 + *p += ETH_GSTRING_LEN;
16744 + }
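
[Note: the "%.29s_%.1d" formats above bound the copied prefix so the composed string always fits the 32-byte ETH_GSTRING_LEN buffer, silencing GCC's format-truncation warnings; veth gets the same treatment later in this patch with "%.11s". A standalone demonstration of the effect:]

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    int main(void)
    {
            char buf[ETH_GSTRING_LEN];
            const char *stat = "a_deliberately_overlong_stat_name";

            snprintf(buf, sizeof(buf), "%.29s_%.1d", stat, 7);
            printf("%s\n", buf);    /* prefix clipped to 29 chars, buffer fits */
            return 0;
    }
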
16745 +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
16746 +index 4d1b4a24907f..13e6bf13ac4d 100644
16747 +--- a/drivers/net/ethernet/microchip/lan743x_main.c
16748 ++++ b/drivers/net/ethernet/microchip/lan743x_main.c
16749 +@@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
16750 +
16751 + if (adapter->csr.flags &
16752 + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
16753 +- flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
16754 +- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
16755 ++ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
16756 + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
16757 + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
16758 + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
16759 +@@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
16760 + /* map TX interrupt to vector */
16761 + int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
16762 + lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
16763 +- if (flags &
16764 +- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
16765 +- int_vec_en_auto_clr |= INT_VEC_EN_(vector);
16766 +- lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
16767 +- int_vec_en_auto_clr);
16768 +- }
16769 +
16770 + /* Remove TX interrupt from shared mask */
16771 + intr->vector_list[0].int_mask &= ~int_bit;
16772 +@@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
16773 + return ((++index) % rx->ring_size);
16774 + }
16775 +
16776 +-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
16777 ++static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
16778 ++{
16779 ++ int length = 0;
16780 ++
16781 ++ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
16782 ++ return __netdev_alloc_skb(rx->adapter->netdev,
16783 ++ length, GFP_ATOMIC | GFP_DMA);
16784 ++}
16785 ++
16786 ++static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
16787 ++ struct sk_buff *skb)
16788 + {
16789 + struct lan743x_rx_buffer_info *buffer_info;
16790 + struct lan743x_rx_descriptor *descriptor;
16791 +@@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
16792 + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
16793 + descriptor = &rx->ring_cpu_ptr[index];
16794 + buffer_info = &rx->buffer_info[index];
16795 +- buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
16796 +- length,
16797 +- GFP_ATOMIC | GFP_DMA);
16798 ++ buffer_info->skb = skb;
16799 + if (!(buffer_info->skb))
16800 + return -ENOMEM;
16801 + buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
16802 +@@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
16803 + /* packet is available */
16804 + if (first_index == last_index) {
16805 + /* single buffer packet */
16806 ++ struct sk_buff *new_skb = NULL;
16807 + int packet_length;
16808 +
16809 ++ new_skb = lan743x_rx_allocate_skb(rx);
16810 ++ if (!new_skb) {
16811 ++ /* failed to allocate next skb.
16812 ++ * Memory is very low.
16813 ++ * Drop this packet and reuse buffer.
16814 ++ */
16815 ++ lan743x_rx_reuse_ring_element(rx, first_index);
16816 ++ goto process_extension;
16817 ++ }
16818 ++
16819 + buffer_info = &rx->buffer_info[first_index];
16820 + skb = buffer_info->skb;
16821 + descriptor = &rx->ring_cpu_ptr[first_index];
16822 +@@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
16823 + skb_put(skb, packet_length - 4);
16824 + skb->protocol = eth_type_trans(skb,
16825 + rx->adapter->netdev);
16826 +- lan743x_rx_allocate_ring_element(rx, first_index);
16827 ++ lan743x_rx_init_ring_element(rx, first_index, new_skb);
16828 + } else {
16829 + int index = first_index;
16830 +
16831 +@@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
16832 + if (first_index <= last_index) {
16833 + while ((index >= first_index) &&
16834 + (index <= last_index)) {
16835 +- lan743x_rx_release_ring_element(rx,
16836 +- index);
16837 +- lan743x_rx_allocate_ring_element(rx,
16838 +- index);
16839 ++ lan743x_rx_reuse_ring_element(rx,
16840 ++ index);
16841 + index = lan743x_rx_next_index(rx,
16842 + index);
16843 + }
16844 + } else {
16845 + while ((index >= first_index) ||
16846 + (index <= last_index)) {
16847 +- lan743x_rx_release_ring_element(rx,
16848 +- index);
16849 +- lan743x_rx_allocate_ring_element(rx,
16850 +- index);
16851 ++ lan743x_rx_reuse_ring_element(rx,
16852 ++ index);
16853 + index = lan743x_rx_next_index(rx,
16854 + index);
16855 + }
16856 + }
16857 + }
16858 +
16859 ++process_extension:
16860 + if (extension_index >= 0) {
16861 + descriptor = &rx->ring_cpu_ptr[extension_index];
16862 + buffer_info = &rx->buffer_info[extension_index];
16863 +@@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
16864 +
16865 + rx->last_head = 0;
16866 + for (index = 0; index < rx->ring_size; index++) {
16867 +- ret = lan743x_rx_allocate_ring_element(rx, index);
16868 ++ struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
16869 ++
16870 ++ ret = lan743x_rx_init_ring_element(rx, index, new_skb);
16871 + if (ret)
16872 + goto cleanup;
16873 + }
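
[Note: the lan743x_main.c rework above splits skb allocation out of ring-element init so the RX path can allocate the replacement buffer before handing the old one to the stack; if allocation fails, the frame is dropped and the old buffer is re-armed, so the ring never loses a slot under memory pressure. A userspace sketch of that refill policy, with malloc() standing in for __netdev_alloc_skb():]

    #include <stdio.h>
    #include <stdlib.h>

    struct slot { void *buf; };

    static void deliver(void *buf)      { printf("delivered %p\n", buf); free(buf); }
    static void recycle(struct slot *s) { printf("reused %p\n", s->buf); }

    static void rx_process(struct slot *s, size_t bufsz)
    {
            void *new_buf = malloc(bufsz);  /* allocate replacement first */

            if (!new_buf) {
                    recycle(s);             /* drop frame, keep ring full */
                    return;
            }
            deliver(s->buf);                /* old buffer goes up the stack */
            s->buf = new_buf;               /* ring slot re-armed */
    }

    int main(void)
    {
            struct slot s = { .buf = malloc(2048) };

            rx_process(&s, 2048);
            free(s.buf);
            return 0;
    }
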
16874 +diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
16875 +index ca3ea2fbfcd0..80d87798c62b 100644
16876 +--- a/drivers/net/ethernet/mscc/ocelot_board.c
16877 ++++ b/drivers/net/ethernet/mscc/ocelot_board.c
16878 +@@ -267,6 +267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
16879 + struct phy *serdes;
16880 + void __iomem *regs;
16881 + char res_name[8];
16882 ++ int phy_mode;
16883 + u32 port;
16884 +
16885 + if (of_property_read_u32(portnp, "reg", &port))
16886 +@@ -292,11 +293,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
16887 + if (err)
16888 + return err;
16889 +
16890 +- err = of_get_phy_mode(portnp);
16891 +- if (err < 0)
16892 ++ phy_mode = of_get_phy_mode(portnp);
16893 ++ if (phy_mode < 0)
16894 + ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA;
16895 + else
16896 +- ocelot->ports[port]->phy_mode = err;
16897 ++ ocelot->ports[port]->phy_mode = phy_mode;
16898 +
16899 + switch (ocelot->ports[port]->phy_mode) {
16900 + case PHY_INTERFACE_MODE_NA:
16901 +@@ -304,6 +305,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
16902 + case PHY_INTERFACE_MODE_SGMII:
16903 + break;
16904 + case PHY_INTERFACE_MODE_QSGMII:
16905 ++ /* Ensure clock signals and speed are set on all
16906 ++ * QSGMII links
16907 ++ */
16908 ++ ocelot_port_writel(ocelot->ports[port],
16909 ++ DEV_CLOCK_CFG_LINK_SPEED
16910 ++ (OCELOT_SPEED_1000),
16911 ++ DEV_CLOCK_CFG);
16912 + break;
16913 + default:
16914 + dev_err(ocelot->dev,
16915 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
16916 +index 69d7aebda09b..73db94e55fd0 100644
16917 +--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
16918 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
16919 +@@ -196,7 +196,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
16920 + ret = dev_queue_xmit(skb);
16921 + nfp_repr_inc_tx_stats(netdev, len, ret);
16922 +
16923 +- return ret;
16924 ++ return NETDEV_TX_OK;
16925 + }
16926 +
16927 + static int nfp_repr_stop(struct net_device *netdev)
16928 +@@ -384,7 +384,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
16929 + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
16930 + netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
16931 +
16932 +- netdev->priv_flags |= IFF_NO_QUEUE;
16933 ++ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
16934 + netdev->features |= NETIF_F_LLTX;
16935 +
16936 + if (nfp_app_has_tc(app)) {
16937 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
16938 +index 6e36b88ca7c9..365cddbfc684 100644
16939 +--- a/drivers/net/ethernet/realtek/r8169.c
16940 ++++ b/drivers/net/ethernet/realtek/r8169.c
16941 +@@ -28,6 +28,7 @@
16942 + #include <linux/pm_runtime.h>
16943 + #include <linux/firmware.h>
16944 + #include <linux/prefetch.h>
16945 ++#include <linux/pci-aspm.h>
16946 + #include <linux/ipv6.h>
16947 + #include <net/ip6_checksum.h>
16948 +
16949 +@@ -5332,7 +5333,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
16950 + tp->cp_cmd |= PktCntrDisable | INTT_1;
16951 + RTL_W16(tp, CPlusCmd, tp->cp_cmd);
16952 +
16953 +- RTL_W16(tp, IntrMitigate, 0x5151);
16954 ++ RTL_W16(tp, IntrMitigate, 0x5100);
16955 +
16956 + /* Work around for RxFIFO overflow. */
16957 + if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
16958 +@@ -6435,7 +6436,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
16959 + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
16960 + }
16961 +
16962 +- if (status & RTL_EVENT_NAPI) {
16963 ++ if (status & (RTL_EVENT_NAPI | LinkChg)) {
16964 + rtl_irq_disable(tp);
16965 + napi_schedule_irqoff(&tp->napi);
16966 + }
16967 +@@ -7224,6 +7225,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
16968 + return rc;
16969 + }
16970 +
16971 ++ /* Disable ASPM completely as it causes random device stop-working
16972 ++ * problems as well as full system hangs for some PCIe device users.
16973 ++ */
16974 ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
16975 ++
16976 + /* enable device (incl. PCI PM wakeup and hotplug setup) */
16977 + rc = pcim_enable_device(pdev);
16978 + if (rc < 0) {
16979 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
16980 +index d28c8f9ca55b..8154b38c08f7 100644
16981 +--- a/drivers/net/ethernet/renesas/ravb_main.c
16982 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
16983 +@@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev)
16984 + RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
16985 +
16986 + /* Set FIFO size */
16987 +- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
16988 ++ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
16989 +
16990 + /* Timestamp enable */
16991 + ravb_write(ndev, TCCR_TFEN, TCCR);
16992 +diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
16993 +index d8c5bc412219..c0c75c111abb 100644
16994 +--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
16995 ++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
16996 +@@ -111,10 +111,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
16997 +
16998 + static void refill_desc3(void *priv_ptr, struct dma_desc *p)
16999 + {
17000 +- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
17001 ++ struct stmmac_rx_queue *rx_q = priv_ptr;
17002 ++ struct stmmac_priv *priv = rx_q->priv_data;
17003 +
17004 + /* Fill DES3 in case of RING mode */
17005 +- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
17006 ++ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
17007 + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
17008 + }
17009 +
17010 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
17011 +index 685d20472358..019ab99e65bb 100644
17012 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
17013 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
17014 +@@ -474,7 +474,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
17015 + struct dma_desc *p, struct sk_buff *skb)
17016 + {
17017 + struct skb_shared_hwtstamps shhwtstamp;
17018 +- u64 ns;
17019 ++ u64 ns = 0;
17020 +
17021 + if (!priv->hwts_tx_en)
17022 + return;
17023 +@@ -513,7 +513,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
17024 + {
17025 + struct skb_shared_hwtstamps *shhwtstamp = NULL;
17026 + struct dma_desc *desc = p;
17027 +- u64 ns;
17028 ++ u64 ns = 0;
17029 +
17030 + if (!priv->hwts_rx_en)
17031 + return;
17032 +@@ -558,8 +558,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
17033 + u32 snap_type_sel = 0;
17034 + u32 ts_master_en = 0;
17035 + u32 ts_event_en = 0;
17036 ++ u32 sec_inc = 0;
17037 + u32 value = 0;
17038 +- u32 sec_inc;
17039 + bool xmac;
17040 +
17041 + xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
17042 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
17043 +index 2293e21f789f..cc60b3fb0892 100644
17044 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
17045 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
17046 +@@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
17047 + struct stmmac_priv *priv =
17048 + container_of(ptp, struct stmmac_priv, ptp_clock_ops);
17049 + unsigned long flags;
17050 +- u64 ns;
17051 ++ u64 ns = 0;
17052 +
17053 + spin_lock_irqsave(&priv->ptp_lock, flags);
17054 + stmmac_get_systime(priv, priv->ptpaddr, &ns);
17055 +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
17056 +index e859ae2e42d5..49f41b64077b 100644
17057 +--- a/drivers/net/hyperv/hyperv_net.h
17058 ++++ b/drivers/net/hyperv/hyperv_net.h
17059 +@@ -987,6 +987,7 @@ struct netvsc_device {
17060 +
17061 + wait_queue_head_t wait_drain;
17062 + bool destroy;
17063 ++ bool tx_disable; /* if true, do not wake up queue again */
17064 +
17065 + /* Receive buffer allocated by us but manages by NetVSP */
17066 + void *recv_buf;
17067 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
17068 +index 813d195bbd57..e0dce373cdd9 100644
17069 +--- a/drivers/net/hyperv/netvsc.c
17070 ++++ b/drivers/net/hyperv/netvsc.c
17071 +@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
17072 +
17073 + init_waitqueue_head(&net_device->wait_drain);
17074 + net_device->destroy = false;
17075 ++ net_device->tx_disable = false;
17076 +
17077 + net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
17078 + net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
17079 +@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
17080 + } else {
17081 + struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
17082 +
17083 +- if (netif_tx_queue_stopped(txq) &&
17084 ++ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
17085 + (hv_get_avail_to_write_percent(&channel->outbound) >
17086 + RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
17087 + netif_tx_wake_queue(txq);
17088 +@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
17089 + } else if (ret == -EAGAIN) {
17090 + netif_tx_stop_queue(txq);
17091 + ndev_ctx->eth_stats.stop_queue++;
17092 +- if (atomic_read(&nvchan->queue_sends) < 1) {
17093 ++ if (atomic_read(&nvchan->queue_sends) < 1 &&
17094 ++ !net_device->tx_disable) {
17095 + netif_tx_wake_queue(txq);
17096 + ndev_ctx->eth_stats.wake_queue++;
17097 + ret = -ENOSPC;
17098 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
17099 +index cf4897043e83..b20fb0fb595b 100644
17100 +--- a/drivers/net/hyperv/netvsc_drv.c
17101 ++++ b/drivers/net/hyperv/netvsc_drv.c
17102 +@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
17103 + rcu_read_unlock();
17104 + }
17105 +
17106 ++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
17107 ++ struct net_device *ndev)
17108 ++{
17109 ++ nvscdev->tx_disable = false;
17110 ++ virt_wmb(); /* ensure queue wake up mechanism is on */
17111 ++
17112 ++ netif_tx_wake_all_queues(ndev);
17113 ++}
17114 ++
17115 + static int netvsc_open(struct net_device *net)
17116 + {
17117 + struct net_device_context *ndev_ctx = netdev_priv(net);
17118 +@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
17119 + rdev = nvdev->extension;
17120 + if (!rdev->link_state) {
17121 + netif_carrier_on(net);
17122 +- netif_tx_wake_all_queues(net);
17123 ++ netvsc_tx_enable(nvdev, net);
17124 + }
17125 +
17126 + if (vf_netdev) {
17127 +@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
17128 + }
17129 + }
17130 +
17131 ++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
17132 ++ struct net_device *ndev)
17133 ++{
17134 ++ if (nvscdev) {
17135 ++ nvscdev->tx_disable = true;
17136 ++ virt_wmb(); /* ensure txq will not wake up after stop */
17137 ++ }
17138 ++
17139 ++ netif_tx_disable(ndev);
17140 ++}
17141 ++
17142 + static int netvsc_close(struct net_device *net)
17143 + {
17144 + struct net_device_context *net_device_ctx = netdev_priv(net);
17145 +@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
17146 + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
17147 + int ret;
17148 +
17149 +- netif_tx_disable(net);
17150 ++ netvsc_tx_disable(nvdev, net);
17151 +
17152 + /* No need to close rndis filter if it is removed already */
17153 + if (!nvdev)
17154 +@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
17155 +
17156 + /* If device was up (receiving) then shutdown */
17157 + if (netif_running(ndev)) {
17158 +- netif_tx_disable(ndev);
17159 ++ netvsc_tx_disable(nvdev, ndev);
17160 +
17161 + ret = rndis_filter_close(nvdev);
17162 + if (ret) {
17163 +@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
17164 + if (rdev->link_state) {
17165 + rdev->link_state = false;
17166 + netif_carrier_on(net);
17167 +- netif_tx_wake_all_queues(net);
17168 ++ netvsc_tx_enable(net_device, net);
17169 + } else {
17170 + notify = true;
17171 + }
17172 +@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
17173 + if (!rdev->link_state) {
17174 + rdev->link_state = true;
17175 + netif_carrier_off(net);
17176 +- netif_tx_stop_all_queues(net);
17177 ++ netvsc_tx_disable(net_device, net);
17178 + }
17179 + kfree(event);
17180 + break;
17181 +@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
17182 + if (!rdev->link_state) {
17183 + rdev->link_state = true;
17184 + netif_carrier_off(net);
17185 +- netif_tx_stop_all_queues(net);
17186 ++ netvsc_tx_disable(net_device, net);
17187 + event->event = RNDIS_STATUS_MEDIA_CONNECT;
17188 + spin_lock_irqsave(&ndev_ctx->lock, flags);
17189 + list_add(&event->list, &ndev_ctx->reconfig_events);
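
[Note: the netvsc changes above pair the new tx_disable flag with virt_wmb() so the completion path, which may run on another CPU, never wakes a queue that netvsc_close()/netvsc_detach() just stopped. Roughly the same ordering expressed with C11 atomics; this is a sketch, not the kernel's barrier API:]

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool tx_disable;
    static atomic_bool queue_stopped;

    static void tx_disable_path(void)
    {
            /* flag must be visible before the queue is seen as stopped */
            atomic_store_explicit(&tx_disable, true, memory_order_release);
            atomic_store_explicit(&queue_stopped, true, memory_order_release);
    }

    static void completion_path(void)
    {
            if (atomic_load_explicit(&queue_stopped, memory_order_acquire) &&
                !atomic_load_explicit(&tx_disable, memory_order_acquire))
                    puts("wake queue");
            else
                    puts("leave queue stopped");
    }

    int main(void)
    {
            tx_disable_path();
            completion_path();      /* must not wake: tx_disable is set */
            return 0;
    }
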
17190 +diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
17191 +index 3ddaf9595697..68af4c75ffb3 100644
17192 +--- a/drivers/net/phy/meson-gxl.c
17193 ++++ b/drivers/net/phy/meson-gxl.c
17194 +@@ -211,6 +211,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
17195 + static int meson_gxl_config_intr(struct phy_device *phydev)
17196 + {
17197 + u16 val;
17198 ++ int ret;
17199 +
17200 + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
17201 + val = INTSRC_ANEG_PR
17202 +@@ -223,6 +224,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
17203 + val = 0;
17204 + }
17205 +
17206 ++ /* Ack any pending IRQ */
17207 ++ ret = meson_gxl_ack_interrupt(phydev);
17208 ++ if (ret)
17209 ++ return ret;
17210 ++
17211 + return phy_write(phydev, INTSRC_MASK, val);
17212 + }
17213 +
17214 +diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
17215 +index 03af927fa5ad..e39bf0428dd9 100644
17216 +--- a/drivers/net/phy/phy-c45.c
17217 ++++ b/drivers/net/phy/phy-c45.c
17218 +@@ -147,9 +147,15 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
17219 + mmd_mask &= ~BIT(devad);
17220 +
17221 + /* The link state is latched low so that momentary link
17222 +- * drops can be detected. Do not double-read the status
17223 +- * register if the link is down.
17224 ++ * drops can be detected. Do not double-read the status
17225 ++ * in polling mode to detect such short link drops.
17226 + */
17227 ++ if (!phy_polling_mode(phydev)) {
17228 ++ val = phy_read_mmd(phydev, devad, MDIO_STAT1);
17229 ++ if (val < 0)
17230 ++ return val;
17231 ++ }
17232 ++
17233 + val = phy_read_mmd(phydev, devad, MDIO_STAT1);
17234 + if (val < 0)
17235 + return val;
17236 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
17237 +index 46c86725a693..adf79614c2db 100644
17238 +--- a/drivers/net/phy/phy_device.c
17239 ++++ b/drivers/net/phy/phy_device.c
17240 +@@ -1683,10 +1683,15 @@ int genphy_update_link(struct phy_device *phydev)
17241 + {
17242 + int status;
17243 +
17244 +- /* Do a fake read */
17245 +- status = phy_read(phydev, MII_BMSR);
17246 +- if (status < 0)
17247 +- return status;
17248 ++ /* The link state is latched low so that momentary link
17249 ++ * drops can be detected. Do not double-read the status
17250 ++ * in polling mode to detect such short link drops.
17251 ++ */
17252 ++ if (!phy_polling_mode(phydev)) {
17253 ++ status = phy_read(phydev, MII_BMSR);
17254 ++ if (status < 0)
17255 ++ return status;
17256 ++ }
17257 +
17258 + /* Read link and autonegotiation status */
17259 + status = phy_read(phydev, MII_BMSR);
17260 +@@ -1827,7 +1832,7 @@ int genphy_soft_reset(struct phy_device *phydev)
17261 + {
17262 + int ret;
17263 +
17264 +- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
17265 ++ ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
17266 + if (ret < 0)
17267 + return ret;
17268 +
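
[Note: both phy-c45.c and phy_device.c above now skip the extra status read in polling mode. The STAT1/BMSR link bit is latched low, so the first read after a momentary drop reports 0 and clears the latch; a polling reader that reads twice throws that information away. A toy model of the latch:]

    #include <stdbool.h>
    #include <stdio.h>

    struct bmsr { bool live_link; bool latch_clear; };

    static bool read_link(struct bmsr *r)
    {
            if (!r->latch_clear) {          /* latched low since last read */
                    r->latch_clear = true;
                    return false;
            }
            return r->live_link;
    }

    int main(void)
    {
            struct bmsr r = { .live_link = true, .latch_clear = false };

            /* polling mode: the single read sees the momentary drop */
            printf("polling sees link=%d\n", read_link(&r));     /* 0 */

            /* interrupt mode reads again for the current state */
            printf("second read sees link=%d\n", read_link(&r)); /* 1 */
            return 0;
    }
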
17269 +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
17270 +index 8f09edd811e9..50c60550f295 100644
17271 +--- a/drivers/net/ppp/pptp.c
17272 ++++ b/drivers/net/ppp/pptp.c
17273 +@@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk)
17274 + pppox_unbind_sock(sk);
17275 + }
17276 + skb_queue_purge(&sk->sk_receive_queue);
17277 ++ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
17278 + }
17279 +
17280 + static int pptp_create(struct net *net, struct socket *sock, int kern)
17281 +diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
17282 +index a5ef97010eb3..5541e1c19936 100644
17283 +--- a/drivers/net/team/team_mode_loadbalance.c
17284 ++++ b/drivers/net/team/team_mode_loadbalance.c
17285 +@@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
17286 + return 0;
17287 + }
17288 +
17289 ++static void lb_bpf_func_free(struct team *team)
17290 ++{
17291 ++ struct lb_priv *lb_priv = get_lb_priv(team);
17292 ++ struct bpf_prog *fp;
17293 ++
17294 ++ if (!lb_priv->ex->orig_fprog)
17295 ++ return;
17296 ++
17297 ++ __fprog_destroy(lb_priv->ex->orig_fprog);
17298 ++ fp = rcu_dereference_protected(lb_priv->fp,
17299 ++ lockdep_is_held(&team->lock));
17300 ++ bpf_prog_destroy(fp);
17301 ++}
17302 ++
17303 + static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
17304 + {
17305 + struct lb_priv *lb_priv = get_lb_priv(team);
17306 +@@ -639,6 +653,7 @@ static void lb_exit(struct team *team)
17307 +
17308 + team_options_unregister(team, lb_options,
17309 + ARRAY_SIZE(lb_options));
17310 ++ lb_bpf_func_free(team);
17311 + cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
17312 + free_percpu(lb_priv->pcpu_stats);
17313 + kfree(lb_priv->ex);
17314 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
17315 +index 53f4f37b0ffd..448d5439ff6a 100644
17316 +--- a/drivers/net/tun.c
17317 ++++ b/drivers/net/tun.c
17318 +@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17319 + int skb_xdp = 1;
17320 + bool frags = tun_napi_frags_enabled(tfile);
17321 +
17322 +- if (!(tun->dev->flags & IFF_UP))
17323 +- return -EIO;
17324 +-
17325 + if (!(tun->flags & IFF_NO_PI)) {
17326 + if (len < sizeof(pi))
17327 + return -EINVAL;
17328 +@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17329 + err = skb_copy_datagram_from_iter(skb, 0, from, len);
17330 +
17331 + if (err) {
17332 ++ err = -EFAULT;
17333 ++drop:
17334 + this_cpu_inc(tun->pcpu_stats->rx_dropped);
17335 + kfree_skb(skb);
17336 + if (frags) {
17337 +@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17338 + mutex_unlock(&tfile->napi_mutex);
17339 + }
17340 +
17341 +- return -EFAULT;
17342 ++ return err;
17343 + }
17344 + }
17345 +
17346 +@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17347 + !tfile->detached)
17348 + rxhash = __skb_get_hash_symmetric(skb);
17349 +
17350 ++ rcu_read_lock();
17351 ++ if (unlikely(!(tun->dev->flags & IFF_UP))) {
17352 ++ err = -EIO;
17353 ++ rcu_read_unlock();
17354 ++ goto drop;
17355 ++ }
17356 ++
17357 + if (frags) {
17358 + /* Exercise flow dissector code path. */
17359 + u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
17360 +@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17361 + if (unlikely(headlen > skb_headlen(skb))) {
17362 + this_cpu_inc(tun->pcpu_stats->rx_dropped);
17363 + napi_free_frags(&tfile->napi);
17364 ++ rcu_read_unlock();
17365 + mutex_unlock(&tfile->napi_mutex);
17366 + WARN_ON(1);
17367 + return -ENOMEM;
17368 +@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
17369 + } else {
17370 + netif_rx_ni(skb);
17371 + }
17372 ++ rcu_read_unlock();
17373 +
17374 + stats = get_cpu_ptr(tun->pcpu_stats);
17375 + u64_stats_update_begin(&stats->syncp);
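
[Note: in tun_get_user() the IFF_UP test moves from entry to just before delivery, under rcu_read_lock() so a concurrent device close/unregister cannot race with it, and every failure now funnels through one drop label that sets err first. The error-path shape, reduced to standalone C; all names are illustrative:]

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long rx_dropped;

    static int get_user_frame(int fail_copy, int dev_up)
    {
            int err = 0;
            void *skb = malloc(64);

            if (!skb)
                    return -ENOMEM;

            if (fail_copy) {
                    err = -EFAULT;
                    goto drop;
            }
            if (!dev_up) {                  /* checked after the copy now */
                    err = -EIO;
                    goto drop;
            }
            printf("frame accepted\n");
            free(skb);
            return 0;

    drop:
            rx_dropped++;                   /* counted exactly once */
            free(skb);
            return err;
    }

    int main(void)
    {
            int a = get_user_frame(0, 0);   /* device down after copy: -EIO */
            int b = get_user_frame(1, 1);   /* copy fault: -EFAULT */

            printf("%d %d dropped=%lu\n", a, b, rx_dropped);
            return 0;
    }
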
17376 +diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
17377 +index 820a2fe7d027..aff995be2a31 100644
17378 +--- a/drivers/net/usb/aqc111.c
17379 ++++ b/drivers/net/usb/aqc111.c
17380 +@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
17381 + .tx_fixup = aqc111_tx_fixup,
17382 + };
17383 +
17384 ++static const struct driver_info qnap_info = {
17385 ++ .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
17386 ++ .bind = aqc111_bind,
17387 ++ .unbind = aqc111_unbind,
17388 ++ .status = aqc111_status,
17389 ++ .link_reset = aqc111_link_reset,
17390 ++ .reset = aqc111_reset,
17391 ++ .stop = aqc111_stop,
17392 ++ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
17393 ++ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
17394 ++ .rx_fixup = aqc111_rx_fixup,
17395 ++ .tx_fixup = aqc111_tx_fixup,
17396 ++};
17397 ++
17398 + static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
17399 + {
17400 + struct usbnet *dev = usb_get_intfdata(intf);
17401 +@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
17402 + {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
17403 + {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
17404 + {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
17405 ++ {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
17406 + { },/* END */
17407 + };
17408 + MODULE_DEVICE_TABLE(usb, products);
17409 +diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
17410 +index 5512a1038721..3e9b2c319e45 100644
17411 +--- a/drivers/net/usb/cdc_ether.c
17412 ++++ b/drivers/net/usb/cdc_ether.c
17413 +@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
17414 + .driver_info = 0,
17415 + },
17416 +
17417 ++/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
17418 ++{
17419 ++ USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
17420 ++ USB_CDC_SUBCLASS_ETHERNET,
17421 ++ USB_CDC_PROTO_NONE),
17422 ++ .driver_info = 0,
17423 ++},
17424 ++
17425 + /* WHITELIST!!!
17426 + *
17427 + * CDC Ether uses two interfaces, not necessarily consecutive.
17428 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
17429 +index 18af2f8eee96..9195f3476b1d 100644
17430 +--- a/drivers/net/usb/qmi_wwan.c
17431 ++++ b/drivers/net/usb/qmi_wwan.c
17432 +@@ -976,6 +976,13 @@ static const struct usb_device_id products[] = {
17433 + 0xff),
17434 + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
17435 + },
17436 ++ { /* Quectel EG12/EM12 */
17437 ++ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
17438 ++ USB_CLASS_VENDOR_SPEC,
17439 ++ USB_SUBCLASS_VENDOR_SPEC,
17440 ++ 0xff),
17441 ++ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
17442 ++ },
17443 +
17444 + /* 3. Combined interface devices matching on interface number */
17445 + {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
17446 +@@ -1196,6 +1203,7 @@ static const struct usb_device_id products[] = {
17447 + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
17448 + {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
17449 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
17450 ++ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
17451 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
17452 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
17453 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
17454 +@@ -1343,17 +1351,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
17455 + return false;
17456 + }
17457 +
17458 +-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
17459 ++static bool quectel_diag_detected(struct usb_interface *intf)
17460 + {
17461 + struct usb_device *dev = interface_to_usbdev(intf);
17462 + struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
17463 ++ u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
17464 ++ u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
17465 +
17466 +- if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
17467 +- le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
17468 +- intf_desc.bNumEndpoints == 2)
17469 +- return true;
17470 ++ if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
17471 ++ return false;
17472 +
17473 +- return false;
17474 ++ if (id_product == 0x0306 || id_product == 0x0512)
17475 ++ return true;
17476 ++ else
17477 ++ return false;
17478 + }
17479 +
17480 + static int qmi_wwan_probe(struct usb_interface *intf,
17481 +@@ -1390,13 +1401,13 @@ static int qmi_wwan_probe(struct usb_interface *intf,
17482 + return -ENODEV;
17483 + }
17484 +
17485 +- /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
17486 ++ /* Several Quectel modems support dynamic interface configuration, so
17487 + * we need to match on class/subclass/protocol. These values are
17488 + * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
17489 + * different. Ignore the current interface if the number of endpoints
17490 + * equals the number for the diag interface (two).
17491 + */
17492 +- if (quectel_ep06_diag_detected(intf))
17493 ++ if (quectel_diag_detected(intf))
17494 + return -ENODEV;
17495 +
17496 + return usbnet_probe(intf, id);
17497 +diff --git a/drivers/net/veth.c b/drivers/net/veth.c
17498 +index f412ea1cef18..b203d1867959 100644
17499 +--- a/drivers/net/veth.c
17500 ++++ b/drivers/net/veth.c
17501 +@@ -115,7 +115,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
17502 + p += sizeof(ethtool_stats_keys);
17503 + for (i = 0; i < dev->real_num_rx_queues; i++) {
17504 + for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
17505 +- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
17506 ++ snprintf(p, ETH_GSTRING_LEN,
17507 ++ "rx_queue_%u_%.11s",
17508 + i, veth_rq_stats_desc[j].desc);
17509 + p += ETH_GSTRING_LEN;
17510 + }
17511 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
17512 +index 7c1430ed0244..cd15c32b2e43 100644
17513 +--- a/drivers/net/vrf.c
17514 ++++ b/drivers/net/vrf.c
17515 +@@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev)
17516 +
17517 + /* default to no qdisc; user can add if desired */
17518 + dev->priv_flags |= IFF_NO_QUEUE;
17519 ++ dev->priv_flags |= IFF_NO_RX_HANDLER;
17520 +
17521 +- dev->min_mtu = 0;
17522 +- dev->max_mtu = 0;
17523 ++ /* VRF devices do not care about MTU, but if the MTU is set
17524 ++ * too low then the ipv4 and ipv6 protocols are disabled
17525 ++ * which breaks networking.
17526 ++ */
17527 ++ dev->min_mtu = IPV6_MIN_MTU;
17528 ++ dev->max_mtu = ETH_MAX_MTU;
17529 + }
17530 +
17531 + static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
17532 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
17533 +index 2aae11feff0c..5006daed2e96 100644
17534 +--- a/drivers/net/vxlan.c
17535 ++++ b/drivers/net/vxlan.c
17536 +@@ -1657,6 +1657,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
17537 + goto drop;
17538 + }
17539 +
17540 ++ rcu_read_lock();
17541 ++
17542 ++ if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
17543 ++ rcu_read_unlock();
17544 ++ atomic_long_inc(&vxlan->dev->rx_dropped);
17545 ++ goto drop;
17546 ++ }
17547 ++
17548 + stats = this_cpu_ptr(vxlan->dev->tstats);
17549 + u64_stats_update_begin(&stats->syncp);
17550 + stats->rx_packets++;
17551 +@@ -1664,6 +1672,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
17552 + u64_stats_update_end(&stats->syncp);
17553 +
17554 + gro_cells_receive(&vxlan->gro_cells, skb);
17555 ++
17556 ++ rcu_read_unlock();
17557 ++
17558 + return 0;
17559 +
17560 + drop:
17561 +@@ -2693,6 +2704,8 @@ static void vxlan_uninit(struct net_device *dev)
17562 + {
17563 + struct vxlan_dev *vxlan = netdev_priv(dev);
17564 +
17565 ++ gro_cells_destroy(&vxlan->gro_cells);
17566 ++
17567 + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
17568 +
17569 + free_percpu(dev->tstats);
17570 +@@ -3794,7 +3807,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
17571 +
17572 + vxlan_flush(vxlan, true);
17573 +
17574 +- gro_cells_destroy(&vxlan->gro_cells);
17575 + list_del(&vxlan->next);
17576 + unregister_netdevice_queue(dev, head);
17577 + }
17578 +@@ -4172,10 +4184,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
17579 + /* If vxlan->dev is in the same netns, it has already been added
17580 + * to the list by the previous loop.
17581 + */
17582 +- if (!net_eq(dev_net(vxlan->dev), net)) {
17583 +- gro_cells_destroy(&vxlan->gro_cells);
17584 ++ if (!net_eq(dev_net(vxlan->dev), net))
17585 + unregister_netdevice_queue(vxlan->dev, head);
17586 +- }
17587 + }
17588 +
17589 + for (h = 0; h < PORT_HASH_SIZE; ++h)
17590 +diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
17591 +index 2a5668b4f6bc..1a1ea4bbf8a0 100644
17592 +--- a/drivers/net/wireless/ath/ath10k/ce.c
17593 ++++ b/drivers/net/wireless/ath/ath10k/ce.c
17594 +@@ -500,14 +500,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
17595 + write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
17596 +
17597 + /* WORKAROUND */
17598 +- if (!(flags & CE_SEND_FLAG_GATHER)) {
17599 +- if (ar->hw_params.shadow_reg_support)
17600 +- ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
17601 +- write_index);
17602 +- else
17603 +- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
17604 +- write_index);
17605 +- }
17606 ++ if (!(flags & CE_SEND_FLAG_GATHER))
17607 ++ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
17608 +
17609 + src_ring->write_index = write_index;
17610 + exit:
17611 +@@ -581,8 +575,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
17612 + /* Update Source Ring Write Index */
17613 + write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
17614 +
17615 +- if (!(flags & CE_SEND_FLAG_GATHER))
17616 +- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
17617 ++ if (!(flags & CE_SEND_FLAG_GATHER)) {
17618 ++ if (ar->hw_params.shadow_reg_support)
17619 ++ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
17620 ++ write_index);
17621 ++ else
17622 ++ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
17623 ++ write_index);
17624 ++ }
17625 +
17626 + src_ring->write_index = write_index;
17627 + exit:
17628 +@@ -1404,12 +1404,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
17629 + u32 nentries)
17630 + {
17631 + src_ring->shadow_base_unaligned = kcalloc(nentries,
17632 +- sizeof(struct ce_desc),
17633 ++ sizeof(struct ce_desc_64),
17634 + GFP_KERNEL);
17635 + if (!src_ring->shadow_base_unaligned)
17636 + return -ENOMEM;
17637 +
17638 +- src_ring->shadow_base = (struct ce_desc *)
17639 ++ src_ring->shadow_base = (struct ce_desc_64 *)
17640 + PTR_ALIGN(src_ring->shadow_base_unaligned,
17641 + CE_DESC_RING_ALIGN);
17642 + return 0;
17643 +@@ -1461,7 +1461,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
17644 + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
17645 + if (ret) {
17646 + dma_free_coherent(ar->dev,
17647 +- (nentries * sizeof(struct ce_desc) +
17648 ++ (nentries * sizeof(struct ce_desc_64) +
17649 + CE_DESC_RING_ALIGN),
17650 + src_ring->base_addr_owner_space_unaligned,
17651 + base_addr);
17652 +diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
17653 +index ead9987c3259..463e2fc8b501 100644
17654 +--- a/drivers/net/wireless/ath/ath10k/ce.h
17655 ++++ b/drivers/net/wireless/ath/ath10k/ce.h
17656 +@@ -118,7 +118,7 @@ struct ath10k_ce_ring {
17657 + u32 base_addr_ce_space;
17658 +
17659 + char *shadow_base_unaligned;
17660 +- struct ce_desc *shadow_base;
17661 ++ struct ce_desc_64 *shadow_base;
17662 +
17663 + /* keep last */
17664 + void *per_transfer_context[0];
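The ce.c/ce.h hunks fix a sizing mismatch: the shadow ring was allocated with sizeof(struct ce_desc) but stored and used as struct ce_desc_64 pointers. Sizing the allocation from the destination pointer's own type removes that class of bug at the source; a compilable sketch with invented desc32/desc64 types:

#include <stdio.h>
#include <stdlib.h>

struct desc32 { unsigned int addr, nbytes; };
struct desc64 { unsigned long long addr; unsigned int nbytes, flags; };

int main(void)
{
        size_t nentries = 8;
        struct desc64 *ring;

        /* Sizing from the pointer's own type (sizeof(*ring)) cannot go
         * stale if the descriptor type later changes, unlike spelling
         * out sizeof(struct desc32) by hand. */
        ring = calloc(nentries, sizeof(*ring));
        if (!ring)
                return 1;
        printf("allocated %zu bytes for %zu entries\n",
               nentries * sizeof(*ring), nentries);
        free(ring);
        return 0;
}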
17665 +diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
17666 +index 4778a455d81a..068f1a7e07d3 100644
17667 +--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
17668 ++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
17669 +@@ -696,11 +696,12 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
17670 + " %llu ", stats->ht[j][i]);
17671 + len += scnprintf(buf + len, size - len, "\n");
17672 + len += scnprintf(buf + len, size - len,
17673 +- " BW %s (20,40,80,160 MHz)\n", str[j]);
17674 ++ " BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
17675 + len += scnprintf(buf + len, size - len,
17676 +- " %llu %llu %llu %llu\n",
17677 ++ " %llu %llu %llu %llu %llu %llu\n",
17678 + stats->bw[j][0], stats->bw[j][1],
17679 +- stats->bw[j][2], stats->bw[j][3]);
17680 ++ stats->bw[j][2], stats->bw[j][3],
17681 ++ stats->bw[j][4], stats->bw[j][5]);
17682 + len += scnprintf(buf + len, size - len,
17683 + " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
17684 + len += scnprintf(buf + len, size - len,
17685 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
17686 +index f42bac204ef8..ecf34ce7acf0 100644
17687 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
17688 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
17689 +@@ -2130,9 +2130,15 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
17690 + hdr = (struct ieee80211_hdr *)skb->data;
17691 + rx_status = IEEE80211_SKB_RXCB(skb);
17692 + rx_status->chains |= BIT(0);
17693 +- rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
17694 +- rx->ppdu.combined_rssi;
17695 +- rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
17696 ++ if (rx->ppdu.combined_rssi == 0) {
17697 ++ /* SDIO firmware does not provide signal */
17698 ++ rx_status->signal = 0;
17699 ++ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
17700 ++ } else {
17701 ++ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
17702 ++ rx->ppdu.combined_rssi;
17703 ++ rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
17704 ++ }
17705 +
17706 + spin_lock_bh(&ar->data_lock);
17707 + ch = ar->scan_channel;
17708 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
17709 +index 2034ccc7cc72..1d5d0209ebeb 100644
17710 +--- a/drivers/net/wireless/ath/ath10k/wmi.h
17711 ++++ b/drivers/net/wireless/ath/ath10k/wmi.h
17712 +@@ -5003,7 +5003,7 @@ enum wmi_rate_preamble {
17713 + #define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
17714 +
17715 + #define ATH10K_VHT_MCS_NUM 10
17716 +-#define ATH10K_BW_NUM 4
17717 ++#define ATH10K_BW_NUM 6
17718 + #define ATH10K_NSS_NUM 4
17719 + #define ATH10K_LEGACY_NUM 12
17720 + #define ATH10K_GI_NUM 2
17721 +diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
17722 +index c070a9e51ebf..fae572b38416 100644
17723 +--- a/drivers/net/wireless/ath/ath9k/init.c
17724 ++++ b/drivers/net/wireless/ath/ath9k/init.c
17725 +@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
17726 + ret = ath9k_eeprom_request(sc, eeprom_name);
17727 + if (ret)
17728 + return ret;
17729 ++
17730 ++ ah->ah_flags &= ~AH_USE_EEPROM;
17731 ++ ah->ah_flags |= AH_NO_EEP_SWAP;
17732 + }
17733 +
17734 + mac = of_get_mac_address(np);
17735 + if (mac)
17736 + ether_addr_copy(common->macaddr, mac);
17737 +
17738 +- ah->ah_flags &= ~AH_USE_EEPROM;
17739 +- ah->ah_flags |= AH_NO_EEP_SWAP;
17740 +-
17741 + return 0;
17742 + }
17743 +
17744 +diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
17745 +index 9b2f9f543952..5a44f9d0ff02 100644
17746 +--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
17747 ++++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
17748 +@@ -1580,6 +1580,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
17749 + u8 *buf, *dpos;
17750 + const u8 *spos;
17751 +
17752 ++ if (!ies1)
17753 ++ ies1_len = 0;
17754 ++
17755 ++ if (!ies2)
17756 ++ ies2_len = 0;
17757 ++
17758 + if (ies1_len == 0 && ies2_len == 0) {
17759 + *merged_ies = NULL;
17760 + *merged_len = 0;
17761 +@@ -1589,17 +1595,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
17762 + buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
17763 + if (!buf)
17764 + return -ENOMEM;
17765 +- memcpy(buf, ies1, ies1_len);
17766 ++ if (ies1)
17767 ++ memcpy(buf, ies1, ies1_len);
17768 + dpos = buf + ies1_len;
17769 + spos = ies2;
17770 +- while (spos + 1 < ies2 + ies2_len) {
17771 ++ while (spos && (spos + 1 < ies2 + ies2_len)) {
17772 + /* IE tag at offset 0, length at offset 1 */
17773 + u16 ielen = 2 + spos[1];
17774 +
17775 + if (spos + ielen > ies2 + ies2_len)
17776 + break;
17777 + if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
17778 +- !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
17779 ++ (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
17780 ++ spos, ielen))) {
17781 + memcpy(dpos, spos, ielen);
17782 + dpos += ielen;
17783 + }
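The wil6210 change hardens _wil_cfg80211_merge_extra_ies() against NULL input buffers by normalizing them to zero length before any memcpy() or pointer arithmetic. A self-contained sketch of the same defensive pattern; merge_blobs and its parameters are hypothetical names, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int merge_blobs(const unsigned char *a, size_t alen,
                       const unsigned char *b, size_t blen,
                       unsigned char **out, size_t *outlen)
{
        unsigned char *buf;

        /* Normalize first: a NULL buffer is treated as empty, so no
         * later memcpy() or a + alen arithmetic can touch it even if
         * the caller passed a stale nonzero length. */
        if (!a)
                alen = 0;
        if (!b)
                blen = 0;

        if (alen + blen == 0) {
                *out = NULL;
                *outlen = 0;
                return 0;
        }

        buf = malloc(alen + blen);
        if (!buf)
                return -1;
        if (a)
                memcpy(buf, a, alen);
        if (b)
                memcpy(buf + alen, b, blen);
        *out = buf;
        *outlen = alen + blen;
        return 0;
}

int main(void)
{
        unsigned char *m;
        size_t mlen;

        /* NULL pointer with a bogus length 16: safely ignored. */
        if (merge_blobs(NULL, 16, (const unsigned char *)"ok", 2, &m, &mlen))
                return 1;
        printf("merged %zu bytes\n", mlen);     /* 2, not 18 */
        free(m);
        return 0;
}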
17784 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
17785 +index 1f1e95a15a17..0ce1d8174e6d 100644
17786 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
17787 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
17788 +@@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
17789 + return err;
17790 + }
17791 +
17792 +- err = request_firmware(&clm, clm_name, bus->dev);
17793 ++ err = firmware_request_nowarn(&clm, clm_name, bus->dev);
17794 + if (err) {
17795 + brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
17796 + err);
17797 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
17798 +index 0d6c313b6669..19ec55cef802 100644
17799 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
17800 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
17801 +@@ -127,13 +127,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
17802 +
17803 + static int iwl_configure_rxq(struct iwl_mvm *mvm)
17804 + {
17805 +- int i, num_queues, size;
17806 ++ int i, num_queues, size, ret;
17807 + struct iwl_rfh_queue_config *cmd;
17808 ++ struct iwl_host_cmd hcmd = {
17809 ++ .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
17810 ++ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
17811 ++ };
17812 +
17813 + /* Do not configure default queue, it is configured via context info */
17814 + num_queues = mvm->trans->num_rx_queues - 1;
17815 +
17816 +- size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
17817 ++ size = struct_size(cmd, data, num_queues);
17818 +
17819 + cmd = kzalloc(size, GFP_KERNEL);
17820 + if (!cmd)
17821 +@@ -154,10 +158,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
17822 + cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
17823 + }
17824 +
17825 +- return iwl_mvm_send_cmd_pdu(mvm,
17826 +- WIDE_ID(DATA_PATH_GROUP,
17827 +- RFH_QUEUE_CONFIG_CMD),
17828 +- 0, size, cmd);
17829 ++ hcmd.data[0] = cmd;
17830 ++ hcmd.len[0] = size;
17831 ++
17832 ++ ret = iwl_mvm_send_cmd(mvm, &hcmd);
17833 ++
17834 ++ kfree(cmd);
17835 ++
17836 ++ return ret;
17837 + }
17838 +
17839 + static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
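iwl_configure_rxq() now sizes the command with struct_size(cmd, data, num_queues) instead of open-coded arithmetic; the kernel macro also saturates on integer overflow. The open-coded equivalent for a flexible array member, as a compilable sketch (struct rxq_cmd is an invented type standing in for iwl_rfh_queue_config):

#include <stdio.h>
#include <stdlib.h>

struct rxq_cmd {
        unsigned int num_queues;
        unsigned int data[];    /* flexible array member */
};

int main(void)
{
        size_t n = 15;
        struct rxq_cmd *cmd = NULL;
        size_t size;

        /* sizeof operands are not evaluated, so sizing from the (still
         * NULL) pointer is safe; struct_size() in the kernel performs
         * the same header-plus-n-elements arithmetic with overflow
         * saturation on top. */
        size = sizeof(*cmd) + n * sizeof(cmd->data[0]);
        cmd = calloc(1, size);
        if (!cmd)
                return 1;
        cmd->num_queues = (unsigned int)n;
        printf("command is %zu bytes\n", size);
        free(cmd);
        return 0;
}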
17840 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
17841 +index 9e850c25877b..c596c7b13504 100644
17842 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
17843 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
17844 +@@ -499,7 +499,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
17845 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
17846 + struct iwl_rb_allocator *rba = &trans_pcie->rba;
17847 + struct list_head local_empty;
17848 +- int pending = atomic_xchg(&rba->req_pending, 0);
17849 ++ int pending = atomic_read(&rba->req_pending);
17850 +
17851 + IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
17852 +
17853 +@@ -554,11 +554,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
17854 + i++;
17855 + }
17856 +
17857 ++ atomic_dec(&rba->req_pending);
17858 + pending--;
17859 ++
17860 + if (!pending) {
17861 +- pending = atomic_xchg(&rba->req_pending, 0);
17862 ++ pending = atomic_read(&rba->req_pending);
17863 + IWL_DEBUG_RX(trans,
17864 +- "Pending allocation requests = %d\n",
17865 ++ "Got more pending allocation requests = %d\n",
17866 + pending);
17867 + }
17868 +
17869 +@@ -570,12 +572,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
17870 + spin_unlock(&rba->lock);
17871 +
17872 + atomic_inc(&rba->req_ready);
17873 ++
17874 + }
17875 +
17876 + spin_lock(&rba->lock);
17877 + /* return unused rbds to the allocator empty list */
17878 + list_splice_tail(&local_empty, &rba->rbd_empty);
17879 + spin_unlock(&rba->lock);
17880 ++
17881 ++ IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
17882 + }
17883 +
17884 + /*
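The allocator loop above stops zeroing req_pending with atomic_xchg() and instead decrements once per satisfied request, so requests that arrive mid-drain are never silently discarded if the loop exits early. A C11-atomics sketch of the corrected accounting; the names are illustrative and the single-threaded main only exercises the shape:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int req_pending;

static void drain(void)
{
        int pending = atomic_load(&req_pending);

        while (pending) {
                /* ...satisfy one request, then account for exactly
                 * that one; nothing queued meanwhile is lost... */
                atomic_fetch_sub(&req_pending, 1);
                pending--;
                if (!pending)
                        pending = atomic_load(&req_pending); /* new arrivals */
        }
}

int main(void)
{
        atomic_store(&req_pending, 3);
        drain();
        printf("pending after drain: %d\n", atomic_load(&req_pending));
        return 0;
}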
17885 +diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
17886 +index 789337ea676a..6ede6168bd85 100644
17887 +--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
17888 ++++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
17889 +@@ -433,8 +433,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
17890 + skb_tail_pointer(skb),
17891 + MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
17892 +
17893 +- cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
17894 +-
17895 + lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
17896 + cardp->rx_urb);
17897 + ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
17898 +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
17899 +index 1467af22e394..883752f640b4 100644
17900 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
17901 ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
17902 +@@ -4310,11 +4310,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
17903 + wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
17904 + wiphy->max_remain_on_channel_duration = 5000;
17905 + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
17906 +- BIT(NL80211_IFTYPE_ADHOC) |
17907 + BIT(NL80211_IFTYPE_P2P_CLIENT) |
17908 + BIT(NL80211_IFTYPE_P2P_GO) |
17909 + BIT(NL80211_IFTYPE_AP);
17910 +
17911 ++ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
17912 ++ wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
17913 ++
17914 + wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
17915 + if (adapter->config_bands & BAND_A)
17916 + wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
17917 +@@ -4374,11 +4376,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
17918 + wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
17919 + wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
17920 +
17921 +- wiphy->features |= NL80211_FEATURE_HT_IBSS |
17922 +- NL80211_FEATURE_INACTIVITY_TIMER |
17923 ++ wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER |
17924 + NL80211_FEATURE_LOW_PRIORITY_SCAN |
17925 + NL80211_FEATURE_NEED_OBSS_SCAN;
17926 +
17927 ++ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
17928 ++ wiphy->features |= NL80211_FEATURE_HT_IBSS;
17929 ++
17930 + if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
17931 + wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
17932 + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
17933 +diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
17934 +index 530e5593765c..a1529920d877 100644
17935 +--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
17936 ++++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
17937 +@@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
17938 + part = np->name;
17939 +
17940 + mtd = get_mtd_device_nm(part);
17941 +- if (IS_ERR(mtd))
17942 +- return PTR_ERR(mtd);
17943 ++ if (IS_ERR(mtd)) {
17944 ++ ret = PTR_ERR(mtd);
17945 ++ goto out_put_node;
17946 ++ }
17947 +
17948 +- if (size <= sizeof(*list))
17949 +- return -EINVAL;
17950 ++ if (size <= sizeof(*list)) {
17951 ++ ret = -EINVAL;
17952 ++ goto out_put_node;
17953 ++ }
17954 +
17955 + offset = be32_to_cpup(list);
17956 + ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
17957 + put_mtd_device(mtd);
17958 + if (ret)
17959 +- return ret;
17960 ++ goto out_put_node;
17961 +
17962 +- if (retlen < len)
17963 +- return -EINVAL;
17964 ++ if (retlen < len) {
17965 ++ ret = -EINVAL;
17966 ++ goto out_put_node;
17967 ++ }
17968 +
17969 +- return 0;
17970 ++out_put_node:
17971 ++ of_node_put(np);
17972 ++ return ret;
17973 + #else
17974 + return -ENOENT;
17975 + #endif
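mt76_get_of_eeprom() previously leaked the OF node reference on its early returns; the rewrite funnels every exit through the out_put_node label so of_node_put() always runs. A compilable sketch of the single-exit cleanup idiom, using a toy refcounted node in place of the OF API:

#include <stdio.h>
#include <stdlib.h>

struct node { int refs; };

static struct node *node_get(void)
{
        struct node *n = calloc(1, sizeof(*n));

        if (n)
                n->refs = 1;
        return n;
}

static void node_put(struct node *n)
{
        if (n && --n->refs == 0)
                free(n);
}

static int read_blob(char *dst, size_t len)
{
        struct node *np = node_get();
        int ret;

        if (!np)
                return -1;
        if (len == 0) {
                ret = -1;               /* error path drops the ref too */
                goto out_put_node;
        }
        dst[0] = 0;                     /* pretend the read succeeded */
        ret = 0;
out_put_node:
        node_put(np);                   /* every exit releases the ref */
        return ret;
}

int main(void)
{
        char buf[4];

        printf("read_blob: %d\n", read_blob(buf, sizeof(buf)));
        return 0;
}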
17976 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
17977 +index 5cd508a68609..6d29ba4046c3 100644
17978 +--- a/drivers/net/wireless/mediatek/mt76/mt76.h
17979 ++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
17980 +@@ -713,6 +713,19 @@ static inline bool mt76u_check_sg(struct mt76_dev *dev)
17981 + udev->speed == USB_SPEED_WIRELESS));
17982 + }
17983 +
17984 ++static inline int
17985 ++mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int timeout)
17986 ++{
17987 ++ struct usb_interface *intf = to_usb_interface(dev->dev);
17988 ++ struct usb_device *udev = interface_to_usbdev(intf);
17989 ++ struct mt76_usb *usb = &dev->usb;
17990 ++ unsigned int pipe;
17991 ++ int sent;
17992 ++
17993 ++ pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
17994 ++ return usb_bulk_msg(udev, pipe, data, len, &sent, timeout);
17995 ++}
17996 ++
17997 + int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
17998 + u8 req_type, u16 val, u16 offset,
17999 + void *buf, size_t len);
18000 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
18001 +index c08bf371e527..7c9dfa54fee8 100644
18002 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
18003 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
18004 +@@ -309,7 +309,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
18005 + ccmp_pn[6] = pn >> 32;
18006 + ccmp_pn[7] = pn >> 40;
18007 + txwi->iv = *((__le32 *)&ccmp_pn[0]);
18008 +- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
18009 ++ txwi->eiv = *((__le32 *)&ccmp_pn[4]);
18010 + }
18011 +
18012 + spin_lock_bh(&dev->mt76.lock);
18013 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
18014 +index 6db789f90269..2ca393e267af 100644
18015 +--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
18016 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
18017 +@@ -121,18 +121,14 @@ static int
18018 + __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
18019 + int cmd, bool wait_resp)
18020 + {
18021 +- struct usb_interface *intf = to_usb_interface(dev->dev);
18022 +- struct usb_device *udev = interface_to_usbdev(intf);
18023 + struct mt76_usb *usb = &dev->usb;
18024 +- unsigned int pipe;
18025 +- int ret, sent;
18026 ++ int ret;
18027 + u8 seq = 0;
18028 + u32 info;
18029 +
18030 + if (test_bit(MT76_REMOVED, &dev->state))
18031 + return 0;
18032 +
18033 +- pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
18034 + if (wait_resp) {
18035 + seq = ++usb->mcu.msg_seq & 0xf;
18036 + if (!seq)
18037 +@@ -146,7 +142,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
18038 + if (ret)
18039 + return ret;
18040 +
18041 +- ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
18042 ++ ret = mt76u_bulk_msg(dev, skb->data, skb->len, 500);
18043 + if (ret)
18044 + return ret;
18045 +
18046 +@@ -268,14 +264,12 @@ void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
18047 + EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
18048 +
18049 + static int
18050 +-__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
18051 ++__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
18052 + const void *fw_data, int len, u32 dst_addr)
18053 + {
18054 +- u8 *data = sg_virt(&buf->urb->sg[0]);
18055 +- DECLARE_COMPLETION_ONSTACK(cmpl);
18056 + __le32 info;
18057 + u32 val;
18058 +- int err;
18059 ++ int err, data_len;
18060 +
18061 + info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
18062 + FIELD_PREP(MT_MCU_MSG_LEN, len) |
18063 +@@ -291,25 +285,12 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
18064 + mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
18065 + MT_FCE_DMA_LEN, len << 16);
18066 +
18067 +- buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
18068 +- err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
18069 +- MT_EP_OUT_INBAND_CMD,
18070 +- buf, GFP_KERNEL,
18071 +- mt76u_mcu_complete_urb, &cmpl);
18072 +- if (err < 0)
18073 +- return err;
18074 +-
18075 +- if (!wait_for_completion_timeout(&cmpl,
18076 +- msecs_to_jiffies(1000))) {
18077 +- dev_err(dev->mt76.dev, "firmware upload timed out\n");
18078 +- usb_kill_urb(buf->urb);
18079 +- return -ETIMEDOUT;
18080 +- }
18081 ++ data_len = MT_CMD_HDR_LEN + len + sizeof(info);
18082 +
18083 +- if (mt76u_urb_error(buf->urb)) {
18084 +- dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
18085 +- buf->urb->status);
18086 +- return buf->urb->status;
18087 ++ err = mt76u_bulk_msg(&dev->mt76, data, data_len, 1000);
18088 ++ if (err) {
18089 ++ dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
18090 ++ return err;
18091 + }
18092 +
18093 + val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
18094 +@@ -322,17 +303,16 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
18095 + int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
18096 + int data_len, u32 max_payload, u32 offset)
18097 + {
18098 +- int err, len, pos = 0, max_len = max_payload - 8;
18099 +- struct mt76u_buf buf;
18100 ++ int len, err = 0, pos = 0, max_len = max_payload - 8;
18101 ++ u8 *buf;
18102 +
18103 +- err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
18104 +- GFP_KERNEL);
18105 +- if (err < 0)
18106 +- return err;
18107 ++ buf = kmalloc(max_payload, GFP_KERNEL);
18108 ++ if (!buf)
18109 ++ return -ENOMEM;
18110 +
18111 + while (data_len > 0) {
18112 + len = min_t(int, data_len, max_len);
18113 +- err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
18114 ++ err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
18115 + len, offset + pos);
18116 + if (err < 0)
18117 + break;
18118 +@@ -341,7 +321,7 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
18119 + pos += len;
18120 + usleep_range(5000, 10000);
18121 + }
18122 +- mt76u_buf_free(&buf);
18123 ++ kfree(buf);
18124 +
18125 + return err;
18126 + }
18127 +diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
18128 +index b061263453d4..61cde0f9f58f 100644
18129 +--- a/drivers/net/wireless/mediatek/mt76/usb.c
18130 ++++ b/drivers/net/wireless/mediatek/mt76/usb.c
18131 +@@ -326,7 +326,6 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
18132 +
18133 + return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
18134 + }
18135 +-EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
18136 +
18137 + void mt76u_buf_free(struct mt76u_buf *buf)
18138 + {
18139 +@@ -838,16 +837,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
18140 +
18141 + err = mt76u_alloc_rx(dev);
18142 + if (err < 0)
18143 +- goto err;
18144 +-
18145 +- err = mt76u_alloc_tx(dev);
18146 +- if (err < 0)
18147 +- goto err;
18148 ++ return err;
18149 +
18150 +- return 0;
18151 +-err:
18152 +- mt76u_queues_deinit(dev);
18153 +- return err;
18154 ++ return mt76u_alloc_tx(dev);
18155 + }
18156 + EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
18157 +
18158 +diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
18159 +index 662d12703b69..57b503ae63f1 100644
18160 +--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
18161 ++++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
18162 +@@ -17,7 +17,7 @@
18163 +
18164 + struct mt7601u_dev;
18165 +
18166 +-#define MT7601U_EE_MAX_VER 0x0c
18167 ++#define MT7601U_EE_MAX_VER 0x0d
18168 + #define MT7601U_EEPROM_SIZE 256
18169 +
18170 + #define MT7601U_DEFAULT_TX_POWER 6
18171 +diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
18172 +index 26b187336875..2e12de813a5b 100644
18173 +--- a/drivers/net/wireless/ti/wlcore/main.c
18174 ++++ b/drivers/net/wireless/ti/wlcore/main.c
18175 +@@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
18176 + goto out;
18177 +
18178 + ret = wl12xx_fetch_firmware(wl, plt);
18179 +- if (ret < 0)
18180 +- goto out;
18181 ++ if (ret < 0) {
18182 ++ kfree(wl->fw_status);
18183 ++ kfree(wl->raw_fw_status);
18184 ++ kfree(wl->tx_res_if);
18185 ++ }
18186 +
18187 + out:
18188 + return ret;
18189 +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
18190 +index a11bf4e6b451..6d6e9a12150b 100644
18191 +--- a/drivers/nvdimm/label.c
18192 ++++ b/drivers/nvdimm/label.c
18193 +@@ -755,7 +755,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
18194 +
18195 + static int __pmem_label_update(struct nd_region *nd_region,
18196 + struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
18197 +- int pos)
18198 ++ int pos, unsigned long flags)
18199 + {
18200 + struct nd_namespace_common *ndns = &nspm->nsio.common;
18201 + struct nd_interleave_set *nd_set = nd_region->nd_set;
18202 +@@ -796,7 +796,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
18203 + memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
18204 + if (nspm->alt_name)
18205 + memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
18206 +- nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING);
18207 ++ nd_label->flags = __cpu_to_le32(flags);
18208 + nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
18209 + nd_label->position = __cpu_to_le16(pos);
18210 + nd_label->isetcookie = __cpu_to_le64(cookie);
18211 +@@ -1249,13 +1249,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
18212 + int nd_pmem_namespace_label_update(struct nd_region *nd_region,
18213 + struct nd_namespace_pmem *nspm, resource_size_t size)
18214 + {
18215 +- int i;
18216 ++ int i, rc;
18217 +
18218 + for (i = 0; i < nd_region->ndr_mappings; i++) {
18219 + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
18220 + struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
18221 + struct resource *res;
18222 +- int rc, count = 0;
18223 ++ int count = 0;
18224 +
18225 + if (size == 0) {
18226 + rc = del_labels(nd_mapping, nspm->uuid);
18227 +@@ -1273,7 +1273,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
18228 + if (rc < 0)
18229 + return rc;
18230 +
18231 +- rc = __pmem_label_update(nd_region, nd_mapping, nspm, i);
18232 ++ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
18233 ++ NSLABEL_FLAG_UPDATING);
18234 ++ if (rc)
18235 ++ return rc;
18236 ++ }
18237 ++
18238 ++ if (size == 0)
18239 ++ return 0;
18240 ++
18241 ++ /* Clear the UPDATING flag per UEFI 2.7 expectations */
18242 ++ for (i = 0; i < nd_region->ndr_mappings; i++) {
18243 ++ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
18244 ++
18245 ++ rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
18246 + if (rc)
18247 + return rc;
18248 + }
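The nvdimm label update now runs two passes: first every mapping's label is written with NSLABEL_FLAG_UPDATING set, then all labels are rewritten with the flag cleared, so a crash mid-sequence leaves a state readers can recognize as incomplete. A toy sketch of the two-phase commit; the flags array stands in for the per-DIMM label storage:

#include <stdio.h>

#define NLABELS         4
#define FLAG_UPDATING   0x1

static unsigned int label_flags[NLABELS];

static void write_label(int i, unsigned int flags)
{
        label_flags[i] = flags; /* stand-in for __pmem_label_update() */
}

static void update_all(void)
{
        int i;

        for (i = 0; i < NLABELS; i++)   /* pass 1: mark in flight */
                write_label(i, FLAG_UPDATING);
        for (i = 0; i < NLABELS; i++)   /* pass 2: commit */
                write_label(i, 0);
}

int main(void)
{
        update_all();
        printf("label0 flags after commit: %#x\n", label_flags[0]);
        return 0;
}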
18249 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
18250 +index 4b077555ac70..33a3b23b3db7 100644
18251 +--- a/drivers/nvdimm/namespace_devs.c
18252 ++++ b/drivers/nvdimm/namespace_devs.c
18253 +@@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
18254 + bool pmem_should_map_pages(struct device *dev)
18255 + {
18256 + struct nd_region *nd_region = to_nd_region(dev->parent);
18257 ++ struct nd_namespace_common *ndns = to_ndns(dev);
18258 + struct nd_namespace_io *nsio;
18259 +
18260 + if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
18261 +@@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev)
18262 + if (is_nd_pfn(dev) || is_nd_btt(dev))
18263 + return false;
18264 +
18265 ++ if (ndns->force_raw)
18266 ++ return false;
18267 ++
18268 + nsio = to_nd_namespace_io(dev);
18269 + if (region_intersects(nsio->res.start, resource_size(&nsio->res),
18270 + IORESOURCE_SYSTEM_RAM,
18271 +diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
18272 +index 6f22272e8d80..7760c1b91853 100644
18273 +--- a/drivers/nvdimm/pfn_devs.c
18274 ++++ b/drivers/nvdimm/pfn_devs.c
18275 +@@ -593,7 +593,7 @@ static unsigned long init_altmap_base(resource_size_t base)
18276 +
18277 + static unsigned long init_altmap_reserve(resource_size_t base)
18278 + {
18279 +- unsigned long reserve = PHYS_PFN(SZ_8K);
18280 ++ unsigned long reserve = PFN_UP(SZ_8K);
18281 + unsigned long base_pfn = PHYS_PFN(base);
18282 +
18283 + reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
18284 +@@ -678,7 +678,7 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun
18285 + if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
18286 + IORES_DESC_NONE) == REGION_MIXED
18287 + || !IS_ALIGNED(end, nd_pfn->align)
18288 +- || nd_region_conflict(nd_region, start, size + adjust))
18289 ++ || nd_region_conflict(nd_region, start, size))
18290 + *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
18291 + }
18292 +
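init_altmap_reserve() switches from PHYS_PFN(SZ_8K), which truncates, to PFN_UP(SZ_8K), which rounds up, so the 8K info block still reserves a whole page when the page size exceeds 8K. The arithmetic, runnable with an assumed 64K page size (the value where the difference bites; 4K pages hide it):

#include <stdio.h>

#define PAGE_SHIFT 16UL                                   /* assumed: 64K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PHYS_PFN(x) ((x) >> PAGE_SHIFT)                   /* truncates */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* rounds up */

int main(void)
{
        unsigned long sz_8k = 8192;

        /* With 64K pages, truncation reserves zero pages for the 8K
         * span; rounding up reserves one, as intended. */
        printf("PHYS_PFN(8K) = %lu, PFN_UP(8K) = %lu\n",
               PHYS_PFN(sz_8k), PFN_UP(sz_8k));
        return 0;
}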
18293 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
18294 +index 89accc76d71c..c37d5bbd72ab 100644
18295 +--- a/drivers/nvme/host/fc.c
18296 ++++ b/drivers/nvme/host/fc.c
18297 +@@ -3018,7 +3018,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
18298 +
18299 + ctrl->ctrl.opts = opts;
18300 + ctrl->ctrl.nr_reconnects = 0;
18301 +- ctrl->ctrl.numa_node = dev_to_node(lport->dev);
18302 ++ if (lport->dev)
18303 ++ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
18304 ++ else
18305 ++ ctrl->ctrl.numa_node = NUMA_NO_NODE;
18306 + INIT_LIST_HEAD(&ctrl->ctrl_list);
18307 + ctrl->lport = lport;
18308 + ctrl->rport = rport;
18309 +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
18310 +index 88d260f31835..02c63c463222 100644
18311 +--- a/drivers/nvme/target/core.c
18312 ++++ b/drivers/nvme/target/core.c
18313 +@@ -1171,6 +1171,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
18314 + put_device(ctrl->p2p_client);
18315 + }
18316 +
18317 ++static void nvmet_fatal_error_handler(struct work_struct *work)
18318 ++{
18319 ++ struct nvmet_ctrl *ctrl =
18320 ++ container_of(work, struct nvmet_ctrl, fatal_err_work);
18321 ++
18322 ++ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
18323 ++ ctrl->ops->delete_ctrl(ctrl);
18324 ++}
18325 ++
18326 + u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
18327 + struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
18328 + {
18329 +@@ -1213,6 +1222,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
18330 + INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
18331 + INIT_LIST_HEAD(&ctrl->async_events);
18332 + INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
18333 ++ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
18334 +
18335 + memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
18336 + memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
18337 +@@ -1316,21 +1326,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
18338 + kref_put(&ctrl->ref, nvmet_ctrl_free);
18339 + }
18340 +
18341 +-static void nvmet_fatal_error_handler(struct work_struct *work)
18342 +-{
18343 +- struct nvmet_ctrl *ctrl =
18344 +- container_of(work, struct nvmet_ctrl, fatal_err_work);
18345 +-
18346 +- pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
18347 +- ctrl->ops->delete_ctrl(ctrl);
18348 +-}
18349 +-
18350 + void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
18351 + {
18352 + mutex_lock(&ctrl->lock);
18353 + if (!(ctrl->csts & NVME_CSTS_CFS)) {
18354 + ctrl->csts |= NVME_CSTS_CFS;
18355 +- INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
18356 + schedule_work(&ctrl->fatal_err_work);
18357 + }
18358 + mutex_unlock(&ctrl->lock);
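Moving INIT_WORK() into nvmet_alloc_ctrl() means the work item is wired up exactly once at construction; the fatal-error path only schedules it and can no longer re-initialize a work struct that may already be queued or running. A single-threaded sketch of that shape; struct work here is a toy, not the kernel's workqueue type:

#include <stdio.h>

struct work {
        void (*fn)(struct work *);
        int queued;
};

struct ctrl {
        struct work fatal_err_work;
        int cntlid;
};

static void fatal_err_handler(struct work *w)
{
        (void)w;
        puts("fatal error handled");
}

static void ctrl_init(struct ctrl *c, int id)
{
        c->cntlid = id;
        c->fatal_err_work.fn = fatal_err_handler;       /* wired once */
        c->fatal_err_work.queued = 0;
}

static void ctrl_fatal_error(struct ctrl *c)
{
        /* Only schedule; never re-initialize a possibly queued work. */
        if (!c->fatal_err_work.queued) {
                c->fatal_err_work.queued = 1;
                c->fatal_err_work.fn(&c->fatal_err_work);
        }
}

int main(void)
{
        struct ctrl c;

        ctrl_init(&c, 1);
        ctrl_fatal_error(&c);
        ctrl_fatal_error(&c);   /* second trigger is ignored */
        return 0;
}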
18359 +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
18360 +index f7301bb4ef3b..3ce65927e11c 100644
18361 +--- a/drivers/nvmem/core.c
18362 ++++ b/drivers/nvmem/core.c
18363 +@@ -686,9 +686,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
18364 + if (rval)
18365 + goto err_remove_cells;
18366 +
18367 +- rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
18368 +- if (rval)
18369 +- goto err_remove_cells;
18370 ++ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
18371 +
18372 + return nvmem;
18373 +
18374 +diff --git a/drivers/opp/core.c b/drivers/opp/core.c
18375 +index 18f1639dbc4a..f5d2fa195f5f 100644
18376 +--- a/drivers/opp/core.c
18377 ++++ b/drivers/opp/core.c
18378 +@@ -743,7 +743,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
18379 + old_freq, freq);
18380 +
18381 + /* Scaling up? Configure required OPPs before frequency */
18382 +- if (freq > old_freq) {
18383 ++ if (freq >= old_freq) {
18384 + ret = _set_required_opps(dev, opp_table, opp);
18385 + if (ret)
18386 + goto put_opp;
18387 +diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
18388 +index 9c8249f74479..6296dbb83d47 100644
18389 +--- a/drivers/parport/parport_pc.c
18390 ++++ b/drivers/parport/parport_pc.c
18391 +@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
18392 + {
18393 + int i;
18394 + for (i = 0; i < NR_SUPERIOS; i++)
18395 +- if (superios[i].io != p->base)
18396 ++ if (superios[i].io == p->base)
18397 + return &superios[i];
18398 + return NULL;
18399 + }
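The parport fix flips an inverted comparison: find_superio() returned the first entry whose io *differed* from the port base, which on a populated table matches essentially always and on the right entry essentially never. The corrected lookup, as a standalone sketch with invented table contents:

#include <stddef.h>
#include <stdio.h>

struct superio { unsigned int io; int dma; };

static struct superio table[] = { { 0x378, 3 }, { 0x278, -1 } };

/* Return the entry whose base matches, or NULL if none does. */
static struct superio *find_superio(unsigned int base)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].io == base)
                        return &table[i];
        return NULL;
}

int main(void)
{
        printf("0x278: %s\n", find_superio(0x278) ? "found" : "not found");
        printf("0x3bc: %s\n", find_superio(0x3bc) ? "found" : "not found");
        return 0;
}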
18400 +diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
18401 +index 721d60a5d9e4..9c5614f21b8e 100644
18402 +--- a/drivers/pci/controller/dwc/pcie-designware-host.c
18403 ++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
18404 +@@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
18405 + if (ret)
18406 + pci->num_viewport = 2;
18407 +
18408 +- if (IS_ENABLED(CONFIG_PCI_MSI)) {
18409 ++ if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
18410 + /*
18411 + * If a specific SoC driver needs to change the
18412 + * default number of vectors, it needs to implement
18413 +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
18414 +index d185ea5fe996..a7f703556790 100644
18415 +--- a/drivers/pci/controller/dwc/pcie-qcom.c
18416 ++++ b/drivers/pci/controller/dwc/pcie-qcom.c
18417 +@@ -1228,7 +1228,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
18418 +
18419 + pcie->ops = of_device_get_match_data(dev);
18420 +
18421 +- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
18422 ++ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
18423 + if (IS_ERR(pcie->reset)) {
18424 + ret = PTR_ERR(pcie->reset);
18425 + goto err_pm_runtime_put;
18426 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
18427 +index 750081c1cb48..6eecae447af3 100644
18428 +--- a/drivers/pci/controller/pci-aardvark.c
18429 ++++ b/drivers/pci/controller/pci-aardvark.c
18430 +@@ -499,7 +499,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
18431 + bridge->data = pcie;
18432 + bridge->ops = &advk_pci_bridge_emul_ops;
18433 +
18434 +- pci_bridge_emul_init(bridge);
18435 ++ pci_bridge_emul_init(bridge, 0);
18436 +
18437 + }
18438 +
18439 +diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
18440 +index fa0fc46edb0c..d3a0419e42f2 100644
18441 +--- a/drivers/pci/controller/pci-mvebu.c
18442 ++++ b/drivers/pci/controller/pci-mvebu.c
18443 +@@ -583,7 +583,7 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
18444 + bridge->data = port;
18445 + bridge->ops = &mvebu_pci_bridge_emul_ops;
18446 +
18447 +- pci_bridge_emul_init(bridge);
18448 ++ pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
18449 + }
18450 +
18451 + static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
18452 +diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
18453 +index 55e471c18e8d..c42fe5c4319f 100644
18454 +--- a/drivers/pci/controller/pcie-mediatek.c
18455 ++++ b/drivers/pci/controller/pcie-mediatek.c
18456 +@@ -654,7 +654,6 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
18457 + struct resource *mem = &pcie->mem;
18458 + const struct mtk_pcie_soc *soc = port->pcie->soc;
18459 + u32 val;
18460 +- size_t size;
18461 + int err;
18462 +
18463 + /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
18464 +@@ -706,8 +705,8 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
18465 + mtk_pcie_enable_msi(port);
18466 +
18467 + /* Set AHB to PCIe translation windows */
18468 +- size = mem->end - mem->start;
18469 +- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
18470 ++ val = lower_32_bits(mem->start) |
18471 ++ AHB2PCIE_SIZE(fls(resource_size(mem)));
18472 + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
18473 +
18474 + val = upper_32_bits(mem->start);
18475 +diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
18476 +index 3f3df4c29f6e..905282a8ddaa 100644
18477 +--- a/drivers/pci/hotplug/pciehp_ctrl.c
18478 ++++ b/drivers/pci/hotplug/pciehp_ctrl.c
18479 +@@ -115,6 +115,10 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
18480 + * removed from the slot/adapter.
18481 + */
18482 + msleep(1000);
18483 ++
18484 ++ /* Ignore link or presence changes caused by power off */
18485 ++ atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
18486 ++ &ctrl->pending_events);
18487 + }
18488 +
18489 + /* turn off Green LED */
18490 +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
18491 +index 7dd443aea5a5..8bfcb8cd0900 100644
18492 +--- a/drivers/pci/hotplug/pciehp_hpc.c
18493 ++++ b/drivers/pci/hotplug/pciehp_hpc.c
18494 +@@ -156,9 +156,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
18495 + slot_ctrl |= (cmd & mask);
18496 + ctrl->cmd_busy = 1;
18497 + smp_mb();
18498 ++ ctrl->slot_ctrl = slot_ctrl;
18499 + pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
18500 + ctrl->cmd_started = jiffies;
18501 +- ctrl->slot_ctrl = slot_ctrl;
18502 +
18503 + /*
18504 + * Controllers with the Intel CF118 and similar errata advertise
18505 +@@ -736,12 +736,25 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
18506 +
18507 + void pcie_enable_interrupt(struct controller *ctrl)
18508 + {
18509 +- pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
18510 ++ u16 mask;
18511 ++
18512 ++ mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
18513 ++ pcie_write_cmd(ctrl, mask, mask);
18514 + }
18515 +
18516 + void pcie_disable_interrupt(struct controller *ctrl)
18517 + {
18518 +- pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
18519 ++ u16 mask;
18520 ++
18521 ++ /*
18522 ++ * Mask hot-plug interrupt to prevent it triggering immediately
18523 ++ * when the link goes inactive (we still get PME when any of the
18524 ++ * enabled events is detected). Same goes with Link Layer State
18525 ++ * changed event which generates PME immediately when the link goes
18526 ++ * inactive so mask it as well.
18527 ++ */
18528 ++ mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
18529 ++ pcie_write_cmd(ctrl, 0, mask);
18530 + }
18531 +
18532 + /*
18533 +diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
18534 +index 129738362d90..83fb077d0b41 100644
18535 +--- a/drivers/pci/pci-bridge-emul.c
18536 ++++ b/drivers/pci/pci-bridge-emul.c
18537 +@@ -24,29 +24,6 @@
18538 + #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
18539 + #define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
18540 +
18541 +-/*
18542 +- * Initialize a pci_bridge_emul structure to represent a fake PCI
18543 +- * bridge configuration space. The caller needs to have initialized
18544 +- * the PCI configuration space with whatever values make sense
18545 +- * (typically at least vendor, device, revision), the ->ops pointer,
18546 +- * and optionally ->data and ->has_pcie.
18547 +- */
18548 +-void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
18549 +-{
18550 +- bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
18551 +- bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
18552 +- bridge->conf.cache_line_size = 0x10;
18553 +- bridge->conf.status = PCI_STATUS_CAP_LIST;
18554 +-
18555 +- if (bridge->has_pcie) {
18556 +- bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
18557 +- bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
18558 +- /* Set PCIe v2, root port, slot support */
18559 +- bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
18560 +- PCI_EXP_FLAGS_SLOT;
18561 +- }
18562 +-}
18563 +-
18564 + struct pci_bridge_reg_behavior {
18565 + /* Read-only bits */
18566 + u32 ro;
18567 +@@ -283,6 +260,61 @@ const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
18568 + },
18569 + };
18570 +
18571 ++/*
18572 ++ * Initialize a pci_bridge_emul structure to represent a fake PCI
18573 ++ * bridge configuration space. The caller needs to have initialized
18574 ++ * the PCI configuration space with whatever values make sense
18575 ++ * (typically at least vendor, device, revision), the ->ops pointer,
18576 ++ * and optionally ->data and ->has_pcie.
18577 ++ */
18578 ++int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
18579 ++ unsigned int flags)
18580 ++{
18581 ++ bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
18582 ++ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
18583 ++ bridge->conf.cache_line_size = 0x10;
18584 ++ bridge->conf.status = PCI_STATUS_CAP_LIST;
18585 ++ bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
18586 ++ sizeof(pci_regs_behavior),
18587 ++ GFP_KERNEL);
18588 ++ if (!bridge->pci_regs_behavior)
18589 ++ return -ENOMEM;
18590 ++
18591 ++ if (bridge->has_pcie) {
18592 ++ bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
18593 ++ bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
18594 ++ /* Set PCIe v2, root port, slot support */
18595 ++ bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
18596 ++ PCI_EXP_FLAGS_SLOT;
18597 ++ bridge->pcie_cap_regs_behavior =
18598 ++ kmemdup(pcie_cap_regs_behavior,
18599 ++ sizeof(pcie_cap_regs_behavior),
18600 ++ GFP_KERNEL);
18601 ++ if (!bridge->pcie_cap_regs_behavior) {
18602 ++ kfree(bridge->pci_regs_behavior);
18603 ++ return -ENOMEM;
18604 ++ }
18605 ++ }
18606 ++
18607 ++ if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) {
18608 ++ bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].ro = ~0;
18609 ++ bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].rw = 0;
18610 ++ }
18611 ++
18612 ++ return 0;
18613 ++}
18614 ++
18615 ++/*
18616 ++ * Clean up a pci_bridge_emul structure that was previously initialized
18617 ++ * using pci_bridge_emul_init().
18618 ++ */
18619 ++void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
18620 ++{
18621 ++ if (bridge->has_pcie)
18622 ++ kfree(bridge->pcie_cap_regs_behavior);
18623 ++ kfree(bridge->pci_regs_behavior);
18624 ++}
18625 ++
18626 + /*
18627 + * Should be called by the PCI controller driver when reading the PCI
18628 + * configuration space of the fake bridge. It will call back the
18629 +@@ -312,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
18630 + reg -= PCI_CAP_PCIE_START;
18631 + read_op = bridge->ops->read_pcie;
18632 + cfgspace = (u32 *) &bridge->pcie_conf;
18633 +- behavior = pcie_cap_regs_behavior;
18634 ++ behavior = bridge->pcie_cap_regs_behavior;
18635 + } else {
18636 + read_op = bridge->ops->read_base;
18637 + cfgspace = (u32 *) &bridge->conf;
18638 +- behavior = pci_regs_behavior;
18639 ++ behavior = bridge->pci_regs_behavior;
18640 + }
18641 +
18642 + if (read_op)
18643 +@@ -383,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
18644 + reg -= PCI_CAP_PCIE_START;
18645 + write_op = bridge->ops->write_pcie;
18646 + cfgspace = (u32 *) &bridge->pcie_conf;
18647 +- behavior = pcie_cap_regs_behavior;
18648 ++ behavior = bridge->pcie_cap_regs_behavior;
18649 + } else {
18650 + write_op = bridge->ops->write_base;
18651 + cfgspace = (u32 *) &bridge->conf;
18652 +- behavior = pci_regs_behavior;
18653 ++ behavior = bridge->pci_regs_behavior;
18654 + }
18655 +
18656 + /* Keep all bits, except the RW bits */
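pci_bridge_emul_init() now kmemdup()s the const behavior tables so each emulated bridge can adjust its own copy (the new PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR flag does exactly that), and pci_bridge_emul_cleanup() frees them. A userspace sketch of the copy-the-template idea; dup_behavior and the table contents are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reg_behavior {
        unsigned int ro;
        unsigned int rw;
};

static const struct reg_behavior template_regs[4] = {
        { 0xffff0000, 0x0000ffff },
};

/* Userspace stand-in for kmemdup(): each instance gets a mutable copy
 * of the shared const table, so per-instance tweaks cannot leak into
 * other instances or corrupt the template. */
static struct reg_behavior *dup_behavior(void)
{
        struct reg_behavior *copy = malloc(sizeof(template_regs));

        if (copy)
                memcpy(copy, template_regs, sizeof(template_regs));
        return copy;
}

int main(void)
{
        struct reg_behavior *b = dup_behavior();

        if (!b)
                return 1;
        b[1].ro = ~0u;  /* per-instance tweak, local to this copy */
        printf("template entry 1 still ro=%#x\n", template_regs[1].ro);
        free(b);
        return 0;
}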
18657 +diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
18658 +index 9d510ccf738b..e65b1b79899d 100644
18659 +--- a/drivers/pci/pci-bridge-emul.h
18660 ++++ b/drivers/pci/pci-bridge-emul.h
18661 +@@ -107,15 +107,26 @@ struct pci_bridge_emul_ops {
18662 + u32 old, u32 new, u32 mask);
18663 + };
18664 +
18665 ++struct pci_bridge_reg_behavior;
18666 ++
18667 + struct pci_bridge_emul {
18668 + struct pci_bridge_emul_conf conf;
18669 + struct pci_bridge_emul_pcie_conf pcie_conf;
18670 + struct pci_bridge_emul_ops *ops;
18671 ++ struct pci_bridge_reg_behavior *pci_regs_behavior;
18672 ++ struct pci_bridge_reg_behavior *pcie_cap_regs_behavior;
18673 + void *data;
18674 + bool has_pcie;
18675 + };
18676 +
18677 +-void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
18678 ++enum {
18679 ++ PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR = BIT(0),
18680 ++};
18681 ++
18682 ++int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
18683 ++ unsigned int flags);
18684 ++void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge);
18685 ++
18686 + int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
18687 + int size, u32 *value);
18688 + int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
18689 +diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
18690 +index e435d12e61a0..7b77754a82de 100644
18691 +--- a/drivers/pci/pcie/dpc.c
18692 ++++ b/drivers/pci/pcie/dpc.c
18693 +@@ -202,6 +202,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
18694 + pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
18695 + }
18696 +
18697 ++static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
18698 ++ struct aer_err_info *info)
18699 ++{
18700 ++ int pos = dev->aer_cap;
18701 ++ u32 status, mask, sev;
18702 ++
18703 ++ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
18704 ++ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
18705 ++ status &= ~mask;
18706 ++ if (!status)
18707 ++ return 0;
18708 ++
18709 ++ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
18710 ++ status &= sev;
18711 ++ if (status)
18712 ++ info->severity = AER_FATAL;
18713 ++ else
18714 ++ info->severity = AER_NONFATAL;
18715 ++
18716 ++ return 1;
18717 ++}
18718 ++
18719 + static irqreturn_t dpc_handler(int irq, void *context)
18720 + {
18721 + struct aer_err_info info;
18722 +@@ -229,9 +251,12 @@ static irqreturn_t dpc_handler(int irq, void *context)
18723 + /* show RP PIO error detail information */
18724 + if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
18725 + dpc_process_rp_pio_error(dpc);
18726 +- else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
18727 ++ else if (reason == 0 &&
18728 ++ dpc_get_aer_uncorrect_severity(pdev, &info) &&
18729 ++ aer_get_device_error_info(pdev, &info)) {
18730 + aer_print_error(pdev, &info);
18731 + pci_cleanup_aer_uncorrect_error_status(pdev);
18732 ++ pci_aer_clear_fatal_status(pdev);
18733 + }
18734 +
18735 + /* We configure DPC so it only triggers on ERR_FATAL */
18736 +diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
18737 +index 0dbcf429089f..efa5b552914b 100644
18738 +--- a/drivers/pci/pcie/pme.c
18739 ++++ b/drivers/pci/pcie/pme.c
18740 +@@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus)
18741 + return false;
18742 + }
18743 +
18744 ++static void pcie_pme_disable_interrupt(struct pci_dev *port,
18745 ++ struct pcie_pme_service_data *data)
18746 ++{
18747 ++ spin_lock_irq(&data->lock);
18748 ++ pcie_pme_interrupt_enable(port, false);
18749 ++ pcie_clear_root_pme_status(port);
18750 ++ data->noirq = true;
18751 ++ spin_unlock_irq(&data->lock);
18752 ++}
18753 ++
18754 + /**
18755 + * pcie_pme_suspend - Suspend PCIe PME service device.
18756 + * @srv: PCIe service device to suspend.
18757 +@@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
18758 + return 0;
18759 + }
18760 +
18761 +- spin_lock_irq(&data->lock);
18762 +- pcie_pme_interrupt_enable(port, false);
18763 +- pcie_clear_root_pme_status(port);
18764 +- data->noirq = true;
18765 +- spin_unlock_irq(&data->lock);
18766 ++ pcie_pme_disable_interrupt(port, data);
18767 +
18768 + synchronize_irq(srv->irq);
18769 +
18770 +@@ -426,35 +432,12 @@ static int pcie_pme_resume(struct pcie_device *srv)
18771 + * @srv - PCIe service device to remove.
18772 + */
18773 + static void pcie_pme_remove(struct pcie_device *srv)
18774 +-{
18775 +- pcie_pme_suspend(srv);
18776 +- free_irq(srv->irq, srv);
18777 +- kfree(get_service_data(srv));
18778 +-}
18779 +-
18780 +-static int pcie_pme_runtime_suspend(struct pcie_device *srv)
18781 +-{
18782 +- struct pcie_pme_service_data *data = get_service_data(srv);
18783 +-
18784 +- spin_lock_irq(&data->lock);
18785 +- pcie_pme_interrupt_enable(srv->port, false);
18786 +- pcie_clear_root_pme_status(srv->port);
18787 +- data->noirq = true;
18788 +- spin_unlock_irq(&data->lock);
18789 +-
18790 +- return 0;
18791 +-}
18792 +-
18793 +-static int pcie_pme_runtime_resume(struct pcie_device *srv)
18794 + {
18795 + struct pcie_pme_service_data *data = get_service_data(srv);
18796 +
18797 +- spin_lock_irq(&data->lock);
18798 +- pcie_pme_interrupt_enable(srv->port, true);
18799 +- data->noirq = false;
18800 +- spin_unlock_irq(&data->lock);
18801 +-
18802 +- return 0;
18803 ++ pcie_pme_disable_interrupt(srv->port, data);
18804 ++ free_irq(srv->irq, srv);
18805 ++ kfree(data);
18806 + }
18807 +
18808 + static struct pcie_port_service_driver pcie_pme_driver = {
18809 +@@ -464,8 +447,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
18810 +
18811 + .probe = pcie_pme_probe,
18812 + .suspend = pcie_pme_suspend,
18813 +- .runtime_suspend = pcie_pme_runtime_suspend,
18814 +- .runtime_resume = pcie_pme_runtime_resume,
18815 + .resume = pcie_pme_resume,
18816 + .remove = pcie_pme_remove,
18817 + };
18818 +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
18819 +index 257b9f6f2ebb..c46a3fcb341e 100644
18820 +--- a/drivers/pci/probe.c
18821 ++++ b/drivers/pci/probe.c
18822 +@@ -2071,11 +2071,8 @@ static void pci_configure_ltr(struct pci_dev *dev)
18823 + {
18824 + #ifdef CONFIG_PCIEASPM
18825 + struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
18826 +- u32 cap;
18827 + struct pci_dev *bridge;
18828 +-
18829 +- if (!host->native_ltr)
18830 +- return;
18831 ++ u32 cap, ctl;
18832 +
18833 + if (!pci_is_pcie(dev))
18834 + return;
18835 +@@ -2084,22 +2081,35 @@ static void pci_configure_ltr(struct pci_dev *dev)
18836 + if (!(cap & PCI_EXP_DEVCAP2_LTR))
18837 + return;
18838 +
18839 +- /*
18840 +- * Software must not enable LTR in an Endpoint unless the Root
18841 +- * Complex and all intermediate Switches indicate support for LTR.
18842 +- * PCIe r3.1, sec 6.18.
18843 +- */
18844 +- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
18845 +- dev->ltr_path = 1;
18846 +- else {
18847 ++ pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
18848 ++ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
18849 ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
18850 ++ dev->ltr_path = 1;
18851 ++ return;
18852 ++ }
18853 ++
18854 + bridge = pci_upstream_bridge(dev);
18855 + if (bridge && bridge->ltr_path)
18856 + dev->ltr_path = 1;
18857 ++
18858 ++ return;
18859 + }
18860 +
18861 +- if (dev->ltr_path)
18862 ++ if (!host->native_ltr)
18863 ++ return;
18864 ++
18865 ++ /*
18866 ++ * Software must not enable LTR in an Endpoint unless the Root
18867 ++ * Complex and all intermediate Switches indicate support for LTR.
18868 ++ * PCIe r4.0, sec 6.18.
18869 ++ */
18870 ++ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
18871 ++ ((bridge = pci_upstream_bridge(dev)) &&
18872 ++ bridge->ltr_path)) {
18873 + pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
18874 + PCI_EXP_DEVCTL2_LTR_EN);
18875 ++ dev->ltr_path = 1;
18876 ++ }
18877 + #endif
18878 + }
18879 +
18880 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
18881 +index e2a879e93d86..fba03a7d5c7f 100644
18882 +--- a/drivers/pci/quirks.c
18883 ++++ b/drivers/pci/quirks.c
18884 +@@ -3877,6 +3877,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
18885 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
18886 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
18887 + quirk_dma_func1_alias);
18888 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
18889 ++ quirk_dma_func1_alias);
18890 + /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
18891 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
18892 + quirk_dma_func1_alias);
18893 +diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
18894 +index 8e46a9dad2fa..7cb766dafe85 100644
18895 +--- a/drivers/perf/arm_spe_pmu.c
18896 ++++ b/drivers/perf/arm_spe_pmu.c
18897 +@@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
18898 + {
18899 + }
18900 +
18901 +-static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
18902 +- bool snapshot)
18903 ++static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
18904 ++ int nr_pages, bool snapshot)
18905 + {
18906 +- int i;
18907 ++ int i, cpu = event->cpu;
18908 + struct page **pglist;
18909 + struct arm_spe_pmu_buf *buf;
18910 +
18911 +diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
18912 +index 5163097b43df..4bbd9ede38c8 100644
18913 +--- a/drivers/phy/allwinner/phy-sun4i-usb.c
18914 ++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
18915 +@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
18916 + struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
18917 + int new_mode;
18918 +
18919 +- if (phy->index != 0)
18920 ++ if (phy->index != 0) {
18921 ++ if (mode == PHY_MODE_USB_HOST)
18922 ++ return 0;
18923 + return -EINVAL;
18924 ++ }
18925 +
18926 + switch (mode) {
18927 + case PHY_MODE_USB_HOST:
18928 +diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
18929 +index ea87d739f534..a4ae1ac5369e 100644
18930 +--- a/drivers/pinctrl/meson/pinctrl-meson.c
18931 ++++ b/drivers/pinctrl/meson/pinctrl-meson.c
18932 +@@ -31,6 +31,9 @@
18933 + * In some cases the register ranges for pull enable and pull
18934 + * direction are the same and thus there are only 3 register ranges.
18935 + *
18936 ++ * Since Meson G12A SoC, the ao register ranges for gpio, pull enable
18937 ++ * and pull direction are the same, so there are only 2 register ranges.
18938 ++ *
18939 + * For the pull and GPIO configuration every bank uses a contiguous
18940 + * set of bits in the register sets described above; the same register
18941 + * can be shared by more banks with different offsets.
18942 +@@ -488,23 +491,22 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
18943 + return PTR_ERR(pc->reg_mux);
18944 + }
18945 +
18946 +- pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
18947 +- if (IS_ERR(pc->reg_pull)) {
18948 +- dev_err(pc->dev, "pull registers not found\n");
18949 +- return PTR_ERR(pc->reg_pull);
18950 ++ pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
18951 ++ if (IS_ERR(pc->reg_gpio)) {
18952 ++ dev_err(pc->dev, "gpio registers not found\n");
18953 ++ return PTR_ERR(pc->reg_gpio);
18954 + }
18955 +
18956 ++ pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
18957 ++ /* Use gpio region if pull one is not present */
18958 ++ if (IS_ERR(pc->reg_pull))
18959 ++ pc->reg_pull = pc->reg_gpio;
18960 ++
18961 + pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
18962 + /* Use pull region if pull-enable one is not present */
18963 + if (IS_ERR(pc->reg_pullen))
18964 + pc->reg_pullen = pc->reg_pull;
18965 +
18966 +- pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
18967 +- if (IS_ERR(pc->reg_gpio)) {
18968 +- dev_err(pc->dev, "gpio registers not found\n");
18969 +- return PTR_ERR(pc->reg_gpio);
18970 +- }
18971 +-
18972 + return 0;
18973 + }
18974 +
18975 +diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
18976 +index 0f140a802137..7f76000cc12e 100644
18977 +--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
18978 ++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
18979 +@@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
18980 + static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
18981 + static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
18982 + static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
18983 ++static const unsigned int eth_rxd3_pins[] = { DIF_2_P };
18984 ++static const unsigned int eth_rxd2_pins[] = { DIF_2_N };
18985 + static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
18986 + static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
18987 + static const unsigned int eth_mdc_pins[] = { DIF_4_P };
18988 +@@ -599,6 +601,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
18989 + GROUP(eth_ref_clk, 6, 8),
18990 + GROUP(eth_mdc, 6, 9),
18991 + GROUP(eth_mdio_en, 6, 10),
18992 ++ GROUP(eth_rxd3, 7, 22),
18993 ++ GROUP(eth_rxd2, 7, 23),
18994 + };
18995 +
18996 + static struct meson_pmx_group meson8b_aobus_groups[] = {
18997 +@@ -748,7 +752,7 @@ static const char * const ethernet_groups[] = {
18998 + "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
18999 + "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
19000 + "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
19001 +- "eth_txd2", "eth_txd3"
19002 ++ "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
19003 + };
19004 +
19005 + static const char * const i2c_a_groups[] = {
19006 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
19007 +index e40908dc37e0..1ce286f7b286 100644
19008 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
19009 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
19010 +@@ -391,29 +391,33 @@ FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM
19011 + FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
19012 + FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28
19013 +
19014 ++/* The bit numbering in MOD_SEL fields is reversed */
19015 ++#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
19016 ++#define REV8(f0, f1, f2, f3, f4, f5, f6, f7) f0 f4 f2 f6 f1 f5 f3 f7
19017 ++
19018 + /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
19019 +-#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0)
19020 ++#define MOD_SEL0_30_29 REV4(FM(SEL_ADGB_0), FM(SEL_ADGB_1), FM(SEL_ADGB_2), F_(0, 0))
19021 + #define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1)
19022 +-#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0)
19023 ++#define MOD_SEL0_27_26 REV4(FM(SEL_FM_0), FM(SEL_FM_1), FM(SEL_FM_2), F_(0, 0))
19024 + #define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1)
19025 + #define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1)
19026 + #define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
19027 + #define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
19028 +-#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3)
19029 +-#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0)
19030 ++#define MOD_SEL0_21_20 REV4(FM(SEL_I2C1_0), FM(SEL_I2C1_1), FM(SEL_I2C1_2), FM(SEL_I2C1_3))
19031 ++#define MOD_SEL0_19_18_17 REV8(FM(SEL_I2C2_0), FM(SEL_I2C2_1), FM(SEL_I2C2_2), FM(SEL_I2C2_3), FM(SEL_I2C2_4), F_(0, 0), F_(0, 0), F_(0, 0))
19032 + #define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
19033 + #define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
19034 + #define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
19035 +-#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
19036 +-#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
19037 ++#define MOD_SEL0_13_12 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
19038 ++#define MOD_SEL0_11_10 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
19039 + #define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
19040 + #define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
19041 + #define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
19042 +-#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0)
19043 ++#define MOD_SEL0_6_5 REV4(FM(SEL_REMOCON_0), FM(SEL_REMOCON_1), FM(SEL_REMOCON_2), F_(0, 0))
19044 + #define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
19045 + #define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1)
19046 + #define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
19047 +-#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0)
19048 ++#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
19049 +
19050 + /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
19051 + #define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
19052 +@@ -422,18 +426,18 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
19053 + #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
19054 + #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
19055 + #define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
19056 +-#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
19057 +-#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0)
19058 ++#define MOD_SEL1_24_23_22 REV8(FM(SEL_HSCIF3_0), FM(SEL_HSCIF3_1), FM(SEL_HSCIF3_2), FM(SEL_HSCIF3_3), FM(SEL_HSCIF3_4), F_(0, 0), F_(0, 0), F_(0, 0))
19059 ++#define MOD_SEL1_21_20_19 REV8(FM(SEL_HSCIF4_0), FM(SEL_HSCIF4_1), FM(SEL_HSCIF4_2), FM(SEL_HSCIF4_3), FM(SEL_HSCIF4_4), F_(0, 0), F_(0, 0), F_(0, 0))
19060 + #define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1)
19061 + #define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1)
19062 + #define MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
19063 + #define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
19064 +-#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0)
19065 +-#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
19066 +-#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0)
19067 ++#define MOD_SEL1_14_13 REV4(FM(SEL_SCIF3_0), FM(SEL_SCIF3_1), FM(SEL_SCIF3_2), F_(0, 0))
19068 ++#define MOD_SEL1_12_11 REV4(FM(SEL_SCIF4_0), FM(SEL_SCIF4_1), FM(SEL_SCIF4_2), F_(0, 0))
19069 ++#define MOD_SEL1_10_9 REV4(FM(SEL_SCIF5_0), FM(SEL_SCIF5_1), FM(SEL_SCIF5_2), F_(0, 0))
19070 + #define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
19071 + #define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1)
19072 +-#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0)
19073 ++#define MOD_SEL1_6_5 REV4(FM(SEL_ADGC_0), FM(SEL_ADGC_1), FM(SEL_ADGC_2), F_(0, 0))
19074 + #define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
19075 +
19076 + #define PINMUX_MOD_SELS \
19077 +diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
19078 +index 84d78db381e3..9e377e3b9cb3 100644
19079 +--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
19080 ++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
19081 +@@ -381,6 +381,9 @@ FM(IP12_23_20) IP12_23_20 \
19082 + FM(IP12_27_24) IP12_27_24 \
19083 + FM(IP12_31_28) IP12_31_28 \
19084 +
19085 ++/* The bit numbering in MOD_SEL fields is reversed */
19086 ++#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
19087 ++
19088 + /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
19089 + #define MOD_SEL0_30 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
19090 + #define MOD_SEL0_29 FM(SEL_I2C3_0) FM(SEL_I2C3_1)
19091 +@@ -388,10 +391,10 @@ FM(IP12_31_28) IP12_31_28 \
19092 + #define MOD_SEL0_27 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
19093 + #define MOD_SEL0_26 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1)
19094 + #define MOD_SEL0_25 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1)
19095 +-#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) F_(0, 0)
19096 +-#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) F_(0, 0)
19097 +-#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
19098 +-#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
19099 ++#define MOD_SEL0_24_23 REV4(FM(SEL_PWM0_0), FM(SEL_PWM0_1), FM(SEL_PWM0_2), F_(0, 0))
19100 ++#define MOD_SEL0_22_21 REV4(FM(SEL_PWM1_0), FM(SEL_PWM1_1), FM(SEL_PWM1_2), F_(0, 0))
19101 ++#define MOD_SEL0_20_19 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
19102 ++#define MOD_SEL0_18_17 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
19103 + #define MOD_SEL0_15 FM(SEL_IRQ_0_0) FM(SEL_IRQ_0_1)
19104 + #define MOD_SEL0_14 FM(SEL_IRQ_1_0) FM(SEL_IRQ_1_1)
19105 + #define MOD_SEL0_13 FM(SEL_IRQ_2_0) FM(SEL_IRQ_2_1)
19106 +diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
19107 +index b6d44550d98c..eca16d00e310 100644
19108 +--- a/drivers/platform/mellanox/mlxreg-hotplug.c
19109 ++++ b/drivers/platform/mellanox/mlxreg-hotplug.c
19110 +@@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
19111 + struct mlxreg_core_item *item)
19112 + {
19113 + struct mlxreg_core_data *data;
19114 +- u32 asserted, regval, bit;
19115 ++ unsigned long asserted;
19116 ++ u32 regval, bit;
19117 + int ret;
19118 +
19119 + /*
19120 +@@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
19121 + asserted = item->cache ^ regval;
19122 + item->cache = regval;
19123 +
19124 +- for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
19125 ++ for_each_set_bit(bit, &asserted, 8) {
19126 + data = item->data + bit;
19127 + if (regval & BIT(bit)) {
19128 + if (item->inversed)
19129 +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
19130 +index 1589dffab9fa..8b53a9ceb897 100644
19131 +--- a/drivers/platform/x86/ideapad-laptop.c
19132 ++++ b/drivers/platform/x86/ideapad-laptop.c
19133 +@@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
19134 + .ident = "Lenovo RESCUER R720-15IKBN",
19135 + .matches = {
19136 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
19137 +- DMI_MATCH(DMI_BOARD_NAME, "80WW"),
19138 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
19139 + },
19140 + },
19141 + {
19142 +diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
19143 +index e28bcf61b126..bc0d55a59015 100644
19144 +--- a/drivers/platform/x86/intel-hid.c
19145 ++++ b/drivers/platform/x86/intel-hid.c
19146 +@@ -363,7 +363,7 @@ wakeup:
19147 + * the 5-button array, but still send notifies with power button
19148 + * event code to this device object on power button actions.
19149 + *
19150 +- * Report the power button press; catch and ignore the button release.
19151 ++ * Report the power button press and release.
19152 + */
19153 + if (!priv->array) {
19154 + if (event == 0xce) {
19155 +@@ -372,8 +372,11 @@ wakeup:
19156 + return;
19157 + }
19158 +
19159 +- if (event == 0xcf)
19160 ++ if (event == 0xcf) {
19161 ++ input_report_key(priv->input_dev, KEY_POWER, 0);
19162 ++ input_sync(priv->input_dev);
19163 + return;
19164 ++ }
19165 + }
19166 +
19167 + /* 0xC0 is for HID events, other values are for 5 button array */
19168 +diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
19169 +index 22dbf115782e..c37e74ee609d 100644
19170 +--- a/drivers/platform/x86/intel_pmc_core.c
19171 ++++ b/drivers/platform/x86/intel_pmc_core.c
19172 +@@ -380,7 +380,8 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
19173 + index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
19174 + pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
19175 +
19176 +- for (index = 0; map[index].name; index++)
19177 ++ for (index = 0; map[index].name &&
19178 ++ index < pmcdev->map->ppfear_buckets * 8; index++)
19179 + pmc_core_display_map(s, index, pf_regs[index / 8], map);
19180 +
19181 + return 0;
19182 +diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
19183 +index 89554cba5758..1a0104d2cbf0 100644
19184 +--- a/drivers/platform/x86/intel_pmc_core.h
19185 ++++ b/drivers/platform/x86/intel_pmc_core.h
19186 +@@ -32,7 +32,7 @@
19187 + #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
19188 + #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
19189 + #define MTPMC_MASK 0xffff0000
19190 +-#define PPFEAR_MAX_NUM_ENTRIES 5
19191 ++#define PPFEAR_MAX_NUM_ENTRIES 12
19192 + #define SPT_PPFEAR_NUM_ENTRIES 5
19193 + #define SPT_PMC_READ_DISABLE_BIT 0x16
19194 + #define SPT_PMC_MSG_FULL_STS_BIT 0x18
19195 +diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
19196 +index c843eaff8ad0..c3ed7b476676 100644
19197 +--- a/drivers/power/supply/cpcap-charger.c
19198 ++++ b/drivers/power/supply/cpcap-charger.c
19199 +@@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work)
19200 + goto out_err;
19201 + }
19202 +
19203 ++ power_supply_changed(ddata->usb);
19204 + return;
19205 +
19206 + out_err:
19207 +diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
19208 +index 21e20483bd91..e0239cf3f56d 100644
19209 +--- a/drivers/regulator/act8865-regulator.c
19210 ++++ b/drivers/regulator/act8865-regulator.c
19211 +@@ -131,7 +131,7 @@
19212 + * ACT8865 voltage number
19213 + */
19214 + #define ACT8865_VOLTAGE_NUM 64
19215 +-#define ACT8600_SUDCDC_VOLTAGE_NUM 255
19216 ++#define ACT8600_SUDCDC_VOLTAGE_NUM 256
19217 +
19218 + struct act8865 {
19219 + struct regmap *regmap;
19220 +@@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
19221 + REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
19222 + REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
19223 + REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
19224 +- REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
19225 ++ REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
19226 ++ REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
19227 + };
19228 +
19229 + static struct regulator_ops act8865_ops = {
19230 +diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
19231 +index b9d7b45c7295..e2caf11598c7 100644
19232 +--- a/drivers/regulator/core.c
19233 ++++ b/drivers/regulator/core.c
19234 +@@ -1349,7 +1349,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
19235 + * We'll only apply the initial system load if an
19236 + * initial mode wasn't specified.
19237 + */
19238 ++ regulator_lock(rdev);
19239 + drms_uA_update(rdev);
19240 ++ regulator_unlock(rdev);
19241 + }
19242 +
19243 + if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
19244 +diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
19245 +index b94e3a721721..cd93cf53e23c 100644
19246 +--- a/drivers/regulator/max77620-regulator.c
19247 ++++ b/drivers/regulator/max77620-regulator.c
19248 +@@ -1,7 +1,7 @@
19249 + /*
19250 + * Maxim MAX77620 Regulator driver
19251 + *
19252 +- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
19253 ++ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
19254 + *
19255 + * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
19256 + *	   Laxman Dewangan <ldewangan@nvidia.com>
19257 +@@ -803,6 +803,14 @@ static int max77620_regulator_probe(struct platform_device *pdev)
19258 + rdesc = &rinfo[id].desc;
19259 + pmic->rinfo[id] = &max77620_regs_info[id];
19260 + pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
19261 ++ pmic->reg_pdata[id].active_fps_src = -1;
19262 ++ pmic->reg_pdata[id].active_fps_pd_slot = -1;
19263 ++ pmic->reg_pdata[id].active_fps_pu_slot = -1;
19264 ++ pmic->reg_pdata[id].suspend_fps_src = -1;
19265 ++ pmic->reg_pdata[id].suspend_fps_pd_slot = -1;
19266 ++ pmic->reg_pdata[id].suspend_fps_pu_slot = -1;
19267 ++ pmic->reg_pdata[id].power_ok = -1;
19268 ++ pmic->reg_pdata[id].ramp_rate_setting = -1;
19269 +
19270 + ret = max77620_read_slew_rate(pmic, id);
19271 + if (ret < 0)
19272 +diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
19273 +index 3479ae009b0b..0fc4963bd5b0 100644
19274 +--- a/drivers/regulator/mcp16502.c
19275 ++++ b/drivers/regulator/mcp16502.c
19276 +@@ -17,6 +17,7 @@
19277 + #include <linux/regmap.h>
19278 + #include <linux/regulator/driver.h>
19279 + #include <linux/suspend.h>
19280 ++#include <linux/gpio/consumer.h>
19281 +
19282 + #define VDD_LOW_SEL 0x0D
19283 + #define VDD_HIGH_SEL 0x3F
19284 +diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
19285 +index 095d25f3d2ea..58a1fe583a6c 100644
19286 +--- a/drivers/regulator/s2mpa01.c
19287 ++++ b/drivers/regulator/s2mpa01.c
19288 +@@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = {
19289 + regulator_desc_ldo(2, STEP_50_MV),
19290 + regulator_desc_ldo(3, STEP_50_MV),
19291 + regulator_desc_ldo(4, STEP_50_MV),
19292 +- regulator_desc_ldo(5, STEP_50_MV),
19293 ++ regulator_desc_ldo(5, STEP_25_MV),
19294 + regulator_desc_ldo(6, STEP_25_MV),
19295 + regulator_desc_ldo(7, STEP_50_MV),
19296 + regulator_desc_ldo(8, STEP_50_MV),
19297 + regulator_desc_ldo(9, STEP_50_MV),
19298 + regulator_desc_ldo(10, STEP_50_MV),
19299 +- regulator_desc_ldo(11, STEP_25_MV),
19300 ++ regulator_desc_ldo(11, STEP_50_MV),
19301 + regulator_desc_ldo(12, STEP_50_MV),
19302 + regulator_desc_ldo(13, STEP_50_MV),
19303 + regulator_desc_ldo(14, STEP_50_MV),
19304 +@@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = {
19305 + regulator_desc_ldo(19, STEP_50_MV),
19306 + regulator_desc_ldo(20, STEP_50_MV),
19307 + regulator_desc_ldo(21, STEP_50_MV),
19308 +- regulator_desc_ldo(22, STEP_25_MV),
19309 +- regulator_desc_ldo(23, STEP_25_MV),
19310 ++ regulator_desc_ldo(22, STEP_50_MV),
19311 ++ regulator_desc_ldo(23, STEP_50_MV),
19312 + regulator_desc_ldo(24, STEP_50_MV),
19313 + regulator_desc_ldo(25, STEP_50_MV),
19314 +- regulator_desc_ldo(26, STEP_50_MV),
19315 ++ regulator_desc_ldo(26, STEP_25_MV),
19316 + regulator_desc_buck1_4(1),
19317 + regulator_desc_buck1_4(2),
19318 + regulator_desc_buck1_4(3),
19319 +diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
19320 +index ee4a23ab0663..134c62db36c5 100644
19321 +--- a/drivers/regulator/s2mps11.c
19322 ++++ b/drivers/regulator/s2mps11.c
19323 +@@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
19324 + regulator_desc_s2mps11_ldo(32, STEP_50_MV),
19325 + regulator_desc_s2mps11_ldo(33, STEP_50_MV),
19326 + regulator_desc_s2mps11_ldo(34, STEP_50_MV),
19327 +- regulator_desc_s2mps11_ldo(35, STEP_50_MV),
19328 ++ regulator_desc_s2mps11_ldo(35, STEP_25_MV),
19329 + regulator_desc_s2mps11_ldo(36, STEP_50_MV),
19330 + regulator_desc_s2mps11_ldo(37, STEP_50_MV),
19331 + regulator_desc_s2mps11_ldo(38, STEP_50_MV),
19332 +@@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
19333 + regulator_desc_s2mps11_buck1_4(4),
19334 + regulator_desc_s2mps11_buck5,
19335 + regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
19336 +- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
19337 +- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
19338 ++ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
19339 ++ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
19340 + regulator_desc_s2mps11_buck9,
19341 + regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
19342 + };
19343 +diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
19344 +index a10cec0e86eb..0b3b9de45c60 100644
19345 +--- a/drivers/s390/cio/vfio_ccw_drv.c
19346 ++++ b/drivers/s390/cio/vfio_ccw_drv.c
19347 +@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
19348 + {
19349 + struct vfio_ccw_private *private;
19350 + struct irb *irb;
19351 ++ bool is_final;
19352 +
19353 + private = container_of(work, struct vfio_ccw_private, io_work);
19354 + irb = &private->irb;
19355 +
19356 ++ is_final = !(scsw_actl(&irb->scsw) &
19357 ++ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
19358 + if (scsw_is_solicited(&irb->scsw)) {
19359 + cp_update_scsw(&private->cp, &irb->scsw);
19360 +- cp_free(&private->cp);
19361 ++ if (is_final)
19362 ++ cp_free(&private->cp);
19363 + }
19364 + memcpy(private->io_region->irb_area, irb, sizeof(*irb));
19365 +
19366 + if (private->io_trigger)
19367 + eventfd_signal(private->io_trigger, 1);
19368 +
19369 +- if (private->mdev)
19370 ++ if (private->mdev && is_final)
19371 + private->state = VFIO_CCW_STATE_IDLE;
19372 + }
19373 +
19374 +diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
19375 +index 31c6c847eaca..e9824c35c34f 100644
19376 +--- a/drivers/s390/crypto/vfio_ap_drv.c
19377 ++++ b/drivers/s390/crypto/vfio_ap_drv.c
19378 +@@ -15,7 +15,6 @@
19379 + #include "vfio_ap_private.h"
19380 +
19381 + #define VFIO_AP_ROOT_NAME "vfio_ap"
19382 +-#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
19383 + #define VFIO_AP_DEV_NAME "matrix"
19384 +
19385 + MODULE_AUTHOR("IBM Corporation");
19386 +@@ -24,10 +23,6 @@ MODULE_LICENSE("GPL v2");
19387 +
19388 + static struct ap_driver vfio_ap_drv;
19389 +
19390 +-static struct device_type vfio_ap_dev_type = {
19391 +- .name = VFIO_AP_DEV_TYPE_NAME,
19392 +-};
19393 +-
19394 + struct ap_matrix_dev *matrix_dev;
19395 +
19396 + /* Only type 10 adapters (CEX4 and later) are supported
19397 +@@ -62,6 +57,22 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
19398 + kfree(matrix_dev);
19399 + }
19400 +
19401 ++static int matrix_bus_match(struct device *dev, struct device_driver *drv)
19402 ++{
19403 ++ return 1;
19404 ++}
19405 ++
19406 ++static struct bus_type matrix_bus = {
19407 ++ .name = "matrix",
19408 ++ .match = &matrix_bus_match,
19409 ++};
19410 ++
19411 ++static struct device_driver matrix_driver = {
19412 ++ .name = "vfio_ap",
19413 ++ .bus = &matrix_bus,
19414 ++ .suppress_bind_attrs = true,
19415 ++};
19416 ++
19417 + static int vfio_ap_matrix_dev_create(void)
19418 + {
19419 + int ret;
19420 +@@ -71,6 +82,10 @@ static int vfio_ap_matrix_dev_create(void)
19421 + if (IS_ERR(root_device))
19422 + return PTR_ERR(root_device);
19423 +
19424 ++ ret = bus_register(&matrix_bus);
19425 ++ if (ret)
19426 ++ goto bus_register_err;
19427 ++
19428 + matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
19429 + if (!matrix_dev) {
19430 + ret = -ENOMEM;
19431 +@@ -87,30 +102,41 @@ static int vfio_ap_matrix_dev_create(void)
19432 + mutex_init(&matrix_dev->lock);
19433 + INIT_LIST_HEAD(&matrix_dev->mdev_list);
19434 +
19435 +- matrix_dev->device.type = &vfio_ap_dev_type;
19436 + dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
19437 + matrix_dev->device.parent = root_device;
19438 ++ matrix_dev->device.bus = &matrix_bus;
19439 + matrix_dev->device.release = vfio_ap_matrix_dev_release;
19440 +- matrix_dev->device.driver = &vfio_ap_drv.driver;
19441 ++ matrix_dev->vfio_ap_drv = &vfio_ap_drv;
19442 +
19443 + ret = device_register(&matrix_dev->device);
19444 + if (ret)
19445 + goto matrix_reg_err;
19446 +
19447 ++ ret = driver_register(&matrix_driver);
19448 ++ if (ret)
19449 ++ goto matrix_drv_err;
19450 ++
19451 + return 0;
19452 +
19453 ++matrix_drv_err:
19454 ++ device_unregister(&matrix_dev->device);
19455 + matrix_reg_err:
19456 + put_device(&matrix_dev->device);
19457 + matrix_alloc_err:
19458 ++ bus_unregister(&matrix_bus);
19459 ++bus_register_err:
19460 + root_device_unregister(root_device);
19461 +-
19462 + return ret;
19463 + }
19464 +
19465 + static void vfio_ap_matrix_dev_destroy(void)
19466 + {
19467 ++ struct device *root_device = matrix_dev->device.parent;
19468 ++
19469 ++ driver_unregister(&matrix_driver);
19470 + device_unregister(&matrix_dev->device);
19471 +- root_device_unregister(matrix_dev->device.parent);
19472 ++ bus_unregister(&matrix_bus);
19473 ++ root_device_unregister(root_device);
19474 + }
19475 +
19476 + static int __init vfio_ap_init(void)
19477 +diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
19478 +index 272ef427dcc0..900b9cf20ca5 100644
19479 +--- a/drivers/s390/crypto/vfio_ap_ops.c
19480 ++++ b/drivers/s390/crypto/vfio_ap_ops.c
19481 +@@ -198,8 +198,8 @@ static int vfio_ap_verify_queue_reserved(unsigned long *apid,
19482 + qres.apqi = apqi;
19483 + qres.reserved = false;
19484 +
19485 +- ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
19486 +- vfio_ap_has_queue);
19487 ++ ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL,
19488 ++ &qres, vfio_ap_has_queue);
19489 + if (ret)
19490 + return ret;
19491 +
19492 +diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
19493 +index 5675492233c7..76b7f98e47e9 100644
19494 +--- a/drivers/s390/crypto/vfio_ap_private.h
19495 ++++ b/drivers/s390/crypto/vfio_ap_private.h
19496 +@@ -40,6 +40,7 @@ struct ap_matrix_dev {
19497 + struct ap_config_info info;
19498 + struct list_head mdev_list;
19499 + struct mutex lock;
19500 ++ struct ap_driver *vfio_ap_drv;
19501 + };
19502 +
19503 + extern struct ap_matrix_dev *matrix_dev;
19504 +diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
19505 +index ed8e58f09054..3e132592c1fe 100644
19506 +--- a/drivers/s390/net/ism_drv.c
19507 ++++ b/drivers/s390/net/ism_drv.c
19508 +@@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism)
19509 +
19510 + static int unregister_sba(struct ism_dev *ism)
19511 + {
19512 ++ int ret;
19513 ++
19514 + if (!ism->sba)
19515 + return 0;
19516 +
19517 +- if (ism_cmd_simple(ism, ISM_UNREG_SBA))
19518 ++ ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
19519 ++ if (ret && ret != ISM_ERROR)
19520 + return -EIO;
19521 +
19522 + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
19523 +@@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism)
19524 +
19525 + static int unregister_ieq(struct ism_dev *ism)
19526 + {
19527 ++ int ret;
19528 ++
19529 + if (!ism->ieq)
19530 + return 0;
19531 +
19532 +- if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
19533 ++ ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
19534 ++ if (ret && ret != ISM_ERROR)
19535 + return -EIO;
19536 +
19537 + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
19538 +@@ -287,7 +293,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
19539 + cmd.request.dmb_tok = dmb->dmb_tok;
19540 +
19541 + ret = ism_cmd(ism, &cmd);
19542 +- if (ret)
19543 ++ if (ret && ret != ISM_ERROR)
19544 + goto out;
19545 +
19546 + ism_free_dmb(ism, dmb);
19547 +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
19548 +index 744a64680d5b..e8fc28dba8df 100644
19549 +--- a/drivers/s390/scsi/zfcp_erp.c
19550 ++++ b/drivers/s390/scsi/zfcp_erp.c
19551 +@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
19552 + add_timer(&erp_action->timer);
19553 + }
19554 +
19555 ++void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
19556 ++ int clear, char *dbftag)
19557 ++{
19558 ++ unsigned long flags;
19559 ++ struct zfcp_port *port;
19560 ++
19561 ++ write_lock_irqsave(&adapter->erp_lock, flags);
19562 ++ read_lock(&adapter->port_list_lock);
19563 ++ list_for_each_entry(port, &adapter->port_list, list)
19564 ++ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
19565 ++ read_unlock(&adapter->port_list_lock);
19566 ++ write_unlock_irqrestore(&adapter->erp_lock, flags);
19567 ++}
19568 ++
19569 + static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
19570 + int clear, char *dbftag)
19571 + {
19572 +@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
19573 + struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
19574 + int lun_status;
19575 +
19576 ++ if (sdev->sdev_state == SDEV_DEL ||
19577 ++ sdev->sdev_state == SDEV_CANCEL)
19578 ++ continue;
19579 + if (zsdev->port != port)
19580 + continue;
19581 + /* LUN under port of interest */
19582 +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
19583 +index 3fce47b0b21b..c6acca521ffe 100644
19584 +--- a/drivers/s390/scsi/zfcp_ext.h
19585 ++++ b/drivers/s390/scsi/zfcp_ext.h
19586 +@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
19587 + char *dbftag);
19588 + extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
19589 + extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
19590 ++extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
19591 ++ int clear, char *dbftag);
19592 + extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
19593 + extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
19594 + extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
19595 +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
19596 +index f4f6a07c5222..221d0dfb8493 100644
19597 +--- a/drivers/s390/scsi/zfcp_scsi.c
19598 ++++ b/drivers/s390/scsi/zfcp_scsi.c
19599 +@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
19600 + struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
19601 + int ret = SUCCESS, fc_ret;
19602 +
19603 ++ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
19604 ++ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
19605 ++ zfcp_erp_wait(adapter);
19606 ++ }
19607 + zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
19608 + zfcp_erp_wait(adapter);
19609 + fc_ret = fc_block_scsi_eh(scpnt);
19610 +diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
19611 +index ae1d56da671d..1a738fe9f26b 100644
19612 +--- a/drivers/s390/virtio/virtio_ccw.c
19613 ++++ b/drivers/s390/virtio/virtio_ccw.c
19614 +@@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
19615 + {
19616 + struct virtio_ccw_vq_info *info;
19617 +
19618 ++ if (!vcdev->airq_info)
19619 ++ return;
19620 + list_for_each_entry(info, &vcdev->virtqueues, node)
19621 + drop_airq_indicator(info->vq, vcdev->airq_info);
19622 + }
19623 +@@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
19624 + ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
19625 + if (ret)
19626 + return ret;
19627 +- return vcdev->config_block->num;
19628 ++ return vcdev->config_block->num ?: -ENOENT;
19629 + }
19630 +
19631 + static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
19632 +diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
19633 +index d5a6aa9676c8..a3adc954f40f 100644
19634 +--- a/drivers/scsi/aacraid/commsup.c
19635 ++++ b/drivers/scsi/aacraid/commsup.c
19636 +@@ -1303,8 +1303,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
19637 + ADD : DELETE;
19638 + break;
19639 + }
19640 +- case AifBuManagerEvent:
19641 +- aac_handle_aif_bu(dev, aifcmd);
19642 ++ break;
19643 ++ case AifBuManagerEvent:
19644 ++ aac_handle_aif_bu(dev, aifcmd);
19645 + break;
19646 + }
19647 +
19648 +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
19649 +index 7e56a11836c1..ccefface7e31 100644
19650 +--- a/drivers/scsi/aacraid/linit.c
19651 ++++ b/drivers/scsi/aacraid/linit.c
19652 +@@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
19653 + if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
19654 + devtype = aac->hba_map[chn][tid].devtype;
19655 +
19656 +- if (devtype == AAC_DEVTYPE_NATIVE_RAW)
19657 ++ if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
19658 + depth = aac->hba_map[chn][tid].qd_limit;
19659 +- else if (devtype == AAC_DEVTYPE_ARC_RAW)
19660 ++ set_timeout = 1;
19661 ++ goto common_config;
19662 ++ }
19663 ++ if (devtype == AAC_DEVTYPE_ARC_RAW) {
19664 + set_qd_dev_type = true;
19665 +-
19666 +- set_timeout = 1;
19667 +- goto common_config;
19668 ++ set_timeout = 1;
19669 ++ goto common_config;
19670 ++ }
19671 + }
19672 +
19673 + if (aac->jbod && (sdev->type == TYPE_DISK))
19674 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
19675 +index 2e4e7159ebf9..a75e74ad1698 100644
19676 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
19677 ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
19678 +@@ -1438,7 +1438,7 @@ bind_err:
19679 + static struct bnx2fc_interface *
19680 + bnx2fc_interface_create(struct bnx2fc_hba *hba,
19681 + struct net_device *netdev,
19682 +- enum fip_state fip_mode)
19683 ++ enum fip_mode fip_mode)
19684 + {
19685 + struct fcoe_ctlr_device *ctlr_dev;
19686 + struct bnx2fc_interface *interface;
19687 +diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
19688 +index cd19be3f3405..8ba8862d3292 100644
19689 +--- a/drivers/scsi/fcoe/fcoe.c
19690 ++++ b/drivers/scsi/fcoe/fcoe.c
19691 +@@ -389,7 +389,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
19692 + * Returns: pointer to a struct fcoe_interface or NULL on error
19693 + */
19694 + static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
19695 +- enum fip_state fip_mode)
19696 ++ enum fip_mode fip_mode)
19697 + {
19698 + struct fcoe_ctlr_device *ctlr_dev;
19699 + struct fcoe_ctlr *ctlr;
19700 +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
19701 +index 54da3166da8d..7dc4ffa24430 100644
19702 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c
19703 ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
19704 +@@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
19705 + * fcoe_ctlr_init() - Initialize the FCoE Controller instance
19706 + * @fip: The FCoE controller to initialize
19707 + */
19708 +-void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
19709 ++void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
19710 + {
19711 + fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
19712 + fip->mode = mode;
19713 +@@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
19714 + mutex_unlock(&fip->ctlr_mutex);
19715 + fc_linkup(fip->lp);
19716 + } else if (fip->state == FIP_ST_LINK_WAIT) {
19717 +- fcoe_ctlr_set_state(fip, fip->mode);
19718 ++ if (fip->mode == FIP_MODE_NON_FIP)
19719 ++ fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
19720 ++ else
19721 ++ fcoe_ctlr_set_state(fip, FIP_ST_AUTO);
19722 + switch (fip->mode) {
19723 + default:
19724 + LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
19725 +diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
19726 +index f4909cd206d3..f15d5e1d56b1 100644
19727 +--- a/drivers/scsi/fcoe/fcoe_transport.c
19728 ++++ b/drivers/scsi/fcoe/fcoe_transport.c
19729 +@@ -873,7 +873,7 @@ static int fcoe_transport_create(const char *buffer,
19730 + int rc = -ENODEV;
19731 + struct net_device *netdev = NULL;
19732 + struct fcoe_transport *ft = NULL;
19733 +- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
19734 ++ enum fip_mode fip_mode = (enum fip_mode)kp->arg;
19735 +
19736 + mutex_lock(&ft_mutex);
19737 +
19738 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
19739 +index bc17fa0d8375..62d158574281 100644
19740 +--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
19741 ++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
19742 +@@ -10,6 +10,7 @@
19743 + */
19744 +
19745 + #include "hisi_sas.h"
19746 ++#include "../libsas/sas_internal.h"
19747 + #define DRV_NAME "hisi_sas"
19748 +
19749 + #define DEV_IS_GONE(dev) \
19750 +@@ -872,7 +873,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
19751 + spin_lock_irqsave(&task->task_state_lock, flags);
19752 + task->task_state_flags &=
19753 + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
19754 +- task->task_state_flags |= SAS_TASK_STATE_DONE;
19755 ++ if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
19756 ++ task->task_state_flags |= SAS_TASK_STATE_DONE;
19757 + spin_unlock_irqrestore(&task->task_state_lock, flags);
19758 + }
19759 +
19760 +@@ -1972,9 +1974,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
19761 +
19762 + static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
19763 + {
19764 ++ struct asd_sas_phy *sas_phy = &phy->sas_phy;
19765 ++ struct sas_phy *sphy = sas_phy->phy;
19766 ++ struct sas_phy_data *d = sphy->hostdata;
19767 ++
19768 + phy->phy_attached = 0;
19769 + phy->phy_type = 0;
19770 + phy->port = NULL;
19771 ++
19772 ++ if (d->enable)
19773 ++ sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
19774 ++ else
19775 ++ sphy->negotiated_linkrate = SAS_PHY_DISABLED;
19776 + }
19777 +
19778 + void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
19779 +diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
19780 +index 1135e74646e2..8cec5230fe31 100644
19781 +--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
19782 ++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
19783 +@@ -96,6 +96,7 @@ static int client_reserve = 1;
19784 + static char partition_name[96] = "UNKNOWN";
19785 + static unsigned int partition_number = -1;
19786 + static LIST_HEAD(ibmvscsi_head);
19787 ++static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
19788 +
19789 + static struct scsi_transport_template *ibmvscsi_transport_template;
19790 +
19791 +@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
19792 + }
19793 +
19794 + dev_set_drvdata(&vdev->dev, hostdata);
19795 ++ spin_lock(&ibmvscsi_driver_lock);
19796 + list_add_tail(&hostdata->host_list, &ibmvscsi_head);
19797 ++ spin_unlock(&ibmvscsi_driver_lock);
19798 + return 0;
19799 +
19800 + add_srp_port_failed:
19801 +@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
19802 + static int ibmvscsi_remove(struct vio_dev *vdev)
19803 + {
19804 + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
19805 +- list_del(&hostdata->host_list);
19806 +- unmap_persist_bufs(hostdata);
19807 ++ unsigned long flags;
19808 ++
19809 ++ srp_remove_host(hostdata->host);
19810 ++ scsi_remove_host(hostdata->host);
19811 ++
19812 ++ purge_requests(hostdata, DID_ERROR);
19813 ++
19814 ++ spin_lock_irqsave(hostdata->host->host_lock, flags);
19815 + release_event_pool(&hostdata->pool, hostdata);
19816 ++ spin_unlock_irqrestore(hostdata->host->host_lock, flags);
19817 ++
19818 + ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
19819 + max_events);
19820 +
19821 + kthread_stop(hostdata->work_thread);
19822 +- srp_remove_host(hostdata->host);
19823 +- scsi_remove_host(hostdata->host);
19824 ++ unmap_persist_bufs(hostdata);
19825 ++
19826 ++ spin_lock(&ibmvscsi_driver_lock);
19827 ++ list_del(&hostdata->host_list);
19828 ++ spin_unlock(&ibmvscsi_driver_lock);
19829 ++
19830 + scsi_host_put(hostdata->host);
19831 +
19832 + return 0;
19833 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
19834 +index fcbff83c0097..c9811d1aa007 100644
19835 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
19836 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
19837 +@@ -4188,6 +4188,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
19838 + if (megasas_create_frame_pool(instance)) {
19839 + dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
19840 + megasas_free_cmds(instance);
19841 ++ return -ENOMEM;
19842 + }
19843 +
19844 + return 0;
19845 +diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
19846 +index 9bbc19fc190b..9f9431a4cc0e 100644
19847 +--- a/drivers/scsi/qedf/qedf_main.c
19848 ++++ b/drivers/scsi/qedf/qedf_main.c
19849 +@@ -1418,7 +1418,7 @@ static struct libfc_function_template qedf_lport_template = {
19850 +
19851 + static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
19852 + {
19853 +- fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
19854 ++ fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
19855 +
19856 + qedf->ctlr.send = qedf_fip_send;
19857 + qedf->ctlr.get_src_addr = qedf_get_src_mac;
19858 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
19859 +index 8d1acc802a67..7f8946844a5e 100644
19860 +--- a/drivers/scsi/qla2xxx/qla_init.c
19861 ++++ b/drivers/scsi/qla2xxx/qla_init.c
19862 +@@ -644,11 +644,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
19863 + break;
19864 + case DSC_LS_PORT_UNAVAIL:
19865 + default:
19866 +- if (fcport->loop_id != FC_NO_LOOP_ID)
19867 +- qla2x00_clear_loop_id(fcport);
19868 +-
19869 +- fcport->loop_id = loop_id;
19870 +- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
19871 ++ if (fcport->loop_id == FC_NO_LOOP_ID) {
19872 ++ qla2x00_find_new_loop_id(vha, fcport);
19873 ++ fcport->fw_login_state =
19874 ++ DSC_LS_PORT_UNAVAIL;
19875 ++ }
19876 ++ ql_dbg(ql_dbg_disc, vha, 0x20e5,
19877 ++ "%s %d %8phC\n", __func__, __LINE__,
19878 ++ fcport->port_name);
19879 + qla24xx_fcport_handle_login(vha, fcport);
19880 + break;
19881 + }
19882 +@@ -1471,29 +1474,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
19883 + return 0;
19884 + }
19885 +
19886 +-static
19887 +-void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
19888 +-{
19889 +- fcport->rscn_gen++;
19890 +-
19891 +- ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
19892 +- "%s %8phC DS %d LS %d\n",
19893 +- __func__, fcport->port_name, fcport->disc_state,
19894 +- fcport->fw_login_state);
19895 +-
19896 +- if (fcport->flags & FCF_ASYNC_SENT)
19897 +- return;
19898 +-
19899 +- switch (fcport->disc_state) {
19900 +- case DSC_DELETED:
19901 +- case DSC_LOGIN_COMPLETE:
19902 +- qla24xx_post_gpnid_work(fcport->vha, &ea->id);
19903 +- break;
19904 +- default:
19905 +- break;
19906 +- }
19907 +-}
19908 +-
19909 + int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
19910 + u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
19911 + {
19912 +@@ -1560,8 +1540,6 @@ static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
19913 +
19914 + void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
19915 + {
19916 +- fc_port_t *f, *tf;
19917 +- uint32_t id = 0, mask, rid;
19918 + fc_port_t *fcport;
19919 +
19920 + switch (ea->event) {
19921 +@@ -1574,10 +1552,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
19922 + case FCME_RSCN:
19923 + if (test_bit(UNLOADING, &vha->dpc_flags))
19924 + return;
19925 +- switch (ea->id.b.rsvd_1) {
19926 +- case RSCN_PORT_ADDR:
19927 +-#define BIGSCAN 1
19928 +-#if defined BIGSCAN & BIGSCAN > 0
19929 + {
19930 + unsigned long flags;
19931 + fcport = qla2x00_find_fcport_by_nportid
19932 +@@ -1596,59 +1570,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
19933 + }
19934 + spin_unlock_irqrestore(&vha->work_lock, flags);
19935 + }
19936 +-#else
19937 +- {
19938 +- int rc;
19939 +- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
19940 +- if (!fcport) {
19941 +- /* cable moved */
19942 +- rc = qla24xx_post_gpnid_work(vha, &ea->id);
19943 +- if (rc) {
19944 +- ql_log(ql_log_warn, vha, 0xd044,
19945 +- "RSCN GPNID work failed %06x\n",
19946 +- ea->id.b24);
19947 +- }
19948 +- } else {
19949 +- ea->fcport = fcport;
19950 +- fcport->scan_needed = 1;
19951 +- qla24xx_handle_rscn_event(fcport, ea);
19952 +- }
19953 +- }
19954 +-#endif
19955 +- break;
19956 +- case RSCN_AREA_ADDR:
19957 +- case RSCN_DOM_ADDR:
19958 +- if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
19959 +- mask = 0xffff00;
19960 +- ql_dbg(ql_dbg_async, vha, 0x5044,
19961 +- "RSCN: Area 0x%06x was affected\n",
19962 +- ea->id.b24);
19963 +- } else {
19964 +- mask = 0xff0000;
19965 +- ql_dbg(ql_dbg_async, vha, 0x507a,
19966 +- "RSCN: Domain 0x%06x was affected\n",
19967 +- ea->id.b24);
19968 +- }
19969 +-
19970 +- rid = ea->id.b24 & mask;
19971 +- list_for_each_entry_safe(f, tf, &vha->vp_fcports,
19972 +- list) {
19973 +- id = f->d_id.b24 & mask;
19974 +- if (rid == id) {
19975 +- ea->fcport = f;
19976 +- qla24xx_handle_rscn_event(f, ea);
19977 +- }
19978 +- }
19979 +- break;
19980 +- case RSCN_FAB_ADDR:
19981 +- default:
19982 +- ql_log(ql_log_warn, vha, 0xd045,
19983 +- "RSCN: Fabric was affected. Addr format %d\n",
19984 +- ea->id.b.rsvd_1);
19985 +- qla2x00_mark_all_devices_lost(vha, 1);
19986 +- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
19987 +- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
19988 +- }
19989 + break;
19990 + case FCME_GNL_DONE:
19991 + qla24xx_handle_gnl_done_event(vha, ea);
19992 +@@ -1709,11 +1630,7 @@ void qla_rscn_replay(fc_port_t *fcport)
19993 + ea.event = FCME_RSCN;
19994 + ea.id = fcport->d_id;
19995 + ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
19996 +-#if defined BIGSCAN & BIGSCAN > 0
19997 + qla2x00_fcport_event_handler(fcport->vha, &ea);
19998 +-#else
19999 +- qla24xx_post_gpnid_work(fcport->vha, &ea.id);
20000 +-#endif
20001 + }
20002 + }
20003 +
20004 +@@ -5051,6 +4968,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
20005 + (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
20006 + continue;
20007 +
20008 ++ /* Bypass if not same domain and area of adapter. */
20009 ++ if (area && domain && ((area != vha->d_id.b.area) ||
20010 ++ (domain != vha->d_id.b.domain)) &&
20011 ++ (ha->current_topology == ISP_CFG_NL))
20012 ++ continue;
20013 ++
20014 ++
20015 + /* Bypass invalid local loop ID. */
20016 + if (loop_id > LAST_LOCAL_LOOP_ID)
20017 + continue;
20018 +diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
20019 +index 8507c43b918c..1a20e5d8f057 100644
20020 +--- a/drivers/scsi/qla2xxx/qla_isr.c
20021 ++++ b/drivers/scsi/qla2xxx/qla_isr.c
20022 +@@ -3410,7 +3410,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
20023 + min_vecs++;
20024 + }
20025 +
20026 +- if (USER_CTRL_IRQ(ha)) {
20027 ++ if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
20028 + /* user wants to control IRQ setting for target mode */
20029 + ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
20030 + ha->msix_count, PCI_IRQ_MSIX);
20031 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
20032 +index c6ef83d0d99b..7e35ce2162d0 100644
20033 +--- a/drivers/scsi/qla2xxx/qla_os.c
20034 ++++ b/drivers/scsi/qla2xxx/qla_os.c
20035 +@@ -6936,7 +6936,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
20036 + scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
20037 + struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
20038 +
20039 +- if (USER_CTRL_IRQ(vha->hw))
20040 ++ if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
20041 + rc = blk_mq_map_queues(qmap);
20042 + else
20043 + rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
20044 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
20045 +index a6828391d6b3..5a6e8e12701a 100644
20046 +--- a/drivers/scsi/scsi_lib.c
20047 ++++ b/drivers/scsi/scsi_lib.c
20048 +@@ -2598,8 +2598,10 @@ void scsi_device_resume(struct scsi_device *sdev)
20049 + * device deleted during suspend)
20050 + */
20051 + mutex_lock(&sdev->state_mutex);
20052 +- sdev->quiesced_by = NULL;
20053 +- blk_clear_pm_only(sdev->request_queue);
20054 ++ if (sdev->quiesced_by) {
20055 ++ sdev->quiesced_by = NULL;
20056 ++ blk_clear_pm_only(sdev->request_queue);
20057 ++ }
20058 + if (sdev->sdev_state == SDEV_QUIESCE)
20059 + scsi_device_set_state(sdev, SDEV_RUNNING);
20060 + mutex_unlock(&sdev->state_mutex);
20061 +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
20062 +index dd0d516f65e2..53380e07b40e 100644
20063 +--- a/drivers/scsi/scsi_scan.c
20064 ++++ b/drivers/scsi/scsi_scan.c
20065 +@@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
20066 + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
20067 +
20068 + sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
20069 +- GFP_ATOMIC);
20070 ++ GFP_KERNEL);
20071 + if (!sdev)
20072 + goto out;
20073 +
20074 +@@ -788,7 +788,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
20075 + */
20076 + sdev->inquiry = kmemdup(inq_result,
20077 + max_t(size_t, sdev->inquiry_len, 36),
20078 +- GFP_ATOMIC);
20079 ++ GFP_KERNEL);
20080 + if (sdev->inquiry == NULL)
20081 + return SCSI_SCAN_NO_RESPONSE;
20082 +
20083 +@@ -1079,7 +1079,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
20084 + if (!sdev)
20085 + goto out;
20086 +
20087 +- result = kmalloc(result_len, GFP_ATOMIC |
20088 ++ result = kmalloc(result_len, GFP_KERNEL |
20089 + ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
20090 + if (!result)
20091 + goto out_free_sdev;
20092 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
20093 +index 5464d467e23e..d64553c0a051 100644
20094 +--- a/drivers/scsi/sd.c
20095 ++++ b/drivers/scsi/sd.c
20096 +@@ -1398,11 +1398,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
20097 + scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
20098 + }
20099 +
20100 +- /*
20101 +- * XXX and what if there are packets in flight and this close()
20102 +- * XXX is followed by a "rmmod sd_mod"?
20103 +- */
20104 +-
20105 + scsi_disk_put(sdkp);
20106 + }
20107 +
20108 +@@ -3047,6 +3042,58 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
20109 + sdkp->security = 1;
20110 + }
20111 +
20112 ++/*
20113 ++ * Determine the device's preferred I/O size for reads and writes
20114 ++ * unless the reported value is unreasonably small, large, not a
20115 ++ * multiple of the physical block size, or simply garbage.
20116 ++ */
20117 ++static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
20118 ++ unsigned int dev_max)
20119 ++{
20120 ++ struct scsi_device *sdp = sdkp->device;
20121 ++ unsigned int opt_xfer_bytes =
20122 ++ logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
20123 ++
20124 ++ if (sdkp->opt_xfer_blocks == 0)
20125 ++ return false;
20126 ++
20127 ++ if (sdkp->opt_xfer_blocks > dev_max) {
20128 ++ sd_first_printk(KERN_WARNING, sdkp,
20129 ++ "Optimal transfer size %u logical blocks " \
20130 ++ "> dev_max (%u logical blocks)\n",
20131 ++ sdkp->opt_xfer_blocks, dev_max);
20132 ++ return false;
20133 ++ }
20134 ++
20135 ++ if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
20136 ++ sd_first_printk(KERN_WARNING, sdkp,
20137 ++ "Optimal transfer size %u logical blocks " \
20138 ++ "> sd driver limit (%u logical blocks)\n",
20139 ++ sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
20140 ++ return false;
20141 ++ }
20142 ++
20143 ++ if (opt_xfer_bytes < PAGE_SIZE) {
20144 ++ sd_first_printk(KERN_WARNING, sdkp,
20145 ++ "Optimal transfer size %u bytes < " \
20146 ++ "PAGE_SIZE (%u bytes)\n",
20147 ++ opt_xfer_bytes, (unsigned int)PAGE_SIZE);
20148 ++ return false;
20149 ++ }
20150 ++
20151 ++ if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
20152 ++ sd_first_printk(KERN_WARNING, sdkp,
20153 ++ "Optimal transfer size %u bytes not a " \
20154 ++ "multiple of physical block size (%u bytes)\n",
20155 ++ opt_xfer_bytes, sdkp->physical_block_size);
20156 ++ return false;
20157 ++ }
20158 ++
20159 ++ sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
20160 ++ opt_xfer_bytes);
20161 ++ return true;
20162 ++}
20163 ++
20164 + /**
20165 + * sd_revalidate_disk - called the first time a new disk is seen,
20166 + * performs disk spin up, read_capacity, etc.
20167 +@@ -3125,15 +3172,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
20168 + dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
20169 + q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
20170 +
20171 +- /*
20172 +- * Determine the device's preferred I/O size for reads and writes
20173 +- * unless the reported value is unreasonably small, large, or
20174 +- * garbage.
20175 +- */
20176 +- if (sdkp->opt_xfer_blocks &&
20177 +- sdkp->opt_xfer_blocks <= dev_max &&
20178 +- sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
20179 +- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
20180 ++ if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
20181 + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
20182 + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
20183 + } else
20184 +@@ -3447,9 +3486,21 @@ static void scsi_disk_release(struct device *dev)
20185 + {
20186 + struct scsi_disk *sdkp = to_scsi_disk(dev);
20187 + struct gendisk *disk = sdkp->disk;
20188 +-
20189 ++ struct request_queue *q = disk->queue;
20190 ++
20191 + ida_free(&sd_index_ida, sdkp->index);
20192 +
20193 ++ /*
20194 ++ * Wait until all requests that are in progress have completed.
20195 ++ * This is necessary to avoid that e.g. scsi_end_request() crashes
20196 ++ * due to clearing the disk->private_data pointer. Wait from inside
20197 ++ * scsi_disk_release() instead of from sd_release() to avoid that
20198 ++ * freezing and unfreezing the request queue affects user space I/O
20199 ++ * in case multiple processes open a /dev/sd... node concurrently.
20200 ++ */
20201 ++ blk_mq_freeze_queue(q);
20202 ++ blk_mq_unfreeze_queue(q);
20203 ++
20204 + disk->private_data = NULL;
20205 + put_disk(disk);
20206 + put_device(&sdkp->device->sdev_gendev);
20207 +diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
20208 +index 772b976e4ee4..464cba521fb6 100644
20209 +--- a/drivers/scsi/virtio_scsi.c
20210 ++++ b/drivers/scsi/virtio_scsi.c
20211 +@@ -594,7 +594,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
20212 + return FAILED;
20213 +
20214 + memset(cmd, 0, sizeof(*cmd));
20215 +- cmd->sc = sc;
20216 + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
20217 + .type = VIRTIO_SCSI_T_TMF,
20218 + .subtype = cpu_to_virtio32(vscsi->vdev,
20219 +@@ -653,7 +652,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
20220 + return FAILED;
20221 +
20222 + memset(cmd, 0, sizeof(*cmd));
20223 +- cmd->sc = sc;
20224 + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
20225 + .type = VIRTIO_SCSI_T_TMF,
20226 + .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
20227 +diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
20228 +index 09c669e70d63..038abc377fdb 100644
20229 +--- a/drivers/soc/qcom/qcom_gsbi.c
20230 ++++ b/drivers/soc/qcom/qcom_gsbi.c
20231 +@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
20232 + struct resource *res;
20233 + void __iomem *base;
20234 + struct gsbi_info *gsbi;
20235 +- int i;
20236 ++ int i, ret;
20237 + u32 mask, gsbi_num;
20238 + const struct crci_config *config = NULL;
20239 +
20240 +@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
20241 +
20242 + platform_set_drvdata(pdev, gsbi);
20243 +
20244 +- return of_platform_populate(node, NULL, NULL, &pdev->dev);
20245 ++ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
20246 ++ if (ret)
20247 ++ clk_disable_unprepare(gsbi->hclk);
20248 ++ return ret;
20249 + }
20250 +
20251 + static int gsbi_remove(struct platform_device *pdev)
20252 +diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
20253 +index c7beb6841289..ab8f731a3426 100644
20254 +--- a/drivers/soc/qcom/rpmh.c
20255 ++++ b/drivers/soc/qcom/rpmh.c
20256 +@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
20257 + struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
20258 + msg);
20259 + struct completion *compl = rpm_msg->completion;
20260 ++ bool free = rpm_msg->needs_free;
20261 +
20262 + rpm_msg->err = r;
20263 +
20264 +@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
20265 + complete(compl);
20266 +
20267 + exit:
20268 +- if (rpm_msg->needs_free)
20269 ++ if (free)
20270 + kfree(rpm_msg);
20271 + }
20272 +
20273 +@@ -348,11 +349,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
20274 + {
20275 + struct batch_cache_req *req;
20276 + struct rpmh_request *rpm_msgs;
20277 +- DECLARE_COMPLETION_ONSTACK(compl);
20278 ++ struct completion *compls;
20279 + struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
20280 + unsigned long time_left;
20281 + int count = 0;
20282 +- int ret, i, j;
20283 ++ int ret, i;
20284 ++ void *ptr;
20285 +
20286 + if (!cmd || !n)
20287 + return -EINVAL;
20288 +@@ -362,10 +364,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
20289 + if (!count)
20290 + return -EINVAL;
20291 +
20292 +- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
20293 ++ ptr = kzalloc(sizeof(*req) +
20294 ++ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
20295 + GFP_ATOMIC);
20296 +- if (!req)
20297 ++ if (!ptr)
20298 + return -ENOMEM;
20299 ++
20300 ++ req = ptr;
20301 ++ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
20302 ++
20303 + req->count = count;
20304 + rpm_msgs = req->rpm_msgs;
20305 +
20306 +@@ -380,25 +387,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
20307 + }
20308 +
20309 + for (i = 0; i < count; i++) {
20310 +- rpm_msgs[i].completion = &compl;
20311 ++ struct completion *compl = &compls[i];
20312 ++
20313 ++ init_completion(compl);
20314 ++ rpm_msgs[i].completion = compl;
20315 + ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
20316 + if (ret) {
20317 + pr_err("Error(%d) sending RPMH message addr=%#x\n",
20318 + ret, rpm_msgs[i].msg.cmds[0].addr);
20319 +- for (j = i; j < count; j++)
20320 +- rpmh_tx_done(&rpm_msgs[j].msg, ret);
20321 + break;
20322 + }
20323 + }
20324 +
20325 + time_left = RPMH_TIMEOUT_MS;
20326 +- for (i = 0; i < count; i++) {
20327 +- time_left = wait_for_completion_timeout(&compl, time_left);
20328 ++ while (i--) {
20329 ++ time_left = wait_for_completion_timeout(&compls[i], time_left);
20330 + if (!time_left) {
20331 + /*
20332 + * Better hope they never finish because they'll signal
20333 +- * the completion on our stack and that's bad once
20334 +- * we've returned from the function.
20335 ++ * the completion that we're going to free once
20336 ++ * we've returned from this function.
20337 + */
20338 + WARN_ON(1);
20339 + ret = -ETIMEDOUT;
20340 +@@ -407,7 +415,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
20341 + }
20342 +
20343 + exit:
20344 +- kfree(req);
20345 ++ kfree(ptr);
20346 +
20347 + return ret;
20348 + }
20349 +diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
20350 +index a33ee8ef8b6b..51625703399e 100644
20351 +--- a/drivers/soc/tegra/fuse/fuse-tegra.c
20352 ++++ b/drivers/soc/tegra/fuse/fuse-tegra.c
20353 +@@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
20354 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
20355 + fuse->phys = res->start;
20356 + fuse->base = devm_ioremap_resource(&pdev->dev, res);
20357 +- if (IS_ERR(fuse->base))
20358 +- return PTR_ERR(fuse->base);
20359 ++ if (IS_ERR(fuse->base)) {
20360 ++ err = PTR_ERR(fuse->base);
20361 ++ fuse->base = base;
20362 ++ return err;
20363 ++ }
20364 +
20365 + fuse->clk = devm_clk_get(&pdev->dev, "fuse");
20366 + if (IS_ERR(fuse->clk)) {
20367 + dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
20368 + PTR_ERR(fuse->clk));
20369 ++ fuse->base = base;
20370 + return PTR_ERR(fuse->clk);
20371 + }
20372 +
20373 +@@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
20374 +
20375 + if (fuse->soc->probe) {
20376 + err = fuse->soc->probe(fuse);
20377 +- if (err < 0)
20378 ++ if (err < 0) {
20379 ++ fuse->base = base;
20380 + return err;
20381 ++ }
20382 + }
20383 +
20384 + if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
20385 +diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
20386 +index a4aee26028cd..53b35c56a557 100644
20387 +--- a/drivers/spi/spi-gpio.c
20388 ++++ b/drivers/spi/spi-gpio.c
20389 +@@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
20390 + return status;
20391 +
20392 + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
20393 +- master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL;
20394 ++ master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
20395 ++ SPI_CS_HIGH;
20396 + master->flags = master_flags;
20397 + master->bus_num = pdev->id;
20398 + /* The master needs to think there is a chipselect even if not connected */
20399 +@@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
20400 + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
20401 + }
20402 + spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
20403 +- spi_gpio->bitbang.flags = SPI_CS_HIGH;
20404 +
20405 + status = spi_bitbang_start(&spi_gpio->bitbang);
20406 + if (status)
20407 +diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
20408 +index 2fd8881fcd65..8be304379628 100644
20409 +--- a/drivers/spi/spi-omap2-mcspi.c
20410 ++++ b/drivers/spi/spi-omap2-mcspi.c
20411 +@@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
20412 + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
20413 + cfg.src_addr_width = width;
20414 + cfg.dst_addr_width = width;
20415 +- cfg.src_maxburst = es;
20416 +- cfg.dst_maxburst = es;
20417 ++ cfg.src_maxburst = 1;
20418 ++ cfg.dst_maxburst = 1;
20419 +
20420 + rx = xfer->rx_buf;
20421 + tx = xfer->tx_buf;
20422 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
20423 +index d84b893a64d7..3e82eaad0f2d 100644
20424 +--- a/drivers/spi/spi-pxa2xx.c
20425 ++++ b/drivers/spi/spi-pxa2xx.c
20426 +@@ -1696,6 +1696,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
20427 + platform_info->enable_dma = false;
20428 + } else {
20429 + master->can_dma = pxa2xx_spi_can_dma;
20430 ++ master->max_dma_len = MAX_DMA_LEN;
20431 + }
20432 + }
20433 +
20434 +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
20435 +index 5f19016bbf10..b9fb6493cd6b 100644
20436 +--- a/drivers/spi/spi-ti-qspi.c
20437 ++++ b/drivers/spi/spi-ti-qspi.c
20438 +@@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi)
20439 + ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
20440 + if (qspi->ctrl_base) {
20441 + regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
20442 +- MEM_CS_EN(spi->chip_select),
20443 +- MEM_CS_MASK);
20444 ++ MEM_CS_MASK,
20445 ++ MEM_CS_EN(spi->chip_select));
20446 + }
20447 + qspi->mmap_enabled = true;
20448 + }
20449 +@@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi)
20450 + ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
20451 + if (qspi->ctrl_base)
20452 + regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
20453 +- 0, MEM_CS_MASK);
20454 ++ MEM_CS_MASK, 0);
20455 + qspi->mmap_enabled = false;
20456 + }
20457 +
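
Both ti-qspi hunks correct a swapped argument pair: regmap_update_bits() takes the mask before the value, so the old calls effectively used the enable value as the mask. A small userspace model of the read-modify-write semantics; the register contents and field layout below are invented for illustration.

#include <stdio.h>

/* bits set in mask are replaced by the matching bits of val,
 * everything else is preserved (regmap_update_bits() semantics) */
static void update_bits(unsigned int *reg, unsigned int mask,
			unsigned int val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int ctrl = 0xff;
	const unsigned int cs_mask = 0x700; /* assumed MEM_CS field */

	update_bits(&ctrl, cs_mask, 0x100); /* enable: mask first, then value */
	printf("enabled:  %#x\n", ctrl);
	update_bits(&ctrl, cs_mask, 0);     /* disable: clear the whole field */
	printf("disabled: %#x\n", ctrl);
	return 0;
}
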
20458 +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
20459 +index 90a8a9f1ac7d..910826df4a31 100644
20460 +--- a/drivers/staging/android/ashmem.c
20461 ++++ b/drivers/staging/android/ashmem.c
20462 +@@ -75,6 +75,9 @@ struct ashmem_range {
20463 + /* LRU list of unpinned pages, protected by ashmem_mutex */
20464 + static LIST_HEAD(ashmem_lru_list);
20465 +
20466 ++static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
20467 ++static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
20468 ++
20469 + /*
20470 + * long lru_count - The count of pages on our LRU list.
20471 + *
20472 +@@ -168,19 +171,15 @@ static inline void lru_del(struct ashmem_range *range)
20473 + * @end: The ending page (inclusive)
20474 + *
20475 + * This function is protected by ashmem_mutex.
20476 +- *
20477 +- * Return: 0 if successful, or -ENOMEM if there is an error
20478 + */
20479 +-static int range_alloc(struct ashmem_area *asma,
20480 +- struct ashmem_range *prev_range, unsigned int purged,
20481 +- size_t start, size_t end)
20482 ++static void range_alloc(struct ashmem_area *asma,
20483 ++ struct ashmem_range *prev_range, unsigned int purged,
20484 ++ size_t start, size_t end,
20485 ++ struct ashmem_range **new_range)
20486 + {
20487 +- struct ashmem_range *range;
20488 +-
20489 +- range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
20490 +- if (!range)
20491 +- return -ENOMEM;
20492 ++ struct ashmem_range *range = *new_range;
20493 +
20494 ++ *new_range = NULL;
20495 + range->asma = asma;
20496 + range->pgstart = start;
20497 + range->pgend = end;
20498 +@@ -190,8 +189,6 @@ static int range_alloc(struct ashmem_area *asma,
20499 +
20500 + if (range_on_lru(range))
20501 + lru_add(range);
20502 +-
20503 +- return 0;
20504 + }
20505 +
20506 + /**
20507 +@@ -438,7 +435,6 @@ out:
20508 + static unsigned long
20509 + ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
20510 + {
20511 +- struct ashmem_range *range, *next;
20512 + unsigned long freed = 0;
20513 +
20514 + /* We might recurse into filesystem code, so bail out if necessary */
20515 +@@ -448,21 +444,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
20516 + if (!mutex_trylock(&ashmem_mutex))
20517 + return -1;
20518 +
20519 +- list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
20520 ++ while (!list_empty(&ashmem_lru_list)) {
20521 ++ struct ashmem_range *range =
20522 ++ list_first_entry(&ashmem_lru_list, typeof(*range), lru);
20523 + loff_t start = range->pgstart * PAGE_SIZE;
20524 + loff_t end = (range->pgend + 1) * PAGE_SIZE;
20525 ++ struct file *f = range->asma->file;
20526 +
20527 +- range->asma->file->f_op->fallocate(range->asma->file,
20528 +- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
20529 +- start, end - start);
20530 ++ get_file(f);
20531 ++ atomic_inc(&ashmem_shrink_inflight);
20532 + range->purged = ASHMEM_WAS_PURGED;
20533 + lru_del(range);
20534 +
20535 + freed += range_size(range);
20536 ++ mutex_unlock(&ashmem_mutex);
20537 ++ f->f_op->fallocate(f,
20538 ++ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
20539 ++ start, end - start);
20540 ++ fput(f);
20541 ++ if (atomic_dec_and_test(&ashmem_shrink_inflight))
20542 ++ wake_up_all(&ashmem_shrink_wait);
20543 ++ if (!mutex_trylock(&ashmem_mutex))
20544 ++ goto out;
20545 + if (--sc->nr_to_scan <= 0)
20546 + break;
20547 + }
20548 + mutex_unlock(&ashmem_mutex);
20549 ++out:
20550 + return freed;
20551 + }
20552 +
20553 +@@ -582,7 +590,8 @@ static int get_name(struct ashmem_area *asma, void __user *name)
20554 + *
20555 + * Caller must hold ashmem_mutex.
20556 + */
20557 +-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
20558 ++static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
20559 ++ struct ashmem_range **new_range)
20560 + {
20561 + struct ashmem_range *range, *next;
20562 + int ret = ASHMEM_NOT_PURGED;
20563 +@@ -635,7 +644,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
20564 + * second half and adjust the first chunk's endpoint.
20565 + */
20566 + range_alloc(asma, range, range->purged,
20567 +- pgend + 1, range->pgend);
20568 ++ pgend + 1, range->pgend, new_range);
20569 + range_shrink(range, range->pgstart, pgstart - 1);
20570 + break;
20571 + }
20572 +@@ -649,7 +658,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
20573 + *
20574 + * Caller must hold ashmem_mutex.
20575 + */
20576 +-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
20577 ++static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
20578 ++ struct ashmem_range **new_range)
20579 + {
20580 + struct ashmem_range *range, *next;
20581 + unsigned int purged = ASHMEM_NOT_PURGED;
20582 +@@ -675,7 +685,8 @@ restart:
20583 + }
20584 + }
20585 +
20586 +- return range_alloc(asma, range, purged, pgstart, pgend);
20587 ++ range_alloc(asma, range, purged, pgstart, pgend, new_range);
20588 ++ return 0;
20589 + }
20590 +
20591 + /*
20592 +@@ -708,11 +719,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
20593 + struct ashmem_pin pin;
20594 + size_t pgstart, pgend;
20595 + int ret = -EINVAL;
20596 ++ struct ashmem_range *range = NULL;
20597 +
20598 + if (copy_from_user(&pin, p, sizeof(pin)))
20599 + return -EFAULT;
20600 +
20601 ++ if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
20602 ++ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
20603 ++ if (!range)
20604 ++ return -ENOMEM;
20605 ++ }
20606 ++
20607 + mutex_lock(&ashmem_mutex);
20608 ++ wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
20609 +
20610 + if (!asma->file)
20611 + goto out_unlock;
20612 +@@ -735,10 +754,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
20613 +
20614 + switch (cmd) {
20615 + case ASHMEM_PIN:
20616 +- ret = ashmem_pin(asma, pgstart, pgend);
20617 ++ ret = ashmem_pin(asma, pgstart, pgend, &range);
20618 + break;
20619 + case ASHMEM_UNPIN:
20620 +- ret = ashmem_unpin(asma, pgstart, pgend);
20621 ++ ret = ashmem_unpin(asma, pgstart, pgend, &range);
20622 + break;
20623 + case ASHMEM_GET_PIN_STATUS:
20624 + ret = ashmem_get_pin_status(asma, pgstart, pgend);
20625 +@@ -747,6 +766,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
20626 +
20627 + out_unlock:
20628 + mutex_unlock(&ashmem_mutex);
20629 ++ if (range)
20630 ++ kmem_cache_free(ashmem_range_cachep, range);
20631 +
20632 + return ret;
20633 + }
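
Two things happen in the ashmem changes: pin/unpin callers now allocate their range object with GFP_KERNEL before taking ashmem_mutex (so nothing under the lock can recurse into the shrinker through the allocator), and the shrinker drops the mutex around the fallocate() call while an inflight counter holds off those same callers. A rough pthread sketch of just the preallocation half, under invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct range { size_t start, end; };

static int unpin_range(size_t start, size_t end)
{
	/* allocate while no lock is held (kmem_cache_zalloc analogue) */
	struct range *r = calloc(1, sizeof(*r));

	if (!r)
		return -1;                 /* -ENOMEM */

	pthread_mutex_lock(&lock);
	r->start = start;                  /* consume the preallocated object */
	r->end = end;
	/* ... list manipulation that must not allocate or sleep ... */
	pthread_mutex_unlock(&lock);
	/* in the driver, an unconsumed object is freed after unlock */
	return 0;
}

int main(void)
{
	printf("unpin = %d\n", unpin_range(0, 4));
	return 0;
}
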
20634 +diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
20635 +index 0383f7548d48..20f2103a4ebf 100644
20636 +--- a/drivers/staging/android/ion/ion_system_heap.c
20637 ++++ b/drivers/staging/android/ion/ion_system_heap.c
20638 +@@ -223,10 +223,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
20639 + static int ion_system_heap_create_pools(struct ion_page_pool **pools)
20640 + {
20641 + int i;
20642 +- gfp_t gfp_flags = low_order_gfp_flags;
20643 +
20644 + for (i = 0; i < NUM_ORDERS; i++) {
20645 + struct ion_page_pool *pool;
20646 ++ gfp_t gfp_flags = low_order_gfp_flags;
20647 +
20648 + if (orders[i] > 4)
20649 + gfp_flags = high_order_gfp_flags;
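
The ion hunk is a one-liner against loop-carried state: gfp_flags was initialized once outside the loop, so after the first high-order pool every following low-order pool inherited the high-order flags. Moving the initialization into the loop resets it per pool, as this toy version shows:

#include <stdio.h>

int main(void)
{
	const int orders[4] = { 8, 4, 1, 0 };  /* invented pool orders */
	const int low = 1, high = 2;           /* gfp flag stand-ins */
	int i;

	for (i = 0; i < 4; i++) {
		int flags = low;               /* reset on every iteration */

		if (orders[i] > 4)
			flags = high;
		printf("order %d -> flags %d\n", orders[i], flags);
	}
	return 0;
}
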
20650 +diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
20651 +index a7d569cfca5d..0dff1ac057cd 100644
20652 +--- a/drivers/staging/comedi/comedidev.h
20653 ++++ b/drivers/staging/comedi/comedidev.h
20654 +@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
20655 + unsigned int mask);
20656 + unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
20657 + unsigned int *data);
20658 ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
20659 ++ struct comedi_cmd *cmd);
20660 + unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
20661 + unsigned int comedi_nscans_left(struct comedi_subdevice *s,
20662 + unsigned int nscans);
20663 +diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
20664 +index eefa62f42c0f..5a32b8fc000e 100644
20665 +--- a/drivers/staging/comedi/drivers.c
20666 ++++ b/drivers/staging/comedi/drivers.c
20667 +@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
20668 + EXPORT_SYMBOL_GPL(comedi_dio_update_state);
20669 +
20670 + /**
20671 +- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
20672 ++ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
20673 ++ * bytes
20674 + * @s: COMEDI subdevice.
20675 ++ * @cmd: COMEDI command.
20676 + *
20677 + * Determines the overall scan length according to the subdevice type and the
20678 +- * number of channels in the scan.
20679 ++ * number of channels in the scan for the specified command.
20680 + *
20681 + * For digital input, output or input/output subdevices, samples for
20682 + * multiple channels are assumed to be packed into one or more unsigned
20683 +@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
20684 + *
20685 + * Returns the overall scan length in bytes.
20686 + */
20687 +-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
20688 ++unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
20689 ++ struct comedi_cmd *cmd)
20690 + {
20691 +- struct comedi_cmd *cmd = &s->async->cmd;
20692 + unsigned int num_samples;
20693 + unsigned int bits_per_sample;
20694 +
20695 +@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
20696 + }
20697 + return comedi_samples_to_bytes(s, num_samples);
20698 + }
20699 ++EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
20700 ++
20701 ++/**
20702 ++ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
20703 ++ * @s: COMEDI subdevice.
20704 ++ *
20705 ++ * Determines the overall scan length according to the subdevice type and the
20706 ++ * number of channels in the scan for the current command.
20707 ++ *
20708 ++ * For digital input, output or input/output subdevices, samples for
20709 ++ * multiple channels are assumed to be packed into one or more unsigned
20710 ++ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
20711 ++ * flag. For other types of subdevice, samples are assumed to occupy a
20712 ++ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
20713 ++ *
20714 ++ * Returns the overall scan length in bytes.
20715 ++ */
20716 ++unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
20717 ++{
20718 ++ struct comedi_cmd *cmd = &s->async->cmd;
20719 ++
20720 ++ return comedi_bytes_per_scan_cmd(s, cmd);
20721 ++}
20722 + EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
20723 +
20724 + static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
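
The comedi change is a standard wrapper split: the computation gains an explicit command parameter so callers can evaluate a command still under test, while the old entry point becomes a one-line wrapper that passes the currently running command. Schematically, with the types pared down to the bare minimum:

#include <stdio.h>

struct cmd    { unsigned int chanlist_len; };
struct subdev { struct cmd async_cmd; };

/* new: works on any command, not just the active one */
static unsigned int bytes_per_scan_cmd(struct subdev *s, struct cmd *cmd)
{
	(void)s;                   /* subdevice-type handling elided */
	return cmd->chanlist_len * sizeof(unsigned int);
}

/* old entry point, now a thin wrapper over the current command */
static unsigned int bytes_per_scan(struct subdev *s)
{
	return bytes_per_scan_cmd(s, &s->async_cmd);
}

int main(void)
{
	struct subdev s = { .async_cmd = { .chanlist_len = 8 } };
	struct cmd probe = { .chanlist_len = 16 };

	printf("current: %u, probed: %u\n",
	       bytes_per_scan(&s), bytes_per_scan_cmd(&s, &probe));
	return 0;
}
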
20725 +diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
20726 +index e70a461e723f..405573e927cf 100644
20727 +--- a/drivers/staging/comedi/drivers/ni_660x.c
20728 ++++ b/drivers/staging/comedi/drivers/ni_660x.c
20729 +@@ -656,6 +656,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev,
20730 + case NI_660X_PFI_OUTPUT_DIO:
20731 + if (chan > 31)
20732 + return -EINVAL;
20733 ++ break;
20734 + default:
20735 + return -EINVAL;
20736 + }
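
The ni_660x hunk adds a single break: without it, a valid DIO channel fell through into the default: arm and the routing request was rejected. A compilable miniature of the bug and its fix:

#include <stdio.h>

static int set_routing(int source, int chan)
{
	switch (source) {
	case 1:                    /* NI_660X_PFI_OUTPUT_DIO analogue */
		if (chan > 31)
			return -22;  /* -EINVAL */
		break;               /* the one-line fix */
	default:
		return -22;
	}
	return 0;
}

int main(void)
{
	/* 0 after the fix; -22 before it, due to the fallthrough */
	printf("%d\n", set_routing(1, 5));
	return 0;
}
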
20737 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
20738 +index 5edf59ac6706..b04dad8c7092 100644
20739 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
20740 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
20741 +@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
20742 + struct comedi_subdevice *s, struct comedi_cmd *cmd)
20743 + {
20744 + struct ni_private *devpriv = dev->private;
20745 ++ unsigned int bytes_per_scan;
20746 + int err = 0;
20747 +
20748 + /* Step 1 : check if triggers are trivially valid */
20749 +@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
20750 + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
20751 + err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
20752 + cmd->chanlist_len);
20753 +- err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
20754 +- s->async->prealloc_bufsz /
20755 +- comedi_bytes_per_scan(s));
20756 ++ bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
20757 ++ if (bytes_per_scan) {
20758 ++ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
20759 ++ s->async->prealloc_bufsz /
20760 ++ bytes_per_scan);
20761 ++ }
20762 +
20763 + if (err)
20764 + return 3;
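
The ni_mio_common hunk guards a division: comedi_bytes_per_scan_cmd() can legitimately return 0 for an empty scan, and dividing the preallocated buffer size by it would oops. The shape of the fix, reduced to plain C:

#include <stdio.h>

int main(void)
{
	unsigned int bufsz = 65536, bytes_per_scan = 0, max_scans = 0;

	if (bytes_per_scan)        /* the guard the hunk adds */
		max_scans = bufsz / bytes_per_scan;
	printf("max_scans = %u\n", max_scans);
	return 0;
}
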
20765 +diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
20766 +index 833f052f79d0..b21ed5b4c711 100644
20767 +--- a/drivers/staging/erofs/dir.c
20768 ++++ b/drivers/staging/erofs/dir.c
20769 +@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
20770 + [EROFS_FT_SYMLINK] = DT_LNK,
20771 + };
20772 +
20773 ++static void debug_one_dentry(unsigned char d_type, const char *de_name,
20774 ++ unsigned int de_namelen)
20775 ++{
20776 ++#ifdef CONFIG_EROFS_FS_DEBUG
20777 ++	/* since the on-disk name may lack the trailing '\0' */
20778 ++ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
20779 ++
20780 ++ memcpy(dbg_namebuf, de_name, de_namelen);
20781 ++ dbg_namebuf[de_namelen] = '\0';
20782 ++
20783 ++ debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
20784 ++ de_namelen, d_type);
20785 ++#endif
20786 ++}
20787 ++
20788 + static int erofs_fill_dentries(struct dir_context *ctx,
20789 + void *dentry_blk, unsigned int *ofs,
20790 + unsigned int nameoff, unsigned int maxsize)
20791 +@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
20792 + de = dentry_blk + *ofs;
20793 + while (de < end) {
20794 + const char *de_name;
20795 +- int de_namelen;
20796 ++ unsigned int de_namelen;
20797 + unsigned char d_type;
20798 +-#ifdef CONFIG_EROFS_FS_DEBUG
20799 +- unsigned int dbg_namelen;
20800 +- unsigned char dbg_namebuf[EROFS_NAME_LEN];
20801 +-#endif
20802 +
20803 +- if (unlikely(de->file_type < EROFS_FT_MAX))
20804 ++ if (de->file_type < EROFS_FT_MAX)
20805 + d_type = erofs_filetype_table[de->file_type];
20806 + else
20807 + d_type = DT_UNKNOWN;
20808 +@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
20809 + nameoff = le16_to_cpu(de->nameoff);
20810 + de_name = (char *)dentry_blk + nameoff;
20811 +
20812 +- de_namelen = unlikely(de + 1 >= end) ?
20813 +- /* last directory entry */
20814 +- strnlen(de_name, maxsize - nameoff) :
20815 +- le16_to_cpu(de[1].nameoff) - nameoff;
20816 ++ /* the last dirent in the block? */
20817 ++ if (de + 1 >= end)
20818 ++ de_namelen = strnlen(de_name, maxsize - nameoff);
20819 ++ else
20820 ++ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
20821 +
20822 + /* a corrupted entry is found */
20823 +- if (unlikely(de_namelen < 0)) {
20824 ++ if (unlikely(nameoff + de_namelen > maxsize ||
20825 ++ de_namelen > EROFS_NAME_LEN)) {
20826 + DBG_BUGON(1);
20827 + return -EIO;
20828 + }
20829 +
20830 +-#ifdef CONFIG_EROFS_FS_DEBUG
20831 +- dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
20832 +- memcpy(dbg_namebuf, de_name, dbg_namelen);
20833 +- dbg_namebuf[dbg_namelen] = '\0';
20834 +-
20835 +- debugln("%s, found de_name %s de_len %d d_type %d", __func__,
20836 +- dbg_namebuf, de_namelen, d_type);
20837 +-#endif
20838 +-
20839 ++ debug_one_dentry(d_type, de_name, de_namelen);
20840 + if (!dir_emit(ctx, de_name, de_namelen,
20841 + le64_to_cpu(de->nid), d_type))
20842 + /* stopped by some reason */
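
The dir.c rework replaces a signed-underflow check with explicit bounds on the name: it must stay inside the block and within the on-disk name-length limit. A standalone rendition of that validation; the constants mirror the EROFS limits but are restated here for illustration.

#include <stdio.h>

#define BLK 4096
#define NAME_MAX_LEN 255           /* EROFS_NAME_LEN analogue */

/* reject a dirent whose name would run past the block or the limit */
static int check_dirent(unsigned int nameoff, unsigned int namelen,
			unsigned int maxsize)
{
	if (nameoff + namelen > maxsize || namelen > NAME_MAX_LEN)
		return -5;         /* -EIO: corrupted entry */
	return 0;
}

int main(void)
{
	printf("%d\n", check_dirent(4000, 200, BLK)); /* rejected */
	printf("%d\n", check_dirent(100, 12, BLK));   /* accepted */
	return 0;
}
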
20843 +diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
20844 +index d7fbf5f4600f..f99954dbfdb5 100644
20845 +--- a/drivers/staging/erofs/inode.c
20846 ++++ b/drivers/staging/erofs/inode.c
20847 +@@ -185,16 +185,16 @@ static int fill_inode(struct inode *inode, int isdir)
20848 + /* setup the new inode */
20849 + if (S_ISREG(inode->i_mode)) {
20850 + #ifdef CONFIG_EROFS_FS_XATTR
20851 +- if (vi->xattr_isize)
20852 +- inode->i_op = &erofs_generic_xattr_iops;
20853 ++ inode->i_op = &erofs_generic_xattr_iops;
20854 + #endif
20855 + inode->i_fop = &generic_ro_fops;
20856 + } else if (S_ISDIR(inode->i_mode)) {
20857 + inode->i_op =
20858 + #ifdef CONFIG_EROFS_FS_XATTR
20859 +- vi->xattr_isize ? &erofs_dir_xattr_iops :
20860 +-#endif
20861 ++ &erofs_dir_xattr_iops;
20862 ++#else
20863 + &erofs_dir_iops;
20864 ++#endif
20865 + inode->i_fop = &erofs_dir_fops;
20866 + } else if (S_ISLNK(inode->i_mode)) {
20867 + /* by default, page_get_link is used for symlink */
20868 +diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
20869 +index e049d00c087a..16249d7f0895 100644
20870 +--- a/drivers/staging/erofs/internal.h
20871 ++++ b/drivers/staging/erofs/internal.h
20872 +@@ -354,12 +354,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
20873 + return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
20874 + }
20875 +
20876 +-#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
20877 +-#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
20878 ++/* atomic flag definitions */
20879 ++#define EROFS_V_EA_INITED_BIT 0
20880 ++
20881 ++/* bitlock definitions (arranged in reverse order) */
20882 ++#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
20883 +
20884 + struct erofs_vnode {
20885 + erofs_nid_t nid;
20886 +- unsigned int flags;
20887 ++
20888 ++ /* atomic flags (including bitlocks) */
20889 ++ unsigned long flags;
20890 +
20891 + unsigned char data_mapping_mode;
20892 + /* inline size in bytes */
20893 +diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
20894 +index 5596c52e246d..ecc51ef0753f 100644
20895 +--- a/drivers/staging/erofs/namei.c
20896 ++++ b/drivers/staging/erofs/namei.c
20897 +@@ -15,74 +15,77 @@
20898 +
20899 + #include <trace/events/erofs.h>
20900 +
20901 +-/* based on the value of qn->len is accurate */
20902 +-static inline int dirnamecmp(struct qstr *qn,
20903 +- struct qstr *qd, unsigned int *matched)
20904 ++struct erofs_qstr {
20905 ++ const unsigned char *name;
20906 ++ const unsigned char *end;
20907 ++};
20908 ++
20909 ++/* assumes the end of qn is accurate and that qn has the trailing '\0' */
20910 ++static inline int dirnamecmp(const struct erofs_qstr *qn,
20911 ++ const struct erofs_qstr *qd,
20912 ++ unsigned int *matched)
20913 + {
20914 +- unsigned int i = *matched, len = min(qn->len, qd->len);
20915 +-loop:
20916 +- if (unlikely(i >= len)) {
20917 +- *matched = i;
20918 +- if (qn->len < qd->len) {
20919 +- /*
20920 +- * actually (qn->len == qd->len)
20921 +- * when qd->name[i] == '\0'
20922 +- */
20923 +- return qd->name[i] == '\0' ? 0 : -1;
20924 ++ unsigned int i = *matched;
20925 ++
20926 ++ /*
20927 ++	 * on-disk error: only BUG_ON in debugging mode;
20928 ++	 * otherwise, return 1 to simply skip the invalid name
20929 ++	 * and carry on (for the sake of lookup performance).
20930 ++ */
20931 ++ DBG_BUGON(qd->name > qd->end);
20932 ++
20933 ++	/* qd may lack a trailing '\0' */
20934 ++	/* however, access below qd->end is always safe */
20935 ++ while (qd->name + i < qd->end && qd->name[i] != '\0') {
20936 ++ if (qn->name[i] != qd->name[i]) {
20937 ++ *matched = i;
20938 ++ return qn->name[i] > qd->name[i] ? 1 : -1;
20939 + }
20940 +- return (qn->len > qd->len);
20941 ++ ++i;
20942 + }
20943 +-
20944 +- if (qn->name[i] != qd->name[i]) {
20945 +- *matched = i;
20946 +- return qn->name[i] > qd->name[i] ? 1 : -1;
20947 +- }
20948 +-
20949 +- ++i;
20950 +- goto loop;
20951 ++ *matched = i;
20952 ++ /* See comments in __d_alloc on the terminating NUL character */
20953 ++ return qn->name[i] == '\0' ? 0 : 1;
20954 + }
20955 +
20956 +-static struct erofs_dirent *find_target_dirent(
20957 +- struct qstr *name,
20958 +- u8 *data, int maxsize)
20959 ++#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
20960 ++
20961 ++static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
20962 ++ u8 *data,
20963 ++ unsigned int dirblksize,
20964 ++ const int ndirents)
20965 + {
20966 +- unsigned int ndirents, head, back;
20967 ++ int head, back;
20968 + unsigned int startprfx, endprfx;
20969 + struct erofs_dirent *const de = (struct erofs_dirent *)data;
20970 +
20971 +- /* make sure that maxsize is valid */
20972 +- BUG_ON(maxsize < sizeof(struct erofs_dirent));
20973 +-
20974 +- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
20975 +-
20976 +- /* corrupted dir (may be unnecessary...) */
20977 +- BUG_ON(!ndirents);
20978 +-
20979 +- head = 0;
20980 ++ /* since the 1st dirent has been evaluated previously */
20981 ++ head = 1;
20982 + back = ndirents - 1;
20983 + startprfx = endprfx = 0;
20984 +
20985 + while (head <= back) {
20986 +- unsigned int mid = head + (back - head) / 2;
20987 +- unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
20988 ++ const int mid = head + (back - head) / 2;
20989 ++ const int nameoff = nameoff_from_disk(de[mid].nameoff,
20990 ++ dirblksize);
20991 + unsigned int matched = min(startprfx, endprfx);
20992 +-
20993 +- struct qstr dname = QSTR_INIT(data + nameoff,
20994 +- unlikely(mid >= ndirents - 1) ?
20995 +- maxsize - nameoff :
20996 +- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
20997 ++ struct erofs_qstr dname = {
20998 ++ .name = data + nameoff,
20999 ++ .end = unlikely(mid >= ndirents - 1) ?
21000 ++ data + dirblksize :
21001 ++ data + nameoff_from_disk(de[mid + 1].nameoff,
21002 ++ dirblksize)
21003 ++ };
21004 +
21005 + /* string comparison without already matched prefix */
21006 + int ret = dirnamecmp(name, &dname, &matched);
21007 +
21008 +- if (unlikely(!ret))
21009 ++ if (unlikely(!ret)) {
21010 + return de + mid;
21011 +- else if (ret > 0) {
21012 ++ } else if (ret > 0) {
21013 + head = mid + 1;
21014 + startprfx = matched;
21015 +- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
21016 +- break;
21017 +- else {
21018 ++ } else {
21019 + back = mid - 1;
21020 + endprfx = matched;
21021 + }
21022 +@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
21023 + return ERR_PTR(-ENOENT);
21024 + }
21025 +
21026 +-static struct page *find_target_block_classic(
21027 +- struct inode *dir,
21028 +- struct qstr *name, int *_diff)
21029 ++static struct page *find_target_block_classic(struct inode *dir,
21030 ++ struct erofs_qstr *name,
21031 ++ int *_ndirents)
21032 + {
21033 + unsigned int startprfx, endprfx;
21034 +- unsigned int head, back;
21035 ++ int head, back;
21036 + struct address_space *const mapping = dir->i_mapping;
21037 + struct page *candidate = ERR_PTR(-ENOENT);
21038 +
21039 +@@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
21040 + back = inode_datablocks(dir) - 1;
21041 +
21042 + while (head <= back) {
21043 +- unsigned int mid = head + (back - head) / 2;
21044 ++ const int mid = head + (back - head) / 2;
21045 + struct page *page = read_mapping_page(mapping, mid, NULL);
21046 +
21047 +- if (IS_ERR(page)) {
21048 +-exact_out:
21049 +- if (!IS_ERR(candidate)) /* valid candidate */
21050 +- put_page(candidate);
21051 +- return page;
21052 +- } else {
21053 +- int diff;
21054 +- unsigned int ndirents, matched;
21055 +- struct qstr dname;
21056 ++ if (!IS_ERR(page)) {
21057 + struct erofs_dirent *de = kmap_atomic(page);
21058 +- unsigned int nameoff = le16_to_cpu(de->nameoff);
21059 +-
21060 +- ndirents = nameoff / sizeof(*de);
21061 ++ const int nameoff = nameoff_from_disk(de->nameoff,
21062 ++ EROFS_BLKSIZ);
21063 ++ const int ndirents = nameoff / sizeof(*de);
21064 ++ int diff;
21065 ++ unsigned int matched;
21066 ++ struct erofs_qstr dname;
21067 +
21068 +- /* corrupted dir (should have one entry at least) */
21069 +- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
21070 ++ if (unlikely(!ndirents)) {
21071 ++ DBG_BUGON(1);
21072 ++ kunmap_atomic(de);
21073 ++ put_page(page);
21074 ++ page = ERR_PTR(-EIO);
21075 ++ goto out;
21076 ++ }
21077 +
21078 + matched = min(startprfx, endprfx);
21079 +
21080 + dname.name = (u8 *)de + nameoff;
21081 +- dname.len = ndirents == 1 ?
21082 +- /* since the rest of the last page is 0 */
21083 +- EROFS_BLKSIZ - nameoff
21084 +- : le16_to_cpu(de[1].nameoff) - nameoff;
21085 ++ if (ndirents == 1)
21086 ++ dname.end = (u8 *)de + EROFS_BLKSIZ;
21087 ++ else
21088 ++ dname.end = (u8 *)de +
21089 ++ nameoff_from_disk(de[1].nameoff,
21090 ++ EROFS_BLKSIZ);
21091 +
21092 + /* string comparison without already matched prefix */
21093 + diff = dirnamecmp(name, &dname, &matched);
21094 + kunmap_atomic(de);
21095 +
21096 + if (unlikely(!diff)) {
21097 +- *_diff = 0;
21098 +- goto exact_out;
21099 ++ *_ndirents = 0;
21100 ++ goto out;
21101 + } else if (diff > 0) {
21102 + head = mid + 1;
21103 + startprfx = matched;
21104 +@@ -147,45 +152,51 @@ exact_out:
21105 + if (likely(!IS_ERR(candidate)))
21106 + put_page(candidate);
21107 + candidate = page;
21108 ++ *_ndirents = ndirents;
21109 + } else {
21110 + put_page(page);
21111 +
21112 +- if (unlikely(mid < 1)) /* fix "mid" overflow */
21113 +- break;
21114 +-
21115 + back = mid - 1;
21116 + endprfx = matched;
21117 + }
21118 ++ continue;
21119 + }
21120 ++out:	/* put the candidate page if it is still valid */
21121 ++ if (!IS_ERR(candidate))
21122 ++ put_page(candidate);
21123 ++ return page;
21124 + }
21125 +- *_diff = 1;
21126 + return candidate;
21127 + }
21128 +
21129 + int erofs_namei(struct inode *dir,
21130 +- struct qstr *name,
21131 +- erofs_nid_t *nid, unsigned int *d_type)
21132 ++ struct qstr *name,
21133 ++ erofs_nid_t *nid, unsigned int *d_type)
21134 + {
21135 +- int diff;
21136 ++ int ndirents;
21137 + struct page *page;
21138 +- u8 *data;
21139 ++ void *data;
21140 + struct erofs_dirent *de;
21141 ++ struct erofs_qstr qn;
21142 +
21143 + if (unlikely(!dir->i_size))
21144 + return -ENOENT;
21145 +
21146 +- diff = 1;
21147 +- page = find_target_block_classic(dir, name, &diff);
21148 ++ qn.name = name->name;
21149 ++ qn.end = name->name + name->len;
21150 ++
21151 ++ ndirents = 0;
21152 ++ page = find_target_block_classic(dir, &qn, &ndirents);
21153 +
21154 + if (unlikely(IS_ERR(page)))
21155 + return PTR_ERR(page);
21156 +
21157 + data = kmap_atomic(page);
21158 + /* the target page has been mapped */
21159 +- de = likely(diff) ?
21160 +- /* since the rest of the last page is 0 */
21161 +- find_target_dirent(name, data, EROFS_BLKSIZ) :
21162 +- (struct erofs_dirent *)data;
21163 ++ if (ndirents)
21164 ++ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
21165 ++ else
21166 ++ de = (struct erofs_dirent *)data;
21167 +
21168 + if (likely(!IS_ERR(de))) {
21169 + *nid = le64_to_cpu(de->nid);
21170 +diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
21171 +index 4ac1099a39c6..d850be1abc84 100644
21172 +--- a/drivers/staging/erofs/unzip_vle.c
21173 ++++ b/drivers/staging/erofs/unzip_vle.c
21174 +@@ -107,15 +107,30 @@ enum z_erofs_vle_work_role {
21175 + Z_EROFS_VLE_WORK_SECONDARY,
21176 + Z_EROFS_VLE_WORK_PRIMARY,
21177 + /*
21178 +- * The current work has at least been linked with the following
21179 +- * processed chained works, which means if the processing page
21180 +- * is the tail partial page of the work, the current work can
21181 +- * safely use the whole page, as illustrated below:
21182 +- * +--------------+-------------------------------------------+
21183 +- * | tail page | head page (of the previous work) |
21184 +- * +--------------+-------------------------------------------+
21185 +- * /\ which belongs to the current work
21186 +- * [ (*) this page can be used for the current work itself. ]
21187 ++	 * The current work was the tail of an existing chain, and the previously
21188 ++	 * processed chained works are all committed to hook up to it.
21189 ++ * A new chain should be created for the remaining unprocessed works,
21190 ++ * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
21191 ++ * the next work cannot reuse the whole page in the following scenario:
21192 ++ * ________________________________________________________________
21193 ++ * | tail (partial) page | head (partial) page |
21194 ++ * | (belongs to the next work) | (belongs to the current work) |
21195 ++ * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
21196 ++ */
21197 ++ Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
21198 ++ /*
21199 ++ * The current work has been linked with the processed chained works,
21200 ++	 * and could also be linked with the potential remaining works, which
21201 ++ * means if the processing page is the tail partial page of the work,
21202 ++ * the current work can safely use the whole page (since the next work
21203 ++ * is under control) for in-place decompression, as illustrated below:
21204 ++ * ________________________________________________________________
21205 ++ * | tail (partial) page | head (partial) page |
21206 ++ * | (of the current work) | (of the previous work) |
21207 ++ * | PRIMARY_FOLLOWED or | |
21208 ++ * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
21209 ++ *
21210 ++ * [ (*) the above page can be used for the current work itself. ]
21211 + */
21212 + Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
21213 + Z_EROFS_VLE_WORK_MAX
21214 +@@ -315,10 +330,10 @@ static int z_erofs_vle_work_add_page(
21215 + return ret ? 0 : -EAGAIN;
21216 + }
21217 +
21218 +-static inline bool try_to_claim_workgroup(
21219 +- struct z_erofs_vle_workgroup *grp,
21220 +- z_erofs_vle_owned_workgrp_t *owned_head,
21221 +- bool *hosted)
21222 ++static enum z_erofs_vle_work_role
21223 ++try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
21224 ++ z_erofs_vle_owned_workgrp_t *owned_head,
21225 ++ bool *hosted)
21226 + {
21227 + DBG_BUGON(*hosted == true);
21228 +
21229 +@@ -332,6 +347,9 @@ retry:
21230 +
21231 + *owned_head = &grp->next;
21232 + *hosted = true;
21233 ++ /* lucky, I am the followee :) */
21234 ++ return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
21235 ++
21236 + } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
21237 + /*
21238 + * type 2, link to the end of a existing open chain,
21239 +@@ -341,12 +359,11 @@ retry:
21240 + if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
21241 + *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
21242 + goto retry;
21243 +-
21244 + *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
21245 +- } else
21246 +- return false; /* :( better luck next time */
21247 ++ return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
21248 ++ }
21249 +
21250 +- return true; /* lucky, I am the followee :) */
21251 ++ return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
21252 + }
21253 +
21254 + struct z_erofs_vle_work_finder {
21255 +@@ -424,12 +441,9 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
21256 + *f->hosted = false;
21257 + if (!primary)
21258 + *f->role = Z_EROFS_VLE_WORK_SECONDARY;
21259 +- /* claim the workgroup if possible */
21260 +- else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted))
21261 +- *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
21262 +- else
21263 +- *f->role = Z_EROFS_VLE_WORK_PRIMARY;
21264 +-
21265 ++ else /* claim the workgroup if possible */
21266 ++ *f->role = try_to_claim_workgroup(grp, f->owned_head,
21267 ++ f->hosted);
21268 + return work;
21269 + }
21270 +
21271 +@@ -493,6 +507,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
21272 + return work;
21273 + }
21274 +
21275 ++#define builder_is_hooked(builder) \
21276 ++ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
21277 ++
21278 + #define builder_is_followed(builder) \
21279 + ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
21280 +
21281 +@@ -686,7 +703,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
21282 + struct z_erofs_vle_work_builder *const builder = &fe->builder;
21283 + const loff_t offset = page_offset(page);
21284 +
21285 +- bool tight = builder_is_followed(builder);
21286 ++ bool tight = builder_is_hooked(builder);
21287 + struct z_erofs_vle_work *work = builder->work;
21288 +
21289 + enum z_erofs_cache_alloctype cache_strategy;
21290 +@@ -704,8 +721,12 @@ repeat:
21291 +
21292 + /* lucky, within the range of the current map_blocks */
21293 + if (offset + cur >= map->m_la &&
21294 +- offset + cur < map->m_la + map->m_llen)
21295 ++ offset + cur < map->m_la + map->m_llen) {
21296 ++ /* didn't get a valid unzip work previously (very rare) */
21297 ++ if (!builder->work)
21298 ++ goto restart_now;
21299 + goto hitted;
21300 ++ }
21301 +
21302 + /* go ahead the next map_blocks */
21303 + debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
21304 +@@ -719,6 +740,7 @@ repeat:
21305 + if (unlikely(err))
21306 + goto err_out;
21307 +
21308 ++restart_now:
21309 + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
21310 + goto hitted;
21311 +
21312 +@@ -740,7 +762,7 @@ repeat:
21313 + map->m_plen / PAGE_SIZE,
21314 + cache_strategy, page_pool, GFP_KERNEL);
21315 +
21316 +- tight &= builder_is_followed(builder);
21317 ++ tight &= builder_is_hooked(builder);
21318 + work = builder->work;
21319 + hitted:
21320 + cur = end - min_t(unsigned int, offset + end - map->m_la, end);
21321 +@@ -755,6 +777,9 @@ hitted:
21322 + (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
21323 + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
21324 +
21325 ++ if (cur)
21326 ++ tight &= builder_is_followed(builder);
21327 ++
21328 + retry:
21329 + err = z_erofs_vle_work_add_page(builder, page, page_type);
21330 + /* should allocate an additional staging page for pagevec */
21331 +@@ -952,6 +977,7 @@ repeat:
21332 + overlapped = false;
21333 + compressed_pages = grp->compressed_pages;
21334 +
21335 ++ err = 0;
21336 + for (i = 0; i < clusterpages; ++i) {
21337 + unsigned int pagenr;
21338 +
21339 +@@ -961,26 +987,39 @@ repeat:
21340 + DBG_BUGON(!page);
21341 + DBG_BUGON(!page->mapping);
21342 +
21343 +- if (z_erofs_is_stagingpage(page))
21344 +- continue;
21345 ++ if (!z_erofs_is_stagingpage(page)) {
21346 + #ifdef EROFS_FS_HAS_MANAGED_CACHE
21347 +- if (page->mapping == MNGD_MAPPING(sbi)) {
21348 +- DBG_BUGON(!PageUptodate(page));
21349 +- continue;
21350 +- }
21351 ++ if (page->mapping == MNGD_MAPPING(sbi)) {
21352 ++ if (unlikely(!PageUptodate(page)))
21353 ++ err = -EIO;
21354 ++ continue;
21355 ++ }
21356 + #endif
21357 +
21358 +- /* only non-head page could be reused as a compressed page */
21359 +- pagenr = z_erofs_onlinepage_index(page);
21360 ++ /*
21361 ++			 * only a non-head page can be selected
21362 ++			 * for in-place decompression
21363 ++ */
21364 ++ pagenr = z_erofs_onlinepage_index(page);
21365 +
21366 +- DBG_BUGON(pagenr >= nr_pages);
21367 +- DBG_BUGON(pages[pagenr]);
21368 +- ++sparsemem_pages;
21369 +- pages[pagenr] = page;
21370 ++ DBG_BUGON(pagenr >= nr_pages);
21371 ++ DBG_BUGON(pages[pagenr]);
21372 ++ ++sparsemem_pages;
21373 ++ pages[pagenr] = page;
21374 +
21375 +- overlapped = true;
21376 ++ overlapped = true;
21377 ++ }
21378 ++
21379 ++		/* PG_error needs checking for in-place and staging pages */
21380 ++ if (unlikely(PageError(page))) {
21381 ++ DBG_BUGON(PageUptodate(page));
21382 ++ err = -EIO;
21383 ++ }
21384 + }
21385 +
21386 ++ if (unlikely(err))
21387 ++ goto out;
21388 ++
21389 + llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
21390 +
21391 + if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
21392 +@@ -992,11 +1031,10 @@ repeat:
21393 + if (llen > grp->llen)
21394 + llen = grp->llen;
21395 +
21396 +- err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
21397 +- clusterpages, pages, llen, work->pageofs,
21398 +- z_erofs_onlinepage_endio);
21399 ++ err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
21400 ++ pages, llen, work->pageofs);
21401 + if (err != -ENOTSUPP)
21402 +- goto out_percpu;
21403 ++ goto out;
21404 +
21405 + if (sparsemem_pages >= nr_pages)
21406 + goto skip_allocpage;
21407 +@@ -1010,6 +1048,10 @@ repeat:
21408 +
21409 + skip_allocpage:
21410 + vout = erofs_vmap(pages, nr_pages);
21411 ++ if (!vout) {
21412 ++ err = -ENOMEM;
21413 ++ goto out;
21414 ++ }
21415 +
21416 + err = z_erofs_vle_unzip_vmap(compressed_pages,
21417 + clusterpages, vout, llen, work->pageofs, overlapped);
21418 +@@ -1017,8 +1059,25 @@ skip_allocpage:
21419 + erofs_vunmap(vout, nr_pages);
21420 +
21421 + out:
21422 ++	/* must handle all compressed pages before ending pages */
21423 ++ for (i = 0; i < clusterpages; ++i) {
21424 ++ page = compressed_pages[i];
21425 ++
21426 ++#ifdef EROFS_FS_HAS_MANAGED_CACHE
21427 ++ if (page->mapping == MNGD_MAPPING(sbi))
21428 ++ continue;
21429 ++#endif
21430 ++ /* recycle all individual staging pages */
21431 ++ (void)z_erofs_gather_if_stagingpage(page_pool, page);
21432 ++
21433 ++ WRITE_ONCE(compressed_pages[i], NULL);
21434 ++ }
21435 ++
21436 + for (i = 0; i < nr_pages; ++i) {
21437 + page = pages[i];
21438 ++ if (!page)
21439 ++ continue;
21440 ++
21441 + DBG_BUGON(!page->mapping);
21442 +
21443 + /* recycle all individual staging pages */
21444 +@@ -1031,20 +1090,6 @@ out:
21445 + z_erofs_onlinepage_endio(page);
21446 + }
21447 +
21448 +-out_percpu:
21449 +- for (i = 0; i < clusterpages; ++i) {
21450 +- page = compressed_pages[i];
21451 +-
21452 +-#ifdef EROFS_FS_HAS_MANAGED_CACHE
21453 +- if (page->mapping == MNGD_MAPPING(sbi))
21454 +- continue;
21455 +-#endif
21456 +- /* recycle all individual staging pages */
21457 +- (void)z_erofs_gather_if_stagingpage(page_pool, page);
21458 +-
21459 +- WRITE_ONCE(compressed_pages[i], NULL);
21460 +- }
21461 +-
21462 + if (pages == z_pagemap_global)
21463 + mutex_unlock(&z_pagemap_global_lock);
21464 + else if (unlikely(pages != pages_onstack))
21465 +@@ -1172,6 +1217,7 @@ repeat:
21466 + if (page->mapping == mc) {
21467 + WRITE_ONCE(grp->compressed_pages[nr], page);
21468 +
21469 ++ ClearPageError(page);
21470 + if (!PagePrivate(page)) {
21471 + /*
21472 + * impossible to be !PagePrivate(page) for
21473 +diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
21474 +index 5a4e1b62c0d1..c0dfd6906aa8 100644
21475 +--- a/drivers/staging/erofs/unzip_vle.h
21476 ++++ b/drivers/staging/erofs/unzip_vle.h
21477 +@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
21478 +
21479 + extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
21480 + unsigned clusterpages, struct page **pages,
21481 +- unsigned outlen, unsigned short pageofs,
21482 +- void (*endio)(struct page *));
21483 ++ unsigned int outlen, unsigned short pageofs);
21484 +
21485 + extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
21486 + unsigned clusterpages, void *vaddr, unsigned llen,
21487 +diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
21488 +index 52797bd89da1..3e8b0ff2efeb 100644
21489 +--- a/drivers/staging/erofs/unzip_vle_lz4.c
21490 ++++ b/drivers/staging/erofs/unzip_vle_lz4.c
21491 +@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
21492 + unsigned int clusterpages,
21493 + struct page **pages,
21494 + unsigned int outlen,
21495 +- unsigned short pageofs,
21496 +- void (*endio)(struct page *))
21497 ++ unsigned short pageofs)
21498 + {
21499 + void *vin, *vout;
21500 + unsigned int nr_pages, i, j;
21501 +@@ -137,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
21502 +
21503 + nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
21504 +
21505 +- if (clusterpages == 1)
21506 ++ if (clusterpages == 1) {
21507 + vin = kmap_atomic(compressed_pages[0]);
21508 +- else
21509 ++ } else {
21510 + vin = erofs_vmap(compressed_pages, clusterpages);
21511 ++ if (!vin)
21512 ++ return -ENOMEM;
21513 ++ }
21514 +
21515 + preempt_disable();
21516 + vout = erofs_pcpubuf[smp_processor_id()].data;
21517 +@@ -148,19 +150,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
21518 + ret = z_erofs_unzip_lz4(vin, vout + pageofs,
21519 + clusterpages * PAGE_SIZE, outlen);
21520 +
21521 +- if (ret >= 0) {
21522 +- outlen = ret;
21523 +- ret = 0;
21524 +- }
21525 ++ if (ret < 0)
21526 ++ goto out;
21527 ++ ret = 0;
21528 +
21529 + for (i = 0; i < nr_pages; ++i) {
21530 + j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
21531 +
21532 + if (pages[i]) {
21533 +- if (ret < 0) {
21534 +- SetPageError(pages[i]);
21535 +- } else if (clusterpages == 1 &&
21536 +- pages[i] == compressed_pages[0]) {
21537 ++ if (clusterpages == 1 &&
21538 ++ pages[i] == compressed_pages[0]) {
21539 + memcpy(vin + pageofs, vout + pageofs, j);
21540 + } else {
21541 + void *dst = kmap_atomic(pages[i]);
21542 +@@ -168,12 +167,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
21543 + memcpy(dst + pageofs, vout + pageofs, j);
21544 + kunmap_atomic(dst);
21545 + }
21546 +- endio(pages[i]);
21547 + }
21548 + vout += PAGE_SIZE;
21549 + outlen -= j;
21550 + pageofs = 0;
21551 + }
21552 ++
21553 ++out:
21554 + preempt_enable();
21555 +
21556 + if (clusterpages == 1)
21557 +diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
21558 +index 80dca6a4adbe..6cb05ae31233 100644
21559 +--- a/drivers/staging/erofs/xattr.c
21560 ++++ b/drivers/staging/erofs/xattr.c
21561 +@@ -44,19 +44,48 @@ static inline void xattr_iter_end_final(struct xattr_iter *it)
21562 +
21563 + static int init_inode_xattrs(struct inode *inode)
21564 + {
21565 ++ struct erofs_vnode *const vi = EROFS_V(inode);
21566 + struct xattr_iter it;
21567 + unsigned int i;
21568 + struct erofs_xattr_ibody_header *ih;
21569 + struct super_block *sb;
21570 + struct erofs_sb_info *sbi;
21571 +- struct erofs_vnode *vi;
21572 + bool atomic_map;
21573 ++ int ret = 0;
21574 +
21575 +- if (likely(inode_has_inited_xattr(inode)))
21576 ++	/* in most cases, the xattrs of this inode have already been initialized. */
21577 ++ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
21578 + return 0;
21579 +
21580 +- vi = EROFS_V(inode);
21581 +- BUG_ON(!vi->xattr_isize);
21582 ++ if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
21583 ++ return -ERESTARTSYS;
21584 ++
21585 ++ /* someone has initialized xattrs for us? */
21586 ++ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
21587 ++ goto out_unlock;
21588 ++
21589 ++ /*
21590 ++ * bypass all xattr operations if ->xattr_isize is not greater than
21591 ++ * sizeof(struct erofs_xattr_ibody_header), in detail:
21592 ++	 * 1) if it is too small to contain erofs_xattr_ibody_header, then
21593 ++	 *    ->xattr_isize must be 0 (it means no xattrs);
21594 ++	 * 2) if it exactly fits erofs_xattr_ibody_header, the layout is
21595 ++	 *    currently undefined on-disk (it may be used later with a new sb feature).
21596 ++ */
21597 ++ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
21598 ++ errln("xattr_isize %d of nid %llu is not supported yet",
21599 ++ vi->xattr_isize, vi->nid);
21600 ++ ret = -ENOTSUPP;
21601 ++ goto out_unlock;
21602 ++ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
21603 ++ if (unlikely(vi->xattr_isize)) {
21604 ++ DBG_BUGON(1);
21605 ++ ret = -EIO;
21606 ++ goto out_unlock; /* xattr ondisk layout error */
21607 ++ }
21608 ++ ret = -ENOATTR;
21609 ++ goto out_unlock;
21610 ++ }
21611 +
21612 + sb = inode->i_sb;
21613 + sbi = EROFS_SB(sb);
21614 +@@ -64,8 +93,10 @@ static int init_inode_xattrs(struct inode *inode)
21615 + it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
21616 +
21617 + it.page = erofs_get_inline_page(inode, it.blkaddr);
21618 +- if (IS_ERR(it.page))
21619 +- return PTR_ERR(it.page);
21620 ++ if (IS_ERR(it.page)) {
21621 ++ ret = PTR_ERR(it.page);
21622 ++ goto out_unlock;
21623 ++ }
21624 +
21625 + /* read in shared xattr array (non-atomic, see kmalloc below) */
21626 + it.kaddr = kmap(it.page);
21627 +@@ -78,7 +109,8 @@ static int init_inode_xattrs(struct inode *inode)
21628 + sizeof(uint), GFP_KERNEL);
21629 + if (vi->xattr_shared_xattrs == NULL) {
21630 + xattr_iter_end(&it, atomic_map);
21631 +- return -ENOMEM;
21632 ++ ret = -ENOMEM;
21633 ++ goto out_unlock;
21634 + }
21635 +
21636 + /* let's skip ibody header */
21637 +@@ -92,8 +124,12 @@ static int init_inode_xattrs(struct inode *inode)
21638 +
21639 + it.page = erofs_get_meta_page(sb,
21640 + ++it.blkaddr, S_ISDIR(inode->i_mode));
21641 +- if (IS_ERR(it.page))
21642 +- return PTR_ERR(it.page);
21643 ++ if (IS_ERR(it.page)) {
21644 ++ kfree(vi->xattr_shared_xattrs);
21645 ++ vi->xattr_shared_xattrs = NULL;
21646 ++ ret = PTR_ERR(it.page);
21647 ++ goto out_unlock;
21648 ++ }
21649 +
21650 + it.kaddr = kmap_atomic(it.page);
21651 + atomic_map = true;
21652 +@@ -105,8 +141,11 @@ static int init_inode_xattrs(struct inode *inode)
21653 + }
21654 + xattr_iter_end(&it, atomic_map);
21655 +
21656 +- inode_set_inited_xattr(inode);
21657 +- return 0;
21658 ++ set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
21659 ++
21660 ++out_unlock:
21661 ++ clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
21662 ++ return ret;
21663 + }
21664 +
21665 + /*
21666 +@@ -422,7 +461,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
21667 + struct dentry *unused, struct inode *inode,
21668 + const char *name, void *buffer, size_t size)
21669 + {
21670 +- struct erofs_vnode *const vi = EROFS_V(inode);
21671 + struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
21672 +
21673 + switch (handler->flags) {
21674 +@@ -440,9 +478,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
21675 + return -EINVAL;
21676 + }
21677 +
21678 +- if (!vi->xattr_isize)
21679 +- return -ENOATTR;
21680 +-
21681 + return erofs_getxattr(inode, handler->flags, name, buffer, size);
21682 + }
21683 +
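
The xattr.c change converts an ad-hoc flags int into atomic bit operations plus a bit lock, giving the usual double-checked one-time-init scheme: fast-path test, take the bit lock, re-test, initialize, publish, release. The real code uses wait_on_bit_lock() and clear_and_wake_up_bit(); this single-threaded sketch only shows the control flow, with invented bit names.

#include <stdio.h>

#define EA_INITED_BIT  0x1UL
#define BL_XATTR_BIT   (1UL << (8 * sizeof(unsigned long) - 1))
/* bit lock kept at the opposite end of the word, mirroring
 * EROFS_V_BL_XATTR_BIT vs EROFS_V_EA_INITED_BIT */

static unsigned long flags;

static int init_xattrs(void)
{
	if (flags & EA_INITED_BIT)     /* fast path: already done */
		return 0;

	flags |= BL_XATTR_BIT;         /* wait_on_bit_lock() analogue */
	if (flags & EA_INITED_BIT)     /* someone beat us to it? */
		goto out_unlock;

	/* ... parse the on-disk xattr area exactly once ... */
	flags |= EA_INITED_BIT;        /* publish the result */

out_unlock:
	flags &= ~BL_XATTR_BIT;        /* clear_and_wake_up_bit() analogue */
	return 0;
}

int main(void)
{
	init_xattrs();
	init_xattrs();                 /* second call takes the fast path */
	printf("flags = %#lx\n", flags);
	return 0;
}

Publishing the INITED bit only after the work is complete is what lets every error path bail out with the lock bit cleared and the init retried on the next call.
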
21684 +diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
21685 +index dc93e85808e0..7839d869d25d 100644
21686 +--- a/drivers/staging/iio/addac/adt7316.c
21687 ++++ b/drivers/staging/iio/addac/adt7316.c
21688 +@@ -651,17 +651,10 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
21689 + u8 config3;
21690 + int ret;
21691 +
21692 +- chip->dac_bits = 8;
21693 +-
21694 +- if (buf[0] == '1') {
21695 ++ if (buf[0] == '1')
21696 + config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
21697 +- if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
21698 +- chip->dac_bits = 12;
21699 +- else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
21700 +- chip->dac_bits = 10;
21701 +- } else {
21702 ++ else
21703 + config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
21704 +- }
21705 +
21706 + ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
21707 + if (ret)
21708 +@@ -2123,6 +2116,13 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
21709 + else
21710 + return -ENODEV;
21711 +
21712 ++ if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
21713 ++ chip->dac_bits = 12;
21714 ++ else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
21715 ++ chip->dac_bits = 10;
21716 ++ else
21717 ++ chip->dac_bits = 8;
21718 ++
21719 + chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac", GPIOD_OUT_LOW);
21720 + if (IS_ERR(chip->ldac_pin)) {
21721 + ret = PTR_ERR(chip->ldac_pin);
21722 +diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
21723 +index 28f41caba05d..fb442499f806 100644
21724 +--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
21725 ++++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
21726 +@@ -680,12 +680,23 @@ static int prp_start(struct prp_priv *priv)
21727 + goto out_free_nfb4eof_irq;
21728 + }
21729 +
21730 ++ /* start upstream */
21731 ++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
21732 ++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
21733 ++ if (ret) {
21734 ++ v4l2_err(&ic_priv->sd,
21735 ++ "upstream stream on failed: %d\n", ret);
21736 ++ goto out_free_eof_irq;
21737 ++ }
21738 ++
21739 + /* start the EOF timeout timer */
21740 + mod_timer(&priv->eof_timeout_timer,
21741 + jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
21742 +
21743 + return 0;
21744 +
21745 ++out_free_eof_irq:
21746 ++ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
21747 + out_free_nfb4eof_irq:
21748 + devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
21749 + out_unsetup:
21750 +@@ -717,6 +728,12 @@ static void prp_stop(struct prp_priv *priv)
21751 + if (ret == 0)
21752 + v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
21753 +
21754 ++ /* stop upstream */
21755 ++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
21756 ++ if (ret && ret != -ENOIOCTLCMD)
21757 ++ v4l2_warn(&ic_priv->sd,
21758 ++ "upstream stream off failed: %d\n", ret);
21759 ++
21760 + devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
21761 + devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
21762 +
21763 +@@ -1148,15 +1165,6 @@ static int prp_s_stream(struct v4l2_subdev *sd, int enable)
21764 + if (ret)
21765 + goto out;
21766 +
21767 +- /* start/stop upstream */
21768 +- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
21769 +- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
21770 +- if (ret) {
21771 +- if (enable)
21772 +- prp_stop(priv);
21773 +- goto out;
21774 +- }
21775 +-
21776 + update_count:
21777 + priv->stream_count += enable ? 1 : -1;
21778 + if (priv->stream_count < 0)
21779 +diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
21780 +index 4223f8d418ae..be1e9e52b2a0 100644
21781 +--- a/drivers/staging/media/imx/imx-media-csi.c
21782 ++++ b/drivers/staging/media/imx/imx-media-csi.c
21783 +@@ -629,7 +629,7 @@ out_put_ipu:
21784 + return ret;
21785 + }
21786 +
21787 +-static void csi_idmac_stop(struct csi_priv *priv)
21788 ++static void csi_idmac_wait_last_eof(struct csi_priv *priv)
21789 + {
21790 + unsigned long flags;
21791 + int ret;
21792 +@@ -646,7 +646,10 @@ static void csi_idmac_stop(struct csi_priv *priv)
21793 + &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
21794 + if (ret == 0)
21795 + v4l2_warn(&priv->sd, "wait last EOF timeout\n");
21796 ++}
21797 +
21798 ++static void csi_idmac_stop(struct csi_priv *priv)
21799 ++{
21800 + devm_free_irq(priv->dev, priv->eof_irq, priv);
21801 + devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
21802 +
21803 +@@ -722,10 +725,16 @@ static int csi_start(struct csi_priv *priv)
21804 +
21805 + output_fi = &priv->frame_interval[priv->active_output_pad];
21806 +
21807 ++ /* start upstream */
21808 ++ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
21809 ++ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
21810 ++ if (ret)
21811 ++ return ret;
21812 ++
21813 + if (priv->dest == IPU_CSI_DEST_IDMAC) {
21814 + ret = csi_idmac_start(priv);
21815 + if (ret)
21816 +- return ret;
21817 ++ goto stop_upstream;
21818 + }
21819 +
21820 + ret = csi_setup(priv);
21821 +@@ -753,11 +762,26 @@ fim_off:
21822 + idmac_stop:
21823 + if (priv->dest == IPU_CSI_DEST_IDMAC)
21824 + csi_idmac_stop(priv);
21825 ++stop_upstream:
21826 ++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
21827 + return ret;
21828 + }
21829 +
21830 + static void csi_stop(struct csi_priv *priv)
21831 + {
21832 ++ if (priv->dest == IPU_CSI_DEST_IDMAC)
21833 ++ csi_idmac_wait_last_eof(priv);
21834 ++
21835 ++ /*
21836 ++ * Disable the CSI asap, after syncing with the last EOF.
21837 ++	 * Doing so after the IDMA channel is disabled has been shown
21838 ++	 * to create hard system-wide hangs.
21839 ++ */
21840 ++ ipu_csi_disable(priv->csi);
21841 ++
21842 ++ /* stop upstream */
21843 ++ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
21844 ++
21845 + if (priv->dest == IPU_CSI_DEST_IDMAC) {
21846 + csi_idmac_stop(priv);
21847 +
21848 +@@ -765,8 +789,6 @@ static void csi_stop(struct csi_priv *priv)
21849 + if (priv->fim)
21850 + imx_media_fim_set_stream(priv->fim, NULL, false);
21851 + }
21852 +-
21853 +- ipu_csi_disable(priv->csi);
21854 + }
21855 +
21856 + static const struct csi_skip_desc csi_skip[12] = {
21857 +@@ -927,23 +949,13 @@ static int csi_s_stream(struct v4l2_subdev *sd, int enable)
21858 + goto update_count;
21859 +
21860 + if (enable) {
21861 +- /* upstream must be started first, before starting CSI */
21862 +- ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
21863 +- ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
21864 +- if (ret)
21865 +- goto out;
21866 +-
21867 + dev_dbg(priv->dev, "stream ON\n");
21868 + ret = csi_start(priv);
21869 +- if (ret) {
21870 +- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
21871 ++ if (ret)
21872 + goto out;
21873 +- }
21874 + } else {
21875 + dev_dbg(priv->dev, "stream OFF\n");
21876 +- /* CSI must be stopped first, then stop upstream */
21877 + csi_stop(priv);
21878 +- v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
21879 + }
21880 +
21881 + update_count:
21882 +@@ -1787,7 +1799,7 @@ static int imx_csi_parse_endpoint(struct device *dev,
21883 + struct v4l2_fwnode_endpoint *vep,
21884 + struct v4l2_async_subdev *asd)
21885 + {
21886 +- return fwnode_device_is_available(asd->match.fwnode) ? 0 : -EINVAL;
21887 ++ return fwnode_device_is_available(asd->match.fwnode) ? 0 : -ENOTCONN;
21888 + }
21889 +
21890 + static int imx_csi_async_register(struct csi_priv *priv)
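
Moving the upstream s_stream calls into csi_start() and csi_stop() lets the error handling unwind in strict reverse order of setup, the usual goto-ladder idiom. A compilable skeleton of that ladder, with stub steps standing in for the subdev calls:

#include <stdio.h>

static int upstream_on(void)   { return 0; }
static void upstream_off(void) { }
static int idmac_start(void)   { return -1; }  /* force a failure */
static void idmac_stop(void)   { }
static int setup(void)         { return 0; }

/* each successful step gains a matching unwind label below it */
static int csi_start(void)
{
	int ret = upstream_on();

	if (ret)
		return ret;

	ret = idmac_start();
	if (ret)
		goto stop_upstream;

	ret = setup();
	if (ret)
		goto stop_idmac;

	return 0;

stop_idmac:
	idmac_stop();
stop_upstream:
	upstream_off();
	return ret;
}

int main(void)
{
	printf("csi_start = %d\n", csi_start());
	return 0;
}
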
21891 +diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
21892 +index 5282236d1bb1..06daea66fb49 100644
21893 +--- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
21894 ++++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
21895 +@@ -80,7 +80,7 @@ rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
21896 + void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21897 + {
21898 + struct rockchip_vpu_dev *vpu = ctx->dev;
21899 +- struct vb2_buffer *src_buf, *dst_buf;
21900 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
21901 + struct rockchip_vpu_jpeg_ctx jpeg_ctx;
21902 + u32 reg;
21903 +
21904 +@@ -88,7 +88,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21905 + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
21906 +
21907 + memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
21908 +- jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
21909 ++ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
21910 + jpeg_ctx.width = ctx->dst_fmt.width;
21911 + jpeg_ctx.height = ctx->dst_fmt.height;
21912 + jpeg_ctx.quality = ctx->jpeg_quality;
21913 +@@ -99,7 +99,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21914 + VEPU_REG_ENC_CTRL);
21915 +
21916 + rk3288_vpu_set_src_img_ctrl(vpu, ctx);
21917 +- rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
21918 ++ rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
21919 + rk3288_vpu_jpeg_enc_set_qtable(vpu,
21920 + rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
21921 + rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
21922 +diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
21923 +index dbc86d95fe3b..3d438797692e 100644
21924 +--- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
21925 ++++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
21926 +@@ -111,7 +111,7 @@ rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
21927 + void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21928 + {
21929 + struct rockchip_vpu_dev *vpu = ctx->dev;
21930 +- struct vb2_buffer *src_buf, *dst_buf;
21931 ++ struct vb2_v4l2_buffer *src_buf, *dst_buf;
21932 + struct rockchip_vpu_jpeg_ctx jpeg_ctx;
21933 + u32 reg;
21934 +
21935 +@@ -119,7 +119,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21936 + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
21937 +
21938 + memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
21939 +- jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
21940 ++ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
21941 + jpeg_ctx.width = ctx->dst_fmt.width;
21942 + jpeg_ctx.height = ctx->dst_fmt.height;
21943 + jpeg_ctx.quality = ctx->jpeg_quality;
21944 +@@ -130,7 +130,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
21945 + VEPU_REG_ENCODE_START);
21946 +
21947 + rk3399_vpu_set_src_img_ctrl(vpu, ctx);
21948 +- rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
21949 ++ rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
21950 + rk3399_vpu_jpeg_enc_set_qtable(vpu,
21951 + rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
21952 + rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
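
Both the RK3288 and RK3399 variants get the same type fix:
v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() return struct
vb2_v4l2_buffer pointers, which embed the raw struct vb2_buffer.
Plain-vb2 helpers are therefore handed the embedded member, roughly:

    struct vb2_v4l2_buffer *src_buf, *dst_buf;

    src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
    dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

    /* vb2 plane helpers operate on the embedded vb2_buffer */
    jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);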
21953 +diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
21954 +index 513b6e79b985..e1f50efd0922 100644
21955 +--- a/drivers/staging/mt7621-spi/spi-mt7621.c
21956 ++++ b/drivers/staging/mt7621-spi/spi-mt7621.c
21957 +@@ -330,6 +330,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
21958 + int status = 0;
21959 + struct clk *clk;
21960 + struct mt7621_spi_ops *ops;
21961 ++ int ret;
21962 +
21963 + match = of_match_device(mt7621_spi_match, &pdev->dev);
21964 + if (!match)
21965 +@@ -377,7 +378,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
21966 + rs->pending_write = 0;
21967 + dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
21968 +
21969 +- device_reset(&pdev->dev);
21970 ++ ret = device_reset(&pdev->dev);
21971 ++ if (ret) {
21972 ++ dev_err(&pdev->dev, "SPI reset failed!\n");
21973 ++ return ret;
21974 ++ }
21975 +
21976 + mt7621_spi_reset(rs);
21977 +
21978 +diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
21979 +index 80b8d4153414..a54286498a47 100644
21980 +--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
21981 ++++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
21982 +@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
21983 + {
21984 + unsigned char lob;
21985 + int ret, i;
21986 +- struct dcon_gpio *pin = &gpios_asis[0];
21987 ++ const struct dcon_gpio *pin = &gpios_asis[0];
21988 +
21989 + for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
21990 + gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
21991 +diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
21992 +index 947c79532e10..d5383974d40e 100644
21993 +--- a/drivers/staging/speakup/speakup_soft.c
21994 ++++ b/drivers/staging/speakup/speakup_soft.c
21995 +@@ -208,12 +208,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
21996 + return -EINVAL;
21997 +
21998 + spin_lock_irqsave(&speakup_info.spinlock, flags);
21999 ++ synth_soft.alive = 1;
22000 + while (1) {
22001 + prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
22002 +- if (!unicode)
22003 +- synth_buffer_skip_nonlatin1();
22004 +- if (!synth_buffer_empty() || speakup_info.flushing)
22005 +- break;
22006 ++ if (synth_current() == &synth_soft) {
22007 ++ if (!unicode)
22008 ++ synth_buffer_skip_nonlatin1();
22009 ++ if (!synth_buffer_empty() || speakup_info.flushing)
22010 ++ break;
22011 ++ }
22012 + spin_unlock_irqrestore(&speakup_info.spinlock, flags);
22013 + if (fp->f_flags & O_NONBLOCK) {
22014 + finish_wait(&speakup_event, &wait);
22015 +@@ -233,6 +236,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
22016 +
22017 + /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
22018 + while (chars_sent <= count - bytes_per_ch) {
22019 ++ if (synth_current() != &synth_soft)
22020 ++ break;
22021 + if (speakup_info.flushing) {
22022 + speakup_info.flushing = 0;
22023 + ch = '\x18';
22024 +@@ -329,7 +334,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
22025 + poll_wait(fp, &speakup_event, wait);
22026 +
22027 + spin_lock_irqsave(&speakup_info.spinlock, flags);
22028 +- if (!synth_buffer_empty() || speakup_info.flushing)
22029 ++ if (synth_current() == &synth_soft &&
22030 ++ (!synth_buffer_empty() || speakup_info.flushing))
22031 + ret = EPOLLIN | EPOLLRDNORM;
22032 + spin_unlock_irqrestore(&speakup_info.spinlock, flags);
22033 + return ret;
22034 +diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
22035 +index c8e688878fc7..ac6a74883af4 100644
22036 +--- a/drivers/staging/speakup/spk_priv.h
22037 ++++ b/drivers/staging/speakup/spk_priv.h
22038 +@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
22039 + int synth_release_region(unsigned long start, unsigned long n);
22040 + int synth_add(struct spk_synth *in_synth);
22041 + void synth_remove(struct spk_synth *in_synth);
22042 ++struct spk_synth *synth_current(void);
22043 +
22044 + extern struct speakup_info_t speakup_info;
22045 +
22046 +diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
22047 +index 25f259ee4ffc..3568bfb89912 100644
22048 +--- a/drivers/staging/speakup/synth.c
22049 ++++ b/drivers/staging/speakup/synth.c
22050 +@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
22051 + }
22052 + EXPORT_SYMBOL_GPL(synth_remove);
22053 +
22054 ++struct spk_synth *synth_current(void)
22055 ++{
22056 ++ return synth;
22057 ++}
22058 ++EXPORT_SYMBOL_GPL(synth_current);
22059 ++
22060 + short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
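
The new synth_current() accessor lets the soft synth check that it is
actually the active synthesizer before it consumes buffered characters
or reports readiness; the read, copy and poll paths above all gain the
same guard, e.g. in softsynth_poll():

    spin_lock_irqsave(&speakup_info.spinlock, flags);
    if (synth_current() == &synth_soft &&
        (!synth_buffer_empty() || speakup_info.flushing))
            ret = EPOLLIN | EPOLLRDNORM;
    spin_unlock_irqrestore(&speakup_info.spinlock, flags);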
22061 +diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
22062 +index c9097e7367d8..2e28fbcdfe8e 100644
22063 +--- a/drivers/staging/vt6655/device_main.c
22064 ++++ b/drivers/staging/vt6655/device_main.c
22065 +@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
22066 + return;
22067 + }
22068 +
22069 +- MACvIntDisable(priv->PortOffset);
22070 +-
22071 + spin_lock_irqsave(&priv->lock, flags);
22072 +
22073 + /* Read low level stats */
22074 +@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
22075 + }
22076 +
22077 + spin_unlock_irqrestore(&priv->lock, flags);
22078 +-
22079 +- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
22080 + }
22081 +
22082 + static void vnt_interrupt_work(struct work_struct *work)
22083 +@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
22084 +
22085 + if (priv->vif)
22086 + vnt_interrupt_process(priv);
22087 ++
22088 ++ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
22089 + }
22090 +
22091 + static irqreturn_t vnt_interrupt(int irq, void *arg)
22092 + {
22093 + struct vnt_private *priv = arg;
22094 +
22095 +- if (priv->vif)
22096 +- schedule_work(&priv->interrupt_work);
22097 ++ schedule_work(&priv->interrupt_work);
22098 ++
22099 ++ MACvIntDisable(priv->PortOffset);
22100 +
22101 + return IRQ_HANDLED;
22102 + }
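
The net effect of the vt6655 hunks is a conventional top-half/bottom-half
split: the hard interrupt handler only masks the device and schedules the
work item, and the source is unmasked again once the work has run. In
outline (the container_of() lookup is the usual idiom and is assumed
here, not quoted from the hunks):

    static irqreturn_t vnt_interrupt(int irq, void *arg)
    {
            struct vnt_private *priv = arg;

            schedule_work(&priv->interrupt_work);
            MACvIntDisable(priv->PortOffset);  /* masked until work runs */

            return IRQ_HANDLED;
    }

    static void vnt_interrupt_work(struct work_struct *work)
    {
            struct vnt_private *priv =
                    container_of(work, struct vnt_private, interrupt_work);

            if (priv->vif)
                    vnt_interrupt_process(priv);

            MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);  /* unmask */
    }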
22103 +diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
22104 +index 721689048648..5e5149c9a92d 100644
22105 +--- a/drivers/staging/wilc1000/linux_wlan.c
22106 ++++ b/drivers/staging/wilc1000/linux_wlan.c
22107 +@@ -1086,8 +1086,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
22108 + vif->wilc = *wilc;
22109 + vif->ndev = ndev;
22110 + wl->vif[i] = vif;
22111 +- wl->vif_num = i;
22112 +- vif->idx = wl->vif_num;
22113 ++ wl->vif_num = i + 1;
22114 ++ vif->idx = i;
22115 +
22116 + ndev->netdev_ops = &wilc_netdev_ops;
22117 +
22118 +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
22119 +index bd15a564fe24..3ad2659630e8 100644
22120 +--- a/drivers/target/iscsi/iscsi_target.c
22121 ++++ b/drivers/target/iscsi/iscsi_target.c
22122 +@@ -4040,9 +4040,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
22123 + struct se_cmd *se_cmd = &cmd->se_cmd;
22124 +
22125 + if (se_cmd->se_tfo != NULL) {
22126 +- spin_lock(&se_cmd->t_state_lock);
22127 ++ spin_lock_irq(&se_cmd->t_state_lock);
22128 + se_cmd->transport_state |= CMD_T_FABRIC_STOP;
22129 +- spin_unlock(&se_cmd->t_state_lock);
22130 ++ spin_unlock_irq(&se_cmd->t_state_lock);
22131 + }
22132 + }
22133 + spin_unlock_bh(&conn->cmd_lock);
22134 +diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
22135 +index 0840d27381ea..e0a04bfc873e 100644
22136 +--- a/drivers/tty/Kconfig
22137 ++++ b/drivers/tty/Kconfig
22138 +@@ -441,4 +441,28 @@ config VCC
22139 + depends on SUN_LDOMS
22140 + help
22141 + Support for Sun logical domain consoles.
22142 ++
22143 ++config LDISC_AUTOLOAD
22144 ++ bool "Automatically load TTY Line Disciplines"
22145 ++ default y
22146 ++ help
22147 ++ Historically the kernel has always automatically loaded any
22148 ++ line discipline that is in a kernel module when a user asks
22149 ++ for it to be loaded with the TIOCSETD ioctl, or through other
22150 ++ means. This is not always the best thing to do on systems
22151 ++ where you know you will not be using some of the more
22152 ++ "ancient" line disciplines, so prevent the kernel from doing
22153 ++ this unless the request is coming from a process with the
22154 ++ CAP_SYS_MODULE capability.
22155 ++
22156 ++ Say 'Y' here if you trust your userspace users to do the right
22157 ++ thing, or if you have only provided the line disciplines that
22158 ++ you know you will be using, or if you wish to continue to use
22159 ++ the traditional method of on-demand loading of these modules
22160 ++ by any user.
22161 ++
22162 ++ This functionality can be changed at runtime with the
22163 ++ dev.tty.ldisc_autoload sysctl; this configuration option
22164 ++ only sets the default value of this functionality.
22165 ++
22166 + endif # TTY
22167 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
22168 +index a1a85805d010..2488de1c4bc4 100644
22169 +--- a/drivers/tty/serial/8250/8250_of.c
22170 ++++ b/drivers/tty/serial/8250/8250_of.c
22171 +@@ -130,6 +130,10 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
22172 + port->flags |= UPF_IOREMAP;
22173 + }
22174 +
22175 ++ /* Compatibility with the deprecated pxa and 8250_pxa drivers. */
22176 ++ if (of_device_is_compatible(np, "mrvl,mmp-uart"))
22177 ++ port->regshift = 2;
22178 ++
22179 + /* Check for registers offset within the devices address range */
22180 + if (of_property_read_u32(np, "reg-shift", &prop) == 0)
22181 + port->regshift = prop;
22182 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
22183 +index 48bd694a5fa1..bbe5cba21522 100644
22184 +--- a/drivers/tty/serial/8250/8250_pci.c
22185 ++++ b/drivers/tty/serial/8250/8250_pci.c
22186 +@@ -2027,6 +2027,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
22187 + .setup = pci_default_setup,
22188 + .exit = pci_plx9050_exit,
22189 + },
22190 ++ {
22191 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22192 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
22193 ++ .subvendor = PCI_ANY_ID,
22194 ++ .subdevice = PCI_ANY_ID,
22195 ++ .setup = pci_pericom_setup,
22196 ++ },
22197 ++ {
22198 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22199 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
22200 ++ .subvendor = PCI_ANY_ID,
22201 ++ .subdevice = PCI_ANY_ID,
22202 ++ .setup = pci_pericom_setup,
22203 ++ },
22204 ++ {
22205 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22206 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
22207 ++ .subvendor = PCI_ANY_ID,
22208 ++ .subdevice = PCI_ANY_ID,
22209 ++ .setup = pci_pericom_setup,
22210 ++ },
22211 ++ {
22212 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22213 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
22214 ++ .subvendor = PCI_ANY_ID,
22215 ++ .subdevice = PCI_ANY_ID,
22216 ++ .setup = pci_pericom_setup,
22217 ++ },
22218 ++ {
22219 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22220 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
22221 ++ .subvendor = PCI_ANY_ID,
22222 ++ .subdevice = PCI_ANY_ID,
22223 ++ .setup = pci_pericom_setup,
22224 ++ },
22225 ++ {
22226 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22227 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
22228 ++ .subvendor = PCI_ANY_ID,
22229 ++ .subdevice = PCI_ANY_ID,
22230 ++ .setup = pci_pericom_setup,
22231 ++ },
22232 ++ {
22233 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22234 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
22235 ++ .subvendor = PCI_ANY_ID,
22236 ++ .subdevice = PCI_ANY_ID,
22237 ++ .setup = pci_pericom_setup,
22238 ++ },
22239 ++ {
22240 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22241 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
22242 ++ .subvendor = PCI_ANY_ID,
22243 ++ .subdevice = PCI_ANY_ID,
22244 ++ .setup = pci_pericom_setup,
22245 ++ },
22246 ++ {
22247 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22248 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
22249 ++ .subvendor = PCI_ANY_ID,
22250 ++ .subdevice = PCI_ANY_ID,
22251 ++ .setup = pci_pericom_setup,
22252 ++ },
22253 ++ {
22254 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22255 ++ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
22256 ++ .subvendor = PCI_ANY_ID,
22257 ++ .subdevice = PCI_ANY_ID,
22258 ++ .setup = pci_pericom_setup,
22259 ++ },
22260 ++ {
22261 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22262 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
22263 ++ .subvendor = PCI_ANY_ID,
22264 ++ .subdevice = PCI_ANY_ID,
22265 ++ .setup = pci_pericom_setup,
22266 ++ },
22267 ++ {
22268 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22269 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
22270 ++ .subvendor = PCI_ANY_ID,
22271 ++ .subdevice = PCI_ANY_ID,
22272 ++ .setup = pci_pericom_setup,
22273 ++ },
22274 ++ {
22275 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22276 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
22277 ++ .subvendor = PCI_ANY_ID,
22278 ++ .subdevice = PCI_ANY_ID,
22279 ++ .setup = pci_pericom_setup,
22280 ++ },
22281 ++ {
22282 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22283 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
22284 ++ .subvendor = PCI_ANY_ID,
22285 ++ .subdevice = PCI_ANY_ID,
22286 ++ .setup = pci_pericom_setup,
22287 ++ },
22288 ++ {
22289 ++ .vendor = PCI_VENDOR_ID_ACCESIO,
22290 ++ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
22291 ++ .subvendor = PCI_ANY_ID,
22292 ++ .subdevice = PCI_ANY_ID,
22293 ++ .setup = pci_pericom_setup,
22294 ++ },
22295 + /*
22296 + * SBS Technologies, Inc., PMC-OCTALPRO 232
22297 + */
22298 +@@ -4575,10 +4680,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
22299 + */
22300 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
22301 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22302 +- pbn_pericom_PI7C9X7954 },
22303 ++ pbn_pericom_PI7C9X7952 },
22304 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
22305 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22306 +- pbn_pericom_PI7C9X7954 },
22307 ++ pbn_pericom_PI7C9X7952 },
22308 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
22309 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22310 + pbn_pericom_PI7C9X7954 },
22311 +@@ -4587,10 +4692,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
22312 + pbn_pericom_PI7C9X7954 },
22313 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
22314 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22315 +- pbn_pericom_PI7C9X7954 },
22316 ++ pbn_pericom_PI7C9X7952 },
22317 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
22318 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22319 +- pbn_pericom_PI7C9X7954 },
22320 ++ pbn_pericom_PI7C9X7952 },
22321 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
22322 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22323 + pbn_pericom_PI7C9X7954 },
22324 +@@ -4599,10 +4704,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
22325 + pbn_pericom_PI7C9X7954 },
22326 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
22327 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22328 +- pbn_pericom_PI7C9X7954 },
22329 ++ pbn_pericom_PI7C9X7952 },
22330 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
22331 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22332 +- pbn_pericom_PI7C9X7954 },
22333 ++ pbn_pericom_PI7C9X7952 },
22334 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
22335 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22336 + pbn_pericom_PI7C9X7954 },
22337 +@@ -4611,13 +4716,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
22338 + pbn_pericom_PI7C9X7954 },
22339 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
22340 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22341 +- pbn_pericom_PI7C9X7954 },
22342 ++ pbn_pericom_PI7C9X7951 },
22343 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
22344 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22345 +- pbn_pericom_PI7C9X7954 },
22346 ++ pbn_pericom_PI7C9X7952 },
22347 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
22348 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22349 +- pbn_pericom_PI7C9X7954 },
22350 ++ pbn_pericom_PI7C9X7952 },
22351 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
22352 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22353 + pbn_pericom_PI7C9X7954 },
22354 +@@ -4626,16 +4731,16 @@ static const struct pci_device_id serial_pci_tbl[] = {
22355 + pbn_pericom_PI7C9X7954 },
22356 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
22357 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22358 +- pbn_pericom_PI7C9X7954 },
22359 ++ pbn_pericom_PI7C9X7952 },
22360 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
22361 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22362 + pbn_pericom_PI7C9X7954 },
22363 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
22364 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22365 +- pbn_pericom_PI7C9X7954 },
22366 ++ pbn_pericom_PI7C9X7952 },
22367 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
22368 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22369 +- pbn_pericom_PI7C9X7954 },
22370 ++ pbn_pericom_PI7C9X7952 },
22371 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
22372 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22373 + pbn_pericom_PI7C9X7954 },
22374 +@@ -4644,13 +4749,13 @@ static const struct pci_device_id serial_pci_tbl[] = {
22375 + pbn_pericom_PI7C9X7954 },
22376 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
22377 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22378 +- pbn_pericom_PI7C9X7954 },
22379 ++ pbn_pericom_PI7C9X7952 },
22380 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
22381 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22382 +- pbn_pericom_PI7C9X7958 },
22383 ++ pbn_pericom_PI7C9X7954 },
22384 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
22385 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22386 +- pbn_pericom_PI7C9X7958 },
22387 ++ pbn_pericom_PI7C9X7954 },
22388 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
22389 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22390 + pbn_pericom_PI7C9X7958 },
22391 +@@ -4659,19 +4764,19 @@ static const struct pci_device_id serial_pci_tbl[] = {
22392 + pbn_pericom_PI7C9X7958 },
22393 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
22394 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22395 +- pbn_pericom_PI7C9X7958 },
22396 ++ pbn_pericom_PI7C9X7954 },
22397 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
22398 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22399 + pbn_pericom_PI7C9X7958 },
22400 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
22401 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22402 +- pbn_pericom_PI7C9X7958 },
22403 ++ pbn_pericom_PI7C9X7954 },
22404 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
22405 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22406 + pbn_pericom_PI7C9X7958 },
22407 + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
22408 + PCI_ANY_ID, PCI_ANY_ID, 0, 0,
22409 +- pbn_pericom_PI7C9X7958 },
22410 ++ pbn_pericom_PI7C9X7954 },
22411 + /*
22412 + * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
22413 + */
22414 +diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
22415 +index b9bcbe20a2be..c47188860e32 100644
22416 +--- a/drivers/tty/serial/8250/8250_pxa.c
22417 ++++ b/drivers/tty/serial/8250/8250_pxa.c
22418 +@@ -113,6 +113,10 @@ static int serial_pxa_probe(struct platform_device *pdev)
22419 + if (ret)
22420 + return ret;
22421 +
22422 ++ ret = of_alias_get_id(pdev->dev.of_node, "serial");
22423 ++ if (ret >= 0)
22424 ++ uart.port.line = ret;
22425 ++
22426 + uart.port.type = PORT_XSCALE;
22427 + uart.port.iotype = UPIO_MEM32;
22428 + uart.port.mapbase = mmres->start;
22429 +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
22430 +index 05147fe24343..0b4f36905321 100644
22431 +--- a/drivers/tty/serial/atmel_serial.c
22432 ++++ b/drivers/tty/serial/atmel_serial.c
22433 +@@ -166,6 +166,8 @@ struct atmel_uart_port {
22434 + unsigned int pending_status;
22435 + spinlock_t lock_suspended;
22436 +
22437 ++ bool hd_start_rx; /* can start RX during half-duplex operation */
22438 ++
22439 + /* ISO7816 */
22440 + unsigned int fidi_min;
22441 + unsigned int fidi_max;
22442 +@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
22443 + __raw_writeb(value, port->membase + ATMEL_US_THR);
22444 + }
22445 +
22446 ++static inline int atmel_uart_is_half_duplex(struct uart_port *port)
22447 ++{
22448 ++ return ((port->rs485.flags & SER_RS485_ENABLED) &&
22449 ++ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
22450 ++ (port->iso7816.flags & SER_ISO7816_ENABLED);
22451 ++}
22452 ++
22453 + #ifdef CONFIG_SERIAL_ATMEL_PDC
22454 + static bool atmel_use_pdc_rx(struct uart_port *port)
22455 + {
22456 +@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
22457 + /* Disable interrupts */
22458 + atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
22459 +
22460 +- if (((port->rs485.flags & SER_RS485_ENABLED) &&
22461 +- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
22462 +- port->iso7816.flags & SER_ISO7816_ENABLED)
22463 ++ if (atmel_uart_is_half_duplex(port))
22464 + atmel_start_rx(port);
22465 ++
22466 + }
22467 +
22468 + /*
22469 +@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
22470 + return;
22471 +
22472 + if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
22473 +- if (((port->rs485.flags & SER_RS485_ENABLED) &&
22474 +- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
22475 +- port->iso7816.flags & SER_ISO7816_ENABLED)
22476 ++ if (atmel_uart_is_half_duplex(port))
22477 + atmel_stop_rx(port);
22478 +
22479 + if (atmel_use_pdc_tx(port))
22480 +@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
22481 + */
22482 + if (!uart_circ_empty(xmit))
22483 + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
22484 +- else if (((port->rs485.flags & SER_RS485_ENABLED) &&
22485 +- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
22486 +- port->iso7816.flags & SER_ISO7816_ENABLED) {
22487 +- /* DMA done, stop TX, start RX for RS485 */
22488 +- atmel_start_rx(port);
22489 ++ else if (atmel_uart_is_half_duplex(port)) {
22490 ++ /*
22491 ++ * DMA done, re-enable TXEMPTY and signal that we can stop
22492 ++ * TX and start RX for RS485
22493 ++ */
22494 ++ atmel_port->hd_start_rx = true;
22495 ++ atmel_uart_writel(port, ATMEL_US_IER,
22496 ++ atmel_port->tx_done_mask);
22497 + }
22498 +
22499 + spin_unlock_irqrestore(&port->lock, flags);
22500 +@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
22501 + sg_dma_len(&atmel_port->sg_rx)/2,
22502 + DMA_DEV_TO_MEM,
22503 + DMA_PREP_INTERRUPT);
22504 ++ if (!desc) {
22505 ++ dev_err(port->dev, "Preparing DMA cyclic failed\n");
22506 ++ goto chan_err;
22507 ++ }
22508 + desc->callback = atmel_complete_rx_dma;
22509 + desc->callback_param = port;
22510 + atmel_port->desc_rx = desc;
22511 +@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
22512 + struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
22513 +
22514 + if (pending & atmel_port->tx_done_mask) {
22515 +- /* Either PDC or interrupt transmission */
22516 + atmel_uart_writel(port, ATMEL_US_IDR,
22517 + atmel_port->tx_done_mask);
22518 ++
22519 ++ /* Start RX if flag was set and FIFO is empty */
22520 ++ if (atmel_port->hd_start_rx) {
22521 ++ if (!(atmel_uart_readl(port, ATMEL_US_CSR)
22522 ++ & ATMEL_US_TXEMPTY))
22523 ++ dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
22524 ++
22525 ++ atmel_port->hd_start_rx = false;
22526 ++ atmel_start_rx(port);
22527 ++ return;
22528 ++ }
22529 ++
22530 + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
22531 + }
22532 + }
22533 +@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
22534 + atmel_uart_writel(port, ATMEL_US_IER,
22535 + atmel_port->tx_done_mask);
22536 + } else {
22537 +- if (((port->rs485.flags & SER_RS485_ENABLED) &&
22538 +- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
22539 +- port->iso7816.flags & SER_ISO7816_ENABLED) {
22540 ++ if (atmel_uart_is_half_duplex(port)) {
22541 + /* DMA done, stop TX, start RX for RS485 */
22542 + atmel_start_rx(port);
22543 + }
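
For half-duplex links the TX-to-RX turnaround is now deferred rather
than done straight from the DMA completion callback: the callback only
sets hd_start_rx and re-arms the TXEMPTY interrupt, and the interrupt
handler performs the switch once the transmitter has actually drained.
Roughly:

    /* atmel_complete_tx_dma(), half-duplex case */
    atmel_port->hd_start_rx = true;
    atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);

    /* later, atmel_handle_transmit() on TXEMPTY */
    if (atmel_port->hd_start_rx) {
            atmel_port->hd_start_rx = false;
            atmel_start_rx(port);   /* FIFO empty, safe to turn around */
    }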
22544 +diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
22545 +index 6fb312e7af71..bfe5e9e034ec 100644
22546 +--- a/drivers/tty/serial/kgdboc.c
22547 ++++ b/drivers/tty/serial/kgdboc.c
22548 +@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
22549 + char *cptr = config;
22550 + struct console *cons;
22551 +
22552 +- if (!strlen(config) || isspace(config[0]))
22553 ++ if (!strlen(config) || isspace(config[0])) {
22554 ++ err = 0;
22555 + goto noconfig;
22556 ++ }
22557 +
22558 + kgdboc_io_ops.is_console = 0;
22559 + kgdb_tty_driver = NULL;
22560 +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
22561 +index 4f479841769a..0fdf3a760aa0 100644
22562 +--- a/drivers/tty/serial/max310x.c
22563 ++++ b/drivers/tty/serial/max310x.c
22564 +@@ -1416,6 +1416,8 @@ static int max310x_spi_probe(struct spi_device *spi)
22565 + if (spi->dev.of_node) {
22566 + const struct of_device_id *of_id =
22567 + of_match_device(max310x_dt_ids, &spi->dev);
22568 ++ if (!of_id)
22569 ++ return -ENODEV;
22570 +
22571 + devtype = (struct max310x_devtype *)of_id->data;
22572 + } else {
22573 +diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
22574 +index 231f751d1ef4..7e7b1559fa36 100644
22575 +--- a/drivers/tty/serial/mvebu-uart.c
22576 ++++ b/drivers/tty/serial/mvebu-uart.c
22577 +@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
22578 + return -EINVAL;
22579 + }
22580 +
22581 ++ if (!match)
22582 ++ return -ENODEV;
22583 ++
22584 + /* Assume that all UART ports have a DT alias or none has */
22585 + id = of_alias_get_id(pdev->dev.of_node, "serial");
22586 + if (!pdev->dev.of_node || id < 0)
22587 +diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
22588 +index 27235a526cce..4c188f4079b3 100644
22589 +--- a/drivers/tty/serial/mxs-auart.c
22590 ++++ b/drivers/tty/serial/mxs-auart.c
22591 +@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
22592 +
22593 + s->port.mapbase = r->start;
22594 + s->port.membase = ioremap(r->start, resource_size(r));
22595 ++ if (!s->port.membase) {
22596 ++ ret = -ENOMEM;
22597 ++ goto out_disable_clks;
22598 ++ }
22599 + s->port.ops = &mxs_auart_ops;
22600 + s->port.iotype = UPIO_MEM;
22601 + s->port.fifosize = MXS_AUART_FIFO_SIZE;
22602 +diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
22603 +index 38016609c7fa..d30502c58106 100644
22604 +--- a/drivers/tty/serial/qcom_geni_serial.c
22605 ++++ b/drivers/tty/serial/qcom_geni_serial.c
22606 +@@ -1117,7 +1117,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
22607 + {
22608 + struct uart_port *uport;
22609 + struct qcom_geni_serial_port *port;
22610 +- int baud;
22611 ++ int baud = 9600;
22612 + int bits = 8;
22613 + int parity = 'n';
22614 + int flow = 'n';
22615 +diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
22616 +index 64bbeb7d7e0c..93bd90f1ff14 100644
22617 +--- a/drivers/tty/serial/sh-sci.c
22618 ++++ b/drivers/tty/serial/sh-sci.c
22619 +@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
22620 +
22621 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
22622 + uart_write_wakeup(port);
22623 +- if (uart_circ_empty(xmit)) {
22624 ++ if (uart_circ_empty(xmit))
22625 + sci_stop_tx(port);
22626 +- } else {
22627 +- ctrl = serial_port_in(port, SCSCR);
22628 +-
22629 +- if (port->type != PORT_SCI) {
22630 +- serial_port_in(port, SCxSR); /* Dummy read */
22631 +- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
22632 +- }
22633 +
22634 +- ctrl |= SCSCR_TIE;
22635 +- serial_port_out(port, SCSCR, ctrl);
22636 +- }
22637 + }
22638 +
22639 + /* On SH3, SCIF may read end-of-break as a space->mark char */
22640 +diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
22641 +index 094f2958cb2b..ee9f18c52d29 100644
22642 +--- a/drivers/tty/serial/xilinx_uartps.c
22643 ++++ b/drivers/tty/serial/xilinx_uartps.c
22644 +@@ -364,7 +364,13 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
22645 + cdns_uart_handle_tx(dev_id);
22646 + isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
22647 + }
22648 +- if (isrstatus & CDNS_UART_IXR_RXMASK)
22649 ++
22650 ++ /*
22651 ++ * Skip RX processing if RX is disabled, as RXEMPTY will never be
22652 ++ * set since read bytes are not removed from the FIFO.
22653 ++ */
22654 ++ if (isrstatus & CDNS_UART_IXR_RXMASK &&
22655 ++ !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS))
22656 + cdns_uart_handle_rx(dev_id, isrstatus);
22657 +
22658 + spin_unlock(&port->lock);
22659 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
22660 +index 77070c2d1240..ec145a59f199 100644
22661 +--- a/drivers/tty/tty_buffer.c
22662 ++++ b/drivers/tty/tty_buffer.c
22663 +@@ -26,7 +26,7 @@
22664 + * Byte threshold to limit memory consumption for flip buffers.
22665 + * The actual memory limit is > 2x this amount.
22666 + */
22667 +-#define TTYB_DEFAULT_MEM_LIMIT 65536
22668 ++#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
22669 +
22670 + /*
22671 + * We default to dicing tty buffer allocations to this many characters
22672 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
22673 +index 21ffcce16927..5fa250157025 100644
22674 +--- a/drivers/tty/tty_io.c
22675 ++++ b/drivers/tty/tty_io.c
22676 +@@ -513,6 +513,8 @@ static const struct file_operations hung_up_tty_fops = {
22677 + static DEFINE_SPINLOCK(redirect_lock);
22678 + static struct file *redirect;
22679 +
22680 ++extern void tty_sysctl_init(void);
22681 ++
22682 + /**
22683 + * tty_wakeup - request more data
22684 + * @tty: terminal
22685 +@@ -3483,6 +3485,7 @@ void console_sysfs_notify(void)
22686 + */
22687 + int __init tty_init(void)
22688 + {
22689 ++ tty_sysctl_init();
22690 + cdev_init(&tty_cdev, &tty_fops);
22691 + if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
22692 + register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
22693 +diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
22694 +index 45eda69b150c..e38f104db174 100644
22695 +--- a/drivers/tty/tty_ldisc.c
22696 ++++ b/drivers/tty/tty_ldisc.c
22697 +@@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
22698 + * takes tty_ldiscs_lock to guard against ldisc races
22699 + */
22700 +
22701 ++#if defined(CONFIG_LDISC_AUTOLOAD)
22702 ++ #define INITIAL_AUTOLOAD_STATE 1
22703 ++#else
22704 ++ #define INITIAL_AUTOLOAD_STATE 0
22705 ++#endif
22706 ++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
22707 ++
22708 + static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
22709 + {
22710 + struct tty_ldisc *ld;
22711 +@@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
22712 + */
22713 + ldops = get_ldops(disc);
22714 + if (IS_ERR(ldops)) {
22715 ++ if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
22716 ++ return ERR_PTR(-EPERM);
22717 + request_module("tty-ldisc-%d", disc);
22718 + ldops = get_ldops(disc);
22719 + if (IS_ERR(ldops))
22720 +@@ -845,3 +854,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
22721 + tty_ldisc_put(tty->ldisc);
22722 + tty->ldisc = NULL;
22723 + }
22724 ++
22725 ++static int zero;
22726 ++static int one = 1;
22727 ++static struct ctl_table tty_table[] = {
22728 ++ {
22729 ++ .procname = "ldisc_autoload",
22730 ++ .data = &tty_ldisc_autoload,
22731 ++ .maxlen = sizeof(tty_ldisc_autoload),
22732 ++ .mode = 0644,
22733 ++ .proc_handler = proc_dointvec,
22734 ++ .extra1 = &zero,
22735 ++ .extra2 = &one,
22736 ++ },
22737 ++ { }
22738 ++};
22739 ++
22740 ++static struct ctl_table tty_dir_table[] = {
22741 ++ {
22742 ++ .procname = "tty",
22743 ++ .mode = 0555,
22744 ++ .child = tty_table,
22745 ++ },
22746 ++ { }
22747 ++};
22748 ++
22749 ++static struct ctl_table tty_root_table[] = {
22750 ++ {
22751 ++ .procname = "dev",
22752 ++ .mode = 0555,
22753 ++ .child = tty_dir_table,
22754 ++ },
22755 ++ { }
22756 ++};
22757 ++
22758 ++void tty_sysctl_init(void)
22759 ++{
22760 ++ register_sysctl_table(tty_root_table);
22761 ++}
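
With the sysctl table registered under the dev/tty hierarchy, the
policy is tunable at runtime through /proc/sys/dev/tty/ldisc_autoload
(i.e. sysctl dev.tty.ldisc_autoload); CONFIG_LDISC_AUTOLOAD above only
picks the boot-time default. The actual gate sits in tty_ldisc_get(),
as quoted in the hunk:

    ldops = get_ldops(disc);
    if (IS_ERR(ldops)) {
            /* autoloading a missing ldisc module now requires either
             * CAP_SYS_MODULE or dev.tty.ldisc_autoload=1 */
            if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
                    return ERR_PTR(-EPERM);
            request_module("tty-ldisc-%d", disc);
            ldops = get_ldops(disc);
    }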
22762 +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
22763 +index bba75560d11e..9646ff63e77a 100644
22764 +--- a/drivers/tty/vt/vt.c
22765 ++++ b/drivers/tty/vt/vt.c
22766 +@@ -935,8 +935,11 @@ static void flush_scrollback(struct vc_data *vc)
22767 + {
22768 + WARN_CONSOLE_UNLOCKED();
22769 +
22770 ++ set_origin(vc);
22771 + if (vc->vc_sw->con_flush_scrollback)
22772 + vc->vc_sw->con_flush_scrollback(vc);
22773 ++ else
22774 ++ vc->vc_sw->con_switch(vc);
22775 + }
22776 +
22777 + /*
22778 +@@ -1503,8 +1506,10 @@ static void csi_J(struct vc_data *vc, int vpar)
22779 + count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
22780 + start = (unsigned short *)vc->vc_origin;
22781 + break;
22782 ++ case 3: /* include scrollback */
22783 ++ flush_scrollback(vc);
22784 ++ /* fallthrough */
22785 + case 2: /* erase whole display */
22786 +- case 3: /* (and scrollback buffer later) */
22787 + vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
22788 + count = vc->vc_cols * vc->vc_rows;
22789 + start = (unsigned short *)vc->vc_origin;
22790 +@@ -1513,13 +1518,7 @@ static void csi_J(struct vc_data *vc, int vpar)
22791 + return;
22792 + }
22793 + scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
22794 +- if (vpar == 3) {
22795 +- set_origin(vc);
22796 +- flush_scrollback(vc);
22797 +- if (con_is_visible(vc))
22798 +- update_screen(vc);
22799 +- } else if (con_should_update(vc))
22800 +- do_update_region(vc, (unsigned long) start, count);
22801 ++ update_region(vc, (unsigned long) start, count);
22802 + vc->vc_need_wrap = 0;
22803 + }
22804 +
22805 +diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
22806 +index 772851bee99b..12025358bb3c 100644
22807 +--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
22808 ++++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
22809 +@@ -130,6 +130,7 @@ static int tegra_udc_remove(struct platform_device *pdev)
22810 + {
22811 + struct tegra_udc *udc = platform_get_drvdata(pdev);
22812 +
22813 ++ ci_hdrc_remove_device(udc->dev);
22814 + usb_phy_set_suspend(udc->phy, 1);
22815 + clk_disable_unprepare(udc->clk);
22816 +
22817 +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
22818 +index 7bfcbb23c2a4..016e4004fe9d 100644
22819 +--- a/drivers/usb/chipidea/core.c
22820 ++++ b/drivers/usb/chipidea/core.c
22821 +@@ -954,8 +954,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
22822 + } else if (ci->platdata->usb_phy) {
22823 + ci->usb_phy = ci->platdata->usb_phy;
22824 + } else {
22825 ++ ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys",
22826 ++ 0);
22827 + ci->phy = devm_phy_get(dev->parent, "usb-phy");
22828 +- ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
22829 ++
22830 ++ /* Fallback to grabbing any registered USB2 PHY */
22831 ++ if (IS_ERR(ci->usb_phy) &&
22832 ++ PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
22833 ++ ci->usb_phy = devm_usb_get_phy(dev->parent,
22834 ++ USB_PHY_TYPE_USB2);
22835 +
22836 + /* if both generic PHY and USB PHY layers aren't enabled */
22837 + if (PTR_ERR(ci->phy) == -ENOSYS &&
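
The chipidea probe path now prefers a PHY referenced explicitly through
the "phys" DT phandle and only then falls back to grabbing any
registered USB2 PHY, taking care not to swallow -EPROBE_DEFER so the
probe is retried once the PHY driver appears:

    ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys", 0);
    ci->phy = devm_phy_get(dev->parent, "usb-phy");

    /* fall back to any registered USB2 PHY, keeping -EPROBE_DEFER */
    if (IS_ERR(ci->usb_phy) && PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
            ci->usb_phy = devm_usb_get_phy(dev->parent,
                                           USB_PHY_TYPE_USB2);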
22838 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
22839 +index 739f8960811a..ec666eb4b7b4 100644
22840 +--- a/drivers/usb/class/cdc-acm.c
22841 ++++ b/drivers/usb/class/cdc-acm.c
22842 +@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
22843 + clear_bit(EVENT_RX_STALL, &acm->flags);
22844 + }
22845 +
22846 +- if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
22847 ++ if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
22848 + tty_port_tty_wakeup(&acm->port);
22849 +- clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
22850 +- }
22851 + }
22852 +
22853 + /*
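
test_and_clear_bit() folds the check and the clear into a single atomic
operation, so a wakeup raised between the old separate test_bit() and
clear_bit() can no longer be lost:

    if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
            tty_port_tty_wakeup(&acm->port);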
22854 +diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
22855 +index 48277bbc15e4..73c8e6591746 100644
22856 +--- a/drivers/usb/common/common.c
22857 ++++ b/drivers/usb/common/common.c
22858 +@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
22859 +
22860 + do {
22861 + controller = of_find_node_with_property(controller, "phys");
22862 ++ if (!of_device_is_available(controller))
22863 ++ continue;
22864 + index = 0;
22865 + do {
22866 + if (arg0 == -1) {
22867 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
22868 +index 6c9b76bcc2e1..8d1dbe36db92 100644
22869 +--- a/drivers/usb/dwc3/gadget.c
22870 ++++ b/drivers/usb/dwc3/gadget.c
22871 +@@ -3339,6 +3339,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
22872 + goto err4;
22873 + }
22874 +
22875 ++ dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
22876 ++
22877 + return 0;
22878 +
22879 + err4:
22880 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
22881 +index 1e5430438703..0f8d16de7a37 100644
22882 +--- a/drivers/usb/gadget/function/f_fs.c
22883 ++++ b/drivers/usb/gadget/function/f_fs.c
22884 +@@ -1082,6 +1082,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
22885 + * condition with req->complete callback.
22886 + */
22887 + usb_ep_dequeue(ep->ep, req);
22888 ++ wait_for_completion(&done);
22889 + interrupted = ep->status < 0;
22890 + }
22891 +
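
usb_ep_dequeue() only requests cancellation; the request's completion
callback may still run afterwards. Waiting for the completion before
reading ep->status closes that window:

    /* interrupted by a signal: cancel, then wait for ->complete() */
    usb_ep_dequeue(ep->ep, req);
    wait_for_completion(&done);
    interrupted = ep->status < 0;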
22892 +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
22893 +index 75b113a5b25c..f3816a5c861e 100644
22894 +--- a/drivers/usb/gadget/function/f_hid.c
22895 ++++ b/drivers/usb/gadget/function/f_hid.c
22896 +@@ -391,20 +391,20 @@ try_again:
22897 + req->complete = f_hidg_req_complete;
22898 + req->context = hidg;
22899 +
22900 ++ spin_unlock_irqrestore(&hidg->write_spinlock, flags);
22901 ++
22902 + status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
22903 + if (status < 0) {
22904 + ERROR(hidg->func.config->cdev,
22905 + "usb_ep_queue error on int endpoint %zd\n", status);
22906 +- goto release_write_pending_unlocked;
22907 ++ goto release_write_pending;
22908 + } else {
22909 + status = count;
22910 + }
22911 +- spin_unlock_irqrestore(&hidg->write_spinlock, flags);
22912 +
22913 + return status;
22914 + release_write_pending:
22915 + spin_lock_irqsave(&hidg->write_spinlock, flags);
22916 +-release_write_pending_unlocked:
22917 + hidg->write_pending = 0;
22918 + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
22919 +
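
f_hidg_write() now drops write_spinlock before calling usb_ep_queue():
the request's completion handler may run synchronously from inside that
call and takes the same lock, so queueing under it could deadlock.
Sketch of the reworked flow:

    spin_unlock_irqrestore(&hidg->write_spinlock, flags);

    status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
    if (status < 0)
            goto release_write_pending;  /* re-acquires the lock there */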
22920 +diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
22921 +index 86cff5c28eff..ba841c569c48 100644
22922 +--- a/drivers/usb/host/xhci-dbgcap.c
22923 ++++ b/drivers/usb/host/xhci-dbgcap.c
22924 +@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
22925 + return -1;
22926 +
22927 + writel(0, &dbc->regs->control);
22928 +- xhci_dbc_mem_cleanup(xhci);
22929 + dbc->state = DS_DISABLED;
22930 +
22931 + return 0;
22932 +@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
22933 + ret = xhci_do_dbc_stop(xhci);
22934 + spin_unlock_irqrestore(&dbc->lock, flags);
22935 +
22936 +- if (!ret)
22937 ++ if (!ret) {
22938 ++ xhci_dbc_mem_cleanup(xhci);
22939 + pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
22940 ++ }
22941 + }
22942 +
22943 + static void
22944 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
22945 +index e2eece693655..96a740543183 100644
22946 +--- a/drivers/usb/host/xhci-hub.c
22947 ++++ b/drivers/usb/host/xhci-hub.c
22948 +@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
22949 + port_index = max_ports;
22950 + while (port_index--) {
22951 + u32 t1, t2;
22952 +-
22953 ++ int retries = 10;
22954 ++retry:
22955 + t1 = readl(ports[port_index]->addr);
22956 + t2 = xhci_port_state_to_neutral(t1);
22957 + portsc_buf[port_index] = 0;
22958 +
22959 +- /* Bail out if a USB3 port has a new device in link training */
22960 +- if ((hcd->speed >= HCD_USB3) &&
22961 ++ /*
22962 ++ * Give a USB3 port in link training time to finish, but don't
22963 ++ * prevent suspend as the port might be stuck
22964 ++ */
22965 ++ if ((hcd->speed >= HCD_USB3) && retries-- &&
22966 + (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
22967 +- bus_state->bus_suspended = 0;
22968 + spin_unlock_irqrestore(&xhci->lock, flags);
22969 +- xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
22970 +- return -EBUSY;
22971 ++ msleep(XHCI_PORT_POLLING_LFPS_TIME);
22972 ++ spin_lock_irqsave(&xhci->lock, flags);
22973 ++ xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
22974 ++ port_index);
22975 ++ goto retry;
22976 + }
22977 +-
22978 + /* suspend ports in U0, or bail out for new connect changes */
22979 + if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
22980 + if ((t1 & PORT_CSC) && wake_enabled) {
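
Rather than aborting bus suspend outright when a USB3 port is still in
link training, the loop now re-reads the port up to 10 times with a
36 ms sleep in between, i.e. roughly the 360 ms tPollingLFPSTimeout,
and then suspends anyway in case the port is stuck:

    int retries = 10;               /* 10 * 36 ms ~= 360 ms */
    retry:
    t1 = readl(ports[port_index]->addr);
    if ((hcd->speed >= HCD_USB3) && retries-- &&
        (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
            spin_unlock_irqrestore(&xhci->lock, flags);
            msleep(XHCI_PORT_POLLING_LFPS_TIME);    /* 36 ms */
            spin_lock_irqsave(&xhci->lock, flags);
            goto retry;
    }
    /* fall through: suspend even if the port never left polling */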
22981 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
22982 +index a9ec7051f286..c2fe218e051f 100644
22983 +--- a/drivers/usb/host/xhci-pci.c
22984 ++++ b/drivers/usb/host/xhci-pci.c
22985 +@@ -194,6 +194,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
22986 + xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
22987 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
22988 + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
22989 ++ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
22990 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
22991 + xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
22992 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
22993 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
22994 +index a6e463715779..671bce18782c 100644
22995 +--- a/drivers/usb/host/xhci-rcar.c
22996 ++++ b/drivers/usb/host/xhci-rcar.c
22997 +@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
22998 + if (!xhci_rcar_wait_for_pll_active(hcd))
22999 + return -ETIMEDOUT;
23000 +
23001 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
23002 + return xhci_rcar_download_firmware(hcd);
23003 + }
23004 +
23005 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
23006 +index 40fa25c4d041..9215a28dad40 100644
23007 +--- a/drivers/usb/host/xhci-ring.c
23008 ++++ b/drivers/usb/host/xhci-ring.c
23009 +@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
23010 + }
23011 + }
23012 +
23013 +- if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
23014 +- DEV_SUPERSPEED_ANY(portsc)) {
23015 ++ if ((portsc & PORT_PLC) &&
23016 ++ DEV_SUPERSPEED_ANY(portsc) &&
23017 ++ ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
23018 ++ (portsc & PORT_PLS_MASK) == XDEV_U1 ||
23019 ++ (portsc & PORT_PLS_MASK) == XDEV_U2)) {
23020 + xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
23021 +- /* We've just brought the device into U0 through either the
23022 ++ /* We've just brought the device into U0/1/2 through either the
23023 + * Resume state after a device remote wakeup, or through the
23024 + * U3Exit state after a host-initiated resume. If it's a device
23025 + * initiated remote wake, don't pass up the link state change,
23026 +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
23027 +index 938ff06c0349..efb0cad8710e 100644
23028 +--- a/drivers/usb/host/xhci-tegra.c
23029 ++++ b/drivers/usb/host/xhci-tegra.c
23030 +@@ -941,9 +941,9 @@ static void tegra_xusb_powerdomain_remove(struct device *dev,
23031 + device_link_del(tegra->genpd_dl_ss);
23032 + if (tegra->genpd_dl_host)
23033 + device_link_del(tegra->genpd_dl_host);
23034 +- if (tegra->genpd_dev_ss)
23035 ++ if (!IS_ERR_OR_NULL(tegra->genpd_dev_ss))
23036 + dev_pm_domain_detach(tegra->genpd_dev_ss, true);
23037 +- if (tegra->genpd_dev_host)
23038 ++ if (!IS_ERR_OR_NULL(tegra->genpd_dev_host))
23039 + dev_pm_domain_detach(tegra->genpd_dev_host, true);
23040 + }
23041 +
23042 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
23043 +index 652dc36e3012..9334cdee382a 100644
23044 +--- a/drivers/usb/host/xhci.h
23045 ++++ b/drivers/usb/host/xhci.h
23046 +@@ -452,6 +452,14 @@ struct xhci_op_regs {
23047 + */
23048 + #define XHCI_DEFAULT_BESL 4
23049 +
23050 ++/*
23051 ++ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3
23052 ++ * ports to complete link training. Usually link training completes much
23053 ++ * faster, so check the status 10 times with a 36ms sleep in places where
23054 ++ * we need to wait for polling to complete.
23055 ++ */
23056 ++#define XHCI_PORT_POLLING_LFPS_TIME 36
23057 ++
23058 + /**
23059 + * struct xhci_intr_reg - Interrupt Register Set
23060 + * @irq_pending: IMAN - Interrupt Management Register. Used to enable
23061 +diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
23062 +index 40bbf1f53337..fe58904f350b 100644
23063 +--- a/drivers/usb/mtu3/Kconfig
23064 ++++ b/drivers/usb/mtu3/Kconfig
23065 +@@ -4,6 +4,7 @@ config USB_MTU3
23066 + tristate "MediaTek USB3 Dual Role controller"
23067 + depends on USB || USB_GADGET
23068 + depends on ARCH_MEDIATEK || COMPILE_TEST
23069 ++ depends on EXTCON || !EXTCON
23070 + select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
23071 + help
23072 + Say Y or M here if your system runs on MediaTek SoCs with
23073 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
23074 +index c0777a374a88..e732949f6567 100644
23075 +--- a/drivers/usb/serial/cp210x.c
23076 ++++ b/drivers/usb/serial/cp210x.c
23077 +@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
23078 + { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
23079 + { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
23080 + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
23081 ++ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
23082 + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
23083 + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
23084 + { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
23085 +@@ -79,6 +80,7 @@ static const struct usb_device_id id_table[] = {
23086 + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
23087 + { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
23088 + { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
23089 ++ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
23090 + { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
23091 + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
23092 + { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
23093 +@@ -1353,8 +1355,13 @@ static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
23094 + if (priv->partnum == CP210X_PARTNUM_CP2105)
23095 + req_type = REQTYPE_INTERFACE_TO_HOST;
23096 +
23097 ++ result = usb_autopm_get_interface(serial->interface);
23098 ++ if (result)
23099 ++ return result;
23100 ++
23101 + result = cp210x_read_vendor_block(serial, req_type,
23102 + CP210X_READ_LATCH, &buf, sizeof(buf));
23103 ++ usb_autopm_put_interface(serial->interface);
23104 + if (result < 0)
23105 + return result;
23106 +
23107 +@@ -1375,6 +1382,10 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
23108 +
23109 + buf.mask = BIT(gpio);
23110 +
23111 ++ result = usb_autopm_get_interface(serial->interface);
23112 ++ if (result)
23113 ++ goto out;
23114 ++
23115 + if (priv->partnum == CP210X_PARTNUM_CP2105) {
23116 + result = cp210x_write_vendor_block(serial,
23117 + REQTYPE_HOST_TO_INTERFACE,
23118 +@@ -1392,6 +1403,8 @@ static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
23119 + NULL, 0, USB_CTRL_SET_TIMEOUT);
23120 + }
23121 +
23122 ++ usb_autopm_put_interface(serial->interface);
23123 ++out:
23124 + if (result < 0) {
23125 + dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
23126 + result);
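
Both cp210x GPIO paths now bracket the vendor request with a runtime PM
reference, so the device is resumed before the register access and the
reference is dropped again afterwards; the read side, for example:

    result = usb_autopm_get_interface(serial->interface);
    if (result)
            return result;

    result = cp210x_read_vendor_block(serial, req_type,
                                      CP210X_READ_LATCH,
                                      &buf, sizeof(buf));
    usb_autopm_put_interface(serial->interface);
    if (result < 0)
            return result;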
23127 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
23128 +index 77ef4c481f3c..1d8461ae2c34 100644
23129 +--- a/drivers/usb/serial/ftdi_sio.c
23130 ++++ b/drivers/usb/serial/ftdi_sio.c
23131 +@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
23132 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
23133 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
23134 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
23135 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
23136 ++ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
23137 + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
23138 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
23139 + { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
23140 +@@ -1025,6 +1027,8 @@ static const struct usb_device_id id_table_combined[] = {
23141 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
23142 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
23143 + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
23144 ++ /* EZPrototypes devices */
23145 ++ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
23146 + { } /* Terminating entry */
23147 + };
23148 +
23149 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
23150 +index 975d02666c5a..5755f0df0025 100644
23151 +--- a/drivers/usb/serial/ftdi_sio_ids.h
23152 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
23153 +@@ -567,7 +567,9 @@
23154 + /*
23155 + * NovaTech product ids (FTDI_VID)
23156 + */
23157 +-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
23158 ++#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
23159 ++#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
23160 ++#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
23161 +
23162 + /*
23163 + * Synapse Wireless product ids (FTDI_VID)
23164 +@@ -1308,6 +1310,12 @@
23165 + #define IONICS_VID 0x1c0c
23166 + #define IONICS_PLUGCOMPUTER_PID 0x0102
23167 +
23168 ++/*
23169 ++ * EZPrototypes (PID reseller)
23170 ++ */
23171 ++#define EZPROTOTYPES_VID 0x1c40
23172 ++#define HJELMSLUND_USB485_ISO_PID 0x0477
23173 ++
23174 + /*
23175 + * Dresden Elektronik Sensor Terminal Board
23176 + */
23177 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
23178 +index fc52ac75fbf6..18110225d506 100644
23179 +--- a/drivers/usb/serial/mos7720.c
23180 ++++ b/drivers/usb/serial/mos7720.c
23181 +@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
23182 + if (!urbtrack)
23183 + return -ENOMEM;
23184 +
23185 +- kref_get(&mos_parport->ref_count);
23186 +- urbtrack->mos_parport = mos_parport;
23187 + urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
23188 + if (!urbtrack->urb) {
23189 + kfree(urbtrack);
23190 +@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
23191 + usb_sndctrlpipe(usbdev, 0),
23192 + (unsigned char *)urbtrack->setup,
23193 + NULL, 0, async_complete, urbtrack);
23194 ++ kref_get(&mos_parport->ref_count);
23195 ++ urbtrack->mos_parport = mos_parport;
23196 + kref_init(&urbtrack->ref_count);
23197 + INIT_LIST_HEAD(&urbtrack->urblist_entry);
23198 +
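
Taking the parport kref only after every allocation has succeeded means
the early error paths, which simply kfree() the tracker, no longer leak
a reference; in outline:

    urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
    if (!urbtrack->urb) {
            kfree(urbtrack);        /* no ref taken yet, nothing to put */
            return -ENOMEM;
    }
    /* ... remaining allocations ... */

    kref_get(&mos_parport->ref_count);   /* safe: no failure exits left */
    urbtrack->mos_parport = mos_parport;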
23199 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
23200 +index aef15497ff31..83869065b802 100644
23201 +--- a/drivers/usb/serial/option.c
23202 ++++ b/drivers/usb/serial/option.c
23203 +@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
23204 + #define QUECTEL_PRODUCT_EC25 0x0125
23205 + #define QUECTEL_PRODUCT_BG96 0x0296
23206 + #define QUECTEL_PRODUCT_EP06 0x0306
23207 ++#define QUECTEL_PRODUCT_EM12 0x0512
23208 +
23209 + #define CMOTECH_VENDOR_ID 0x16d8
23210 + #define CMOTECH_PRODUCT_6001 0x6001
23211 +@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
23212 + .driver_info = RSVD(3) },
23213 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
23214 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
23215 +- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
23216 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
23217 ++ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
23218 + /* Quectel products using Qualcomm vendor ID */
23219 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
23220 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
23221 +@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
23222 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
23223 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
23224 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
23225 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
23226 ++ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
23227 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
23228 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
23229 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
23230 + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
23231 +@@ -1148,6 +1153,8 @@ static const struct usb_device_id option_ids[] = {
23232 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
23233 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
23234 + .driver_info = NCTRL(0) | RSVD(3) },
23235 ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
23236 ++ .driver_info = NCTRL(0) },
23237 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
23238 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
23239 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
23240 +@@ -1938,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
23241 + .driver_info = RSVD(4) },
23242 + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
23243 + .driver_info = RSVD(4) },
23244 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
23245 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
23246 +- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
23247 +- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
23248 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
23249 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
23250 ++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
23251 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
23252 ++ .driver_info = RSVD(4) },
23253 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
23254 + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
23255 + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
23256 + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
23257 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
23258 +index f1c39a3c7534..d34e945e5d09 100644
23259 +--- a/drivers/usb/typec/tcpm/tcpm.c
23260 ++++ b/drivers/usb/typec/tcpm/tcpm.c
23261 +@@ -37,6 +37,7 @@
23262 + S(SRC_ATTACHED), \
23263 + S(SRC_STARTUP), \
23264 + S(SRC_SEND_CAPABILITIES), \
23265 ++ S(SRC_SEND_CAPABILITIES_TIMEOUT), \
23266 + S(SRC_NEGOTIATE_CAPABILITIES), \
23267 + S(SRC_TRANSITION_SUPPLY), \
23268 + S(SRC_READY), \
23269 +@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
23270 + /* port->hard_reset_count = 0; */
23271 + port->caps_count = 0;
23272 + port->pd_capable = true;
23273 +- tcpm_set_state_cond(port, hard_reset_state(port),
23274 ++ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
23275 + PD_T_SEND_SOURCE_CAP);
23276 + }
23277 + break;
23278 ++ case SRC_SEND_CAPABILITIES_TIMEOUT:
23279 ++ /*
23280 ++ * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
23281 ++ *
23282 ++ * PD 2.0 sinks are supposed to accept src-capabilities with a
23283 ++ * 3.0 header and simply ignore any src PDOs which the sink does
23284 ++ * not understand, such as PPS, but some 2.0 sinks instead ignore
23285 ++ * the entire PD_DATA_SOURCE_CAP message, causing contract
23286 ++ * negotiation to fail.
23287 ++ *
23288 ++ * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
23289 ++ * sending src-capabilities with a lower PD revision to
23290 ++ * make these broken sinks work.
23291 ++ */
23292 ++ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
23293 ++ tcpm_set_state(port, HARD_RESET_SEND, 0);
23294 ++ } else if (port->negotiated_rev > PD_REV20) {
23295 ++ port->negotiated_rev--;
23296 ++ port->hard_reset_count = 0;
23297 ++ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
23298 ++ } else {
23299 ++ tcpm_set_state(port, hard_reset_state(port), 0);
23300 ++ }
23301 ++ break;
23302 + case SRC_NEGOTIATE_CAPABILITIES:
23303 + ret = tcpm_pd_check_request(port);
23304 + if (ret < 0) {
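The new SRC_SEND_CAPABILITIES_TIMEOUT state above implements a two-tier fallback: hard-reset up to PD_N_HARD_RESET_COUNT times, then step negotiated_rev down toward PD 2.0 and resend capabilities. The self-contained sketch below models that decision logic; the constants are illustrative, and the reset counting is folded into one function for brevity (in the driver it is maintained by the hard-reset path).

/* Userspace model of the revision-fallback logic, not the driver's code. */
#include <stdio.h>

#define PD_N_HARD_RESET_COUNT 2
enum pd_rev { PD_REV20 = 1, PD_REV30 = 2 };  /* illustrative values */

struct port { int hard_reset_count; enum pd_rev negotiated_rev; };

static const char *on_caps_timeout(struct port *p)
{
    if (p->hard_reset_count < PD_N_HARD_RESET_COUNT) {
        p->hard_reset_count++;          /* driver does this in the reset path */
        return "HARD_RESET_SEND";
    }
    if (p->negotiated_rev > PD_REV20) {
        p->negotiated_rev--;            /* retry with an older PD header */
        p->hard_reset_count = 0;
        return "SRC_SEND_CAPABILITIES";
    }
    return "give up (hard_reset_state)";
}

int main(void)
{
    struct port p = { 0, PD_REV30 };
    for (int i = 0; i < 6; i++)
        printf("timeout %d -> %s (rev=%d)\n", i, on_caps_timeout(&p),
               p.negotiated_rev);
    return 0;
}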
23305 +diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
23306 +index 423208e19383..6770afd40765 100644
23307 +--- a/drivers/usb/typec/tcpm/wcove.c
23308 ++++ b/drivers/usb/typec/tcpm/wcove.c
23309 +@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
23310 + wcove->dev = &pdev->dev;
23311 + wcove->regmap = pmic->regmap;
23312 +
23313 +- irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
23314 +- platform_get_irq(pdev, 0));
23315 ++ irq = platform_get_irq(pdev, 0);
23316 ++ if (irq < 0) {
23317 ++ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
23318 ++ return irq;
23319 ++ }
23320 ++
23321 ++ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
23322 + if (irq < 0)
23323 + return irq;
23324 +
23325 +diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
23326 +index 1c0033ad8738..e1109b15636d 100644
23327 +--- a/drivers/usb/typec/tps6598x.c
23328 ++++ b/drivers/usb/typec/tps6598x.c
23329 +@@ -110,6 +110,20 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
23330 + return 0;
23331 + }
23332 +
23333 ++static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
23334 ++ void *val, size_t len)
23335 ++{
23336 ++ u8 data[TPS_MAX_LEN + 1];
23337 ++
23338 ++ if (!tps->i2c_protocol)
23339 ++ return regmap_raw_write(tps->regmap, reg, val, len);
23340 ++
23341 ++ data[0] = len;
23342 ++ memcpy(&data[1], val, len);
23343 ++
23344 ++ return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
23345 ++}
23346 ++
23347 + static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
23348 + {
23349 + return tps6598x_block_read(tps, reg, val, sizeof(u16));
23350 +@@ -127,23 +141,23 @@ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
23351 +
23352 + static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
23353 + {
23354 +- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
23355 ++ return tps6598x_block_write(tps, reg, &val, sizeof(u16));
23356 + }
23357 +
23358 + static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
23359 + {
23360 +- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
23361 ++ return tps6598x_block_write(tps, reg, &val, sizeof(u32));
23362 + }
23363 +
23364 + static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
23365 + {
23366 +- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
23367 ++ return tps6598x_block_write(tps, reg, &val, sizeof(u64));
23368 + }
23369 +
23370 + static inline int
23371 + tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
23372 + {
23373 +- return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
23374 ++ return tps6598x_block_write(tps, reg, &val, sizeof(u32));
23375 + }
23376 +
23377 + static int tps6598x_read_partner_identity(struct tps6598x *tps)
23378 +@@ -229,8 +243,8 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
23379 + return -EBUSY;
23380 +
23381 + if (in_len) {
23382 +- ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
23383 +- in_data, in_len);
23384 ++ ret = tps6598x_block_write(tps, TPS_REG_DATA1,
23385 ++ in_data, in_len);
23386 + if (ret)
23387 + return ret;
23388 + }
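The tps6598x_block_write() helper above exists because, in plain-I2C mode, register payloads must be framed with a leading length byte before being handed to regmap. Below is a userspace sketch of just that framing step; TPS_MAX_LEN here is a stand-in and frame_i2c_write() is hypothetical (the driver itself hands the whole fixed-size buffer to regmap_raw_write()).

/* Userspace model of the [len][payload] framing used for I2C writes. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define TPS_MAX_LEN 64  /* illustrative cap */

static size_t frame_i2c_write(uint8_t *out, const void *val, size_t len)
{
    if (len > TPS_MAX_LEN)
        return 0;                   /* caller error */
    out[0] = (uint8_t)len;          /* length byte goes first */
    memcpy(&out[1], val, len);
    return len + 1;
}

int main(void)
{
    uint8_t buf[TPS_MAX_LEN + 1];
    uint32_t reg_val = 0xdeadbeef;
    size_t n = frame_i2c_write(buf, &reg_val, sizeof(reg_val));

    printf("framed %zu bytes, len byte = %u\n", n, buf[0]);
    return 0;
}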
23389 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
23390 +index feb90764a811..53b8ceea9bde 100644
23391 +--- a/drivers/video/backlight/pwm_bl.c
23392 ++++ b/drivers/video/backlight/pwm_bl.c
23393 +@@ -435,7 +435,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
23394 + */
23395 +
23396 + /* if the enable GPIO is disabled, do not enable the backlight */
23397 +- if (pb->enable_gpio && gpiod_get_value(pb->enable_gpio) == 0)
23398 ++ if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
23399 + return FB_BLANK_POWERDOWN;
23400 +
23401 + /* The regulator is disabled, do not enable the backlight */
23402 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
23403 +index cb43a2258c51..4721491e6c8c 100644
23404 +--- a/drivers/video/fbdev/core/fbmem.c
23405 ++++ b/drivers/video/fbdev/core/fbmem.c
23406 +@@ -431,6 +431,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
23407 + {
23408 + unsigned int x;
23409 +
23410 ++ if (image->width > info->var.xres || image->height > info->var.yres)
23411 ++ return;
23412 ++
23413 + if (rotate == FB_ROTATE_UR) {
23414 + for (x = 0;
23415 + x < num && image->dx + image->width <= info->var.xres;
23416 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
23417 +index a0b07c331255..a38b65b97be0 100644
23418 +--- a/drivers/virtio/virtio_ring.c
23419 ++++ b/drivers/virtio/virtio_ring.c
23420 +@@ -871,6 +871,8 @@ static struct virtqueue *vring_create_virtqueue_split(
23421 + GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
23422 + if (queue)
23423 + break;
23424 ++ if (!may_reduce_num)
23425 ++ return NULL;
23426 + }
23427 +
23428 + if (!num)
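The virtio_ring fix above targets the ring-allocation retry loop: the loop halves the queue size after a failed allocation, which is only legitimate when the caller permitted shrinking; otherwise it must fail immediately rather than silently hand back a smaller ring. A minimal userspace model of the corrected loop, with mock sizes:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static void *alloc_ring(unsigned int num, unsigned int min,
                        bool may_reduce_num, unsigned int *out_num)
{
    for (; num >= min; num /= 2) {
        void *queue = malloc((size_t)num * 64);  /* 64: mock descriptor size */
        if (queue) {
            *out_num = num;
            return queue;
        }
        if (!may_reduce_num)
            return NULL;    /* the fix: never shrink behind the caller's back */
    }
    return NULL;
}

int main(void)
{
    unsigned int got = 0;
    void *q = alloc_ring(1024, 2, true, &got);

    printf("allocated ring of %u entries: %s\n", got, q ? "ok" : "failed");
    free(q);
    return 0;
}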
23429 +diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
23430 +index cba6b586bfbd..d97fcfc5e558 100644
23431 +--- a/drivers/xen/gntdev-dmabuf.c
23432 ++++ b/drivers/xen/gntdev-dmabuf.c
23433 +@@ -80,6 +80,12 @@ struct gntdev_dmabuf_priv {
23434 + struct list_head imp_list;
23435 + /* This is the lock which protects dma_buf_xxx lists. */
23436 + struct mutex lock;
23437 ++ /*
23438 ++ * We reference this file while exporting dma-bufs, so
23439 ++ * the grant device context is not destroyed while there are
23440 ++ * external users alive.
23441 ++ */
23442 ++ struct file *filp;
23443 + };
23444 +
23445 + /* DMA buffer export support. */
23446 +@@ -311,6 +317,7 @@ static void dmabuf_exp_release(struct kref *kref)
23447 +
23448 + dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
23449 + list_del(&gntdev_dmabuf->next);
23450 ++ fput(gntdev_dmabuf->priv->filp);
23451 + kfree(gntdev_dmabuf);
23452 + }
23453 +
23454 +@@ -423,6 +430,7 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
23455 + mutex_lock(&args->dmabuf_priv->lock);
23456 + list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
23457 + mutex_unlock(&args->dmabuf_priv->lock);
23458 ++ get_file(gntdev_dmabuf->priv->filp);
23459 + return 0;
23460 +
23461 + fail:
23462 +@@ -834,7 +842,7 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
23463 + return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
23464 + }
23465 +
23466 +-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
23467 ++struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
23468 + {
23469 + struct gntdev_dmabuf_priv *priv;
23470 +
23471 +@@ -847,6 +855,8 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
23472 + INIT_LIST_HEAD(&priv->exp_wait_list);
23473 + INIT_LIST_HEAD(&priv->imp_list);
23474 +
23475 ++ priv->filp = filp;
23476 ++
23477 + return priv;
23478 + }
23479 +
23480 +diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
23481 +index 7220a53d0fc5..3d9b9cf9d5a1 100644
23482 +--- a/drivers/xen/gntdev-dmabuf.h
23483 ++++ b/drivers/xen/gntdev-dmabuf.h
23484 +@@ -14,7 +14,7 @@
23485 + struct gntdev_dmabuf_priv;
23486 + struct gntdev_priv;
23487 +
23488 +-struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
23489 ++struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);
23490 +
23491 + void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
23492 +
23493 +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
23494 +index 5efc5eee9544..7cf9c51318aa 100644
23495 +--- a/drivers/xen/gntdev.c
23496 ++++ b/drivers/xen/gntdev.c
23497 +@@ -600,7 +600,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
23498 + mutex_init(&priv->lock);
23499 +
23500 + #ifdef CONFIG_XEN_GNTDEV_DMABUF
23501 +- priv->dmabuf_priv = gntdev_dmabuf_init();
23502 ++ priv->dmabuf_priv = gntdev_dmabuf_init(flip);
23503 + if (IS_ERR(priv->dmabuf_priv)) {
23504 + ret = PTR_ERR(priv->dmabuf_priv);
23505 + kfree(priv);
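Taken together, the three gntdev hunks above pin the device file for as long as any exported dma-buf is alive: gntdev_dmabuf_init() records the struct file, each export takes get_file(), and each release drops it with fput(). A toy refcount model of that lifetime rule (plain C, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct ctx { int refs; };

static struct ctx *ctx_get(struct ctx *c) { c->refs++; return c; }

static void ctx_put(struct ctx *c)
{
    if (--c->refs == 0) {
        printf("context destroyed\n");
        free(c);
    }
}

int main(void)
{
    struct ctx *c = malloc(sizeof(*c));
    c->refs = 1;        /* the open device file itself */

    ctx_get(c);         /* exporting a dma-buf pins the context */
    ctx_put(c);         /* device file closed: context survives */
    ctx_put(c);         /* last dma-buf released: now it is torn down */
    return 0;
}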
23506 +diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
23507 +index 5a0db6dec8d1..aaee1e6584e6 100644
23508 +--- a/fs/9p/v9fs_vfs.h
23509 ++++ b/fs/9p/v9fs_vfs.h
23510 +@@ -40,6 +40,9 @@
23511 + */
23512 + #define P9_LOCK_TIMEOUT (30*HZ)
23513 +
23514 ++/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
23515 ++#define V9FS_STAT2INODE_KEEP_ISIZE 1
23516 ++
23517 + extern struct file_system_type v9fs_fs_type;
23518 + extern const struct address_space_operations v9fs_addr_operations;
23519 + extern const struct file_operations v9fs_file_operations;
23520 +@@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
23521 + struct inode *inode, umode_t mode, dev_t);
23522 + void v9fs_evict_inode(struct inode *inode);
23523 + ino_t v9fs_qid2ino(struct p9_qid *qid);
23524 +-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
23525 +-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
23526 ++void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
23527 ++ struct super_block *sb, unsigned int flags);
23528 ++void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
23529 ++ unsigned int flags);
23530 + int v9fs_dir_release(struct inode *inode, struct file *filp);
23531 + int v9fs_file_open(struct inode *inode, struct file *file);
23532 + void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
23533 +@@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
23534 + }
23535 +
23536 + int v9fs_open_to_dotl_flags(int flags);
23537 ++
23538 ++static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
23539 ++{
23540 ++ /*
23541 ++	 * 32-bit builds need the lock: concurrent updates could break the
23542 ++	 * seqcount and make i_size_read() loop forever.
23543 ++ * 64-bit updates are atomic and can skip the locking.
23544 ++ */
23545 ++ if (sizeof(i_size) > sizeof(long))
23546 ++ spin_lock(&inode->i_lock);
23547 ++ i_size_write(inode, i_size);
23548 ++ if (sizeof(i_size) > sizeof(long))
23549 ++ spin_unlock(&inode->i_lock);
23550 ++}
23551 + #endif
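The v9fs_i_size_write() helper added above takes inode->i_lock only when loff_t is wider than a native long, i.e. on 32-bit builds, where an unserialized 64-bit store can tear and corrupt the sequence protocol behind i_size_read(). The sizeof comparison is resolved at compile time, so 64-bit kernels pay nothing. A standalone model of the same trick using a pthread mutex:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t isize_lock = PTHREAD_MUTEX_INITIALIZER;
static long long i_size;

static void set_i_size(long long v)
{
    /* Branch is constant-folded; no runtime cost on 64-bit builds. */
    if (sizeof(v) > sizeof(long))
        pthread_mutex_lock(&isize_lock);
    i_size = v;
    if (sizeof(v) > sizeof(long))
        pthread_mutex_unlock(&isize_lock);
}

int main(void)
{
    set_i_size(1LL << 40);
    printf("i_size=%lld (locked store on 32-bit: %s)\n", i_size,
           sizeof(long long) > sizeof(long) ? "yes" : "no");
    return 0;
}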
23552 +diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
23553 +index a25efa782fcc..9a1125305d84 100644
23554 +--- a/fs/9p/vfs_file.c
23555 ++++ b/fs/9p/vfs_file.c
23556 +@@ -446,7 +446,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
23557 + i_size = i_size_read(inode);
23558 + if (iocb->ki_pos > i_size) {
23559 + inode_add_bytes(inode, iocb->ki_pos - i_size);
23560 +- i_size_write(inode, iocb->ki_pos);
23561 ++ /*
23562 ++ * Need to serialize against i_size_write() in
23563 ++ * v9fs_stat2inode()
23564 ++ */
23565 ++ v9fs_i_size_write(inode, iocb->ki_pos);
23566 + }
23567 + return retval;
23568 + }
23569 +diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
23570 +index 85ff859d3af5..72b779bc0942 100644
23571 +--- a/fs/9p/vfs_inode.c
23572 ++++ b/fs/9p/vfs_inode.c
23573 +@@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
23574 + if (retval)
23575 + goto error;
23576 +
23577 +- v9fs_stat2inode(st, inode, sb);
23578 ++ v9fs_stat2inode(st, inode, sb, 0);
23579 + v9fs_cache_inode_get_cookie(inode);
23580 + unlock_new_inode(inode);
23581 + return inode;
23582 +@@ -1092,7 +1092,7 @@ v9fs_vfs_getattr(const struct path *path, struct kstat *stat,
23583 + if (IS_ERR(st))
23584 + return PTR_ERR(st);
23585 +
23586 +- v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
23587 ++ v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
23588 + generic_fillattr(d_inode(dentry), stat);
23589 +
23590 + p9stat_free(st);
23591 +@@ -1170,12 +1170,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
23592 + * @stat: Plan 9 metadata (mistat) structure
23593 + * @inode: inode to populate
23594 + * @sb: superblock of filesystem
23595 ++ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
23596 + *
23597 + */
23598 +
23599 + void
23600 + v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
23601 +- struct super_block *sb)
23602 ++ struct super_block *sb, unsigned int flags)
23603 + {
23604 + umode_t mode;
23605 + char ext[32];
23606 +@@ -1216,10 +1217,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
23607 + mode = p9mode2perm(v9ses, stat);
23608 + mode |= inode->i_mode & ~S_IALLUGO;
23609 + inode->i_mode = mode;
23610 +- i_size_write(inode, stat->length);
23611 +
23612 ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
23613 ++ v9fs_i_size_write(inode, stat->length);
23614 + /* not real number of blocks, but 512 byte ones ... */
23615 +- inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
23616 ++ inode->i_blocks = (stat->length + 512 - 1) >> 9;
23617 + v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
23618 + }
23619 +
23620 +@@ -1416,9 +1418,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
23621 + {
23622 + int umode;
23623 + dev_t rdev;
23624 +- loff_t i_size;
23625 + struct p9_wstat *st;
23626 + struct v9fs_session_info *v9ses;
23627 ++ unsigned int flags;
23628 +
23629 + v9ses = v9fs_inode2v9ses(inode);
23630 + st = p9_client_stat(fid);
23631 +@@ -1431,16 +1433,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
23632 + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
23633 + goto out;
23634 +
23635 +- spin_lock(&inode->i_lock);
23636 + /*
23637 + * We don't want to refresh inode->i_size,
23638 + * because we may have cached data
23639 + */
23640 +- i_size = inode->i_size;
23641 +- v9fs_stat2inode(st, inode, inode->i_sb);
23642 +- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
23643 +- inode->i_size = i_size;
23644 +- spin_unlock(&inode->i_lock);
23645 ++ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
23646 ++ V9FS_STAT2INODE_KEEP_ISIZE : 0;
23647 ++ v9fs_stat2inode(st, inode, inode->i_sb, flags);
23648 + out:
23649 + p9stat_free(st);
23650 + kfree(st);
23651 +diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
23652 +index 4823e1c46999..a950a927a626 100644
23653 +--- a/fs/9p/vfs_inode_dotl.c
23654 ++++ b/fs/9p/vfs_inode_dotl.c
23655 +@@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
23656 + if (retval)
23657 + goto error;
23658 +
23659 +- v9fs_stat2inode_dotl(st, inode);
23660 ++ v9fs_stat2inode_dotl(st, inode, 0);
23661 + v9fs_cache_inode_get_cookie(inode);
23662 + retval = v9fs_get_acl(inode, fid);
23663 + if (retval)
23664 +@@ -496,7 +496,7 @@ v9fs_vfs_getattr_dotl(const struct path *path, struct kstat *stat,
23665 + if (IS_ERR(st))
23666 + return PTR_ERR(st);
23667 +
23668 +- v9fs_stat2inode_dotl(st, d_inode(dentry));
23669 ++ v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
23670 + generic_fillattr(d_inode(dentry), stat);
23671 + /* Change block size to what the server returned */
23672 + stat->blksize = st->st_blksize;
23673 +@@ -607,11 +607,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
23674 + * v9fs_stat2inode_dotl - populate an inode structure with stat info
23675 + * @stat: stat structure
23676 + * @inode: inode to populate
23677 ++ * @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
23678 + *
23679 + */
23680 +
23681 + void
23682 +-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
23683 ++v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
23684 ++ unsigned int flags)
23685 + {
23686 + umode_t mode;
23687 + struct v9fs_inode *v9inode = V9FS_I(inode);
23688 +@@ -631,7 +633,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
23689 + mode |= inode->i_mode & ~S_IALLUGO;
23690 + inode->i_mode = mode;
23691 +
23692 +- i_size_write(inode, stat->st_size);
23693 ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
23694 ++ v9fs_i_size_write(inode, stat->st_size);
23695 + inode->i_blocks = stat->st_blocks;
23696 + } else {
23697 + if (stat->st_result_mask & P9_STATS_ATIME) {
23698 +@@ -661,8 +664,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
23699 + }
23700 + if (stat->st_result_mask & P9_STATS_RDEV)
23701 + inode->i_rdev = new_decode_dev(stat->st_rdev);
23702 +- if (stat->st_result_mask & P9_STATS_SIZE)
23703 +- i_size_write(inode, stat->st_size);
23704 ++ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
23705 ++ stat->st_result_mask & P9_STATS_SIZE)
23706 ++ v9fs_i_size_write(inode, stat->st_size);
23707 + if (stat->st_result_mask & P9_STATS_BLOCKS)
23708 + inode->i_blocks = stat->st_blocks;
23709 + }
23710 +@@ -928,9 +932,9 @@ v9fs_vfs_get_link_dotl(struct dentry *dentry,
23711 +
23712 + int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
23713 + {
23714 +- loff_t i_size;
23715 + struct p9_stat_dotl *st;
23716 + struct v9fs_session_info *v9ses;
23717 ++ unsigned int flags;
23718 +
23719 + v9ses = v9fs_inode2v9ses(inode);
23720 + st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
23721 +@@ -942,16 +946,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
23722 + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
23723 + goto out;
23724 +
23725 +- spin_lock(&inode->i_lock);
23726 + /*
23727 + * We don't want to refresh inode->i_size,
23728 + * because we may have cached data
23729 + */
23730 +- i_size = inode->i_size;
23731 +- v9fs_stat2inode_dotl(st, inode);
23732 +- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
23733 +- inode->i_size = i_size;
23734 +- spin_unlock(&inode->i_lock);
23735 ++ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
23736 ++ V9FS_STAT2INODE_KEEP_ISIZE : 0;
23737 ++ v9fs_stat2inode_dotl(st, inode, flags);
23738 + out:
23739 + kfree(st);
23740 + return 0;
23741 +diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
23742 +index 48ce50484e80..eeab9953af89 100644
23743 +--- a/fs/9p/vfs_super.c
23744 ++++ b/fs/9p/vfs_super.c
23745 +@@ -172,7 +172,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
23746 + goto release_sb;
23747 + }
23748 + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
23749 +- v9fs_stat2inode_dotl(st, d_inode(root));
23750 ++ v9fs_stat2inode_dotl(st, d_inode(root), 0);
23751 + kfree(st);
23752 + } else {
23753 + struct p9_wstat *st = NULL;
23754 +@@ -183,7 +183,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
23755 + }
23756 +
23757 + d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
23758 +- v9fs_stat2inode(st, d_inode(root), sb);
23759 ++ v9fs_stat2inode(st, d_inode(root), sb, 0);
23760 +
23761 + p9stat_free(st);
23762 + kfree(st);
23763 +diff --git a/fs/aio.c b/fs/aio.c
23764 +index aaaaf4d12c73..3d9669d011b9 100644
23765 +--- a/fs/aio.c
23766 ++++ b/fs/aio.c
23767 +@@ -167,9 +167,13 @@ struct kioctx {
23768 + unsigned id;
23769 + };
23770 +
23771 ++/*
23772 ++ * First field must be the file pointer in all the
23773 ++ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
23774 ++ */
23775 + struct fsync_iocb {
23776 +- struct work_struct work;
23777 + struct file *file;
23778 ++ struct work_struct work;
23779 + bool datasync;
23780 + };
23781 +
23782 +@@ -183,8 +187,15 @@ struct poll_iocb {
23783 + struct work_struct work;
23784 + };
23785 +
23786 ++/*
23787 ++ * NOTE! Each of the iocb union members has the file pointer
23788 ++ * as the first entry in their struct definition. So you can
23789 ++ * access the file pointer through any of the sub-structs,
23790 ++ * or directly as just 'ki_filp' in this struct.
23791 ++ */
23792 + struct aio_kiocb {
23793 + union {
23794 ++ struct file *ki_filp;
23795 + struct kiocb rw;
23796 + struct fsync_iocb fsync;
23797 + struct poll_iocb poll;
23798 +@@ -1060,6 +1071,8 @@ static inline void iocb_put(struct aio_kiocb *iocb)
23799 + {
23800 + if (refcount_read(&iocb->ki_refcnt) == 0 ||
23801 + refcount_dec_and_test(&iocb->ki_refcnt)) {
23802 ++ if (iocb->ki_filp)
23803 ++ fput(iocb->ki_filp);
23804 + percpu_ref_put(&iocb->ki_ctx->reqs);
23805 + kmem_cache_free(kiocb_cachep, iocb);
23806 + }
23807 +@@ -1424,7 +1437,6 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
23808 + file_end_write(kiocb->ki_filp);
23809 + }
23810 +
23811 +- fput(kiocb->ki_filp);
23812 + aio_complete(iocb, res, res2);
23813 + }
23814 +
23815 +@@ -1432,9 +1444,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
23816 + {
23817 + int ret;
23818 +
23819 +- req->ki_filp = fget(iocb->aio_fildes);
23820 +- if (unlikely(!req->ki_filp))
23821 +- return -EBADF;
23822 + req->ki_complete = aio_complete_rw;
23823 + req->private = NULL;
23824 + req->ki_pos = iocb->aio_offset;
23825 +@@ -1451,7 +1460,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
23826 + ret = ioprio_check_cap(iocb->aio_reqprio);
23827 + if (ret) {
23828 + pr_debug("aio ioprio check cap error: %d\n", ret);
23829 +- goto out_fput;
23830 ++ return ret;
23831 + }
23832 +
23833 + req->ki_ioprio = iocb->aio_reqprio;
23834 +@@ -1460,14 +1469,10 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
23835 +
23836 + ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
23837 + if (unlikely(ret))
23838 +- goto out_fput;
23839 ++ return ret;
23840 +
23841 + req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
23842 + return 0;
23843 +-
23844 +-out_fput:
23845 +- fput(req->ki_filp);
23846 +- return ret;
23847 + }
23848 +
23849 + static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
23850 +@@ -1521,24 +1526,19 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
23851 + if (ret)
23852 + return ret;
23853 + file = req->ki_filp;
23854 +-
23855 +- ret = -EBADF;
23856 + if (unlikely(!(file->f_mode & FMODE_READ)))
23857 +- goto out_fput;
23858 ++ return -EBADF;
23859 + ret = -EINVAL;
23860 + if (unlikely(!file->f_op->read_iter))
23861 +- goto out_fput;
23862 ++ return -EINVAL;
23863 +
23864 + ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
23865 + if (ret)
23866 +- goto out_fput;
23867 ++ return ret;
23868 + ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
23869 + if (!ret)
23870 + aio_rw_done(req, call_read_iter(file, req, &iter));
23871 + kfree(iovec);
23872 +-out_fput:
23873 +- if (unlikely(ret))
23874 +- fput(file);
23875 + return ret;
23876 + }
23877 +
23878 +@@ -1555,16 +1555,14 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
23879 + return ret;
23880 + file = req->ki_filp;
23881 +
23882 +- ret = -EBADF;
23883 + if (unlikely(!(file->f_mode & FMODE_WRITE)))
23884 +- goto out_fput;
23885 +- ret = -EINVAL;
23886 ++ return -EBADF;
23887 + if (unlikely(!file->f_op->write_iter))
23888 +- goto out_fput;
23889 ++ return -EINVAL;
23890 +
23891 + ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
23892 + if (ret)
23893 +- goto out_fput;
23894 ++ return ret;
23895 + ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
23896 + if (!ret) {
23897 + /*
23898 +@@ -1582,9 +1580,6 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
23899 + aio_rw_done(req, call_write_iter(file, req, &iter));
23900 + }
23901 + kfree(iovec);
23902 +-out_fput:
23903 +- if (unlikely(ret))
23904 +- fput(file);
23905 + return ret;
23906 + }
23907 +
23908 +@@ -1594,7 +1589,6 @@ static void aio_fsync_work(struct work_struct *work)
23909 + int ret;
23910 +
23911 + ret = vfs_fsync(req->file, req->datasync);
23912 +- fput(req->file);
23913 + aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
23914 + }
23915 +
23916 +@@ -1605,13 +1599,8 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
23917 + iocb->aio_rw_flags))
23918 + return -EINVAL;
23919 +
23920 +- req->file = fget(iocb->aio_fildes);
23921 +- if (unlikely(!req->file))
23922 +- return -EBADF;
23923 +- if (unlikely(!req->file->f_op->fsync)) {
23924 +- fput(req->file);
23925 ++ if (unlikely(!req->file->f_op->fsync))
23926 + return -EINVAL;
23927 +- }
23928 +
23929 + req->datasync = datasync;
23930 + INIT_WORK(&req->work, aio_fsync_work);
23931 +@@ -1621,10 +1610,7 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
23932 +
23933 + static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
23934 + {
23935 +- struct file *file = iocb->poll.file;
23936 +-
23937 + aio_complete(iocb, mangle_poll(mask), 0);
23938 +- fput(file);
23939 + }
23940 +
23941 + static void aio_poll_complete_work(struct work_struct *work)
23942 +@@ -1680,6 +1666,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
23943 + struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
23944 + struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
23945 + __poll_t mask = key_to_poll(key);
23946 ++ unsigned long flags;
23947 +
23948 + req->woken = true;
23949 +
23950 +@@ -1688,10 +1675,15 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
23951 + if (!(mask & req->events))
23952 + return 0;
23953 +
23954 +- /* try to complete the iocb inline if we can: */
23955 +- if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
23956 ++ /*
23957 ++ * Try to complete the iocb inline if we can. Use
23958 ++ * irqsave/irqrestore because not all filesystems (e.g. fuse)
23959 ++ * call this function with IRQs disabled and because IRQs
23960 ++ * have to be disabled before ctx_lock is obtained.
23961 ++ */
23962 ++ if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
23963 + list_del(&iocb->ki_list);
23964 +- spin_unlock(&iocb->ki_ctx->ctx_lock);
23965 ++ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
23966 +
23967 + list_del_init(&req->wait.entry);
23968 + aio_poll_complete(iocb, mask);
23969 +@@ -1743,9 +1735,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
23970 +
23971 + INIT_WORK(&req->work, aio_poll_complete_work);
23972 + req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
23973 +- req->file = fget(iocb->aio_fildes);
23974 +- if (unlikely(!req->file))
23975 +- return -EBADF;
23976 +
23977 + req->head = NULL;
23978 + req->woken = false;
23979 +@@ -1788,10 +1777,8 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
23980 + spin_unlock_irq(&ctx->ctx_lock);
23981 +
23982 + out:
23983 +- if (unlikely(apt.error)) {
23984 +- fput(req->file);
23985 ++ if (unlikely(apt.error))
23986 + return apt.error;
23987 +- }
23988 +
23989 + if (mask)
23990 + aio_poll_complete(aiocb, mask);
23991 +@@ -1829,6 +1816,11 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
23992 + if (unlikely(!req))
23993 + goto out_put_reqs_available;
23994 +
23995 ++ req->ki_filp = fget(iocb->aio_fildes);
23996 ++ ret = -EBADF;
23997 ++ if (unlikely(!req->ki_filp))
23998 ++ goto out_put_req;
23999 ++
24000 + if (iocb->aio_flags & IOCB_FLAG_RESFD) {
24001 + /*
24002 + * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
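The aio changes above hinge on a layout invariant: every member of the request union is a struct whose first field is the struct file pointer, so the file can be stashed once in __io_submit_one(), read through any sub-struct, and dropped exactly once in iocb_put(). A standalone C11 demonstration of that common-first-member aliasing (all names here are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <assert.h>

struct file { int dummy; };

struct rw_iocb    { struct file *file; long pos; };
struct fsync_iocb { struct file *file; int datasync; };

struct kiocb_demo {
    union {
        struct file *ki_filp;   /* direct view of the shared first field */
        struct rw_iocb rw;
        struct fsync_iocb fsync;
    };
};

int main(void)
{
    struct file f;
    struct kiocb_demo k = { .rw = { .file = &f, .pos = 0 } };

    /* All three names alias the same storage at offset 0. */
    static_assert(offsetof(struct rw_iocb, file) == 0, "file must be first");
    static_assert(offsetof(struct fsync_iocb, file) == 0, "file must be first");

    printf("same pointer: %d\n", k.ki_filp == k.fsync.file);
    return 0;
}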
24003 +diff --git a/fs/block_dev.c b/fs/block_dev.c
24004 +index 58a4c1217fa8..06ef48ad1998 100644
24005 +--- a/fs/block_dev.c
24006 ++++ b/fs/block_dev.c
24007 +@@ -298,10 +298,10 @@ static void blkdev_bio_end_io(struct bio *bio)
24008 + struct blkdev_dio *dio = bio->bi_private;
24009 + bool should_dirty = dio->should_dirty;
24010 +
24011 +- if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
24012 +- if (bio->bi_status && !dio->bio.bi_status)
24013 +- dio->bio.bi_status = bio->bi_status;
24014 +- } else {
24015 ++ if (bio->bi_status && !dio->bio.bi_status)
24016 ++ dio->bio.bi_status = bio->bi_status;
24017 ++
24018 ++ if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
24019 + if (!dio->is_sync) {
24020 + struct kiocb *iocb = dio->iocb;
24021 + ssize_t ret;
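The blkdev_bio_end_io() fix above reorders completion so the first error is latched from every fragment, including the final one, before the last-reference test decides whether to complete the parent I/O; previously an error carried only by the last bio was dropped. A single-threaded userspace model of the corrected ordering:

#include <stdio.h>
#include <stdatomic.h>

struct dio { atomic_int ref; int status; };

static void end_io(struct dio *d, int frag_status)
{
    if (frag_status && !d->status)
        d->status = frag_status;            /* latch first error, always */

    if (atomic_fetch_sub(&d->ref, 1) == 1)  /* last fragment completes I/O */
        printf("dio done, status=%d\n", d->status);
}

int main(void)
{
    struct dio d = { .status = 0 };
    atomic_init(&d.ref, 3);

    end_io(&d, 0);
    end_io(&d, 0);
    end_io(&d, -5);     /* error arrives on the final bio: no longer lost */
    return 0;
}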
24022 +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
24023 +index 3b66c957ea6f..5810463dc6d2 100644
24024 +--- a/fs/btrfs/acl.c
24025 ++++ b/fs/btrfs/acl.c
24026 +@@ -9,6 +9,7 @@
24027 + #include <linux/posix_acl_xattr.h>
24028 + #include <linux/posix_acl.h>
24029 + #include <linux/sched.h>
24030 ++#include <linux/sched/mm.h>
24031 + #include <linux/slab.h>
24032 +
24033 + #include "ctree.h"
24034 +@@ -72,8 +73,16 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
24035 + }
24036 +
24037 + if (acl) {
24038 ++ unsigned int nofs_flag;
24039 ++
24040 + size = posix_acl_xattr_size(acl->a_count);
24041 ++ /*
24042 ++ * We're holding a transaction handle, so use a NOFS memory
24043 ++ * allocation context to avoid deadlock if reclaim happens.
24044 ++ */
24045 ++ nofs_flag = memalloc_nofs_save();
24046 + value = kmalloc(size, GFP_KERNEL);
24047 ++ memalloc_nofs_restore(nofs_flag);
24048 + if (!value) {
24049 + ret = -ENOMEM;
24050 + goto out;
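The acl.c hunk above (and the disk-io.c one below) apply the same scoped-NOFS pattern: while a transaction handle is held, GFP_KERNEL allocations must not recurse into filesystem reclaim, and memalloc_nofs_save()/restore() enforces that for the whole scope instead of threading GFP_NOFS through every callee. A kernel-style sketch of the pattern, not buildable outside the tree:

#include <linux/sched/mm.h>
#include <linux/slab.h>

void *alloc_under_transaction(size_t size)
{
    unsigned int nofs_flag;
    void *p;

    nofs_flag = memalloc_nofs_save();   /* GFP_KERNEL now behaves as GFP_NOFS */
    p = kmalloc(size, GFP_KERNEL);
    memalloc_nofs_restore(nofs_flag);
    return p;
}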
24051 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
24052 +index 8750c835f535..c4dea3b7349e 100644
24053 +--- a/fs/btrfs/dev-replace.c
24054 ++++ b/fs/btrfs/dev-replace.c
24055 +@@ -862,6 +862,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
24056 + btrfs_destroy_dev_replace_tgtdev(tgt_device);
24057 + break;
24058 + default:
24059 ++ up_write(&dev_replace->rwsem);
24060 + result = -EINVAL;
24061 + }
24062 +
24063 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
24064 +index 6a2a2a951705..888d72dda794 100644
24065 +--- a/fs/btrfs/disk-io.c
24066 ++++ b/fs/btrfs/disk-io.c
24067 +@@ -17,6 +17,7 @@
24068 + #include <linux/semaphore.h>
24069 + #include <linux/error-injection.h>
24070 + #include <linux/crc32c.h>
24071 ++#include <linux/sched/mm.h>
24072 + #include <asm/unaligned.h>
24073 + #include "ctree.h"
24074 + #include "disk-io.h"
24075 +@@ -1258,10 +1259,17 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
24076 + struct btrfs_root *tree_root = fs_info->tree_root;
24077 + struct btrfs_root *root;
24078 + struct btrfs_key key;
24079 ++ unsigned int nofs_flag;
24080 + int ret = 0;
24081 + uuid_le uuid = NULL_UUID_LE;
24082 +
24083 ++ /*
24084 ++ * We're holding a transaction handle, so use a NOFS memory allocation
24085 ++ * context to avoid deadlock if reclaim happens.
24086 ++ */
24087 ++ nofs_flag = memalloc_nofs_save();
24088 + root = btrfs_alloc_root(fs_info, GFP_KERNEL);
24089 ++ memalloc_nofs_restore(nofs_flag);
24090 + if (!root)
24091 + return ERR_PTR(-ENOMEM);
24092 +
24093 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
24094 +index d81035b7ea7d..1b68700bc1c5 100644
24095 +--- a/fs/btrfs/extent-tree.c
24096 ++++ b/fs/btrfs/extent-tree.c
24097 +@@ -4808,6 +4808,7 @@ skip_async:
24098 + }
24099 +
24100 + struct reserve_ticket {
24101 ++ u64 orig_bytes;
24102 + u64 bytes;
24103 + int error;
24104 + struct list_head list;
24105 +@@ -5030,7 +5031,7 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
24106 + !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
24107 + }
24108 +
24109 +-static void wake_all_tickets(struct list_head *head)
24110 ++static bool wake_all_tickets(struct list_head *head)
24111 + {
24112 + struct reserve_ticket *ticket;
24113 +
24114 +@@ -5039,7 +5040,10 @@ static void wake_all_tickets(struct list_head *head)
24115 + list_del_init(&ticket->list);
24116 + ticket->error = -ENOSPC;
24117 + wake_up(&ticket->wait);
24118 ++ if (ticket->bytes != ticket->orig_bytes)
24119 ++ return true;
24120 + }
24121 ++ return false;
24122 + }
24123 +
24124 + /*
24125 +@@ -5094,8 +5098,12 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
24126 + if (flush_state > COMMIT_TRANS) {
24127 + commit_cycles++;
24128 + if (commit_cycles > 2) {
24129 +- wake_all_tickets(&space_info->tickets);
24130 +- space_info->flush = 0;
24131 ++ if (wake_all_tickets(&space_info->tickets)) {
24132 ++ flush_state = FLUSH_DELAYED_ITEMS_NR;
24133 ++ commit_cycles--;
24134 ++ } else {
24135 ++ space_info->flush = 0;
24136 ++ }
24137 + } else {
24138 + flush_state = FLUSH_DELAYED_ITEMS_NR;
24139 + }
24140 +@@ -5147,10 +5155,11 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
24141 +
24142 + static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
24143 + struct btrfs_space_info *space_info,
24144 +- struct reserve_ticket *ticket, u64 orig_bytes)
24145 ++ struct reserve_ticket *ticket)
24146 +
24147 + {
24148 + DEFINE_WAIT(wait);
24149 ++ u64 reclaim_bytes = 0;
24150 + int ret = 0;
24151 +
24152 + spin_lock(&space_info->lock);
24153 +@@ -5171,14 +5180,12 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
24154 + ret = ticket->error;
24155 + if (!list_empty(&ticket->list))
24156 + list_del_init(&ticket->list);
24157 +- if (ticket->bytes && ticket->bytes < orig_bytes) {
24158 +- u64 num_bytes = orig_bytes - ticket->bytes;
24159 +- update_bytes_may_use(space_info, -num_bytes);
24160 +- trace_btrfs_space_reservation(fs_info, "space_info",
24161 +- space_info->flags, num_bytes, 0);
24162 +- }
24163 ++ if (ticket->bytes && ticket->bytes < ticket->orig_bytes)
24164 ++ reclaim_bytes = ticket->orig_bytes - ticket->bytes;
24165 + spin_unlock(&space_info->lock);
24166 +
24167 ++ if (reclaim_bytes)
24168 ++ space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
24169 + return ret;
24170 + }
24171 +
24172 +@@ -5204,6 +5211,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
24173 + {
24174 + struct reserve_ticket ticket;
24175 + u64 used;
24176 ++ u64 reclaim_bytes = 0;
24177 + int ret = 0;
24178 +
24179 + ASSERT(orig_bytes);
24180 +@@ -5239,6 +5247,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
24181 + * the list and we will do our own flushing further down.
24182 + */
24183 + if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
24184 ++ ticket.orig_bytes = orig_bytes;
24185 + ticket.bytes = orig_bytes;
24186 + ticket.error = 0;
24187 + init_waitqueue_head(&ticket.wait);
24188 +@@ -5279,25 +5288,21 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
24189 + return ret;
24190 +
24191 + if (flush == BTRFS_RESERVE_FLUSH_ALL)
24192 +- return wait_reserve_ticket(fs_info, space_info, &ticket,
24193 +- orig_bytes);
24194 ++ return wait_reserve_ticket(fs_info, space_info, &ticket);
24195 +
24196 + ret = 0;
24197 + priority_reclaim_metadata_space(fs_info, space_info, &ticket);
24198 + spin_lock(&space_info->lock);
24199 + if (ticket.bytes) {
24200 +- if (ticket.bytes < orig_bytes) {
24201 +- u64 num_bytes = orig_bytes - ticket.bytes;
24202 +- update_bytes_may_use(space_info, -num_bytes);
24203 +- trace_btrfs_space_reservation(fs_info, "space_info",
24204 +- space_info->flags,
24205 +- num_bytes, 0);
24206 +-
24207 +- }
24208 ++ if (ticket.bytes < orig_bytes)
24209 ++ reclaim_bytes = orig_bytes - ticket.bytes;
24210 + list_del_init(&ticket.list);
24211 + ret = -ENOSPC;
24212 + }
24213 + spin_unlock(&space_info->lock);
24214 ++
24215 ++ if (reclaim_bytes)
24216 ++ space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
24217 + ASSERT(list_empty(&ticket.list));
24218 + return ret;
24219 + }
24220 +@@ -6115,7 +6120,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
24221 + *
24222 + * This is overestimating in most cases.
24223 + */
24224 +- qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
24225 ++ qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
24226 +
24227 + spin_lock(&block_rsv->lock);
24228 + block_rsv->size = reserve_size;
24229 +@@ -8690,6 +8695,8 @@ struct walk_control {
24230 + u64 refs[BTRFS_MAX_LEVEL];
24231 + u64 flags[BTRFS_MAX_LEVEL];
24232 + struct btrfs_key update_progress;
24233 ++ struct btrfs_key drop_progress;
24234 ++ int drop_level;
24235 + int stage;
24236 + int level;
24237 + int shared_level;
24238 +@@ -9028,6 +9035,16 @@ skip:
24239 + ret);
24240 + }
24241 + }
24242 ++
24243 ++ /*
24244 ++ * We need to update the next key in our walk control so we can
24245 ++ * update the drop_progress key accordingly. We don't care if
24246 ++ * find_next_key doesn't find a key because that means we're at
24247 ++ * the end and are going to clean up now.
24248 ++ */
24249 ++ wc->drop_level = level;
24250 ++ find_next_key(path, level, &wc->drop_progress);
24251 ++
24252 + ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
24253 + parent, root->root_key.objectid,
24254 + level - 1, 0);
24255 +@@ -9378,12 +9395,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
24256 + }
24257 +
24258 + if (wc->stage == DROP_REFERENCE) {
24259 +- level = wc->level;
24260 +- btrfs_node_key(path->nodes[level],
24261 +- &root_item->drop_progress,
24262 +- path->slots[level]);
24263 +- root_item->drop_level = level;
24264 +- }
24265 ++ wc->drop_level = wc->level;
24266 ++ btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
24267 ++ &wc->drop_progress,
24268 ++ path->slots[wc->drop_level]);
24269 ++ }
24270 ++ btrfs_cpu_key_to_disk(&root_item->drop_progress,
24271 ++ &wc->drop_progress);
24272 ++ root_item->drop_level = wc->drop_level;
24273 +
24274 + BUG_ON(wc->level == 0);
24275 + if (btrfs_should_end_transaction(trans) ||
24276 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
24277 +index 52abe4082680..1bfb7207bbf0 100644
24278 +--- a/fs/btrfs/extent_io.c
24279 ++++ b/fs/btrfs/extent_io.c
24280 +@@ -2985,11 +2985,11 @@ static int __do_readpage(struct extent_io_tree *tree,
24281 + */
24282 + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
24283 + prev_em_start && *prev_em_start != (u64)-1 &&
24284 +- *prev_em_start != em->orig_start)
24285 ++ *prev_em_start != em->start)
24286 + force_bio_submit = true;
24287 +
24288 + if (prev_em_start)
24289 +- *prev_em_start = em->orig_start;
24290 ++ *prev_em_start = em->start;
24291 +
24292 + free_extent_map(em);
24293 + em = NULL;
24294 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
24295 +index 9c8e1734429c..1d64a6b8e413 100644
24296 +--- a/fs/btrfs/ioctl.c
24297 ++++ b/fs/btrfs/ioctl.c
24298 +@@ -501,6 +501,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
24299 + if (!capable(CAP_SYS_ADMIN))
24300 + return -EPERM;
24301 +
24302 ++ /*
24303 ++ * If the fs is mounted with nologreplay, which requires it to be
24304 ++ * mounted in RO mode as well, we cannot allow discard on free space
24305 ++ * inside block groups, because log trees refer to extents that are not
24306 ++ * pinned in a block group's free space cache (pinning the extents is
24307 ++ * precisely the first phase of replaying a log tree).
24308 ++ */
24309 ++ if (btrfs_test_opt(fs_info, NOLOGREPLAY))
24310 ++ return -EROFS;
24311 ++
24312 + rcu_read_lock();
24313 + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
24314 + dev_list) {
24315 +@@ -3206,21 +3216,6 @@ out:
24316 + return ret;
24317 + }
24318 +
24319 +-static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
24320 +-{
24321 +- inode_unlock(inode1);
24322 +- inode_unlock(inode2);
24323 +-}
24324 +-
24325 +-static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
24326 +-{
24327 +- if (inode1 < inode2)
24328 +- swap(inode1, inode2);
24329 +-
24330 +- inode_lock_nested(inode1, I_MUTEX_PARENT);
24331 +- inode_lock_nested(inode2, I_MUTEX_CHILD);
24332 +-}
24333 +-
24334 + static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
24335 + struct inode *inode2, u64 loff2, u64 len)
24336 + {
24337 +@@ -3989,7 +3984,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
24338 + if (same_inode)
24339 + inode_lock(inode_in);
24340 + else
24341 +- btrfs_double_inode_lock(inode_in, inode_out);
24342 ++ lock_two_nondirectories(inode_in, inode_out);
24343 +
24344 + /*
24345 + * Now that the inodes are locked, we need to start writeback ourselves
24346 +@@ -4039,7 +4034,7 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
24347 + if (same_inode)
24348 + inode_unlock(inode_in);
24349 + else
24350 +- btrfs_double_inode_unlock(inode_in, inode_out);
24351 ++ unlock_two_nondirectories(inode_in, inode_out);
24352 +
24353 + return ret;
24354 + }
24355 +@@ -4069,7 +4064,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
24356 + if (same_inode)
24357 + inode_unlock(src_inode);
24358 + else
24359 +- btrfs_double_inode_unlock(src_inode, dst_inode);
24360 ++ unlock_two_nondirectories(src_inode, dst_inode);
24361 +
24362 + return ret < 0 ? ret : len;
24363 + }
24364 +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
24365 +index dc6140013ae8..61d22a56c0ba 100644
24366 +--- a/fs/btrfs/props.c
24367 ++++ b/fs/btrfs/props.c
24368 +@@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
24369 +
24370 + static int prop_compression_validate(const char *value, size_t len)
24371 + {
24372 +- if (!strncmp("lzo", value, len))
24373 ++ if (!strncmp("lzo", value, 3))
24374 + return 0;
24375 +- else if (!strncmp("zlib", value, len))
24376 ++ else if (!strncmp("zlib", value, 4))
24377 + return 0;
24378 +- else if (!strncmp("zstd", value, len))
24379 ++ else if (!strncmp("zstd", value, 4))
24380 + return 0;
24381 +
24382 + return -EINVAL;
24383 +@@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
24384 + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
24385 + } else if (!strncmp("zlib", value, 4)) {
24386 + type = BTRFS_COMPRESS_ZLIB;
24387 +- } else if (!strncmp("zstd", value, len)) {
24388 ++ } else if (!strncmp("zstd", value, 4)) {
24389 + type = BTRFS_COMPRESS_ZSTD;
24390 + btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
24391 + } else {
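The props.c fix above replaces strncmp() calls bounded by the caller-supplied length, which degenerate into prefix matches, with comparisons over each token's full length. A runnable demonstration of the hazard; validate_fixed() adds an explicit length check for clarity, which is slightly stricter than the hunk itself:

#include <stdio.h>
#include <string.h>

static int validate_buggy(const char *value, size_t len)
{
    return strncmp("zlib", value, len) == 0;   /* short len => prefix match */
}

static int validate_fixed(const char *value, size_t len)
{
    return len == 4 && strncmp("zlib", value, 4) == 0;
}

int main(void)
{
    printf("buggy(\"zl\", 2)  = %d\n", validate_buggy("zl", 2));   /* 1: accepted! */
    printf("fixed(\"zl\", 2)  = %d\n", validate_fixed("zl", 2));   /* 0 */
    printf("fixed(\"zlib\",4) = %d\n", validate_fixed("zlib", 4)); /* 1 */
    return 0;
}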
24392 +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
24393 +index 4e473a998219..e28fb43e943b 100644
24394 +--- a/fs/btrfs/qgroup.c
24395 ++++ b/fs/btrfs/qgroup.c
24396 +@@ -1917,8 +1917,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
24397 + int i;
24398 +
24399 + /* Level sanity check */
24400 +- if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
24401 +- root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
24402 ++ if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
24403 ++ root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
24404 + root_level < cur_level) {
24405 + btrfs_err_rl(fs_info,
24406 + "%s: bad levels, cur_level=%d root_level=%d",
24407 +@@ -2842,16 +2842,15 @@ out:
24408 + /*
24409 + * Two limits to commit transaction in advance.
24410 + *
24411 +- * For RATIO, it will be 1/RATIO of the remaining limit
24412 +- * (excluding data and prealloc meta) as threshold.
24413 ++ * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
24414 + * For SIZE, it will be in byte unit as threshold.
24415 + */
24416 +-#define QGROUP_PERTRANS_RATIO 32
24417 +-#define QGROUP_PERTRANS_SIZE SZ_32M
24418 ++#define QGROUP_FREE_RATIO 32
24419 ++#define QGROUP_FREE_SIZE SZ_32M
24420 + static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
24421 + const struct btrfs_qgroup *qg, u64 num_bytes)
24422 + {
24423 +- u64 limit;
24424 ++ u64 free;
24425 + u64 threshold;
24426 +
24427 + if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
24428 +@@ -2870,20 +2869,21 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
24429 + */
24430 + if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
24431 + BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
24432 +- if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
24433 +- limit = qg->max_excl;
24434 +- else
24435 +- limit = qg->max_rfer;
24436 +- threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
24437 +- qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
24438 +- QGROUP_PERTRANS_RATIO;
24439 +- threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
24440 ++ if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
24441 ++ free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
24442 ++ threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
24443 ++ QGROUP_FREE_SIZE);
24444 ++ } else {
24445 ++ free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
24446 ++ threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
24447 ++ QGROUP_FREE_SIZE);
24448 ++ }
24449 +
24450 + /*
24451 + * Use transaction_kthread to commit transaction, so we no
24452 + * longer need to bother nested transaction nor lock context.
24453 + */
24454 +- if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
24455 ++ if (free < threshold)
24456 + btrfs_commit_transaction_locksafe(fs_info);
24457 + }
24458 +
24459 +diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
24460 +index e74455eb42f9..6976e2280771 100644
24461 +--- a/fs/btrfs/raid56.c
24462 ++++ b/fs/btrfs/raid56.c
24463 +@@ -2429,8 +2429,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
24464 + bitmap_clear(rbio->dbitmap, pagenr, 1);
24465 + kunmap(p);
24466 +
24467 +- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
24468 ++ for (stripe = 0; stripe < nr_data; stripe++)
24469 + kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
24470 ++ kunmap(p_page);
24471 + }
24472 +
24473 + __free_page(p_page);
24474 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
24475 +index 6dcd36d7b849..1aeac70d0531 100644
24476 +--- a/fs/btrfs/scrub.c
24477 ++++ b/fs/btrfs/scrub.c
24478 +@@ -584,6 +584,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
24479 + sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
24480 + sctx->curr = -1;
24481 + sctx->fs_info = fs_info;
24482 ++ INIT_LIST_HEAD(&sctx->csum_list);
24483 + for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
24484 + struct scrub_bio *sbio;
24485 +
24486 +@@ -608,7 +609,6 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
24487 + atomic_set(&sctx->workers_pending, 0);
24488 + atomic_set(&sctx->cancel_req, 0);
24489 + sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
24490 +- INIT_LIST_HEAD(&sctx->csum_list);
24491 +
24492 + spin_lock_init(&sctx->list_lock);
24493 + spin_lock_init(&sctx->stat_lock);
24494 +@@ -3770,16 +3770,6 @@ fail_scrub_workers:
24495 + return -ENOMEM;
24496 + }
24497 +
24498 +-static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
24499 +-{
24500 +- if (--fs_info->scrub_workers_refcnt == 0) {
24501 +- btrfs_destroy_workqueue(fs_info->scrub_workers);
24502 +- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
24503 +- btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
24504 +- }
24505 +- WARN_ON(fs_info->scrub_workers_refcnt < 0);
24506 +-}
24507 +-
24508 + int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
24509 + u64 end, struct btrfs_scrub_progress *progress,
24510 + int readonly, int is_dev_replace)
24511 +@@ -3788,6 +3778,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
24512 + int ret;
24513 + struct btrfs_device *dev;
24514 + unsigned int nofs_flag;
24515 ++ struct btrfs_workqueue *scrub_workers = NULL;
24516 ++ struct btrfs_workqueue *scrub_wr_comp = NULL;
24517 ++ struct btrfs_workqueue *scrub_parity = NULL;
24518 +
24519 + if (btrfs_fs_closing(fs_info))
24520 + return -EINVAL;
24521 +@@ -3927,9 +3920,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
24522 +
24523 + mutex_lock(&fs_info->scrub_lock);
24524 + dev->scrub_ctx = NULL;
24525 +- scrub_workers_put(fs_info);
24526 ++ if (--fs_info->scrub_workers_refcnt == 0) {
24527 ++ scrub_workers = fs_info->scrub_workers;
24528 ++ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
24529 ++ scrub_parity = fs_info->scrub_parity_workers;
24530 ++ }
24531 + mutex_unlock(&fs_info->scrub_lock);
24532 +
24533 ++ btrfs_destroy_workqueue(scrub_workers);
24534 ++ btrfs_destroy_workqueue(scrub_wr_comp);
24535 ++ btrfs_destroy_workqueue(scrub_parity);
24536 + scrub_put_ctx(sctx);
24537 +
24538 + return ret;
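The scrub teardown rework above is the classic "detach under the lock, destroy outside it" pattern: btrfs_destroy_workqueue() flushes pending work, and doing that while holding scrub_lock could deadlock against workers that take the same lock. A userspace model of the pattern:

#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcnt = 1;
static char *resource;          /* stands in for the three workqueues */

static void teardown(void)
{
    char *grabbed = NULL;

    pthread_mutex_lock(&lock);
    if (--refcnt == 0) {        /* last user: take ownership of the pointers */
        grabbed = resource;
        resource = NULL;
    }
    pthread_mutex_unlock(&lock);

    free(grabbed);              /* "destroy" with the lock dropped */
    if (grabbed)
        printf("resource destroyed outside the lock\n");
}

int main(void)
{
    resource = malloc(16);
    teardown();
    return 0;
}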
24539 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
24540 +index ac232b3d6d7e..7f3b74a55073 100644
24541 +--- a/fs/btrfs/tree-log.c
24542 ++++ b/fs/btrfs/tree-log.c
24543 +@@ -3517,9 +3517,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
24544 + }
24545 + btrfs_release_path(path);
24546 +
24547 +- /* find the first key from this transaction again */
24548 ++ /*
24549 ++ * Find the first key from this transaction again. See the note for
24550 ++ * log_new_dir_dentries, if we're logging a directory recursively we
24551 ++ * won't be holding its i_mutex, which means we can modify the directory
24552 ++ * while we're logging it. If we remove an entry between our first
24553 ++ * search and this search we'll not find the key again and can just
24554 ++ * bail.
24555 ++ */
24556 + ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
24557 +- if (WARN_ON(ret != 0))
24558 ++ if (ret != 0)
24559 + goto done;
24560 +
24561 + /*
24562 +@@ -4481,6 +4488,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
24563 + item = btrfs_item_ptr(path->nodes[0], path->slots[0],
24564 + struct btrfs_inode_item);
24565 + *size_ret = btrfs_inode_size(path->nodes[0], item);
24566 ++ /*
24567 ++ * If the in-memory inode's i_size is smaller than the inode
24568 ++ * size stored in the btree, return the inode's i_size, so
24569 ++ * that we get a correct inode size after replaying the log
24570 ++ * when, before a power failure, we had a shrinking truncate
24571 ++ * followed by the addition of a new name (rename / new hard link).
24572 ++ * Otherwise return the inode size from the btree, to avoid
24573 ++ * data loss when replaying a log due to previously doing a
24574 ++ * write that expands the inode's size and logging a new name
24575 ++ * immediately after.
24576 ++ */
24577 ++ if (*size_ret > inode->vfs_inode.i_size)
24578 ++ *size_ret = inode->vfs_inode.i_size;
24579 + }
24580 +
24581 + btrfs_release_path(path);
24582 +@@ -4642,15 +4662,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
24583 + struct btrfs_file_extent_item);
24584 +
24585 + if (btrfs_file_extent_type(leaf, extent) ==
24586 +- BTRFS_FILE_EXTENT_INLINE) {
24587 +- len = btrfs_file_extent_ram_bytes(leaf, extent);
24588 +- ASSERT(len == i_size ||
24589 +- (len == fs_info->sectorsize &&
24590 +- btrfs_file_extent_compression(leaf, extent) !=
24591 +- BTRFS_COMPRESS_NONE) ||
24592 +- (len < i_size && i_size < fs_info->sectorsize));
24593 ++ BTRFS_FILE_EXTENT_INLINE)
24594 + return 0;
24595 +- }
24596 +
24597 + len = btrfs_file_extent_num_bytes(leaf, extent);
24598 + /* Last extent goes beyond i_size, no need to log a hole. */
24599 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
24600 +index 15561926ab32..88a323a453d8 100644
24601 +--- a/fs/btrfs/volumes.c
24602 ++++ b/fs/btrfs/volumes.c
24603 +@@ -6413,7 +6413,7 @@ static void btrfs_end_bio(struct bio *bio)
24604 + if (bio_op(bio) == REQ_OP_WRITE)
24605 + btrfs_dev_stat_inc_and_print(dev,
24606 + BTRFS_DEV_STAT_WRITE_ERRS);
24607 +- else
24608 ++ else if (!(bio->bi_opf & REQ_RAHEAD))
24609 + btrfs_dev_stat_inc_and_print(dev,
24610 + BTRFS_DEV_STAT_READ_ERRS);
24611 + if (bio->bi_opf & REQ_PREFLUSH)
24612 +@@ -6782,10 +6782,10 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
24613 + }
24614 +
24615 + if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
24616 +- (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
24617 ++ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
24618 + (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
24619 + (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
24620 +- (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
24621 ++ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
24622 + ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
24623 + num_stripes != 1)) {
24624 + btrfs_err(fs_info,
24625 +diff --git a/fs/buffer.c b/fs/buffer.c
24626 +index 48318fb74938..cab7a026876b 100644
24627 +--- a/fs/buffer.c
24628 ++++ b/fs/buffer.c
24629 +@@ -3027,6 +3027,13 @@ void guard_bio_eod(int op, struct bio *bio)
24630 + /* Uhhuh. We've got a bio that straddles the device size! */
24631 + truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
24632 +
24633 ++ /*
24634 ++ * The bio contains more than one segment that spans EOD; just return
24635 ++ * and let the IO layer turn it into an EIO
24636 ++ */
24637 ++ if (truncated_bytes > bvec->bv_len)
24638 ++ return;
24639 ++
24640 + /* Truncate the bio.. */
24641 + bio->bi_iter.bi_size -= truncated_bytes;
24642 + bvec->bv_len -= truncated_bytes;
24643 +diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
24644 +index d9b99abe1243..5d83c924cc47 100644
24645 +--- a/fs/cifs/cifs_dfs_ref.c
24646 ++++ b/fs/cifs/cifs_dfs_ref.c
24647 +@@ -285,9 +285,9 @@ static void dump_referral(const struct dfs_info3_param *ref)
24648 + {
24649 + cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
24650 + cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
24651 +- cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
24652 ++ cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
24653 + ref->flags, ref->server_type);
24654 +- cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
24655 ++ cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
24656 + ref->ref_flag, ref->path_consumed);
24657 + }
24658 +
24659 +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
24660 +index 42f0d67f1054..ed49222abecb 100644
24661 +--- a/fs/cifs/cifs_fs_sb.h
24662 ++++ b/fs/cifs/cifs_fs_sb.h
24663 +@@ -58,6 +58,7 @@ struct cifs_sb_info {
24664 + spinlock_t tlink_tree_lock;
24665 + struct tcon_link *master_tlink;
24666 + struct nls_table *local_nls;
24667 ++ unsigned int bsize;
24668 + unsigned int rsize;
24669 + unsigned int wsize;
24670 + unsigned long actimeo; /* attribute cache timeout (jiffies) */
24671 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
24672 +index 62d48d486d8f..07cad54b84f1 100644
24673 +--- a/fs/cifs/cifsfs.c
24674 ++++ b/fs/cifs/cifsfs.c
24675 +@@ -554,10 +554,13 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
24676 +
24677 + seq_printf(s, ",rsize=%u", cifs_sb->rsize);
24678 + seq_printf(s, ",wsize=%u", cifs_sb->wsize);
24679 ++ seq_printf(s, ",bsize=%u", cifs_sb->bsize);
24680 + seq_printf(s, ",echo_interval=%lu",
24681 + tcon->ses->server->echo_interval / HZ);
24682 + if (tcon->snapshot_time)
24683 + seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
24684 ++ if (tcon->handle_timeout)
24685 ++ seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
24686 + /* convert actimeo and display it in seconds */
24687 + seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
24688 +
24689 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
24690 +index 94dbdbe5be34..6c934ab3722b 100644
24691 +--- a/fs/cifs/cifsglob.h
24692 ++++ b/fs/cifs/cifsglob.h
24693 +@@ -59,6 +59,12 @@
24694 + */
24695 + #define CIFS_MAX_ACTIMEO (1 << 30)
24696 +
24697 ++/*
24698 ++ * Max persistent and resilient handle timeout (milliseconds).
24699 ++ * Windows durable max was 960000 (16 minutes)
24700 ++ */
24701 ++#define SMB3_MAX_HANDLE_TIMEOUT 960000
24702 ++
24703 + /*
24704 + * MAX_REQ is the maximum number of requests that WE will send
24705 + * on one socket concurrently.
24706 +@@ -236,6 +242,8 @@ struct smb_version_operations {
24707 + int * (*get_credits_field)(struct TCP_Server_Info *, const int);
24708 + unsigned int (*get_credits)(struct mid_q_entry *);
24709 + __u64 (*get_next_mid)(struct TCP_Server_Info *);
24710 ++ void (*revert_current_mid)(struct TCP_Server_Info *server,
24711 ++ const unsigned int val);
24712 + /* data offset from read response message */
24713 + unsigned int (*read_data_offset)(char *);
24714 + /*
24715 +@@ -557,6 +565,7 @@ struct smb_vol {
24716 + bool resilient:1; /* noresilient not required since not forced for CA */
24717 + bool domainauto:1;
24718 + bool rdma:1;
24719 ++ unsigned int bsize;
24720 + unsigned int rsize;
24721 + unsigned int wsize;
24722 + bool sockopt_tcp_nodelay:1;
24723 +@@ -569,6 +578,7 @@ struct smb_vol {
24724 + struct nls_table *local_nls;
24725 + unsigned int echo_interval; /* echo interval in secs */
24726 + __u64 snapshot_time; /* needed for timewarp tokens */
24727 ++ __u32 handle_timeout; /* persistent and durable handle timeout in ms */
24728 + unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
24729 + };
24730 +
24731 +@@ -770,6 +780,22 @@ get_next_mid(struct TCP_Server_Info *server)
24732 + return cpu_to_le16(mid);
24733 + }
24734 +
24735 ++static inline void
24736 ++revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
24737 ++{
24738 ++ if (server->ops->revert_current_mid)
24739 ++ server->ops->revert_current_mid(server, val);
24740 ++}
24741 ++
24742 ++static inline void
24743 ++revert_current_mid_from_hdr(struct TCP_Server_Info *server,
24744 ++ const struct smb2_sync_hdr *shdr)
24745 ++{
24746 ++ unsigned int num = le16_to_cpu(shdr->CreditCharge);
24747 ++
24748 ++ return revert_current_mid(server, num > 0 ? num : 1);
24749 ++}
24750 ++
24751 + static inline __u16
24752 + get_mid(const struct smb_hdr *smb)
24753 + {
24754 +@@ -1009,6 +1035,7 @@ struct cifs_tcon {
24755 + __u32 vol_serial_number;
24756 + __le64 vol_create_time;
24757 + __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
24758 ++ __u32 handle_timeout; /* persistent and durable handle timeout in ms */
24759 + __u32 ss_flags; /* sector size flags */
24760 + __u32 perf_sector_size; /* best sector size for perf */
24761 + __u32 max_chunks;
24762 +@@ -1422,6 +1449,7 @@ struct mid_q_entry {
24763 + struct kref refcount;
24764 + struct TCP_Server_Info *server; /* server corresponding to this mid */
24765 + __u64 mid; /* multiplex id */
24766 ++ __u16 credits; /* number of credits consumed by this mid */
24767 + __u32 pid; /* process id */
24768 + __u32 sequence_number; /* for CIFS signing */
24769 + unsigned long when_alloc; /* when mid was created */
24770 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
24771 +index bb54ccf8481c..551924beb86f 100644
24772 +--- a/fs/cifs/cifssmb.c
24773 ++++ b/fs/cifs/cifssmb.c
24774 +@@ -2125,12 +2125,13 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
24775 +
24776 + wdata2->cfile = find_writable_file(CIFS_I(inode), false);
24777 + if (!wdata2->cfile) {
24778 +- cifs_dbg(VFS, "No writable handles for inode\n");
24779 ++ cifs_dbg(VFS, "No writable handle to retry writepages\n");
24780 + rc = -EBADF;
24781 +- break;
24782 ++ } else {
24783 ++ wdata2->pid = wdata2->cfile->pid;
24784 ++ rc = server->ops->async_writev(wdata2,
24785 ++ cifs_writedata_release);
24786 + }
24787 +- wdata2->pid = wdata2->cfile->pid;
24788 +- rc = server->ops->async_writev(wdata2, cifs_writedata_release);
24789 +
24790 + for (j = 0; j < nr_pages; j++) {
24791 + unlock_page(wdata2->pages[j]);
24792 +@@ -2145,6 +2146,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
24793 + kref_put(&wdata2->refcount, cifs_writedata_release);
24794 + if (is_retryable_error(rc))
24795 + continue;
24796 ++ i += nr_pages;
24797 + break;
24798 + }
24799 +
24800 +@@ -2152,6 +2154,13 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
24801 + i += nr_pages;
24802 + } while (i < wdata->nr_pages);
24803 +
24804 ++ /* cleanup remaining pages from the original wdata */
24805 ++ for (; i < wdata->nr_pages; i++) {
24806 ++ SetPageError(wdata->pages[i]);
24807 ++ end_page_writeback(wdata->pages[i]);
24808 ++ put_page(wdata->pages[i]);
24809 ++ }
24810 ++
24811 + if (rc != 0 && !is_retryable_error(rc))
24812 + mapping_set_error(inode->i_mapping, rc);
24813 + kref_put(&wdata->refcount, cifs_writedata_release);
24814 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
24815 +index 8463c940e0e5..44e6ec85f832 100644
24816 +--- a/fs/cifs/connect.c
24817 ++++ b/fs/cifs/connect.c
24818 +@@ -102,8 +102,8 @@ enum {
24819 + Opt_backupuid, Opt_backupgid, Opt_uid,
24820 + Opt_cruid, Opt_gid, Opt_file_mode,
24821 + Opt_dirmode, Opt_port,
24822 +- Opt_rsize, Opt_wsize, Opt_actimeo,
24823 +- Opt_echo_interval, Opt_max_credits,
24824 ++ Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
24825 ++ Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
24826 + Opt_snapshot,
24827 +
24828 + /* Mount options which take string value */
24829 +@@ -204,9 +204,11 @@ static const match_table_t cifs_mount_option_tokens = {
24830 + { Opt_dirmode, "dirmode=%s" },
24831 + { Opt_dirmode, "dir_mode=%s" },
24832 + { Opt_port, "port=%s" },
24833 ++ { Opt_blocksize, "bsize=%s" },
24834 + { Opt_rsize, "rsize=%s" },
24835 + { Opt_wsize, "wsize=%s" },
24836 + { Opt_actimeo, "actimeo=%s" },
24837 ++ { Opt_handletimeout, "handletimeout=%s" },
24838 + { Opt_echo_interval, "echo_interval=%s" },
24839 + { Opt_max_credits, "max_credits=%s" },
24840 + { Opt_snapshot, "snapshot=%s" },
24841 +@@ -1486,6 +1488,11 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
24842 + const char *delims = "/\\";
24843 + size_t len;
24844 +
24845 ++ if (unlikely(!devname || !*devname)) {
24846 ++ cifs_dbg(VFS, "Device name not specified.\n");
24847 ++ return -EINVAL;
24848 ++ }
24849 ++
24850 + /* make sure we have a valid UNC double delimiter prefix */
24851 + len = strspn(devname, delims);
24852 + if (len != 2)
24853 +@@ -1571,7 +1578,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
24854 + vol->cred_uid = current_uid();
24855 + vol->linux_uid = current_uid();
24856 + vol->linux_gid = current_gid();
24857 +-
24858 ++ vol->bsize = 1024 * 1024; /* can improve cp performance significantly */
24859 + /*
24860 + * default to SFM style remapping of seven reserved characters
24861 + * unless user overrides it or we negotiate CIFS POSIX where
24862 +@@ -1594,6 +1601,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
24863 +
24864 + vol->actimeo = CIFS_DEF_ACTIMEO;
24865 +
24866 ++ /* Most clients set timeout to 0, which allows server to use its default */
24867 ++ vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
24868 ++
24869 + /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
24870 + vol->ops = &smb30_operations;
24871 + vol->vals = &smbdefault_values;
24872 +@@ -1944,6 +1954,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
24873 + }
24874 + port = (unsigned short)option;
24875 + break;
24876 ++ case Opt_blocksize:
24877 ++ if (get_option_ul(args, &option)) {
24878 ++ cifs_dbg(VFS, "%s: Invalid blocksize value\n",
24879 ++ __func__);
24880 ++ goto cifs_parse_mount_err;
24881 ++ }
24882 ++ /*
24883 ++ * inode blocksize realistically should never need to be
24884 ++ * less than 16K or greater than 16M, and the default is 1MB.
24885 ++ * Note that small inode block sizes (e.g. 64K) can lead
24886 ++ * to very poor performance of common tools like cp and scp
24887 ++ */
24888 ++ if ((option < CIFS_MAX_MSGSIZE) ||
24889 ++ (option > (4 * SMB3_DEFAULT_IOSIZE))) {
24890 ++ cifs_dbg(VFS, "%s: Invalid blocksize\n",
24891 ++ __func__);
24892 ++ goto cifs_parse_mount_err;
24893 ++ }
24894 ++ vol->bsize = option;
24895 ++ break;
24896 + case Opt_rsize:
24897 + if (get_option_ul(args, &option)) {
24898 + cifs_dbg(VFS, "%s: Invalid rsize value\n",
24899 +@@ -1972,6 +2002,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
24900 + goto cifs_parse_mount_err;
24901 + }
24902 + break;
24903 ++ case Opt_handletimeout:
24904 ++ if (get_option_ul(args, &option)) {
24905 ++ cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
24906 ++ __func__);
24907 ++ goto cifs_parse_mount_err;
24908 ++ }
24909 ++ vol->handle_timeout = option;
24910 ++ if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
24911 ++ cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
24912 ++ goto cifs_parse_mount_err;
24913 ++ }
24914 ++ break;
24915 + case Opt_echo_interval:
24916 + if (get_option_ul(args, &option)) {
24917 + cifs_dbg(VFS, "%s: Invalid echo interval value\n",
24918 +@@ -3138,6 +3180,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
24919 + return 0;
24920 + if (tcon->snapshot_time != volume_info->snapshot_time)
24921 + return 0;
24922 ++ if (tcon->handle_timeout != volume_info->handle_timeout)
24923 ++ return 0;
24924 + return 1;
24925 + }
24926 +
24927 +@@ -3252,6 +3296,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
24928 + tcon->snapshot_time = volume_info->snapshot_time;
24929 + }
24930 +
24931 ++ if (volume_info->handle_timeout) {
24932 ++ if (ses->server->vals->protocol_id == 0) {
24933 ++ cifs_dbg(VFS,
24934 ++ "Use SMB2.1 or later for handle timeout option\n");
24935 ++ rc = -EOPNOTSUPP;
24936 ++ goto out_fail;
24937 ++ } else
24938 ++ tcon->handle_timeout = volume_info->handle_timeout;
24939 ++ }
24940 ++
24941 + tcon->ses = ses;
24942 + if (volume_info->password) {
24943 + tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
24944 +@@ -3839,6 +3893,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
24945 + spin_lock_init(&cifs_sb->tlink_tree_lock);
24946 + cifs_sb->tlink_tree = RB_ROOT;
24947 +
24948 ++ cifs_sb->bsize = pvolume_info->bsize;
24949 + /*
24950 + * Temporarily set r/wsize for matching superblock. If we end up using
24951 + * new sb then client will later negotiate it downward if needed.
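
Both new mount options follow the driver's parse-then-range-check shape. A standalone sketch of the two validations (the bounds are assumptions read off the comments above: CIFS_MAX_MSGSIZE taken as 16K and SMB3_DEFAULT_IOSIZE as 4M):

    #include <stdio.h>
    #include <stdlib.h>

    /* Values assumed from the in-line comments, not read from headers. */
    #define CIFS_MAX_MSGSIZE        (16 * 1024)       /* lower bound, 16K */
    #define SMB3_DEFAULT_IOSIZE     (4 * 1024 * 1024) /* so 4x = 16M cap  */
    #define SMB3_MAX_HANDLE_TIMEOUT 960000            /* 16 minutes in ms */

    static int parse_bsize(const char *arg, unsigned long *bsize)
    {
        char *end;
        unsigned long v = strtoul(arg, &end, 10);

        if (*end != '\0')
            return -1;                        /* not a number */
        if (v < CIFS_MAX_MSGSIZE || v > 4UL * SMB3_DEFAULT_IOSIZE)
            return -1;                        /* out of range */
        *bsize = v;
        return 0;
    }

    static int parse_handletimeout(const char *arg, unsigned long *ms)
    {
        char *end;
        unsigned long v = strtoul(arg, &end, 10);

        if (*end != '\0' || v > SMB3_MAX_HANDLE_TIMEOUT)
            return -1;
        *ms = v;
        return 0;
    }

    int main(void)
    {
        unsigned long bsize = 1024 * 1024, timeout = 0;  /* the defaults */
        printf("bsize=4M ok: %d\n", parse_bsize("4194304", &bsize));
        printf("handletimeout=17min rejected: %d\n",
               parse_handletimeout("1020000", &timeout));
        return 0;
    }
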
24952 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
24953 +index 659ce1b92c44..8d107587208f 100644
24954 +--- a/fs/cifs/file.c
24955 ++++ b/fs/cifs/file.c
24956 +@@ -1645,8 +1645,20 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
24957 + rc = server->ops->mand_unlock_range(cfile, flock, xid);
24958 +
24959 + out:
24960 +- if (flock->fl_flags & FL_POSIX && !rc)
24961 ++ if (flock->fl_flags & FL_POSIX) {
24962 ++ /*
24963 ++ * If this is a request to remove all locks because we
24964 ++ * are closing the file, it doesn't matter if the
24965 ++ * unlocking failed as both cifs.ko and the SMB server
24966 ++ * remove the lock on file close
24967 ++ */
24968 ++ if (rc) {
24969 ++ cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
24970 ++ if (!(flock->fl_flags & FL_CLOSE))
24971 ++ return rc;
24972 ++ }
24973 + rc = locks_lock_file_wait(file, flock);
24974 ++ }
24975 + return rc;
24976 + }
24977 +
24978 +@@ -3028,14 +3040,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
24979 + * these pages but not on the region from pos to ppos+len-1.
24980 + */
24981 + written = cifs_user_writev(iocb, from);
24982 +- if (written > 0 && CIFS_CACHE_READ(cinode)) {
24983 ++ if (CIFS_CACHE_READ(cinode)) {
24984 + /*
24985 +- * Windows 7 server can delay breaking level2 oplock if a write
24986 +- * request comes - break it on the client to prevent reading
24987 +- * an old data.
24988 ++ * We have read level caching and we have just sent a write
24989 ++ * request to the server thus making data in the cache stale.
24990 ++ * Zap the cache and set oplock/lease level to NONE to avoid
24991 ++ * reading stale data from the cache. All subsequent read
24992 ++ * operations will read new data from the server.
24993 + */
24994 + cifs_zap_mapping(inode);
24995 +- cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
24996 ++ cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
24997 + inode);
24998 + cinode->oplock = 0;
24999 + }
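
The reworked tail of cifs_setlk() treats a failed server-side unlock as fatal only when the file is staying open; on close, both the client and the SMB server drop the locks anyway. A compact model of that policy, with the local lock bookkeeping reduced to a return code:

    #include <stdio.h>

    #define FL_POSIX 0x01
    #define FL_CLOSE 0x02   /* unlock issued as part of the final close */

    /* Propagate the server error only when not closing; otherwise fall
     * through to the local lock update, which stands in here for
     * locks_lock_file_wait(). */
    static int setlk_tail(unsigned int fl_flags, int rc, int local_wait_rc)
    {
        if (fl_flags & FL_POSIX) {
            if (rc && !(fl_flags & FL_CLOSE))
                return rc;
            return local_wait_rc;
        }
        return rc;
    }

    int main(void)
    {
        /* unlock-on-close: the server error (-5) is swallowed */
        printf("%d\n", setlk_tail(FL_POSIX | FL_CLOSE, -5, 0));
        return 0;
    }
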
25000 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
25001 +index 478003644916..53fdb5df0d2e 100644
25002 +--- a/fs/cifs/inode.c
25003 ++++ b/fs/cifs/inode.c
25004 +@@ -2080,7 +2080,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
25005 + return rc;
25006 +
25007 + generic_fillattr(inode, stat);
25008 +- stat->blksize = CIFS_MAX_MSGSIZE;
25009 ++ stat->blksize = cifs_sb->bsize;
25010 + stat->ino = CIFS_I(inode)->uniqueid;
25011 +
25012 + /* old CIFS Unix Extensions doesn't return create time */
25013 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
25014 +index 32a6c020478f..20a88776f04d 100644
25015 +--- a/fs/cifs/smb1ops.c
25016 ++++ b/fs/cifs/smb1ops.c
25017 +@@ -308,7 +308,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
25018 + remaining = tgt_total_cnt - total_in_tgt;
25019 +
25020 + if (remaining < 0) {
25021 +- cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
25022 ++ cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
25023 + tgt_total_cnt, total_in_tgt);
25024 + return -EPROTO;
25025 + }
25026 +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
25027 +index b204e84b87fb..b0e76d27d752 100644
25028 +--- a/fs/cifs/smb2file.c
25029 ++++ b/fs/cifs/smb2file.c
25030 +@@ -68,7 +68,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
25031 +
25032 +
25033 + if (oparms->tcon->use_resilient) {
25034 +- nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */
25035 ++ /* default timeout is 0, servers pick default (120 seconds) */
25036 ++ nr_ioctl_req.Timeout =
25037 ++ cpu_to_le32(oparms->tcon->handle_timeout);
25038 + nr_ioctl_req.Reserved = 0;
25039 + rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
25040 + fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
25041 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
25042 +index 7b8b58fb4d3f..58700d2ba8cd 100644
25043 +--- a/fs/cifs/smb2misc.c
25044 ++++ b/fs/cifs/smb2misc.c
25045 +@@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
25046 + __u8 lease_state;
25047 + struct list_head *tmp;
25048 + struct cifsFileInfo *cfile;
25049 +- struct TCP_Server_Info *server = tcon->ses->server;
25050 + struct cifs_pending_open *open;
25051 + struct cifsInodeInfo *cinode;
25052 + int ack_req = le32_to_cpu(rsp->Flags &
25053 +@@ -537,13 +536,25 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
25054 + cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
25055 + le32_to_cpu(rsp->NewLeaseState));
25056 +
25057 +- server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
25058 +-
25059 + if (ack_req)
25060 + cfile->oplock_break_cancelled = false;
25061 + else
25062 + cfile->oplock_break_cancelled = true;
25063 +
25064 ++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
25065 ++
25066 ++ /*
25067 ++ * Set or clear flags depending on the lease state being READ.
25068 ++ * HANDLE caching flag should be added when the client starts
25069 ++ * to defer closing remote file handles with HANDLE leases.
25070 ++ */
25071 ++ if (lease_state & SMB2_LEASE_READ_CACHING_HE)
25072 ++ set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
25073 ++ &cinode->flags);
25074 ++ else
25075 ++ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
25076 ++ &cinode->flags);
25077 ++
25078 + queue_work(cifsoplockd_wq, &cfile->oplock_break);
25079 + kfree(lw);
25080 + return true;
25081 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
25082 +index 6f96e2292856..b29f711ab965 100644
25083 +--- a/fs/cifs/smb2ops.c
25084 ++++ b/fs/cifs/smb2ops.c
25085 +@@ -219,6 +219,15 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
25086 + return mid;
25087 + }
25088 +
25089 ++static void
25090 ++smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
25091 ++{
25092 ++ spin_lock(&GlobalMid_Lock);
25093 ++ if (server->CurrentMid >= val)
25094 ++ server->CurrentMid -= val;
25095 ++ spin_unlock(&GlobalMid_Lock);
25096 ++}
25097 ++
25098 + static struct mid_q_entry *
25099 + smb2_find_mid(struct TCP_Server_Info *server, char *buf)
25100 + {
25101 +@@ -2594,6 +2603,15 @@ smb2_downgrade_oplock(struct TCP_Server_Info *server,
25102 + server->ops->set_oplock_level(cinode, 0, 0, NULL);
25103 + }
25104 +
25105 ++static void
25106 ++smb21_downgrade_oplock(struct TCP_Server_Info *server,
25107 ++ struct cifsInodeInfo *cinode, bool set_level2)
25108 ++{
25109 ++ server->ops->set_oplock_level(cinode,
25110 ++ set_level2 ? SMB2_LEASE_READ_CACHING_HE :
25111 ++ 0, 0, NULL);
25112 ++}
25113 ++
25114 + static void
25115 + smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
25116 + unsigned int epoch, bool *purge_cache)
25117 +@@ -3541,6 +3559,7 @@ struct smb_version_operations smb20_operations = {
25118 + .get_credits = smb2_get_credits,
25119 + .wait_mtu_credits = cifs_wait_mtu_credits,
25120 + .get_next_mid = smb2_get_next_mid,
25121 ++ .revert_current_mid = smb2_revert_current_mid,
25122 + .read_data_offset = smb2_read_data_offset,
25123 + .read_data_length = smb2_read_data_length,
25124 + .map_error = map_smb2_to_linux_error,
25125 +@@ -3636,6 +3655,7 @@ struct smb_version_operations smb21_operations = {
25126 + .get_credits = smb2_get_credits,
25127 + .wait_mtu_credits = smb2_wait_mtu_credits,
25128 + .get_next_mid = smb2_get_next_mid,
25129 ++ .revert_current_mid = smb2_revert_current_mid,
25130 + .read_data_offset = smb2_read_data_offset,
25131 + .read_data_length = smb2_read_data_length,
25132 + .map_error = map_smb2_to_linux_error,
25133 +@@ -3646,7 +3666,7 @@ struct smb_version_operations smb21_operations = {
25134 + .print_stats = smb2_print_stats,
25135 + .is_oplock_break = smb2_is_valid_oplock_break,
25136 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
25137 +- .downgrade_oplock = smb2_downgrade_oplock,
25138 ++ .downgrade_oplock = smb21_downgrade_oplock,
25139 + .need_neg = smb2_need_neg,
25140 + .negotiate = smb2_negotiate,
25141 + .negotiate_wsize = smb2_negotiate_wsize,
25142 +@@ -3732,6 +3752,7 @@ struct smb_version_operations smb30_operations = {
25143 + .get_credits = smb2_get_credits,
25144 + .wait_mtu_credits = smb2_wait_mtu_credits,
25145 + .get_next_mid = smb2_get_next_mid,
25146 ++ .revert_current_mid = smb2_revert_current_mid,
25147 + .read_data_offset = smb2_read_data_offset,
25148 + .read_data_length = smb2_read_data_length,
25149 + .map_error = map_smb2_to_linux_error,
25150 +@@ -3743,7 +3764,7 @@ struct smb_version_operations smb30_operations = {
25151 + .dump_share_caps = smb2_dump_share_caps,
25152 + .is_oplock_break = smb2_is_valid_oplock_break,
25153 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
25154 +- .downgrade_oplock = smb2_downgrade_oplock,
25155 ++ .downgrade_oplock = smb21_downgrade_oplock,
25156 + .need_neg = smb2_need_neg,
25157 + .negotiate = smb2_negotiate,
25158 + .negotiate_wsize = smb3_negotiate_wsize,
25159 +@@ -3837,6 +3858,7 @@ struct smb_version_operations smb311_operations = {
25160 + .get_credits = smb2_get_credits,
25161 + .wait_mtu_credits = smb2_wait_mtu_credits,
25162 + .get_next_mid = smb2_get_next_mid,
25163 ++ .revert_current_mid = smb2_revert_current_mid,
25164 + .read_data_offset = smb2_read_data_offset,
25165 + .read_data_length = smb2_read_data_length,
25166 + .map_error = map_smb2_to_linux_error,
25167 +@@ -3848,7 +3870,7 @@ struct smb_version_operations smb311_operations = {
25168 + .dump_share_caps = smb2_dump_share_caps,
25169 + .is_oplock_break = smb2_is_valid_oplock_break,
25170 + .handle_cancelled_mid = smb2_handle_cancelled_mid,
25171 +- .downgrade_oplock = smb2_downgrade_oplock,
25172 ++ .downgrade_oplock = smb21_downgrade_oplock,
25173 + .need_neg = smb2_need_neg,
25174 + .negotiate = smb2_negotiate,
25175 + .negotiate_wsize = smb3_negotiate_wsize,
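
smb2_revert_current_mid() gives back the mids a request consumed when the send never reaches the wire, keeping the client's CurrentMid in step with what the server actually saw. A userspace model of the allocate/revert pair (a pthread mutex standing in for GlobalMid_Lock; the exact allocation policy here is illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t mid_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t current_mid;

    /* Each request consumes as many mids as its credit charge
     * (minimum one). */
    static uint64_t get_next_mid(unsigned int credits)
    {
        uint64_t mid;

        pthread_mutex_lock(&mid_lock);
        mid = current_mid;
        current_mid += credits ? credits : 1;
        pthread_mutex_unlock(&mid_lock);
        return mid;
    }

    /* Undo an allocation whose request was never sent. */
    static void revert_current_mid(unsigned int credits)
    {
        pthread_mutex_lock(&mid_lock);
        if (current_mid >= credits)
            current_mid -= credits;
        pthread_mutex_unlock(&mid_lock);
    }

    int main(void)
    {
        uint64_t mid = get_next_mid(2);

        revert_current_mid(2);   /* pretend the send failed */
        printf("mid %llu, current back to %llu\n",
               (unsigned long long)mid, (unsigned long long)current_mid);
        return 0;
    }
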
25176 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
25177 +index 77b3aaa39b35..068febe37fe4 100644
25178 +--- a/fs/cifs/smb2pdu.c
25179 ++++ b/fs/cifs/smb2pdu.c
25180 +@@ -986,8 +986,14 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
25181 + rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
25182 + FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
25183 + (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
25184 +-
25185 +- if (rc != 0) {
25186 ++ if (rc == -EOPNOTSUPP) {
25187 ++ /*
25188 ++	 * Old Windows versions or a Netapp SMB server can return a
25189 ++	 * "not supported" error. The client should accept it.
25190 ++ */
25191 ++ cifs_dbg(VFS, "Server does not support validate negotiate\n");
25192 ++ return 0;
25193 ++ } else if (rc != 0) {
25194 + cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
25195 + rc = -EIO;
25196 + goto out_free_inbuf;
25197 +@@ -1605,9 +1611,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
25198 + iov[1].iov_base = unc_path;
25199 + iov[1].iov_len = unc_path_len;
25200 +
25201 +- /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
25202 ++ /*
25203 ++ * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
25204 ++	 * (Samba servers don't always set the flag so also check for a null user)
25205 ++ * (Samba servers don't always set the flag so also check if null user)
25206 ++ */
25207 + if ((ses->server->dialect == SMB311_PROT_ID) &&
25208 +- !smb3_encryption_required(tcon))
25209 ++ !smb3_encryption_required(tcon) &&
25210 ++ !(ses->session_flags &
25211 ++ (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
25212 ++ ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
25213 + req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
25214 +
25215 + memset(&rqst, 0, sizeof(struct smb_rqst));
25216 +@@ -1824,8 +1837,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
25217 + }
25218 +
25219 + static struct create_durable_v2 *
25220 +-create_durable_v2_buf(struct cifs_fid *pfid)
25221 ++create_durable_v2_buf(struct cifs_open_parms *oparms)
25222 + {
25223 ++ struct cifs_fid *pfid = oparms->fid;
25224 + struct create_durable_v2 *buf;
25225 +
25226 + buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
25227 +@@ -1839,7 +1853,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
25228 + (struct create_durable_v2, Name));
25229 + buf->ccontext.NameLength = cpu_to_le16(4);
25230 +
25231 +- buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
25232 ++ /*
25233 ++	 * NB: Handle timeout defaults to 0, which lets the server choose its
25234 ++	 * own default (most servers use 120 seconds); most clients also use 0.
25235 ++ * This can be overridden at mount ("handletimeout=") if the user wants
25236 ++ * a different persistent (or resilient) handle timeout for all opens
25237 ++ * opens on a particular SMB3 mount.
25238 ++	 * on a particular SMB3 mount.
25239 ++ buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
25240 + buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
25241 + generate_random_uuid(buf->dcontext.CreateGuid);
25242 + memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
25243 +@@ -1892,7 +1913,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
25244 + struct smb2_create_req *req = iov[0].iov_base;
25245 + unsigned int num = *num_iovec;
25246 +
25247 +- iov[num].iov_base = create_durable_v2_buf(oparms->fid);
25248 ++ iov[num].iov_base = create_durable_v2_buf(oparms);
25249 + if (iov[num].iov_base == NULL)
25250 + return -ENOMEM;
25251 + iov[num].iov_len = sizeof(struct create_durable_v2);
25252 +diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
25253 +index 7b351c65ee46..63264db78b89 100644
25254 +--- a/fs/cifs/smb2transport.c
25255 ++++ b/fs/cifs/smb2transport.c
25256 +@@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
25257 + struct TCP_Server_Info *server)
25258 + {
25259 + struct mid_q_entry *temp;
25260 ++ unsigned int credits = le16_to_cpu(shdr->CreditCharge);
25261 +
25262 + if (server == NULL) {
25263 + cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
25264 +@@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
25265 + memset(temp, 0, sizeof(struct mid_q_entry));
25266 + kref_init(&temp->refcount);
25267 + temp->mid = le64_to_cpu(shdr->MessageId);
25268 ++ temp->credits = credits > 0 ? credits : 1;
25269 + temp->pid = current->pid;
25270 + temp->command = shdr->Command; /* Always LE */
25271 + temp->when_alloc = jiffies;
25272 +@@ -674,13 +676,18 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
25273 + smb2_seq_num_into_buf(ses->server, shdr);
25274 +
25275 + rc = smb2_get_mid_entry(ses, shdr, &mid);
25276 +- if (rc)
25277 ++ if (rc) {
25278 ++ revert_current_mid_from_hdr(ses->server, shdr);
25279 + return ERR_PTR(rc);
25280 ++ }
25281 ++
25282 + rc = smb2_sign_rqst(rqst, ses->server);
25283 + if (rc) {
25284 ++ revert_current_mid_from_hdr(ses->server, shdr);
25285 + cifs_delete_mid(mid);
25286 + return ERR_PTR(rc);
25287 + }
25288 ++
25289 + return mid;
25290 + }
25291 +
25292 +@@ -695,11 +702,14 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
25293 + smb2_seq_num_into_buf(server, shdr);
25294 +
25295 + mid = smb2_mid_entry_alloc(shdr, server);
25296 +- if (mid == NULL)
25297 ++ if (mid == NULL) {
25298 ++ revert_current_mid_from_hdr(server, shdr);
25299 + return ERR_PTR(-ENOMEM);
25300 ++ }
25301 +
25302 + rc = smb2_sign_rqst(rqst, server);
25303 + if (rc) {
25304 ++ revert_current_mid_from_hdr(server, shdr);
25305 + DeleteMidQEntry(mid);
25306 + return ERR_PTR(rc);
25307 + }
25308 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
25309 +index 53532bd3f50d..9544eb99b5a2 100644
25310 +--- a/fs/cifs/transport.c
25311 ++++ b/fs/cifs/transport.c
25312 +@@ -647,6 +647,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
25313 + cifs_in_send_dec(server);
25314 +
25315 + if (rc < 0) {
25316 ++ revert_current_mid(server, mid->credits);
25317 + server->sequence_number -= 2;
25318 + cifs_delete_mid(mid);
25319 + }
25320 +@@ -868,6 +869,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
25321 + for (i = 0; i < num_rqst; i++) {
25322 + midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
25323 + if (IS_ERR(midQ[i])) {
25324 ++ revert_current_mid(ses->server, i);
25325 + for (j = 0; j < i; j++)
25326 + cifs_delete_mid(midQ[j]);
25327 + mutex_unlock(&ses->server->srv_mutex);
25328 +@@ -897,8 +899,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
25329 + for (i = 0; i < num_rqst; i++)
25330 + cifs_save_when_sent(midQ[i]);
25331 +
25332 +- if (rc < 0)
25333 ++ if (rc < 0) {
25334 ++ revert_current_mid(ses->server, num_rqst);
25335 + ses->server->sequence_number -= 2;
25336 ++ }
25337 +
25338 + mutex_unlock(&ses->server->srv_mutex);
25339 +
25340 +diff --git a/fs/dax.c b/fs/dax.c
25341 +index 6959837cc465..05cca2214ae3 100644
25342 +--- a/fs/dax.c
25343 ++++ b/fs/dax.c
25344 +@@ -843,9 +843,8 @@ unlock_pte:
25345 + static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
25346 + struct address_space *mapping, void *entry)
25347 + {
25348 +- unsigned long pfn;
25349 ++ unsigned long pfn, index, count;
25350 + long ret = 0;
25351 +- size_t size;
25352 +
25353 + /*
25354 + * A page got tagged dirty in DAX mapping? Something is seriously
25355 +@@ -894,17 +893,18 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
25356 + xas_unlock_irq(xas);
25357 +
25358 + /*
25359 +- * Even if dax_writeback_mapping_range() was given a wbc->range_start
25360 +- * in the middle of a PMD, the 'index' we are given will be aligned to
25361 +- * the start index of the PMD, as will the pfn we pull from 'entry'.
25362 ++ * If dax_writeback_mapping_range() was given a wbc->range_start
25363 ++ * in the middle of a PMD, the 'index' we use needs to be
25364 ++ * aligned to the start of the PMD.
25365 + * This allows us to flush for PMD_SIZE and not have to worry about
25366 + * partial PMD writebacks.
25367 + */
25368 + pfn = dax_to_pfn(entry);
25369 +- size = PAGE_SIZE << dax_entry_order(entry);
25370 ++ count = 1UL << dax_entry_order(entry);
25371 ++ index = xas->xa_index & ~(count - 1);
25372 +
25373 +- dax_entry_mkclean(mapping, xas->xa_index, pfn);
25374 +- dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
25375 ++ dax_entry_mkclean(mapping, index, pfn);
25376 ++ dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
25377 + /*
25378 + * After we have flushed the cache, we can clear the dirty tag. There
25379 + * cannot be new dirty data in the pfn after the flush has completed as
25380 +@@ -917,8 +917,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
25381 + xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
25382 + dax_wake_entry(xas, entry, false);
25383 +
25384 +- trace_dax_writeback_one(mapping->host, xas->xa_index,
25385 +- size >> PAGE_SHIFT);
25386 ++ trace_dax_writeback_one(mapping->host, index, count);
25387 + return ret;
25388 +
25389 + put_unlocked:
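
The replacement index arithmetic rounds the xarray index down to a multiple of the entry size, so a PMD entry is always flushed from its start. The computation in isolation (order 9 matching a 2MiB PMD of 4KiB pages):

    #include <stdio.h>

    /* The entry order gives the mapping size in pages (0 for a PTE,
     * 9 for a 2MiB PMD on x86-64). Masking with ~(count - 1) rounds
     * the index down to the start of the mapping. */
    static unsigned long pmd_aligned_index(unsigned long xa_index,
                                           unsigned int order)
    {
        unsigned long count = 1UL << order;

        return xa_index & ~(count - 1);
    }

    int main(void)
    {
        /* index 0x205 inside a 512-page PMD entry maps back to 0x200 */
        printf("0x%lx\n", pmd_aligned_index(0x205, 9));
        return 0;
    }
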
25390 +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
25391 +index c53814539070..553a3f3300ae 100644
25392 +--- a/fs/devpts/inode.c
25393 ++++ b/fs/devpts/inode.c
25394 +@@ -455,6 +455,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
25395 + s->s_blocksize_bits = 10;
25396 + s->s_magic = DEVPTS_SUPER_MAGIC;
25397 + s->s_op = &devpts_sops;
25398 ++ s->s_d_op = &simple_dentry_operations;
25399 + s->s_time_gran = 1;
25400 +
25401 + error = -ENOMEM;
25402 +diff --git a/fs/exec.c b/fs/exec.c
25403 +index fb72d36f7823..bcf383730bea 100644
25404 +--- a/fs/exec.c
25405 ++++ b/fs/exec.c
25406 +@@ -932,7 +932,7 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
25407 + bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
25408 + if (bytes < 0) {
25409 + ret = bytes;
25410 +- goto out;
25411 ++ goto out_free;
25412 + }
25413 +
25414 + if (bytes == 0)
25415 +diff --git a/fs/ext2/super.c b/fs/ext2/super.c
25416 +index 73b2d528237f..a9ea38182578 100644
25417 +--- a/fs/ext2/super.c
25418 ++++ b/fs/ext2/super.c
25419 +@@ -757,7 +757,8 @@ static loff_t ext2_max_size(int bits)
25420 + {
25421 + loff_t res = EXT2_NDIR_BLOCKS;
25422 + int meta_blocks;
25423 +- loff_t upper_limit;
25424 ++ unsigned int upper_limit;
25425 ++ unsigned int ppb = 1 << (bits-2);
25426 +
25427 + /* This is calculated to be the largest file size for a
25428 + * dense, file such that the total number of
25429 +@@ -771,24 +772,34 @@ static loff_t ext2_max_size(int bits)
25430 + /* total blocks in file system block size */
25431 + upper_limit >>= (bits - 9);
25432 +
25433 ++ /* Compute how many blocks we can address by block tree */
25434 ++ res += 1LL << (bits-2);
25435 ++ res += 1LL << (2*(bits-2));
25436 ++ res += 1LL << (3*(bits-2));
25437 ++ /* Does block tree limit file size? */
25438 ++ if (res < upper_limit)
25439 ++ goto check_lfs;
25440 +
25441 ++ res = upper_limit;
25442 ++ /* How many metadata blocks are needed for addressing upper_limit? */
25443 ++ upper_limit -= EXT2_NDIR_BLOCKS;
25444 + /* indirect blocks */
25445 + meta_blocks = 1;
25446 ++ upper_limit -= ppb;
25447 + /* double indirect blocks */
25448 +- meta_blocks += 1 + (1LL << (bits-2));
25449 +- /* tripple indirect blocks */
25450 +- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
25451 +-
25452 +- upper_limit -= meta_blocks;
25453 +- upper_limit <<= bits;
25454 +-
25455 +- res += 1LL << (bits-2);
25456 +- res += 1LL << (2*(bits-2));
25457 +- res += 1LL << (3*(bits-2));
25458 ++ if (upper_limit < ppb * ppb) {
25459 ++ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
25460 ++ res -= meta_blocks;
25461 ++ goto check_lfs;
25462 ++ }
25463 ++ meta_blocks += 1 + ppb;
25464 ++ upper_limit -= ppb * ppb;
25465 ++ /* triple indirect blocks for the rest */
25466 ++ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
25467 ++ DIV_ROUND_UP(upper_limit, ppb*ppb);
25468 ++ res -= meta_blocks;
25469 ++check_lfs:
25470 + res <<= bits;
25471 +- if (res > upper_limit)
25472 +- res = upper_limit;
25473 +-
25474 + if (res > MAX_LFS_FILESIZE)
25475 + res = MAX_LFS_FILESIZE;
25476 +
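
The rewritten ext2_max_size() first computes how many blocks the indirect tree itself can address, and only then asks whether the device size is the tighter bound. A standalone sketch of that first bound, assuming EXT2_NDIR_BLOCKS is 12 (its usual value) and 4-byte block pointers:

    #include <stdio.h>

    /* 12 direct slots, then single-, double- and triple-indirect trees
     * of (block_size / 4) pointers per block. */
    int main(void)
    {
        for (int bits = 10; bits <= 12; bits++) {        /* 1K..4K blocks */
            unsigned long long ppb = 1ULL << (bits - 2); /* pointers/block */
            unsigned long long blocks =
                12 + ppb + ppb * ppb + ppb * ppb * ppb;

            printf("%5u-byte blocks: %llu addressable blocks (~%llu GiB)\n",
                   1U << bits, blocks, (blocks << bits) >> 30);
        }
        return 0;
    }
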
25477 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
25478 +index 185a05d3257e..508a37ec9271 100644
25479 +--- a/fs/ext4/ext4.h
25480 ++++ b/fs/ext4/ext4.h
25481 +@@ -426,6 +426,9 @@ struct flex_groups {
25482 + /* Flags that are appropriate for non-directories/regular files. */
25483 + #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
25484 +
25485 ++/* The only flags that should be swapped */
25486 ++#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
25487 ++
25488 + /* Mask out flags that are inappropriate for the given type of inode. */
25489 + static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
25490 + {
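
EXT4_FL_SHOULD_SWAP confines the later flag swap in swap_inode_data() to the two bits that describe the block mapping. The masked-swap idiom on its own (mask assumed to be EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL, i.e. 0x40000 | 0x80000):

    #include <stdio.h>

    #define MASK 0x000C0000UL   /* assumed value of the SHOULD_SWAP bits */

    /* Exchange only the bits selected by MASK between *a and *b,
     * leaving every other flag where it was. */
    static void swap_masked(unsigned long *a, unsigned long *b)
    {
        unsigned long tmp = *a & MASK;

        *a = (*b & MASK) | (*a & ~MASK);
        *b = tmp | (*b & ~MASK);
    }

    int main(void)
    {
        unsigned long f1 = 0x000C0001UL, f2 = 0x00000002UL;

        swap_masked(&f1, &f2);
        printf("f1=%#lx f2=%#lx\n", f1, f2);   /* 0x1 and 0xc0002 */
        return 0;
    }
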
25491 +diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
25492 +index 15b6dd733780..df908ef79cce 100644
25493 +--- a/fs/ext4/ext4_jbd2.h
25494 ++++ b/fs/ext4/ext4_jbd2.h
25495 +@@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
25496 + {
25497 + struct ext4_inode_info *ei = EXT4_I(inode);
25498 +
25499 +- if (ext4_handle_valid(handle)) {
25500 ++ if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
25501 + ei->i_sync_tid = handle->h_transaction->t_tid;
25502 + if (datasync)
25503 + ei->i_datasync_tid = handle->h_transaction->t_tid;
25504 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
25505 +index 240b6dea5441..252bbbb5a2f4 100644
25506 +--- a/fs/ext4/extents.c
25507 ++++ b/fs/ext4/extents.c
25508 +@@ -2956,14 +2956,17 @@ again:
25509 + if (err < 0)
25510 + goto out;
25511 +
25512 +- } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
25513 ++ } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
25514 ++ partial.state == initial) {
25515 + /*
25516 +- * If there's an extent to the right its first cluster
25517 +- * contains the immediate right boundary of the
25518 +- * truncated/punched region. Set partial_cluster to
25519 +- * its negative value so it won't be freed if shared
25520 +- * with the current extent. The end < ee_block case
25521 +- * is handled in ext4_ext_rm_leaf().
25522 ++ * If we're punching, there's an extent to the right.
25523 ++ * If the partial cluster hasn't been set, set it to
25524 ++ * that extent's first cluster and its state to nofree
25525 ++ * so it won't be freed should it contain blocks to be
25526 ++ * removed. If it's already set (tofree/nofree), we're
25527 ++ * retrying and keep the original partial cluster info
25528 ++ * so a cluster marked tofree as a result of earlier
25529 ++ * extent removal is not lost.
25530 + */
25531 + lblk = ex_end + 1;
25532 + err = ext4_ext_search_right(inode, path, &lblk, &pblk,
25533 +diff --git a/fs/ext4/file.c b/fs/ext4/file.c
25534 +index 69d65d49837b..98ec11f69cd4 100644
25535 +--- a/fs/ext4/file.c
25536 ++++ b/fs/ext4/file.c
25537 +@@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
25538 + struct super_block *sb = inode->i_sb;
25539 + int blockmask = sb->s_blocksize - 1;
25540 +
25541 +- if (pos >= i_size_read(inode))
25542 ++ if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
25543 + return 0;
25544 +
25545 + if ((pos | iov_iter_alignment(from)) & blockmask)
25546 +diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
25547 +index bf7fa1507e81..e1801b288847 100644
25548 +--- a/fs/ext4/indirect.c
25549 ++++ b/fs/ext4/indirect.c
25550 +@@ -1219,6 +1219,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
25551 + ext4_lblk_t offsets[4], offsets2[4];
25552 + Indirect chain[4], chain2[4];
25553 + Indirect *partial, *partial2;
25554 ++ Indirect *p = NULL, *p2 = NULL;
25555 + ext4_lblk_t max_block;
25556 + __le32 nr = 0, nr2 = 0;
25557 + int n = 0, n2 = 0;
25558 +@@ -1260,7 +1261,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
25559 + }
25560 +
25561 +
25562 +- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
25563 ++ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
25564 + if (nr) {
25565 + if (partial == chain) {
25566 + /* Shared branch grows from the inode */
25567 +@@ -1285,13 +1286,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
25568 + partial->p + 1,
25569 + (__le32 *)partial->bh->b_data+addr_per_block,
25570 + (chain+n-1) - partial);
25571 +- BUFFER_TRACE(partial->bh, "call brelse");
25572 +- brelse(partial->bh);
25573 + partial--;
25574 + }
25575 +
25576 + end_range:
25577 +- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
25578 ++ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
25579 + if (nr2) {
25580 + if (partial2 == chain2) {
25581 + /*
25582 +@@ -1321,16 +1320,14 @@ end_range:
25583 + (__le32 *)partial2->bh->b_data,
25584 + partial2->p,
25585 + (chain2+n2-1) - partial2);
25586 +- BUFFER_TRACE(partial2->bh, "call brelse");
25587 +- brelse(partial2->bh);
25588 + partial2--;
25589 + }
25590 + goto do_indirects;
25591 + }
25592 +
25593 + /* Punch happened within the same level (n == n2) */
25594 +- partial = ext4_find_shared(inode, n, offsets, chain, &nr);
25595 +- partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
25596 ++ partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
25597 ++ partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
25598 +
25599 + /* Free top, but only if partial2 isn't its subtree. */
25600 + if (nr) {
25601 +@@ -1387,11 +1384,7 @@ end_range:
25602 + partial->p + 1,
25603 + partial2->p,
25604 + (chain+n-1) - partial);
25605 +- BUFFER_TRACE(partial->bh, "call brelse");
25606 +- brelse(partial->bh);
25607 +- BUFFER_TRACE(partial2->bh, "call brelse");
25608 +- brelse(partial2->bh);
25609 +- return 0;
25610 ++ goto cleanup;
25611 + }
25612 +
25613 + /*
25614 +@@ -1406,8 +1399,6 @@ end_range:
25615 + partial->p + 1,
25616 + (__le32 *)partial->bh->b_data+addr_per_block,
25617 + (chain+n-1) - partial);
25618 +- BUFFER_TRACE(partial->bh, "call brelse");
25619 +- brelse(partial->bh);
25620 + partial--;
25621 + }
25622 + if (partial2 > chain2 && depth2 <= depth) {
25623 +@@ -1415,11 +1406,21 @@ end_range:
25624 + (__le32 *)partial2->bh->b_data,
25625 + partial2->p,
25626 + (chain2+n2-1) - partial2);
25627 +- BUFFER_TRACE(partial2->bh, "call brelse");
25628 +- brelse(partial2->bh);
25629 + partial2--;
25630 + }
25631 + }
25632 ++
25633 ++cleanup:
25634 ++ while (p && p > chain) {
25635 ++ BUFFER_TRACE(p->bh, "call brelse");
25636 ++ brelse(p->bh);
25637 ++ p--;
25638 ++ }
25639 ++ while (p2 && p2 > chain2) {
25640 ++ BUFFER_TRACE(p2->bh, "call brelse");
25641 ++ brelse(p2->bh);
25642 ++ p2--;
25643 ++ }
25644 + return 0;
25645 +
25646 + do_indirects:
25647 +@@ -1427,7 +1428,7 @@ do_indirects:
25648 + switch (offsets[0]) {
25649 + default:
25650 + if (++n >= n2)
25651 +- return 0;
25652 ++ break;
25653 + nr = i_data[EXT4_IND_BLOCK];
25654 + if (nr) {
25655 + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
25656 +@@ -1435,7 +1436,7 @@ do_indirects:
25657 + }
25658 + case EXT4_IND_BLOCK:
25659 + if (++n >= n2)
25660 +- return 0;
25661 ++ break;
25662 + nr = i_data[EXT4_DIND_BLOCK];
25663 + if (nr) {
25664 + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
25665 +@@ -1443,7 +1444,7 @@ do_indirects:
25666 + }
25667 + case EXT4_DIND_BLOCK:
25668 + if (++n >= n2)
25669 +- return 0;
25670 ++ break;
25671 + nr = i_data[EXT4_TIND_BLOCK];
25672 + if (nr) {
25673 + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
25674 +@@ -1452,5 +1453,5 @@ do_indirects:
25675 + case EXT4_TIND_BLOCK:
25676 + ;
25677 + }
25678 +- return 0;
25679 ++ goto cleanup;
25680 + }
25681 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
25682 +index d37dafa1d133..2e76fb55d94a 100644
25683 +--- a/fs/ext4/ioctl.c
25684 ++++ b/fs/ext4/ioctl.c
25685 +@@ -63,18 +63,20 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
25686 + loff_t isize;
25687 + struct ext4_inode_info *ei1;
25688 + struct ext4_inode_info *ei2;
25689 ++ unsigned long tmp;
25690 +
25691 + ei1 = EXT4_I(inode1);
25692 + ei2 = EXT4_I(inode2);
25693 +
25694 + swap(inode1->i_version, inode2->i_version);
25695 +- swap(inode1->i_blocks, inode2->i_blocks);
25696 +- swap(inode1->i_bytes, inode2->i_bytes);
25697 + swap(inode1->i_atime, inode2->i_atime);
25698 + swap(inode1->i_mtime, inode2->i_mtime);
25699 +
25700 + memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
25701 +- swap(ei1->i_flags, ei2->i_flags);
25702 ++ tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
25703 ++ ei1->i_flags = (ei2->i_flags & EXT4_FL_SHOULD_SWAP) |
25704 ++ (ei1->i_flags & ~EXT4_FL_SHOULD_SWAP);
25705 ++ ei2->i_flags = tmp | (ei2->i_flags & ~EXT4_FL_SHOULD_SWAP);
25706 + swap(ei1->i_disksize, ei2->i_disksize);
25707 + ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
25708 + ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
25709 +@@ -115,28 +117,41 @@ static long swap_inode_boot_loader(struct super_block *sb,
25710 + int err;
25711 + struct inode *inode_bl;
25712 + struct ext4_inode_info *ei_bl;
25713 +-
25714 +- if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
25715 +- IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
25716 +- ext4_has_inline_data(inode))
25717 +- return -EINVAL;
25718 +-
25719 +- if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
25720 +- !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
25721 +- return -EPERM;
25722 ++ qsize_t size, size_bl, diff;
25723 ++ blkcnt_t blocks;
25724 ++ unsigned short bytes;
25725 +
25726 + inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
25727 + if (IS_ERR(inode_bl))
25728 + return PTR_ERR(inode_bl);
25729 + ei_bl = EXT4_I(inode_bl);
25730 +
25731 +- filemap_flush(inode->i_mapping);
25732 +- filemap_flush(inode_bl->i_mapping);
25733 +-
25734 + /* Protect orig inodes against a truncate and make sure,
25735 + * that only 1 swap_inode_boot_loader is running. */
25736 + lock_two_nondirectories(inode, inode_bl);
25737 +
25738 ++ if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
25739 ++ IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
25740 ++ ext4_has_inline_data(inode)) {
25741 ++ err = -EINVAL;
25742 ++ goto journal_err_out;
25743 ++ }
25744 ++
25745 ++ if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
25746 ++ !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
25747 ++ err = -EPERM;
25748 ++ goto journal_err_out;
25749 ++ }
25750 ++
25751 ++ down_write(&EXT4_I(inode)->i_mmap_sem);
25752 ++ err = filemap_write_and_wait(inode->i_mapping);
25753 ++ if (err)
25754 ++ goto err_out;
25755 ++
25756 ++ err = filemap_write_and_wait(inode_bl->i_mapping);
25757 ++ if (err)
25758 ++ goto err_out;
25759 ++
25760 + /* Wait for all existing dio workers */
25761 + inode_dio_wait(inode);
25762 + inode_dio_wait(inode_bl);
25763 +@@ -147,7 +162,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
25764 + handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
25765 + if (IS_ERR(handle)) {
25766 + err = -EINVAL;
25767 +- goto journal_err_out;
25768 ++ goto err_out;
25769 + }
25770 +
25771 + /* Protect extent tree against block allocations via delalloc */
25772 +@@ -170,6 +185,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
25773 + memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
25774 + }
25775 +
25776 ++ err = dquot_initialize(inode);
25777 ++ if (err)
25778 ++ goto err_out1;
25779 ++
25780 ++ size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
25781 ++ size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
25782 ++ diff = size - size_bl;
25783 + swap_inode_data(inode, inode_bl);
25784 +
25785 + inode->i_ctime = inode_bl->i_ctime = current_time(inode);
25786 +@@ -183,27 +205,51 @@ static long swap_inode_boot_loader(struct super_block *sb,
25787 +
25788 + err = ext4_mark_inode_dirty(handle, inode);
25789 + if (err < 0) {
25790 ++ /* No need to update quota information. */
25791 + ext4_warning(inode->i_sb,
25792 + "couldn't mark inode #%lu dirty (err %d)",
25793 + inode->i_ino, err);
25794 + /* Revert all changes: */
25795 + swap_inode_data(inode, inode_bl);
25796 + ext4_mark_inode_dirty(handle, inode);
25797 +- } else {
25798 +- err = ext4_mark_inode_dirty(handle, inode_bl);
25799 +- if (err < 0) {
25800 +- ext4_warning(inode_bl->i_sb,
25801 +- "couldn't mark inode #%lu dirty (err %d)",
25802 +- inode_bl->i_ino, err);
25803 +- /* Revert all changes: */
25804 +- swap_inode_data(inode, inode_bl);
25805 +- ext4_mark_inode_dirty(handle, inode);
25806 +- ext4_mark_inode_dirty(handle, inode_bl);
25807 +- }
25808 ++ goto err_out1;
25809 ++ }
25810 ++
25811 ++ blocks = inode_bl->i_blocks;
25812 ++ bytes = inode_bl->i_bytes;
25813 ++ inode_bl->i_blocks = inode->i_blocks;
25814 ++ inode_bl->i_bytes = inode->i_bytes;
25815 ++ err = ext4_mark_inode_dirty(handle, inode_bl);
25816 ++ if (err < 0) {
25817 ++ /* No need to update quota information. */
25818 ++ ext4_warning(inode_bl->i_sb,
25819 ++ "couldn't mark inode #%lu dirty (err %d)",
25820 ++ inode_bl->i_ino, err);
25821 ++ goto revert;
25822 ++ }
25823 ++
25824 ++ /* Bootloader inode should not be counted into quota information. */
25825 ++ if (diff > 0)
25826 ++ dquot_free_space(inode, diff);
25827 ++ else
25828 ++ err = dquot_alloc_space(inode, -1 * diff);
25829 ++
25830 ++ if (err < 0) {
25831 ++revert:
25832 ++ /* Revert all changes: */
25833 ++ inode_bl->i_blocks = blocks;
25834 ++ inode_bl->i_bytes = bytes;
25835 ++ swap_inode_data(inode, inode_bl);
25836 ++ ext4_mark_inode_dirty(handle, inode);
25837 ++ ext4_mark_inode_dirty(handle, inode_bl);
25838 + }
25839 ++
25840 ++err_out1:
25841 + ext4_journal_stop(handle);
25842 + ext4_double_up_write_data_sem(inode, inode_bl);
25843 +
25844 ++err_out:
25845 ++ up_write(&EXT4_I(inode)->i_mmap_sem);
25846 + journal_err_out:
25847 + unlock_two_nondirectories(inode, inode_bl);
25848 + iput(inode_bl);
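
The quota handling charges the byte delta between the two inodes to the regular inode, since the bootloader inode is not quota-accounted. The delta arithmetic on its own (i_blocks counts 512-byte units, i_bytes the sub-block remainder):

    #include <stdio.h>

    typedef long long qsize_t;

    static qsize_t inode_size(unsigned long long i_blocks,
                              unsigned int i_bytes)
    {
        return (qsize_t)i_blocks * 512 + i_bytes;
    }

    int main(void)
    {
        qsize_t size    = inode_size(24, 100); /* the regular inode    */
        qsize_t size_bl = inode_size(8, 0);    /* the bootloader inode */
        qsize_t diff    = size - size_bl;

        /* after the swap the inode shrank by 'diff' bytes, so quota is
         * released; a negative diff would be charged instead */
        if (diff > 0)
            printf("dquot_free_space(inode, %lld)\n", diff);
        else
            printf("dquot_alloc_space(inode, %lld)\n", -diff);
        return 0;
    }
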
25849 +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
25850 +index 48421de803b7..3d9b18505c0c 100644
25851 +--- a/fs/ext4/resize.c
25852 ++++ b/fs/ext4/resize.c
25853 +@@ -1960,7 +1960,8 @@ retry:
25854 + le16_to_cpu(es->s_reserved_gdt_blocks);
25855 + n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
25856 + n_blocks_count = (ext4_fsblk_t)n_group *
25857 +- EXT4_BLOCKS_PER_GROUP(sb);
25858 ++ EXT4_BLOCKS_PER_GROUP(sb) +
25859 ++ le32_to_cpu(es->s_first_data_block);
25860 + n_group--; /* set to last group number */
25861 + }
25862 +
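
The added s_first_data_block term matters on 1KiB-block filesystems, where block 0 is reserved and the first group starts at block 1; without it the computed resize target sits one block below a group boundary. A worked check:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long blocks_per_group = 8192; /* 1KiB blocks      */
        unsigned long long first_data_block = 1;    /* 0 on >=2K blocks */
        unsigned long long n_group = 4;

        /* Without the + first_data_block term the resize target lands
         * one block short of a full group boundary. */
        unsigned long long old   = n_group * blocks_per_group;
        unsigned long long fixed = n_group * blocks_per_group +
                                   first_data_block;

        printf("old=%llu fixed=%llu\n", old, fixed);
        return 0;
    }
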
25863 +diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
25864 +index 1cb0fcc67d2d..caf77fe8ac07 100644
25865 +--- a/fs/f2fs/extent_cache.c
25866 ++++ b/fs/f2fs/extent_cache.c
25867 +@@ -506,7 +506,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
25868 + unsigned int end = fofs + len;
25869 + unsigned int pos = (unsigned int)fofs;
25870 + bool updated = false;
25871 +- bool leftmost;
25872 ++ bool leftmost = false;
25873 +
25874 + if (!et)
25875 + return;
25876 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
25877 +index 12fabd6735dd..279bc00489cc 100644
25878 +--- a/fs/f2fs/f2fs.h
25879 ++++ b/fs/f2fs/f2fs.h
25880 +@@ -456,7 +456,6 @@ struct f2fs_flush_device {
25881 +
25882 + /* for inline stuff */
25883 + #define DEF_INLINE_RESERVED_SIZE 1
25884 +-#define DEF_MIN_INLINE_SIZE 1
25885 + static inline int get_extra_isize(struct inode *inode);
25886 + static inline int get_inline_xattr_addrs(struct inode *inode);
25887 + #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
25888 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
25889 +index bba56b39dcc5..ae2b45e75847 100644
25890 +--- a/fs/f2fs/file.c
25891 ++++ b/fs/f2fs/file.c
25892 +@@ -1750,10 +1750,12 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
25893 +
25894 + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
25895 +
25896 +- if (!get_dirty_pages(inode))
25897 +- goto skip_flush;
25898 +-
25899 +- f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
25900 ++ /*
25901 ++	 * Should wait for end_io to count F2FS_WB_CP_DATA correctly by
25902 ++ * f2fs_is_atomic_file.
25903 ++ */
25904 ++ if (get_dirty_pages(inode))
25905 ++ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
25906 + "Unexpected flush for atomic writes: ino=%lu, npages=%u",
25907 + inode->i_ino, get_dirty_pages(inode));
25908 + ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
25909 +@@ -1761,7 +1763,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
25910 + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
25911 + goto out;
25912 + }
25913 +-skip_flush:
25914 ++
25915 + set_inode_flag(inode, FI_ATOMIC_FILE);
25916 + clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
25917 + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
25918 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
25919 +index d636cbcf68f2..aacbb864ec1e 100644
25920 +--- a/fs/f2fs/inline.c
25921 ++++ b/fs/f2fs/inline.c
25922 +@@ -659,6 +659,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
25923 + if (IS_ERR(ipage))
25924 + return PTR_ERR(ipage);
25925 +
25926 ++ /*
25927 ++	 * f2fs_readdir is protected by inode.i_rwsem, so it is safe to access
25928 ++	 * ipage without the page's lock held.
25929 ++ */
25930 ++ unlock_page(ipage);
25931 ++
25932 + inline_dentry = inline_data_addr(inode, ipage);
25933 +
25934 + make_dentry_ptr_inline(inode, &d, inline_dentry);
25935 +@@ -667,7 +673,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
25936 + if (!err)
25937 + ctx->pos = d.max;
25938 +
25939 +- f2fs_put_page(ipage, 1);
25940 ++ f2fs_put_page(ipage, 0);
25941 + return err < 0 ? err : 0;
25942 + }
25943 +
25944 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
25945 +index 9b79056d705d..e1b1d390b329 100644
25946 +--- a/fs/f2fs/segment.c
25947 ++++ b/fs/f2fs/segment.c
25948 +@@ -215,7 +215,8 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
25949 + }
25950 +
25951 + static int __revoke_inmem_pages(struct inode *inode,
25952 +- struct list_head *head, bool drop, bool recover)
25953 ++ struct list_head *head, bool drop, bool recover,
25954 ++ bool trylock)
25955 + {
25956 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
25957 + struct inmem_pages *cur, *tmp;
25958 +@@ -227,7 +228,16 @@ static int __revoke_inmem_pages(struct inode *inode,
25959 + if (drop)
25960 + trace_f2fs_commit_inmem_page(page, INMEM_DROP);
25961 +
25962 +- lock_page(page);
25963 ++ if (trylock) {
25964 ++ /*
25965 ++			 * to avoid a deadlock between the page lock and
25966 ++			 * inmem_lock.
25967 ++ */
25968 ++ if (!trylock_page(page))
25969 ++ continue;
25970 ++ } else {
25971 ++ lock_page(page);
25972 ++ }
25973 +
25974 + f2fs_wait_on_page_writeback(page, DATA, true, true);
25975 +
25976 +@@ -318,13 +328,19 @@ void f2fs_drop_inmem_pages(struct inode *inode)
25977 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
25978 + struct f2fs_inode_info *fi = F2FS_I(inode);
25979 +
25980 +- mutex_lock(&fi->inmem_lock);
25981 +- __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
25982 +- spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
25983 +- if (!list_empty(&fi->inmem_ilist))
25984 +- list_del_init(&fi->inmem_ilist);
25985 +- spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
25986 +- mutex_unlock(&fi->inmem_lock);
25987 ++ while (!list_empty(&fi->inmem_pages)) {
25988 ++ mutex_lock(&fi->inmem_lock);
25989 ++ __revoke_inmem_pages(inode, &fi->inmem_pages,
25990 ++ true, false, true);
25991 ++
25992 ++ if (list_empty(&fi->inmem_pages)) {
25993 ++ spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
25994 ++ if (!list_empty(&fi->inmem_ilist))
25995 ++ list_del_init(&fi->inmem_ilist);
25996 ++ spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
25997 ++ }
25998 ++ mutex_unlock(&fi->inmem_lock);
25999 ++ }
26000 +
26001 + clear_inode_flag(inode, FI_ATOMIC_FILE);
26002 + fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
26003 +@@ -429,12 +445,15 @@ retry:
26004 + * recovery or rewrite & commit last transaction. For other
26005 + * error number, revoking was done by filesystem itself.
26006 + */
26007 +- err = __revoke_inmem_pages(inode, &revoke_list, false, true);
26008 ++ err = __revoke_inmem_pages(inode, &revoke_list,
26009 ++ false, true, false);
26010 +
26011 + /* drop all uncommitted pages */
26012 +- __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
26013 ++ __revoke_inmem_pages(inode, &fi->inmem_pages,
26014 ++ true, false, false);
26015 + } else {
26016 +- __revoke_inmem_pages(inode, &revoke_list, false, false);
26017 ++ __revoke_inmem_pages(inode, &revoke_list,
26018 ++ false, false, false);
26019 + }
26020 +
26021 + return err;
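
The trylock variant exists because this caller holds inmem_lock while other paths take the page lock first; blocking here could deadlock against a thread holding the page lock and waiting on inmem_lock. A compact model of the escape hatch (pthread mutexes standing in for the two kernel locks):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t inmem_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t page_lock  = PTHREAD_MUTEX_INITIALIZER;

    /* With inmem_lock held, never block on page_lock: skip the page and
     * let the caller loop again, rather than inverting the lock order. */
    static int revoke_one_page(void)
    {
        int done = 0;

        pthread_mutex_lock(&inmem_lock);
        if (pthread_mutex_trylock(&page_lock) == 0) {
            /* ... revoke the in-memory page here ... */
            done = 1;
            pthread_mutex_unlock(&page_lock);
        }
        pthread_mutex_unlock(&inmem_lock);
        return done;
    }

    int main(void)
    {
        while (!revoke_one_page())
            ;   /* retry, as f2fs_drop_inmem_pages() now does */
        printf("revoked\n");
        return 0;
    }
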
26022 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
26023 +index c46a1d4318d4..5892fa3c885f 100644
26024 +--- a/fs/f2fs/super.c
26025 ++++ b/fs/f2fs/super.c
26026 +@@ -834,12 +834,13 @@ static int parse_options(struct super_block *sb, char *options)
26027 + "set with inline_xattr option");
26028 + return -EINVAL;
26029 + }
26030 +- if (!F2FS_OPTION(sbi).inline_xattr_size ||
26031 +- F2FS_OPTION(sbi).inline_xattr_size >=
26032 +- DEF_ADDRS_PER_INODE -
26033 +- F2FS_TOTAL_EXTRA_ATTR_SIZE -
26034 +- DEF_INLINE_RESERVED_SIZE -
26035 +- DEF_MIN_INLINE_SIZE) {
26036 ++ if (F2FS_OPTION(sbi).inline_xattr_size <
26037 ++ sizeof(struct f2fs_xattr_header) / sizeof(__le32) ||
26038 ++ F2FS_OPTION(sbi).inline_xattr_size >
26039 ++ DEF_ADDRS_PER_INODE -
26040 ++ F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -
26041 ++ DEF_INLINE_RESERVED_SIZE -
26042 ++ MIN_INLINE_DENTRY_SIZE / sizeof(__le32)) {
26043 + f2fs_msg(sb, KERN_ERR,
26044 + "inline xattr size is out of range");
26045 + return -EINVAL;
26046 +@@ -915,6 +916,10 @@ static int f2fs_drop_inode(struct inode *inode)
26047 + sb_start_intwrite(inode->i_sb);
26048 + f2fs_i_size_write(inode, 0);
26049 +
26050 ++ f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
26051 ++ inode, NULL, 0, DATA);
26052 ++ truncate_inode_pages_final(inode->i_mapping);
26053 ++
26054 + if (F2FS_HAS_BLOCKS(inode))
26055 + f2fs_truncate(inode);
26056 +
26057 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
26058 +index 0575edbe3ed6..f1ab9000b294 100644
26059 +--- a/fs/f2fs/sysfs.c
26060 ++++ b/fs/f2fs/sysfs.c
26061 +@@ -278,10 +278,16 @@ out:
26062 + return count;
26063 + }
26064 +
26065 +- *ui = t;
26066 +
26067 +- if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
26068 +- f2fs_reset_iostat(sbi);
26069 ++ if (!strcmp(a->attr.name, "iostat_enable")) {
26070 ++ sbi->iostat_enable = !!t;
26071 ++ if (!sbi->iostat_enable)
26072 ++ f2fs_reset_iostat(sbi);
26073 ++ return count;
26074 ++ }
26075 ++
26076 ++ *ui = (unsigned int)t;
26077 ++
26078 + return count;
26079 + }
26080 +
26081 +diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
26082 +index ce2a5eb210b6..d0ab533a9ce8 100644
26083 +--- a/fs/f2fs/trace.c
26084 ++++ b/fs/f2fs/trace.c
26085 +@@ -14,7 +14,7 @@
26086 + #include "trace.h"
26087 +
26088 + static RADIX_TREE(pids, GFP_ATOMIC);
26089 +-static struct mutex pids_lock;
26090 ++static spinlock_t pids_lock;
26091 + static struct last_io_info last_io;
26092 +
26093 + static inline void __print_last_io(void)
26094 +@@ -58,23 +58,29 @@ void f2fs_trace_pid(struct page *page)
26095 +
26096 + set_page_private(page, (unsigned long)pid);
26097 +
26098 ++retry:
26099 + if (radix_tree_preload(GFP_NOFS))
26100 + return;
26101 +
26102 +- mutex_lock(&pids_lock);
26103 ++ spin_lock(&pids_lock);
26104 + p = radix_tree_lookup(&pids, pid);
26105 + if (p == current)
26106 + goto out;
26107 + if (p)
26108 + radix_tree_delete(&pids, pid);
26109 +
26110 +- f2fs_radix_tree_insert(&pids, pid, current);
26111 ++ if (radix_tree_insert(&pids, pid, current)) {
26112 ++ spin_unlock(&pids_lock);
26113 ++ radix_tree_preload_end();
26114 ++ cond_resched();
26115 ++ goto retry;
26116 ++ }
26117 +
26118 + trace_printk("%3x:%3x %4x %-16s\n",
26119 + MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
26120 + pid, current->comm);
26121 + out:
26122 +- mutex_unlock(&pids_lock);
26123 ++ spin_unlock(&pids_lock);
26124 + radix_tree_preload_end();
26125 + }
26126 +
26127 +@@ -119,7 +125,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
26128 +
26129 + void f2fs_build_trace_ios(void)
26130 + {
26131 +- mutex_init(&pids_lock);
26132 ++ spin_lock_init(&pids_lock);
26133 + }
26134 +
26135 + #define PIDVEC_SIZE 128
26136 +@@ -147,7 +153,7 @@ void f2fs_destroy_trace_ios(void)
26137 + pid_t next_pid = 0;
26138 + unsigned int found;
26139 +
26140 +- mutex_lock(&pids_lock);
26141 ++ spin_lock(&pids_lock);
26142 + while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
26143 + unsigned idx;
26144 +
26145 +@@ -155,5 +161,5 @@ void f2fs_destroy_trace_ios(void)
26146 + for (idx = 0; idx < found; idx++)
26147 + radix_tree_delete(&pids, pid[idx]);
26148 + }
26149 +- mutex_unlock(&pids_lock);
26150 ++ spin_unlock(&pids_lock);
26151 + }
26152 +diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
26153 +index 18d5ffbc5e8c..73b92985198b 100644
26154 +--- a/fs/f2fs/xattr.c
26155 ++++ b/fs/f2fs/xattr.c
26156 +@@ -224,11 +224,11 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
26157 + {
26158 + struct f2fs_xattr_entry *entry;
26159 + unsigned int inline_size = inline_xattr_size(inode);
26160 ++ void *max_addr = base_addr + inline_size;
26161 +
26162 + list_for_each_xattr(entry, base_addr) {
26163 +- if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
26164 +- (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
26165 +- base_addr + inline_size) {
26166 ++ if ((void *)entry + sizeof(__u32) > max_addr ||
26167 ++ (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
26168 + *last_addr = entry;
26169 + return NULL;
26170 + }
26171 +@@ -239,6 +239,13 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
26172 + if (!memcmp(entry->e_name, name, len))
26173 + break;
26174 + }
26175 ++
26176 ++ /* inline xattr header or entry crosses the max inline xattr size */
26177 ++ if (IS_XATTR_LAST_ENTRY(entry) &&
26178 ++ (void *)entry + sizeof(__u32) > max_addr) {
26179 ++ *last_addr = entry;
26180 ++ return NULL;
26181 ++ }
26182 + return entry;
26183 + }
26184 +
26185 +diff --git a/fs/file.c b/fs/file.c
26186 +index 3209ee271c41..a10487aa0a84 100644
26187 +--- a/fs/file.c
26188 ++++ b/fs/file.c
26189 +@@ -457,6 +457,7 @@ struct files_struct init_files = {
26190 + .full_fds_bits = init_files.full_fds_bits_init,
26191 + },
26192 + .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
26193 ++ .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
26194 + };
26195 +
26196 + static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
26197 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
26198 +index b92740edc416..4b038f25f256 100644
26199 +--- a/fs/gfs2/glock.c
26200 ++++ b/fs/gfs2/glock.c
26201 +@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
26202 +
26203 + static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
26204 + {
26205 +- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
26206 ++ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
26207 +
26208 + return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
26209 + }
26210 +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
26211 +index 2eb55c3361a8..efd0ce9489ae 100644
26212 +--- a/fs/jbd2/commit.c
26213 ++++ b/fs/jbd2/commit.c
26214 +@@ -694,9 +694,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
26215 + the last tag we set up. */
26216 +
26217 + tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
26218 +-
26219 +- jbd2_descriptor_block_csum_set(journal, descriptor);
26220 + start_journal_io:
26221 ++ if (descriptor)
26222 ++ jbd2_descriptor_block_csum_set(journal,
26223 ++ descriptor);
26224 ++
26225 + for (i = 0; i < bufs; i++) {
26226 + struct buffer_head *bh = wbuf[i];
26227 + /*
26228 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
26229 +index 8ef6b6daaa7a..88f2a49338a1 100644
26230 +--- a/fs/jbd2/journal.c
26231 ++++ b/fs/jbd2/journal.c
26232 +@@ -1356,6 +1356,10 @@ static int journal_reset(journal_t *journal)
26233 + return jbd2_journal_start_thread(journal);
26234 + }
26235 +
26236 ++/*
26237 ++ * This function expects that the caller will have locked the journal
26238 ++ * buffer head, and will return with it unlocked
26239 ++ */
26240 + static int jbd2_write_superblock(journal_t *journal, int write_flags)
26241 + {
26242 + struct buffer_head *bh = journal->j_sb_buffer;
26243 +@@ -1365,7 +1369,6 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
26244 + trace_jbd2_write_superblock(journal, write_flags);
26245 + if (!(journal->j_flags & JBD2_BARRIER))
26246 + write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
26247 +- lock_buffer(bh);
26248 + if (buffer_write_io_error(bh)) {
26249 + /*
26250 + * Oh, dear. A previous attempt to write the journal
26251 +@@ -1424,6 +1427,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
26252 + jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
26253 + tail_block, tail_tid);
26254 +
26255 ++ lock_buffer(journal->j_sb_buffer);
26256 + sb->s_sequence = cpu_to_be32(tail_tid);
26257 + sb->s_start = cpu_to_be32(tail_block);
26258 +
26259 +@@ -1454,18 +1458,17 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
26260 + journal_superblock_t *sb = journal->j_superblock;
26261 +
26262 + BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
26263 +- read_lock(&journal->j_state_lock);
26264 +- /* Is it already empty? */
26265 +- if (sb->s_start == 0) {
26266 +- read_unlock(&journal->j_state_lock);
26267 ++ lock_buffer(journal->j_sb_buffer);
26268 ++ if (sb->s_start == 0) { /* Is it already empty? */
26269 ++ unlock_buffer(journal->j_sb_buffer);
26270 + return;
26271 + }
26272 ++
26273 + jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
26274 + journal->j_tail_sequence);
26275 +
26276 + sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
26277 + sb->s_start = cpu_to_be32(0);
26278 +- read_unlock(&journal->j_state_lock);
26279 +
26280 + jbd2_write_superblock(journal, write_op);
26281 +
26282 +@@ -1488,9 +1491,8 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
26283 + journal_superblock_t *sb = journal->j_superblock;
26284 + int errcode;
26285 +
26286 +- read_lock(&journal->j_state_lock);
26287 ++ lock_buffer(journal->j_sb_buffer);
26288 + errcode = journal->j_errno;
26289 +- read_unlock(&journal->j_state_lock);
26290 + if (errcode == -ESHUTDOWN)
26291 + errcode = 0;
26292 + jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
26293 +@@ -1894,28 +1896,27 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
26294 +
26295 + sb = journal->j_superblock;
26296 +
26297 ++ /* Load the checksum driver if necessary */
26298 ++ if ((journal->j_chksum_driver == NULL) &&
26299 ++ INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
26300 ++ journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
26301 ++ if (IS_ERR(journal->j_chksum_driver)) {
26302 ++ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
26303 ++ journal->j_chksum_driver = NULL;
26304 ++ return 0;
26305 ++ }
26306 ++ /* Precompute checksum seed for all metadata */
26307 ++ journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
26308 ++ sizeof(sb->s_uuid));
26309 ++ }
26310 ++
26311 ++ lock_buffer(journal->j_sb_buffer);
26312 ++
26313 + /* If enabling v3 checksums, update superblock */
26314 + if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
26315 + sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
26316 + sb->s_feature_compat &=
26317 + ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
26318 +-
26319 +- /* Load the checksum driver */
26320 +- if (journal->j_chksum_driver == NULL) {
26321 +- journal->j_chksum_driver = crypto_alloc_shash("crc32c",
26322 +- 0, 0);
26323 +- if (IS_ERR(journal->j_chksum_driver)) {
26324 +- printk(KERN_ERR "JBD2: Cannot load crc32c "
26325 +- "driver.\n");
26326 +- journal->j_chksum_driver = NULL;
26327 +- return 0;
26328 +- }
26329 +-
26330 +- /* Precompute checksum seed for all metadata */
26331 +- journal->j_csum_seed = jbd2_chksum(journal, ~0,
26332 +- sb->s_uuid,
26333 +- sizeof(sb->s_uuid));
26334 +- }
26335 + }
26336 +
26337 + /* If enabling v1 checksums, downgrade superblock */
26338 +@@ -1927,6 +1928,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
26339 + sb->s_feature_compat |= cpu_to_be32(compat);
26340 + sb->s_feature_ro_compat |= cpu_to_be32(ro);
26341 + sb->s_feature_incompat |= cpu_to_be32(incompat);
26342 ++ unlock_buffer(journal->j_sb_buffer);
26343 +
26344 + return 1;
26345 + #undef COMPAT_FEATURE_ON
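
The journal.c changes above establish a contract: each caller of jbd2_write_superblock() takes lock_buffer() on j_sb_buffer itself, updates the superblock fields under that lock, and the write routine is responsible for unlocking. A heavily simplified sketch of that "caller locks, callee unlocks" shape (hypothetical helpers; in the real code the unlock happens via the I/O submission path):

    static void example_write_super(struct buffer_head *bh)
    {
            /* bh must be locked on entry */
            /* ... compute checksum, submit I/O ... */
            unlock_buffer(bh);              /* contract: unlocked on return */
    }

    static void example_update_super(struct buffer_head *bh)
    {
            lock_buffer(bh);                /* serializes concurrent updates */
            /* ... modify superblock fields under the lock ... */
            example_write_super(bh);        /* consumes the lock */
    }

Keeping all field updates under the buffer lock is what lets the patch drop the j_state_lock read-locking in jbd2_mark_journal_empty() and jbd2_journal_update_sb_errno().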
26346 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
26347 +index cc35537232f2..f0d8dabe1ff5 100644
26348 +--- a/fs/jbd2/transaction.c
26349 ++++ b/fs/jbd2/transaction.c
26350 +@@ -1252,11 +1252,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
26351 + struct journal_head *jh;
26352 + char *committed_data = NULL;
26353 +
26354 +- JBUFFER_TRACE(jh, "entry");
26355 + if (jbd2_write_access_granted(handle, bh, true))
26356 + return 0;
26357 +
26358 + jh = jbd2_journal_add_journal_head(bh);
26359 ++ JBUFFER_TRACE(jh, "entry");
26360 ++
26361 + /*
26362 + * Do this first --- it can drop the journal lock, so we want to
26363 + * make sure that obtaining the committed_data is done
26364 +@@ -1367,15 +1368,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
26365 +
26366 + if (is_handle_aborted(handle))
26367 + return -EROFS;
26368 +- if (!buffer_jbd(bh)) {
26369 +- ret = -EUCLEAN;
26370 +- goto out;
26371 +- }
26372 ++ if (!buffer_jbd(bh))
26373 ++ return -EUCLEAN;
26374 ++
26375 + /*
26376 + * We don't grab jh reference here since the buffer must be part
26377 + * of the running transaction.
26378 + */
26379 + jh = bh2jh(bh);
26380 ++ jbd_debug(5, "journal_head %p\n", jh);
26381 ++ JBUFFER_TRACE(jh, "entry");
26382 ++
26383 + /*
26384 + * This and the following assertions are unreliable since we may see jh
26385 + * in inconsistent state unless we grab bh_state lock. But this is
26386 +@@ -1409,9 +1412,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
26387 + }
26388 +
26389 + journal = transaction->t_journal;
26390 +- jbd_debug(5, "journal_head %p\n", jh);
26391 +- JBUFFER_TRACE(jh, "entry");
26392 +-
26393 + jbd_lock_bh_state(bh);
26394 +
26395 + if (jh->b_modified == 0) {
26396 +@@ -1609,14 +1609,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
26397 + /* However, if the buffer is still owned by a prior
26398 + * (committing) transaction, we can't drop it yet... */
26399 + JBUFFER_TRACE(jh, "belongs to older transaction");
26400 +- /* ... but we CAN drop it from the new transaction if we
26401 +- * have also modified it since the original commit. */
26402 ++ /* ... but we CAN drop it from the new transaction by
26403 ++ * marking the buffer as freed and setting j_next_transaction
26404 ++ * to the new transaction, so that not only does the commit
26405 ++ * code know it should clear dirty bits when it is done with
26406 ++ * the buffer, but the buffer can also be checkpointed only
26407 ++ * after the new transaction commits. */
26408 +
26409 +- if (jh->b_next_transaction) {
26410 +- J_ASSERT(jh->b_next_transaction == transaction);
26411 ++ set_buffer_freed(bh);
26412 ++
26413 ++ if (!jh->b_next_transaction) {
26414 + spin_lock(&journal->j_list_lock);
26415 +- jh->b_next_transaction = NULL;
26416 ++ jh->b_next_transaction = transaction;
26417 + spin_unlock(&journal->j_list_lock);
26418 ++ } else {
26419 ++ J_ASSERT(jh->b_next_transaction == transaction);
26420 +
26421 + /*
26422 + * only drop a reference if this transaction modified
26423 +diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
26424 +index fdf527b6d79c..d71c9405874a 100644
26425 +--- a/fs/kernfs/mount.c
26426 ++++ b/fs/kernfs/mount.c
26427 +@@ -196,8 +196,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
26428 + return dentry;
26429 +
26430 + knparent = find_next_ancestor(kn, NULL);
26431 +- if (WARN_ON(!knparent))
26432 ++ if (WARN_ON(!knparent)) {
26433 ++ dput(dentry);
26434 + return ERR_PTR(-EINVAL);
26435 ++ }
26436 +
26437 + do {
26438 + struct dentry *dtmp;
26439 +@@ -206,8 +208,10 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
26440 + if (kn == knparent)
26441 + return dentry;
26442 + kntmp = find_next_ancestor(kn, knparent);
26443 +- if (WARN_ON(!kntmp))
26444 ++ if (WARN_ON(!kntmp)) {
26445 ++ dput(dentry);
26446 + return ERR_PTR(-EINVAL);
26447 ++ }
26448 + dtmp = lookup_one_len_unlocked(kntmp->name, dentry,
26449 + strlen(kntmp->name));
26450 + dput(dentry);
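
Both kernfs hunks fix the same leak shape: an error path returning without dropping a dentry reference taken earlier. A minimal hypothetical illustration of the rule being enforced:

    /* Sketch only: every early return must undo the dget(). */
    static struct dentry *example_lookup(struct dentry *root, bool ok)
    {
            struct dentry *dentry = dget(root);     /* take a reference */

            if (WARN_ON(!ok)) {
                    dput(dentry);           /* was leaked before the fix */
                    return ERR_PTR(-EINVAL);
            }
            return dentry;                  /* caller now owns the ref */
    }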
26451 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
26452 +index 93fb7cf0b92b..f0b5c987d6ae 100644
26453 +--- a/fs/lockd/host.c
26454 ++++ b/fs/lockd/host.c
26455 +@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
26456 +
26457 + WARN_ON_ONCE(host->h_server);
26458 +
26459 +- if (refcount_dec_and_test(&host->h_count)) {
26460 ++ if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
26461 + WARN_ON_ONCE(!list_empty(&host->h_lockowners));
26462 + WARN_ON_ONCE(!list_empty(&host->h_granted));
26463 + WARN_ON_ONCE(!list_empty(&host->h_reclaim));
26464 +
26465 +- mutex_lock(&nlm_host_mutex);
26466 + nlm_destroy_host_locked(host);
26467 + mutex_unlock(&nlm_host_mutex);
26468 + }
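
The lockd fix closes a classic window: decrementing the refcount first and taking nlm_host_mutex afterwards leaves a gap in which another CPU can look the host up and revive it while it is being destroyed. refcount_dec_and_mutex_lock() acquires the mutex only when the count actually reaches zero, atomically with respect to concurrent lookups. A hedged sketch with a hypothetical object type (assuming <linux/refcount.h> and <linux/mutex.h>):

    struct obj {
            refcount_t refs;
            struct list_head node;          /* linked on a lookup list */
    };

    static DEFINE_MUTEX(obj_mutex);         /* protects the lookup list */

    static void obj_put(struct obj *o)
    {
            /* Returns true, with obj_mutex held, only when refs hit 0. */
            if (refcount_dec_and_mutex_lock(&o->refs, &obj_mutex)) {
                    list_del(&o->node);     /* nobody can find it now */
                    mutex_unlock(&obj_mutex);
                    kfree(o);
            }
    }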
26469 +diff --git a/fs/locks.c b/fs/locks.c
26470 +index ff6af2c32601..5f468cd95f68 100644
26471 +--- a/fs/locks.c
26472 ++++ b/fs/locks.c
26473 +@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
26474 + */
26475 + error = -EDEADLK;
26476 + spin_lock(&blocked_lock_lock);
26477 ++ /*
26478 ++ * Ensure that we don't find any locks blocked on this
26479 ++ * request during deadlock detection.
26480 ++ */
26481 ++ __locks_wake_up_blocks(request);
26482 + if (likely(!posix_locks_deadlock(request, fl))) {
26483 + error = FILE_LOCK_DEFERRED;
26484 + __locks_insert_block(fl, request,
26485 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
26486 +index 557a5d636183..44258c516305 100644
26487 +--- a/fs/nfs/nfs4proc.c
26488 ++++ b/fs/nfs/nfs4proc.c
26489 +@@ -947,6 +947,13 @@ nfs4_sequence_process_interrupted(struct nfs_client *client,
26490 +
26491 + #endif /* !CONFIG_NFS_V4_1 */
26492 +
26493 ++static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
26494 ++{
26495 ++ res->sr_timestamp = jiffies;
26496 ++ res->sr_status_flags = 0;
26497 ++ res->sr_status = 1;
26498 ++}
26499 ++
26500 + static
26501 + void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
26502 + struct nfs4_sequence_res *res,
26503 +@@ -958,10 +965,6 @@ void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
26504 + args->sa_slot = slot;
26505 +
26506 + res->sr_slot = slot;
26507 +- res->sr_timestamp = jiffies;
26508 +- res->sr_status_flags = 0;
26509 +- res->sr_status = 1;
26510 +-
26511 + }
26512 +
26513 + int nfs4_setup_sequence(struct nfs_client *client,
26514 +@@ -1007,6 +1010,7 @@ int nfs4_setup_sequence(struct nfs_client *client,
26515 +
26516 + trace_nfs4_setup_sequence(session, args);
26517 + out_start:
26518 ++ nfs41_sequence_res_init(res);
26519 + rpc_call_start(task);
26520 + return 0;
26521 +
26522 +@@ -2934,7 +2938,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
26523 + }
26524 +
26525 + out:
26526 +- nfs4_sequence_free_slot(&opendata->o_res.seq_res);
26527 ++ if (!opendata->cancelled)
26528 ++ nfs4_sequence_free_slot(&opendata->o_res.seq_res);
26529 + return ret;
26530 + }
26531 +
26532 +@@ -6302,7 +6307,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
26533 + p->arg.seqid = seqid;
26534 + p->res.seqid = seqid;
26535 + p->lsp = lsp;
26536 +- refcount_inc(&lsp->ls_count);
26537 + /* Ensure we don't close file until we're done freeing locks! */
26538 + p->ctx = get_nfs_open_context(ctx);
26539 + p->l_ctx = nfs_get_lock_context(ctx);
26540 +@@ -6527,7 +6531,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
26541 + p->res.lock_seqid = p->arg.lock_seqid;
26542 + p->lsp = lsp;
26543 + p->server = server;
26544 +- refcount_inc(&lsp->ls_count);
26545 + p->ctx = get_nfs_open_context(ctx);
26546 + locks_init_lock(&p->fl);
26547 + locks_copy_lock(&p->fl, fl);
26548 +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
26549 +index e54d899c1848..a8951f1f7b4e 100644
26550 +--- a/fs/nfs/pagelist.c
26551 ++++ b/fs/nfs/pagelist.c
26552 +@@ -988,6 +988,17 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
26553 + }
26554 + }
26555 +
26556 ++static void
26557 ++nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
26558 ++ struct nfs_page *req)
26559 ++{
26560 ++ LIST_HEAD(head);
26561 ++
26562 ++ nfs_list_remove_request(req);
26563 ++ nfs_list_add_request(req, &head);
26564 ++ desc->pg_completion_ops->error_cleanup(&head);
26565 ++}
26566 ++
26567 + /**
26568 + * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
26569 + * @desc: destination io descriptor
26570 +@@ -1025,10 +1036,8 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
26571 + nfs_page_group_unlock(req);
26572 + desc->pg_moreio = 1;
26573 + nfs_pageio_doio(desc);
26574 +- if (desc->pg_error < 0)
26575 +- return 0;
26576 +- if (mirror->pg_recoalesce)
26577 +- return 0;
26578 ++ if (desc->pg_error < 0 || mirror->pg_recoalesce)
26579 ++ goto out_cleanup_subreq;
26580 + /* retry add_request for this subreq */
26581 + nfs_page_group_lock(req);
26582 + continue;
26583 +@@ -1061,6 +1070,10 @@ err_ptr:
26584 + desc->pg_error = PTR_ERR(subreq);
26585 + nfs_page_group_unlock(req);
26586 + return 0;
26587 ++out_cleanup_subreq:
26588 ++ if (req != subreq)
26589 ++ nfs_pageio_cleanup_request(desc, subreq);
26590 ++ return 0;
26591 + }
26592 +
26593 + static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
26594 +@@ -1079,7 +1092,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
26595 + struct nfs_page *req;
26596 +
26597 + req = list_first_entry(&head, struct nfs_page, wb_list);
26598 +- nfs_list_remove_request(req);
26599 + if (__nfs_pageio_add_request(desc, req))
26600 + continue;
26601 + if (desc->pg_error < 0) {
26602 +@@ -1168,11 +1180,14 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
26603 + if (nfs_pgio_has_mirroring(desc))
26604 + desc->pg_mirror_idx = midx;
26605 + if (!nfs_pageio_add_request_mirror(desc, dupreq))
26606 +- goto out_failed;
26607 ++ goto out_cleanup_subreq;
26608 + }
26609 +
26610 + return 1;
26611 +
26612 ++out_cleanup_subreq:
26613 ++ if (req != dupreq)
26614 ++ nfs_pageio_cleanup_request(desc, dupreq);
26615 + out_failed:
26616 + nfs_pageio_error_cleanup(desc);
26617 + return 0;
26618 +@@ -1194,7 +1209,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
26619 + desc->pg_mirror_idx = mirror_idx;
26620 + for (;;) {
26621 + nfs_pageio_doio(desc);
26622 +- if (!mirror->pg_recoalesce)
26623 ++ if (desc->pg_error < 0 || !mirror->pg_recoalesce)
26624 + break;
26625 + if (!nfs_do_recoalesce(desc))
26626 + break;
26627 +diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
26628 +index 9eb8086ea841..c9cf46e0c040 100644
26629 +--- a/fs/nfsd/nfs3proc.c
26630 ++++ b/fs/nfsd/nfs3proc.c
26631 +@@ -463,8 +463,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp)
26632 + &resp->common, nfs3svc_encode_entry);
26633 + memcpy(resp->verf, argp->verf, 8);
26634 + resp->count = resp->buffer - argp->buffer;
26635 +- if (resp->offset)
26636 +- xdr_encode_hyper(resp->offset, argp->cookie);
26637 ++ if (resp->offset) {
26638 ++ loff_t offset = argp->cookie;
26639 ++
26640 ++ if (unlikely(resp->offset1)) {
26641 ++ /* we ended up with offset on a page boundary */
26642 ++ *resp->offset = htonl(offset >> 32);
26643 ++ *resp->offset1 = htonl(offset & 0xffffffff);
26644 ++ resp->offset1 = NULL;
26645 ++ } else {
26646 ++ xdr_encode_hyper(resp->offset, offset);
26647 ++ }
26648 ++ resp->offset = NULL;
26649 ++ }
26650 +
26651 + RETURN_STATUS(nfserr);
26652 + }
26653 +@@ -533,6 +544,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp)
26654 + } else {
26655 + xdr_encode_hyper(resp->offset, offset);
26656 + }
26657 ++ resp->offset = NULL;
26658 + }
26659 +
26660 + RETURN_STATUS(nfserr);
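
When the 8-byte readdir cookie straddles a page boundary, the XDR encoder cannot write it with a single xdr_encode_hyper(); the fix above stores the two 32-bit halves separately, each converted to network byte order, exactly as *resp->offset and *resp->offset1 show. A standalone userspace sketch of that split (hypothetical values):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Split a 64-bit cookie into two big-endian 32-bit words. */
    int main(void)
    {
            uint64_t cookie = 0x0123456789abcdefULL;
            uint32_t hi = htonl((uint32_t)(cookie >> 32));
            uint32_t lo = htonl((uint32_t)(cookie & 0xffffffff));

            printf("hi=%08x lo=%08x\n", ntohl(hi), ntohl(lo));
            return 0;
    }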
26661 +diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
26662 +index 9b973f4f7d01..83919116d5cb 100644
26663 +--- a/fs/nfsd/nfs3xdr.c
26664 ++++ b/fs/nfsd/nfs3xdr.c
26665 +@@ -921,6 +921,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
26666 + } else {
26667 + xdr_encode_hyper(cd->offset, offset64);
26668 + }
26669 ++ cd->offset = NULL;
26670 + }
26671 +
26672 + /*
26673 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
26674 +index fb3c9844c82a..6a45fb00c5fc 100644
26675 +--- a/fs/nfsd/nfs4state.c
26676 ++++ b/fs/nfsd/nfs4state.c
26677 +@@ -1544,16 +1544,16 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
26678 + {
26679 + u32 slotsize = slot_bytes(ca);
26680 + u32 num = ca->maxreqs;
26681 +- int avail;
26682 ++ unsigned long avail, total_avail;
26683 +
26684 + spin_lock(&nfsd_drc_lock);
26685 +- avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
26686 +- nfsd_drc_max_mem - nfsd_drc_mem_used);
26687 ++ total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
26688 ++ avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
26689 + /*
26690 + * Never use more than a third of the remaining memory,
26691 + * unless it's the only way to give this client a slot:
26692 + */
26693 +- avail = clamp_t(int, avail, slotsize, avail/3);
26694 ++ avail = clamp_t(int, avail, slotsize, total_avail/3);
26695 + num = min_t(int, num, avail / slotsize);
26696 + nfsd_drc_mem_used += num * slotsize;
26697 + spin_unlock(&nfsd_drc_lock);
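
The nfsd hunk fixes two related problems: avail was a signed int, so a large nfsd_drc_max_mem - nfsd_drc_mem_used could be truncated, and the clamp upper bound used the already-clamped avail/3 instead of total_avail/3. The underlying hazard is that casting a large unsigned long through int discards the high bits. A userspace demonstration with a minimal stand-in for the kernel's clamp_t() (the stand-in evaluates its arguments more than once, which is fine for this demo; assumes an LP64 build):

    #include <stdio.h>

    /* Minimal stand-in for the kernel's clamp_t(type, val, lo, hi). */
    #define clamp_t(type, val, lo, hi) \
            ((type)(val) < (type)(lo) ? (type)(lo) : \
             (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))

    int main(void)
    {
            unsigned long total_avail = 0x100000000UL;   /* 4 GiB */

            /* Casting 4 GiB through int truncates it to 0: */
            printf("as int:   %d\n",
                   clamp_t(int, total_avail, 0, 1 << 20));
            /* Keeping the arithmetic in unsigned long preserves it: */
            printf("as ulong: %lu\n",
                   clamp_t(unsigned long, total_avail, 0UL, 1UL << 20));
            return 0;
    }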
26698 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
26699 +index 72a7681f4046..f2feb2d11bae 100644
26700 +--- a/fs/nfsd/nfsctl.c
26701 ++++ b/fs/nfsd/nfsctl.c
26702 +@@ -1126,7 +1126,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
26703 + case 'Y':
26704 + case 'y':
26705 + case '1':
26706 +- if (nn->nfsd_serv)
26707 ++ if (!nn->nfsd_serv)
26708 + return -EBUSY;
26709 + nfsd4_end_grace(nn);
26710 + break;
26711 +diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
26712 +index 0e4166cc23a0..4ac775e32240 100644
26713 +--- a/fs/ocfs2/cluster/nodemanager.c
26714 ++++ b/fs/ocfs2/cluster/nodemanager.c
26715 +@@ -621,13 +621,15 @@ static void o2nm_node_group_drop_item(struct config_group *group,
26716 + struct o2nm_node *node = to_o2nm_node(item);
26717 + struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
26718 +
26719 +- o2net_disconnect_node(node);
26720 ++ if (cluster->cl_nodes[node->nd_num] == node) {
26721 ++ o2net_disconnect_node(node);
26722 +
26723 +- if (cluster->cl_has_local &&
26724 +- (cluster->cl_local_node == node->nd_num)) {
26725 +- cluster->cl_has_local = 0;
26726 +- cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
26727 +- o2net_stop_listening(node);
26728 ++ if (cluster->cl_has_local &&
26729 ++ (cluster->cl_local_node == node->nd_num)) {
26730 ++ cluster->cl_has_local = 0;
26731 ++ cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
26732 ++ o2net_stop_listening(node);
26733 ++ }
26734 + }
26735 +
26736 + /* XXX call into net to stop this node from trading messages */
26737 +diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
26738 +index a35259eebc56..1dc9a08e8bdc 100644
26739 +--- a/fs/ocfs2/refcounttree.c
26740 ++++ b/fs/ocfs2/refcounttree.c
26741 +@@ -4719,22 +4719,23 @@ out:
26742 +
26743 + /* Lock an inode and grab a bh pointing to the inode. */
26744 + int ocfs2_reflink_inodes_lock(struct inode *s_inode,
26745 +- struct buffer_head **bh1,
26746 ++ struct buffer_head **bh_s,
26747 + struct inode *t_inode,
26748 +- struct buffer_head **bh2)
26749 ++ struct buffer_head **bh_t)
26750 + {
26751 +- struct inode *inode1;
26752 +- struct inode *inode2;
26753 ++ struct inode *inode1 = s_inode;
26754 ++ struct inode *inode2 = t_inode;
26755 + struct ocfs2_inode_info *oi1;
26756 + struct ocfs2_inode_info *oi2;
26757 ++ struct buffer_head *bh1 = NULL;
26758 ++ struct buffer_head *bh2 = NULL;
26759 + bool same_inode = (s_inode == t_inode);
26760 ++ bool need_swap = (inode1->i_ino > inode2->i_ino);
26761 + int status;
26762 +
26763 + /* First grab the VFS and rw locks. */
26764 + lock_two_nondirectories(s_inode, t_inode);
26765 +- inode1 = s_inode;
26766 +- inode2 = t_inode;
26767 +- if (inode1->i_ino > inode2->i_ino)
26768 ++ if (need_swap)
26769 + swap(inode1, inode2);
26770 +
26771 + status = ocfs2_rw_lock(inode1, 1);
26772 +@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
26773 + trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
26774 + (unsigned long long)oi2->ip_blkno);
26775 +
26776 +- if (*bh1)
26777 +- *bh1 = NULL;
26778 +- if (*bh2)
26779 +- *bh2 = NULL;
26780 +-
26781 + /* We always want to lock the one with the lower lockid first. */
26782 + if (oi1->ip_blkno > oi2->ip_blkno)
26783 + mlog_errno(-ENOLCK);
26784 +
26785 + /* lock id1 */
26786 +- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
26787 ++ status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
26788 ++ OI_LS_REFLINK_TARGET);
26789 + if (status < 0) {
26790 + if (status != -ENOENT)
26791 + mlog_errno(status);
26792 +@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
26793 +
26794 + /* lock id2 */
26795 + if (!same_inode) {
26796 +- status = ocfs2_inode_lock_nested(inode2, bh2, 1,
26797 ++ status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
26798 + OI_LS_REFLINK_TARGET);
26799 + if (status < 0) {
26800 + if (status != -ENOENT)
26801 + mlog_errno(status);
26802 + goto out_cl1;
26803 + }
26804 +- } else
26805 +- *bh2 = *bh1;
26806 ++ } else {
26807 ++ bh2 = bh1;
26808 ++ }
26809 ++
26810 ++ /*
26811 ++ * If we swapped inode order above, we have to swap the buffer heads
26812 ++ * before passing them back to the caller.
26813 ++ */
26814 ++ if (need_swap)
26815 ++ swap(bh1, bh2);
26816 ++ *bh_s = bh1;
26817 ++ *bh_t = bh2;
26818 +
26819 + trace_ocfs2_double_lock_end(
26820 + (unsigned long long)oi1->ip_blkno,
26821 +@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
26822 +
26823 + out_cl1:
26824 + ocfs2_inode_unlock(inode1, 1);
26825 +- brelse(*bh1);
26826 +- *bh1 = NULL;
26827 ++ brelse(bh1);
26828 + out_rw2:
26829 + ocfs2_rw_unlock(inode2, 1);
26830 + out_i2:
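
The ocfs2 fix keeps the "lock the lower inode number first" ordering that prevents ABBA deadlocks, but now remembers whether the pair was swapped so the buffer heads can be swapped back into the caller's order before being returned. The general order-then-unswap shape, as a hedged sketch (lock_and_get() is a hypothetical helper standing in for ocfs2_inode_lock_nested()):

    static void *lock_and_get(struct inode *inode);     /* hypothetical */

    static void example_lock_pair(struct inode *a, struct inode *b,
                                  void **res_a, void **res_b)
    {
            bool swapped = a->i_ino > b->i_ino;
            void *ra, *rb;

            if (swapped)
                    swap(a, b);     /* always lock the lower i_ino first */

            ra = lock_and_get(a);
            rb = (a == b) ? ra : lock_and_get(b);

            if (swapped)
                    swap(ra, rb);   /* restore the caller's ordering */
            *res_a = ra;
            *res_b = rb;
    }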
26831 +diff --git a/fs/open.c b/fs/open.c
26832 +index 0285ce7dbd51..f1c2f855fd43 100644
26833 +--- a/fs/open.c
26834 ++++ b/fs/open.c
26835 +@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
26836 + return 0;
26837 + }
26838 +
26839 ++ /* Any file opened for execve()/uselib() has to be a regular file. */
26840 ++ if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
26841 ++ error = -EACCES;
26842 ++ goto cleanup_file;
26843 ++ }
26844 ++
26845 + if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
26846 + error = get_write_access(inode);
26847 + if (unlikely(error))
26848 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
26849 +index 9e62dcf06fc4..68b3303e4b46 100644
26850 +--- a/fs/overlayfs/copy_up.c
26851 ++++ b/fs/overlayfs/copy_up.c
26852 +@@ -443,6 +443,24 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
26853 + {
26854 + int err;
26855 +
26856 ++ /*
26857 ++ * Copy up data first and then xattrs. Writing data after
26858 ++ * xattrs will remove security.capability xattr automatically.
26859 ++ */
26860 ++ if (S_ISREG(c->stat.mode) && !c->metacopy) {
26861 ++ struct path upperpath, datapath;
26862 ++
26863 ++ ovl_path_upper(c->dentry, &upperpath);
26864 ++ if (WARN_ON(upperpath.dentry != NULL))
26865 ++ return -EIO;
26866 ++ upperpath.dentry = temp;
26867 ++
26868 ++ ovl_path_lowerdata(c->dentry, &datapath);
26869 ++ err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
26870 ++ if (err)
26871 ++ return err;
26872 ++ }
26873 ++
26874 + err = ovl_copy_xattr(c->lowerpath.dentry, temp);
26875 + if (err)
26876 + return err;
26877 +@@ -460,19 +478,6 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
26878 + return err;
26879 + }
26880 +
26881 +- if (S_ISREG(c->stat.mode) && !c->metacopy) {
26882 +- struct path upperpath, datapath;
26883 +-
26884 +- ovl_path_upper(c->dentry, &upperpath);
26885 +- BUG_ON(upperpath.dentry != NULL);
26886 +- upperpath.dentry = temp;
26887 +-
26888 +- ovl_path_lowerdata(c->dentry, &datapath);
26889 +- err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
26890 +- if (err)
26891 +- return err;
26892 +- }
26893 +-
26894 + if (c->metacopy) {
26895 + err = ovl_check_setxattr(c->dentry, temp, OVL_XATTR_METACOPY,
26896 + NULL, 0, -EOPNOTSUPP);
26897 +@@ -737,6 +742,8 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
26898 + {
26899 + struct path upperpath, datapath;
26900 + int err;
26901 ++ char *capability = NULL;
26902 ++ ssize_t uninitialized_var(cap_size);
26903 +
26904 + ovl_path_upper(c->dentry, &upperpath);
26905 + if (WARN_ON(upperpath.dentry == NULL))
26906 +@@ -746,15 +753,37 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
26907 + if (WARN_ON(datapath.dentry == NULL))
26908 + return -EIO;
26909 +
26910 ++ if (c->stat.size) {
26911 ++ err = cap_size = ovl_getxattr(upperpath.dentry, XATTR_NAME_CAPS,
26912 ++ &capability, 0);
26913 ++ if (err < 0 && err != -ENODATA)
26914 ++ goto out;
26915 ++ }
26916 ++
26917 + err = ovl_copy_up_data(&datapath, &upperpath, c->stat.size);
26918 + if (err)
26919 +- return err;
26920 ++ goto out_free;
26921 ++
26922 ++ /*
26923 ++ * Writing to the upper file will clear the security.capability
26924 ++ * xattr. We don't want that to happen during a normal copy-up.
26925 ++ */
26926 ++ if (capability) {
26927 ++ err = ovl_do_setxattr(upperpath.dentry, XATTR_NAME_CAPS,
26928 ++ capability, cap_size, 0);
26929 ++ if (err)
26930 ++ goto out_free;
26931 ++ }
26932 ++
26933 +
26934 + err = vfs_removexattr(upperpath.dentry, OVL_XATTR_METACOPY);
26935 + if (err)
26936 +- return err;
26937 ++ goto out_free;
26938 +
26939 + ovl_set_upperdata(d_inode(c->dentry));
26940 ++out_free:
26941 ++ kfree(capability);
26942 ++out:
26943 + return err;
26944 + }
26945 +
26946 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
26947 +index 5e45cb3630a0..9c6018287d57 100644
26948 +--- a/fs/overlayfs/overlayfs.h
26949 ++++ b/fs/overlayfs/overlayfs.h
26950 +@@ -277,6 +277,8 @@ int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
26951 + int ovl_check_metacopy_xattr(struct dentry *dentry);
26952 + bool ovl_is_metacopy_dentry(struct dentry *dentry);
26953 + char *ovl_get_redirect_xattr(struct dentry *dentry, int padding);
26954 ++ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
26955 ++ size_t padding);
26956 +
26957 + static inline bool ovl_is_impuredir(struct dentry *dentry)
26958 + {
26959 +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
26960 +index 7c01327b1852..4035e640f402 100644
26961 +--- a/fs/overlayfs/util.c
26962 ++++ b/fs/overlayfs/util.c
26963 +@@ -863,28 +863,49 @@ bool ovl_is_metacopy_dentry(struct dentry *dentry)
26964 + return (oe->numlower > 1);
26965 + }
26966 +
26967 +-char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
26968 ++ssize_t ovl_getxattr(struct dentry *dentry, char *name, char **value,
26969 ++ size_t padding)
26970 + {
26971 +- int res;
26972 +- char *s, *next, *buf = NULL;
26973 ++ ssize_t res;
26974 ++ char *buf = NULL;
26975 +
26976 +- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
26977 ++ res = vfs_getxattr(dentry, name, NULL, 0);
26978 + if (res < 0) {
26979 + if (res == -ENODATA || res == -EOPNOTSUPP)
26980 +- return NULL;
26981 ++ return -ENODATA;
26982 + goto fail;
26983 + }
26984 +
26985 +- buf = kzalloc(res + padding + 1, GFP_KERNEL);
26986 +- if (!buf)
26987 +- return ERR_PTR(-ENOMEM);
26988 ++ if (res != 0) {
26989 ++ buf = kzalloc(res + padding, GFP_KERNEL);
26990 ++ if (!buf)
26991 ++ return -ENOMEM;
26992 +
26993 +- if (res == 0)
26994 +- goto invalid;
26995 ++ res = vfs_getxattr(dentry, name, buf, res);
26996 ++ if (res < 0)
26997 ++ goto fail;
26998 ++ }
26999 ++ *value = buf;
27000 ++
27001 ++ return res;
27002 ++
27003 ++fail:
27004 ++ pr_warn_ratelimited("overlayfs: failed to get xattr %s: err=%zi\n",
27005 ++ name, res);
27006 ++ kfree(buf);
27007 ++ return res;
27008 ++}
27009 +
27010 +- res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
27011 ++char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
27012 ++{
27013 ++ int res;
27014 ++ char *s, *next, *buf = NULL;
27015 ++
27016 ++ res = ovl_getxattr(dentry, OVL_XATTR_REDIRECT, &buf, padding + 1);
27017 ++ if (res == -ENODATA)
27018 ++ return NULL;
27019 + if (res < 0)
27020 +- goto fail;
27021 ++ return ERR_PTR(res);
27022 + if (res == 0)
27023 + goto invalid;
27024 +
27025 +@@ -900,15 +921,9 @@ char *ovl_get_redirect_xattr(struct dentry *dentry, int padding)
27026 + }
27027 +
27028 + return buf;
27029 +-
27030 +-err_free:
27031 +- kfree(buf);
27032 +- return ERR_PTR(res);
27033 +-fail:
27034 +- pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
27035 +- goto err_free;
27036 + invalid:
27037 + pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
27038 + res = -EINVAL;
27039 +- goto err_free;
27040 ++ kfree(buf);
27041 ++ return ERR_PTR(res);
27042 + }
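
The new ovl_getxattr() above is the standard two-pass xattr read: call vfs_getxattr() with a NULL buffer to learn the value length, allocate, then fetch for real. A hedged sketch of the same shape with a hypothetical helper name:

    static ssize_t example_getxattr(struct dentry *dentry,
                                    const char *name, char **value)
    {
            ssize_t len = vfs_getxattr(dentry, name, NULL, 0);
            char *buf;

            if (len < 0)
                    return len;     /* -ENODATA, -EOPNOTSUPP, ... */

            buf = kzalloc(len + 1, GFP_KERNEL); /* +1 keeps it NUL-safe */
            if (!buf)
                    return -ENOMEM;

            len = vfs_getxattr(dentry, name, buf, len);
            if (len < 0) {
                    kfree(buf);
                    return len;     /* raced: value changed or removed */
            }
            *value = buf;
            return len;
    }

The second call can still fail if the attribute changed between the two passes, so the error path after the fetch is not optional.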
27043 +diff --git a/fs/pipe.c b/fs/pipe.c
27044 +index bdc5d3c0977d..c51750ed4011 100644
27045 +--- a/fs/pipe.c
27046 ++++ b/fs/pipe.c
27047 +@@ -234,6 +234,14 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
27048 + .get = generic_pipe_buf_get,
27049 + };
27050 +
27051 ++static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
27052 ++ .can_merge = 0,
27053 ++ .confirm = generic_pipe_buf_confirm,
27054 ++ .release = anon_pipe_buf_release,
27055 ++ .steal = anon_pipe_buf_steal,
27056 ++ .get = generic_pipe_buf_get,
27057 ++};
27058 ++
27059 + static const struct pipe_buf_operations packet_pipe_buf_ops = {
27060 + .can_merge = 0,
27061 + .confirm = generic_pipe_buf_confirm,
27062 +@@ -242,6 +250,12 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = {
27063 + .get = generic_pipe_buf_get,
27064 + };
27065 +
27066 ++void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
27067 ++{
27068 ++ if (buf->ops == &anon_pipe_buf_ops)
27069 ++ buf->ops = &anon_pipe_buf_nomerge_ops;
27070 ++}
27071 ++
27072 + static ssize_t
27073 + pipe_read(struct kiocb *iocb, struct iov_iter *to)
27074 + {
27075 +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
27076 +index 4d598a399bbf..d65390727541 100644
27077 +--- a/fs/proc/proc_sysctl.c
27078 ++++ b/fs/proc/proc_sysctl.c
27079 +@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
27080 + if (--header->nreg)
27081 + return;
27082 +
27083 +- put_links(header);
27084 ++ if (parent)
27085 ++ put_links(header);
27086 + start_unregistering(header);
27087 + if (!--header->count)
27088 + kfree_rcu(header, rcu);
27089 +diff --git a/fs/read_write.c b/fs/read_write.c
27090 +index ff3c5e6f87cf..27b69b85d49f 100644
27091 +--- a/fs/read_write.c
27092 ++++ b/fs/read_write.c
27093 +@@ -1238,6 +1238,9 @@ COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
27094 + const struct compat_iovec __user *,vec,
27095 + unsigned long, vlen, loff_t, pos, rwf_t, flags)
27096 + {
27097 ++ if (pos == -1)
27098 ++ return do_compat_readv(fd, vec, vlen, flags);
27099 ++
27100 + return do_compat_preadv64(fd, vec, vlen, pos, flags);
27101 + }
27102 + #endif
27103 +@@ -1344,6 +1347,9 @@ COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
27104 + const struct compat_iovec __user *,vec,
27105 + unsigned long, vlen, loff_t, pos, rwf_t, flags)
27106 + {
27107 ++ if (pos == -1)
27108 ++ return do_compat_writev(fd, vec, vlen, flags);
27109 ++
27110 + return do_compat_pwritev64(fd, vec, vlen, pos, flags);
27111 + }
27112 + #endif
27113 +diff --git a/fs/splice.c b/fs/splice.c
27114 +index de2ede048473..90c29675d573 100644
27115 +--- a/fs/splice.c
27116 ++++ b/fs/splice.c
27117 +@@ -1597,6 +1597,8 @@ retry:
27118 + */
27119 + obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
27120 +
27121 ++ pipe_buf_mark_unmergeable(obuf);
27122 ++
27123 + obuf->len = len;
27124 + opipe->nrbufs++;
27125 + ibuf->offset += obuf->len;
27126 +@@ -1671,6 +1673,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
27127 + */
27128 + obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
27129 +
27130 ++ pipe_buf_mark_unmergeable(obuf);
27131 ++
27132 + if (obuf->len > len)
27133 + obuf->len = len;
27134 +
27135 +diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
27136 +index b647f0bd150c..94220ba85628 100644
27137 +--- a/fs/udf/truncate.c
27138 ++++ b/fs/udf/truncate.c
27139 +@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
27140 + epos.block = eloc;
27141 + epos.bh = udf_tread(sb,
27142 + udf_get_lb_pblock(sb, &eloc, 0));
27143 ++ /* Error reading indirect block? */
27144 ++ if (!epos.bh)
27145 ++ return;
27146 + if (elen)
27147 + indirect_ext_len =
27148 + (elen + sb->s_blocksize - 1) >>
27149 +diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
27150 +index 3d7a6a9c2370..f8f6f04c4453 100644
27151 +--- a/include/asm-generic/vmlinux.lds.h
27152 ++++ b/include/asm-generic/vmlinux.lds.h
27153 +@@ -733,7 +733,7 @@
27154 + KEEP(*(.orc_unwind_ip)) \
27155 + __stop_orc_unwind_ip = .; \
27156 + } \
27157 +- . = ALIGN(6); \
27158 ++ . = ALIGN(2); \
27159 + .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
27160 + __start_orc_unwind = .; \
27161 + KEEP(*(.orc_unwind)) \
27162 +diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
27163 +index bfe1639df02d..97fc498dc767 100644
27164 +--- a/include/drm/drm_cache.h
27165 ++++ b/include/drm/drm_cache.h
27166 +@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
27167 + return false;
27168 + #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
27169 + return false;
27170 ++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
27171 ++ /*
27172 ++ * The DRM driver stack is designed to work with cache coherent devices
27173 ++ * only, but permits an optimization to be enabled in some cases, where
27174 ++ * for some buffers, both the CPU and the GPU use uncached mappings,
27175 ++ * removing the need for DMA snooping and allocation in the CPU caches.
27176 ++ *
27177 ++ * The use of uncached GPU mappings relies on the correct implementation
27178 ++ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
27179 ++ * will use cached mappings nonetheless. On x86 platforms, this does not
27180 ++ * seem to matter, as uncached CPU mappings will snoop the caches in any
27181 ++ * case. However, on ARM and arm64, enabling this optimization on a
27182 ++ * platform where NoSnoop is ignored results in loss of coherency, which
27183 ++ * breaks correct operation of the device. Since we have no way of
27184 ++ * detecting whether NoSnoop works or not, just disable this
27185 ++ * optimization entirely for ARM and arm64.
27186 ++ */
27187 ++ return false;
27188 + #else
27189 + return true;
27190 + #endif
27191 +diff --git a/include/linux/atalk.h b/include/linux/atalk.h
27192 +index 23f805562f4e..840cf92307ba 100644
27193 +--- a/include/linux/atalk.h
27194 ++++ b/include/linux/atalk.h
27195 +@@ -161,16 +161,26 @@ extern int sysctl_aarp_resolve_time;
27196 + extern void atalk_register_sysctl(void);
27197 + extern void atalk_unregister_sysctl(void);
27198 + #else
27199 +-#define atalk_register_sysctl() do { } while(0)
27200 +-#define atalk_unregister_sysctl() do { } while(0)
27201 ++static inline int atalk_register_sysctl(void)
27202 ++{
27203 ++ return 0;
27204 ++}
27205 ++static inline void atalk_unregister_sysctl(void)
27206 ++{
27207 ++}
27208 + #endif
27209 +
27210 + #ifdef CONFIG_PROC_FS
27211 + extern int atalk_proc_init(void);
27212 + extern void atalk_proc_exit(void);
27213 + #else
27214 +-#define atalk_proc_init() ({ 0; })
27215 +-#define atalk_proc_exit() do { } while(0)
27216 ++static inline int atalk_proc_init(void)
27217 ++{
27218 ++ return 0;
27219 ++}
27220 ++static inline void atalk_proc_exit(void)
27221 ++{
27222 ++}
27223 + #endif /* CONFIG_PROC_FS */
27224 +
27225 + #endif /* __LINUX_ATALK_H__ */
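
Replacing the do { } while (0) and ({ 0; }) stubs with static inline functions, as the atalk.h hunk does, is a common cleanup: an inline stub gets full prototype and type checking, can be used in expressions, and still compiles away to nothing. A minimal illustration with a hypothetical feature option:

    #ifdef CONFIG_EXAMPLE_FEATURE
    int example_init(void);         /* real implementation elsewhere */
    #else
    /* Typed no-op: callers can still write err = example_init(); */
    static inline int example_init(void)
    {
            return 0;
    }
    #endif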
27226 +diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
27227 +index 50fb0dee23e8..d35b8ec1c485 100644
27228 +--- a/include/linux/bitrev.h
27229 ++++ b/include/linux/bitrev.h
27230 +@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
27231 +
27232 + #define __constant_bitrev32(x) \
27233 + ({ \
27234 +- u32 __x = x; \
27235 +- __x = (__x >> 16) | (__x << 16); \
27236 +- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
27237 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
27238 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
27239 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
27240 +- __x; \
27241 ++ u32 ___x = x; \
27242 ++ ___x = (___x >> 16) | (___x << 16); \
27243 ++ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
27244 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
27245 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
27246 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
27247 ++ ___x; \
27248 + })
27249 +
27250 + #define __constant_bitrev16(x) \
27251 + ({ \
27252 +- u16 __x = x; \
27253 +- __x = (__x >> 8) | (__x << 8); \
27254 +- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
27255 +- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
27256 +- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
27257 +- __x; \
27258 ++ u16 ___x = x; \
27259 ++ ___x = (___x >> 8) | (___x << 8); \
27260 ++ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
27261 ++ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
27262 ++ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
27263 ++ ___x; \
27264 + })
27265 +
27266 + #define __constant_bitrev8x4(x) \
27267 + ({ \
27268 +- u32 __x = x; \
27269 +- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
27270 +- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
27271 +- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
27272 +- __x; \
27273 ++ u32 ___x = x; \
27274 ++ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
27275 ++ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
27276 ++ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
27277 ++ ___x; \
27278 + })
27279 +
27280 + #define __constant_bitrev8(x) \
27281 + ({ \
27282 +- u8 __x = x; \
27283 +- __x = (__x >> 4) | (__x << 4); \
27284 +- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
27285 +- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
27286 +- __x; \
27287 ++ u8 ___x = x; \
27288 ++ ___x = (___x >> 4) | (___x << 4); \
27289 ++ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
27290 ++ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
27291 ++ ___x; \
27292 + })
27293 +
27294 + #define bitrev32(x) \
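
The bitrev rename from __x to ___x fixes statement-expression shadowing: bitrev32(x) and friends can pass an argument that itself expands to __x, and then the inner "u32 __x = x;" initializes the freshly declared (and still uninitialized) inner __x instead of the outer one, because in C a declared identifier is already in scope within its own initializer. A compilable userspace demonstration (GCC/Clang, since statement expressions are an extension; the "bad" result is whatever was on the stack, which is exactly the bug):

    #include <stdio.h>

    /* Inner helper using the colliding local name __x. */
    #define swap_halves_bad(x) ({ \
            unsigned int __x = (x); \
            (__x >> 16) | (__x << 16); \
    })

    /* Outer macro also names its local __x: the inner initializer
     * "__x = (__x)" reads the uninitialized inner variable. */
    #define rev_bad(x) ({ \
            unsigned int __x = (x); \
            swap_halves_bad(__x); \
    })

    /* Fixed inner helper: the extra underscore avoids the collision. */
    #define swap_halves_ok(x) ({ \
            unsigned int ___x = (x); \
            (___x >> 16) | (___x << 16); \
    })

    #define rev_ok(x) ({ \
            unsigned int __x = (x); \
            swap_halves_ok(__x); \
    })

    int main(void)
    {
            printf("bad: %08x (garbage)\n", rev_bad(0x12345678));
            printf("ok:  %08x\n", rev_ok(0x12345678));
            return 0;
    }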
27295 +diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
27296 +index a420c07904bc..337d5049ff93 100644
27297 +--- a/include/linux/ceph/libceph.h
27298 ++++ b/include/linux/ceph/libceph.h
27299 +@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
27300 + extern int __ceph_open_session(struct ceph_client *client,
27301 + unsigned long started);
27302 + extern int ceph_open_session(struct ceph_client *client);
27303 ++int ceph_wait_for_latest_osdmap(struct ceph_client *client,
27304 ++ unsigned long timeout);
27305 +
27306 + /* pagevec.c */
27307 + extern void ceph_release_page_vector(struct page **pages, int num_pages);
27308 +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
27309 +index 8fcbae1b8db0..120d1d40704b 100644
27310 +--- a/include/linux/cgroup-defs.h
27311 ++++ b/include/linux/cgroup-defs.h
27312 +@@ -602,7 +602,7 @@ struct cgroup_subsys {
27313 + void (*cancel_fork)(struct task_struct *task);
27314 + void (*fork)(struct task_struct *task);
27315 + void (*exit)(struct task_struct *task);
27316 +- void (*free)(struct task_struct *task);
27317 ++ void (*release)(struct task_struct *task);
27318 + void (*bind)(struct cgroup_subsys_state *root_css);
27319 +
27320 + bool early_init:1;
27321 +diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
27322 +index 9968332cceed..81f58b4a5418 100644
27323 +--- a/include/linux/cgroup.h
27324 ++++ b/include/linux/cgroup.h
27325 +@@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p);
27326 + extern void cgroup_cancel_fork(struct task_struct *p);
27327 + extern void cgroup_post_fork(struct task_struct *p);
27328 + void cgroup_exit(struct task_struct *p);
27329 ++void cgroup_release(struct task_struct *p);
27330 + void cgroup_free(struct task_struct *p);
27331 +
27332 + int cgroup_init_early(void);
27333 +@@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
27334 + static inline void cgroup_cancel_fork(struct task_struct *p) {}
27335 + static inline void cgroup_post_fork(struct task_struct *p) {}
27336 + static inline void cgroup_exit(struct task_struct *p) {}
27337 ++static inline void cgroup_release(struct task_struct *p) {}
27338 + static inline void cgroup_free(struct task_struct *p) {}
27339 +
27340 + static inline int cgroup_init_early(void) { return 0; }
27341 +diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
27342 +index e443fa9fa859..b7cf80a71293 100644
27343 +--- a/include/linux/clk-provider.h
27344 ++++ b/include/linux/clk-provider.h
27345 +@@ -792,6 +792,9 @@ unsigned int __clk_get_enable_count(struct clk *clk);
27346 + unsigned long clk_hw_get_rate(const struct clk_hw *hw);
27347 + unsigned long __clk_get_flags(struct clk *clk);
27348 + unsigned long clk_hw_get_flags(const struct clk_hw *hw);
27349 ++#define clk_hw_can_set_rate_parent(hw) \
27350 ++ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
27351 ++
27352 + bool clk_hw_is_prepared(const struct clk_hw *hw);
27353 + bool clk_hw_rate_is_protected(const struct clk_hw *hw);
27354 + bool clk_hw_is_enabled(const struct clk_hw *hw);
27355 +diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
27356 +index c86d6d8bdfed..0b427d5df0fe 100644
27357 +--- a/include/linux/cpufreq.h
27358 ++++ b/include/linux/cpufreq.h
27359 +@@ -254,20 +254,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
27360 + static struct freq_attr _name = \
27361 + __ATTR(_name, 0200, NULL, store_##_name)
27362 +
27363 +-struct global_attr {
27364 +- struct attribute attr;
27365 +- ssize_t (*show)(struct kobject *kobj,
27366 +- struct attribute *attr, char *buf);
27367 +- ssize_t (*store)(struct kobject *a, struct attribute *b,
27368 +- const char *c, size_t count);
27369 +-};
27370 +-
27371 + #define define_one_global_ro(_name) \
27372 +-static struct global_attr _name = \
27373 ++static struct kobj_attribute _name = \
27374 + __ATTR(_name, 0444, show_##_name, NULL)
27375 +
27376 + #define define_one_global_rw(_name) \
27377 +-static struct global_attr _name = \
27378 ++static struct kobj_attribute _name = \
27379 + __ATTR(_name, 0644, show_##_name, store_##_name)
27380 +
27381 +
27382 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
27383 +index e528baebad69..bee4bb9f81bc 100644
27384 +--- a/include/linux/device-mapper.h
27385 ++++ b/include/linux/device-mapper.h
27386 +@@ -609,7 +609,7 @@ do { \
27387 + */
27388 + #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
27389 +
27390 +-static inline sector_t to_sector(unsigned long n)
27391 ++static inline sector_t to_sector(unsigned long long n)
27392 + {
27393 + return (n >> SECTOR_SHIFT);
27394 + }
27395 +diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
27396 +index f6ded992c183..5b21f14802e1 100644
27397 +--- a/include/linux/dma-mapping.h
27398 ++++ b/include/linux/dma-mapping.h
27399 +@@ -130,6 +130,7 @@ struct dma_map_ops {
27400 + enum dma_data_direction direction);
27401 + int (*dma_supported)(struct device *dev, u64 mask);
27402 + u64 (*get_required_mask)(struct device *dev);
27403 ++ size_t (*max_mapping_size)(struct device *dev);
27404 + };
27405 +
27406 + #define DMA_MAPPING_ERROR (~(dma_addr_t)0)
27407 +@@ -257,6 +258,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
27408 + }
27409 + #endif
27410 +
27411 ++size_t dma_direct_max_mapping_size(struct device *dev);
27412 ++
27413 + #ifdef CONFIG_HAS_DMA
27414 + #include <asm/dma-mapping.h>
27415 +
27416 +@@ -460,6 +463,7 @@ int dma_supported(struct device *dev, u64 mask);
27417 + int dma_set_mask(struct device *dev, u64 mask);
27418 + int dma_set_coherent_mask(struct device *dev, u64 mask);
27419 + u64 dma_get_required_mask(struct device *dev);
27420 ++size_t dma_max_mapping_size(struct device *dev);
27421 + #else /* CONFIG_HAS_DMA */
27422 + static inline dma_addr_t dma_map_page_attrs(struct device *dev,
27423 + struct page *page, size_t offset, size_t size,
27424 +@@ -561,6 +565,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
27425 + {
27426 + return 0;
27427 + }
27428 ++static inline size_t dma_max_mapping_size(struct device *dev)
27429 ++{
27430 ++ return 0;
27431 ++}
27432 + #endif /* CONFIG_HAS_DMA */
27433 +
27434 + static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
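
The new dma_max_mapping_size() hook lets the DMA layer report how large a single mapping may be (for example when SWIOTLB bouncing caps it). A hedged usage sketch for a hypothetical driver; note the !CONFIG_HAS_DMA stub above returns 0, so a caller has to treat 0 as "no information" rather than as a real limit:

    static size_t example_cap_request(struct device *dev, size_t want)
    {
            size_t limit = dma_max_mapping_size(dev);

            /* 0 here only means the no-DMA stub was compiled in. */
            if (limit)
                    want = min_t(size_t, want, limit);
            return want;
    }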
27435 +diff --git a/include/linux/efi.h b/include/linux/efi.h
27436 +index 28604a8d0aa9..a86485ac7c87 100644
27437 +--- a/include/linux/efi.h
27438 ++++ b/include/linux/efi.h
27439 +@@ -1699,19 +1699,19 @@ extern int efi_tpm_eventlog_init(void);
27440 + * fault happened while executing an efi runtime service.
27441 + */
27442 + enum efi_rts_ids {
27443 +- NONE,
27444 +- GET_TIME,
27445 +- SET_TIME,
27446 +- GET_WAKEUP_TIME,
27447 +- SET_WAKEUP_TIME,
27448 +- GET_VARIABLE,
27449 +- GET_NEXT_VARIABLE,
27450 +- SET_VARIABLE,
27451 +- QUERY_VARIABLE_INFO,
27452 +- GET_NEXT_HIGH_MONO_COUNT,
27453 +- RESET_SYSTEM,
27454 +- UPDATE_CAPSULE,
27455 +- QUERY_CAPSULE_CAPS,
27456 ++ EFI_NONE,
27457 ++ EFI_GET_TIME,
27458 ++ EFI_SET_TIME,
27459 ++ EFI_GET_WAKEUP_TIME,
27460 ++ EFI_SET_WAKEUP_TIME,
27461 ++ EFI_GET_VARIABLE,
27462 ++ EFI_GET_NEXT_VARIABLE,
27463 ++ EFI_SET_VARIABLE,
27464 ++ EFI_QUERY_VARIABLE_INFO,
27465 ++ EFI_GET_NEXT_HIGH_MONO_COUNT,
27466 ++ EFI_RESET_SYSTEM,
27467 ++ EFI_UPDATE_CAPSULE,
27468 ++ EFI_QUERY_CAPSULE_CAPS,
27469 + };
27470 +
27471 + /*
27472 +diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
27473 +index d7711048ef93..c524ad7d31da 100644
27474 +--- a/include/linux/f2fs_fs.h
27475 ++++ b/include/linux/f2fs_fs.h
27476 +@@ -489,12 +489,12 @@ typedef __le32 f2fs_hash_t;
27477 +
27478 + /*
27479 + * space utilization of regular dentry and inline dentry (w/o extra reservation)
27480 +- * regular dentry inline dentry
27481 +- * bitmap 1 * 27 = 27 1 * 23 = 23
27482 +- * reserved 1 * 3 = 3 1 * 7 = 7
27483 +- * dentry 11 * 214 = 2354 11 * 182 = 2002
27484 +- * filename 8 * 214 = 1712 8 * 182 = 1456
27485 +- * total 4096 3488
27486 ++ * regular dentry inline dentry (def) inline dentry (min)
27487 ++ * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
27488 ++ * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
27489 ++ * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
27490 ++ * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
27491 ++ * total 4096 3488 40
27492 + *
27493 + * Note: there are more reserved space in inline dentry than in regular
27494 + * dentry, when converting inline dentry we should handle this carefully.
27495 +@@ -506,6 +506,7 @@ typedef __le32 f2fs_hash_t;
27496 + #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
27497 + F2FS_SLOT_LEN) * \
27498 + NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
27499 ++#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
27500 +
27501 + /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
27502 + struct f2fs_dir_entry {
27503 +diff --git a/include/linux/filter.h b/include/linux/filter.h
27504 +index e532fcc6e4b5..3358646a8e7a 100644
27505 +--- a/include/linux/filter.h
27506 ++++ b/include/linux/filter.h
27507 +@@ -874,7 +874,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
27508 + unsigned int alignment,
27509 + bpf_jit_fill_hole_t bpf_fill_ill_insns);
27510 + void bpf_jit_binary_free(struct bpf_binary_header *hdr);
27511 +-
27512 ++u64 bpf_jit_alloc_exec_limit(void);
27513 ++void *bpf_jit_alloc_exec(unsigned long size);
27514 ++void bpf_jit_free_exec(void *addr);
27515 + void bpf_jit_free(struct bpf_prog *fp);
27516 +
27517 + int bpf_jit_get_func_addr(const struct bpf_prog *prog,
27518 +diff --git a/include/linux/fs.h b/include/linux/fs.h
27519 +index 29d8e2cfed0e..fd423fec8d83 100644
27520 +--- a/include/linux/fs.h
27521 ++++ b/include/linux/fs.h
27522 +@@ -304,13 +304,19 @@ enum rw_hint {
27523 +
27524 + struct kiocb {
27525 + struct file *ki_filp;
27526 ++
27527 ++ /* The 'ki_filp' pointer is shared in a union for aio */
27528 ++ randomized_struct_fields_start
27529 ++
27530 + loff_t ki_pos;
27531 + void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
27532 + void *private;
27533 + int ki_flags;
27534 + u16 ki_hint;
27535 + u16 ki_ioprio; /* See linux/ioprio.h */
27536 +-} __randomize_layout;
27537 ++
27538 ++ randomized_struct_fields_end
27539 ++};
27540 +
27541 + static inline bool is_sync_kiocb(struct kiocb *kiocb)
27542 + {
27543 +diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
27544 +index 0fbbcdf0c178..da0af631ded5 100644
27545 +--- a/include/linux/hardirq.h
27546 ++++ b/include/linux/hardirq.h
27547 +@@ -60,8 +60,14 @@ extern void irq_enter(void);
27548 + */
27549 + extern void irq_exit(void);
27550 +
27551 ++#ifndef arch_nmi_enter
27552 ++#define arch_nmi_enter() do { } while (0)
27553 ++#define arch_nmi_exit() do { } while (0)
27554 ++#endif
27555 ++
27556 + #define nmi_enter() \
27557 + do { \
27558 ++ arch_nmi_enter(); \
27559 + printk_nmi_enter(); \
27560 + lockdep_off(); \
27561 + ftrace_nmi_enter(); \
27562 +@@ -80,6 +86,7 @@ extern void irq_exit(void);
27563 + ftrace_nmi_exit(); \
27564 + lockdep_on(); \
27565 + printk_nmi_exit(); \
27566 ++ arch_nmi_exit(); \
27567 + } while (0)
27568 +
27569 + #endif /* LINUX_HARDIRQ_H */
27570 +diff --git a/include/linux/i2c.h b/include/linux/i2c.h
27571 +index 65b4eaed1d96..7e748648c7d3 100644
27572 +--- a/include/linux/i2c.h
27573 ++++ b/include/linux/i2c.h
27574 +@@ -333,6 +333,7 @@ struct i2c_client {
27575 + char name[I2C_NAME_SIZE];
27576 + struct i2c_adapter *adapter; /* the adapter we sit on */
27577 + struct device dev; /* the device structure */
27578 ++ int init_irq; /* irq set at initialization */
27579 + int irq; /* irq issued by device */
27580 + struct list_head detected;
27581 + #if IS_ENABLED(CONFIG_I2C_SLAVE)
27582 +diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
27583 +index dd1e40ddac7d..875c41b23f20 100644
27584 +--- a/include/linux/irqdesc.h
27585 ++++ b/include/linux/irqdesc.h
27586 +@@ -65,6 +65,7 @@ struct irq_desc {
27587 + unsigned int core_internal_state__do_not_mess_with_it;
27588 + unsigned int depth; /* nested irq disables */
27589 + unsigned int wake_depth; /* nested wake enables */
27590 ++ unsigned int tot_count;
27591 + unsigned int irq_count; /* For detecting broken IRQs */
27592 + unsigned long last_unhandled; /* Aging timer for unhandled count */
27593 + unsigned int irqs_unhandled;
27594 +diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
27595 +index d314150658a4..a61dc075e2ce 100644
27596 +--- a/include/linux/kasan-checks.h
27597 ++++ b/include/linux/kasan-checks.h
27598 +@@ -2,7 +2,7 @@
27599 + #ifndef _LINUX_KASAN_CHECKS_H
27600 + #define _LINUX_KASAN_CHECKS_H
27601 +
27602 +-#ifdef CONFIG_KASAN
27603 ++#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
27604 + void kasan_check_read(const volatile void *p, unsigned int size);
27605 + void kasan_check_write(const volatile void *p, unsigned int size);
27606 + #else
27607 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
27608 +index c38cc5eb7e73..cf761ff58224 100644
27609 +--- a/include/linux/kvm_host.h
27610 ++++ b/include/linux/kvm_host.h
27611 +@@ -634,7 +634,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
27612 + struct kvm_memory_slot *dont);
27613 + int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
27614 + unsigned long npages);
27615 +-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
27616 ++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
27617 + int kvm_arch_prepare_memory_region(struct kvm *kvm,
27618 + struct kvm_memory_slot *memslot,
27619 + const struct kvm_userspace_memory_region *mem,
27620 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
27621 +index 83ae11cbd12c..7391f5fe4eda 100644
27622 +--- a/include/linux/memcontrol.h
27623 ++++ b/include/linux/memcontrol.h
27624 +@@ -561,7 +561,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
27625 + void __unlock_page_memcg(struct mem_cgroup *memcg);
27626 + void unlock_page_memcg(struct page *page);
27627 +
27628 +-/* idx can be of type enum memcg_stat_item or node_stat_item */
27629 ++/*
27630 ++ * idx can be of type enum memcg_stat_item or node_stat_item.
27631 ++ * Keep in sync with memcg_exact_page_state().
27632 ++ */
27633 + static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
27634 + int idx)
27635 + {
27636 +diff --git a/include/linux/mii.h b/include/linux/mii.h
27637 +index 6fee8b1a4400..5cd824c1c0ca 100644
27638 +--- a/include/linux/mii.h
27639 ++++ b/include/linux/mii.h
27640 +@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
27641 + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
27642 + advertising))
27643 + lcl_adv |= ADVERTISE_PAUSE_CAP;
27644 +- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
27645 ++ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
27646 + advertising))
27647 + lcl_adv |= ADVERTISE_PAUSE_ASYM;
27648 +
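Effect of the one-bit fix above, traced through linkmode_adv_to_lcl_adv_t():

/* advertising = Pause only:
 *   old: both tests matched the Pause bit -> CAP | ASYM (wrong)
 *   new: ADVERTISE_PAUSE_CAP only
 * advertising = Asym_Pause only:
 *   old: neither ADVERTISE bit was set (wrong)
 *   new: ADVERTISE_PAUSE_ASYM only
 */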
27649 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
27650 +index 54299251d40d..4f001619f854 100644
27651 +--- a/include/linux/mlx5/driver.h
27652 ++++ b/include/linux/mlx5/driver.h
27653 +@@ -591,6 +591,8 @@ enum mlx5_pagefault_type_flags {
27654 + };
27655 +
27656 + struct mlx5_td {
27657 ++ /* protects tirs list changes while tirs refresh */
27658 ++ struct mutex list_lock;
27659 + struct list_head tirs_list;
27660 + u32 tdn;
27661 + };
27662 +diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
27663 +index 4eb26d278046..280ae96dc4c3 100644
27664 +--- a/include/linux/page-isolation.h
27665 ++++ b/include/linux/page-isolation.h
27666 +@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
27667 +
27668 + /*
27669 + * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
27670 +- * If specified range includes migrate types other than MOVABLE or CMA,
27671 +- * this will fail with -EBUSY.
27672 +- *
27673 +- * For isolating all pages in the range finally, the caller have to
27674 +- * free all pages in the range. test_page_isolated() can be used for
27675 +- * test it.
27676 +- *
27677 +- * The following flags are allowed (they can be combined in a bit mask)
27678 +- * SKIP_HWPOISON - ignore hwpoison pages
27679 +- * REPORT_FAILURE - report details about the failure to isolate the range
27680 + */
27681 + int
27682 + start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
27683 +diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
27684 +index e1a051724f7e..7cbbd891bfcd 100644
27685 +--- a/include/linux/perf_event.h
27686 ++++ b/include/linux/perf_event.h
27687 +@@ -409,7 +409,7 @@ struct pmu {
27688 + /*
27689 + * Set up pmu-private data structures for an AUX area
27690 + */
27691 +- void *(*setup_aux) (int cpu, void **pages,
27692 ++ void *(*setup_aux) (struct perf_event *event, void **pages,
27693 + int nr_pages, bool overwrite);
27694 + /* optional */
27695 +
27696 +diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
27697 +index 5a3bb3b7c9ad..3ecd7ea212ae 100644
27698 +--- a/include/linux/pipe_fs_i.h
27699 ++++ b/include/linux/pipe_fs_i.h
27700 +@@ -182,6 +182,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
27701 + int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
27702 + int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
27703 + void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
27704 ++void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
27705 +
27706 + extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
27707 +
27708 +diff --git a/include/linux/property.h b/include/linux/property.h
27709 +index 3789ec755fb6..65d3420dd5d1 100644
27710 +--- a/include/linux/property.h
27711 ++++ b/include/linux/property.h
27712 +@@ -258,7 +258,7 @@ struct property_entry {
27713 + #define PROPERTY_ENTRY_STRING(_name_, _val_) \
27714 + (struct property_entry) { \
27715 + .name = _name_, \
27716 +- .length = sizeof(_val_), \
27717 ++ .length = sizeof(const char *), \
27718 + .type = DEV_PROP_STRING, \
27719 + { .value = { .str = _val_ } }, \
27720 + }
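A worked example of why the length changes: the macro stores a pointer in .value.str, so .length must describe the pointer, not the literal passed in.

/* PROPERTY_ENTRY_STRING("mode", "host")
 *   old: .length = sizeof("host") == 5        (size of the string literal)
 *   new: .length = sizeof(const char *) == 8  (on 64-bit; what .str really is)
 */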
27721 +diff --git a/include/linux/relay.h b/include/linux/relay.h
27722 +index e1bdf01a86e2..c759f96e39c1 100644
27723 +--- a/include/linux/relay.h
27724 ++++ b/include/linux/relay.h
27725 +@@ -66,7 +66,7 @@ struct rchan
27726 + struct kref kref; /* channel refcount */
27727 + void *private_data; /* for user-defined data */
27728 + size_t last_toobig; /* tried to log event > subbuf size */
27729 +- struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
27730 ++ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
27731 + int is_global; /* One global buffer ? */
27732 + struct list_head list; /* for channel list */
27733 + struct dentry *parent; /* parent dentry passed to open */
27734 +diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
27735 +index 5b9ae62272bb..503778920448 100644
27736 +--- a/include/linux/ring_buffer.h
27737 ++++ b/include/linux/ring_buffer.h
27738 +@@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
27739 + unsigned long *lost_events);
27740 +
27741 + struct ring_buffer_iter *
27742 +-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
27743 ++ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
27744 + void ring_buffer_read_prepare_sync(void);
27745 + void ring_buffer_read_start(struct ring_buffer_iter *iter);
27746 + void ring_buffer_read_finish(struct ring_buffer_iter *iter);
27747 +diff --git a/include/linux/sched.h b/include/linux/sched.h
27748 +index f9b43c989577..9b35aff09f70 100644
27749 +--- a/include/linux/sched.h
27750 ++++ b/include/linux/sched.h
27751 +@@ -1748,9 +1748,9 @@ static __always_inline bool need_resched(void)
27752 + static inline unsigned int task_cpu(const struct task_struct *p)
27753 + {
27754 + #ifdef CONFIG_THREAD_INFO_IN_TASK
27755 +- return p->cpu;
27756 ++ return READ_ONCE(p->cpu);
27757 + #else
27758 +- return task_thread_info(p)->cpu;
27759 ++ return READ_ONCE(task_thread_info(p)->cpu);
27760 + #endif
27761 + }
27762 +
27763 +diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
27764 +index c31d3a47a47c..57c7ed3fe465 100644
27765 +--- a/include/linux/sched/topology.h
27766 ++++ b/include/linux/sched/topology.h
27767 +@@ -176,10 +176,10 @@ typedef int (*sched_domain_flags_f)(void);
27768 + #define SDTL_OVERLAP 0x01
27769 +
27770 + struct sd_data {
27771 +- struct sched_domain **__percpu sd;
27772 +- struct sched_domain_shared **__percpu sds;
27773 +- struct sched_group **__percpu sg;
27774 +- struct sched_group_capacity **__percpu sgc;
27775 ++ struct sched_domain *__percpu *sd;
27776 ++ struct sched_domain_shared *__percpu *sds;
27777 ++ struct sched_group *__percpu *sg;
27778 ++ struct sched_group_capacity *__percpu *sgc;
27779 + };
27780 +
27781 + struct sched_domain_topology_level {
27782 +diff --git a/include/linux/slab.h b/include/linux/slab.h
27783 +index 11b45f7ae405..9449b19c5f10 100644
27784 +--- a/include/linux/slab.h
27785 ++++ b/include/linux/slab.h
27786 +@@ -32,6 +32,8 @@
27787 + #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
27788 + /* Use GFP_DMA memory */
27789 + #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
27790 ++/* Use GFP_DMA32 memory */
27791 ++#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
27792 + /* DEBUG: Store the last owner for bug hunting */
27793 + #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
27794 + /* Panic if kmem_cache_create() fails */
27795 +diff --git a/include/linux/string.h b/include/linux/string.h
27796 +index 7927b875f80c..6ab0a6fa512e 100644
27797 +--- a/include/linux/string.h
27798 ++++ b/include/linux/string.h
27799 +@@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t);
27800 + #ifndef __HAVE_ARCH_MEMCMP
27801 + extern int memcmp(const void *,const void *,__kernel_size_t);
27802 + #endif
27803 ++#ifndef __HAVE_ARCH_BCMP
27804 ++extern int bcmp(const void *,const void *,__kernel_size_t);
27805 ++#endif
27806 + #ifndef __HAVE_ARCH_MEMCHR
27807 + extern void * memchr(const void *,int,__kernel_size_t);
27808 + #endif
27809 +diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
27810 +index 7c007ed7505f..29bc3a203283 100644
27811 +--- a/include/linux/swiotlb.h
27812 ++++ b/include/linux/swiotlb.h
27813 +@@ -76,6 +76,8 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
27814 + size_t size, enum dma_data_direction dir, unsigned long attrs);
27815 + void __init swiotlb_exit(void);
27816 + unsigned int swiotlb_max_segment(void);
27817 ++size_t swiotlb_max_mapping_size(struct device *dev);
27818 ++bool is_swiotlb_active(void);
27819 + #else
27820 + #define swiotlb_force SWIOTLB_NO_FORCE
27821 + static inline bool is_swiotlb_buffer(phys_addr_t paddr)
27822 +@@ -95,6 +97,15 @@ static inline unsigned int swiotlb_max_segment(void)
27823 + {
27824 + return 0;
27825 + }
27826 ++static inline size_t swiotlb_max_mapping_size(struct device *dev)
27827 ++{
27828 ++ return SIZE_MAX;
27829 ++}
27830 ++
27831 ++static inline bool is_swiotlb_active(void)
27832 ++{
27833 ++ return false;
27834 ++}
27835 + #endif /* CONFIG_SWIOTLB */
27836 +
27837 + extern void swiotlb_print_info(void);
27838 +diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
27839 +index fab02133a919..3dc70adfe5f5 100644
27840 +--- a/include/linux/virtio_ring.h
27841 ++++ b/include/linux/virtio_ring.h
27842 +@@ -63,7 +63,7 @@ struct virtqueue;
27843 + /*
27844 + * Creates a virtqueue and allocates the descriptor ring. If
27845 + * may_reduce_num is set, then this may allocate a smaller ring than
27846 +- * expected. The caller should query virtqueue_get_ring_size to learn
27847 ++ * expected. The caller should query virtqueue_get_vring_size to learn
27848 + * the actual size of the ring.
27849 + */
27850 + struct virtqueue *vring_create_virtqueue(unsigned int index,
27851 +diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
27852 +index ec9d6bc65855..fabee6db0abb 100644
27853 +--- a/include/net/bluetooth/bluetooth.h
27854 ++++ b/include/net/bluetooth/bluetooth.h
27855 +@@ -276,7 +276,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
27856 + int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
27857 + int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
27858 +
27859 +-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
27860 ++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
27861 + void bt_accept_unlink(struct sock *sk);
27862 + struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
27863 +
27864 +diff --git a/include/net/ip.h b/include/net/ip.h
27865 +index be3cad9c2e4c..583526aad1d0 100644
27866 +--- a/include/net/ip.h
27867 ++++ b/include/net/ip.h
27868 +@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
27869 + unsigned char __user *data, int optlen);
27870 + void ip_options_undo(struct ip_options *opt);
27871 + void ip_forward_options(struct sk_buff *skb);
27872 +-int ip_options_rcv_srr(struct sk_buff *skb);
27873 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
27874 +
27875 + /*
27876 + * Functions provided by ip_sockglue.c
27877 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
27878 +index 99d4148e0f90..1c3126c14930 100644
27879 +--- a/include/net/net_namespace.h
27880 ++++ b/include/net/net_namespace.h
27881 +@@ -58,6 +58,7 @@ struct net {
27882 + */
27883 + spinlock_t rules_mod_lock;
27884 +
27885 ++ u32 hash_mix;
27886 + atomic64_t cookie_gen;
27887 +
27888 + struct list_head list; /* list of network namespaces */
27889 +diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
27890 +index 4cd56808ac4e..89808ce293c4 100644
27891 +--- a/include/net/netfilter/br_netfilter.h
27892 ++++ b/include/net/netfilter/br_netfilter.h
27893 +@@ -43,7 +43,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
27894 + }
27895 +
27896 + struct net_device *setup_pre_routing(struct sk_buff *skb);
27897 +-void br_netfilter_enable(void);
27898 +
27899 + #if IS_ENABLED(CONFIG_IPV6)
27900 + int br_validate_ipv6(struct net *net, struct sk_buff *skb);
27901 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
27902 +index b4984bbbe157..0612439909dc 100644
27903 +--- a/include/net/netfilter/nf_tables.h
27904 ++++ b/include/net/netfilter/nf_tables.h
27905 +@@ -416,7 +416,8 @@ struct nft_set {
27906 + unsigned char *udata;
27907 + /* runtime data below here */
27908 + const struct nft_set_ops *ops ____cacheline_aligned;
27909 +- u16 flags:14,
27910 ++ u16 flags:13,
27911 ++ bound:1,
27912 + genmask:2;
27913 + u8 klen;
27914 + u8 dlen;
27915 +@@ -690,10 +691,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
27916 + gcb->elems[gcb->head.cnt++] = elem;
27917 + }
27918 +
27919 ++struct nft_expr_ops;
27920 + /**
27921 + * struct nft_expr_type - nf_tables expression type
27922 + *
27923 + * @select_ops: function to select nft_expr_ops
27924 ++ * @release_ops: release nft_expr_ops
27925 + * @ops: default ops, used when no select_ops functions is present
27926 + * @list: used internally
27927 + * @name: Identifier
27928 +@@ -706,6 +709,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
27929 + struct nft_expr_type {
27930 + const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
27931 + const struct nlattr * const tb[]);
27932 ++ void (*release_ops)(const struct nft_expr_ops *ops);
27933 + const struct nft_expr_ops *ops;
27934 + struct list_head list;
27935 + const char *name;
27936 +@@ -1329,15 +1333,12 @@ struct nft_trans_rule {
27937 + struct nft_trans_set {
27938 + struct nft_set *set;
27939 + u32 set_id;
27940 +- bool bound;
27941 + };
27942 +
27943 + #define nft_trans_set(trans) \
27944 + (((struct nft_trans_set *)trans->data)->set)
27945 + #define nft_trans_set_id(trans) \
27946 + (((struct nft_trans_set *)trans->data)->set_id)
27947 +-#define nft_trans_set_bound(trans) \
27948 +- (((struct nft_trans_set *)trans->data)->bound)
27949 +
27950 + struct nft_trans_chain {
27951 + bool update;
27952 +diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
27953 +index 16a842456189..d9b665151f3d 100644
27954 +--- a/include/net/netns/hash.h
27955 ++++ b/include/net/netns/hash.h
27956 +@@ -2,16 +2,10 @@
27957 + #ifndef __NET_NS_HASH_H__
27958 + #define __NET_NS_HASH_H__
27959 +
27960 +-#include <asm/cache.h>
27961 +-
27962 +-struct net;
27963 ++#include <net/net_namespace.h>
27964 +
27965 + static inline u32 net_hash_mix(const struct net *net)
27966 + {
27967 +-#ifdef CONFIG_NET_NS
27968 +- return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
27969 +-#else
27970 +- return 0;
27971 +-#endif
27972 ++ return net->hash_mix;
27973 + }
27974 + #endif
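The old pointer-derived mix both leaked kernel address bits into hash-based lookups and offered little entropy. The new field wants one-time per-namespace seeding; a hedged sketch of the expected counterpart hunk (the real change lives in net/core/net_namespace.c and is not shown in this excerpt):

/* hedged sketch: seed once when the namespace is set up */
net->hash_mix = get_random_int();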
27975 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
27976 +index 9481f2c142e2..e7eb4aa6ccc9 100644
27977 +--- a/include/net/sch_generic.h
27978 ++++ b/include/net/sch_generic.h
27979 +@@ -51,7 +51,10 @@ struct qdisc_size_table {
27980 + struct qdisc_skb_head {
27981 + struct sk_buff *head;
27982 + struct sk_buff *tail;
27983 +- __u32 qlen;
27984 ++ union {
27985 ++ u32 qlen;
27986 ++ atomic_t atomic_qlen;
27987 ++ };
27988 + spinlock_t lock;
27989 + };
27990 +
27991 +@@ -408,27 +411,19 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
27992 + BUILD_BUG_ON(sizeof(qcb->data) < sz);
27993 + }
27994 +
27995 +-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
27996 +-{
27997 +- return this_cpu_ptr(q->cpu_qstats)->qlen;
27998 +-}
27999 +-
28000 + static inline int qdisc_qlen(const struct Qdisc *q)
28001 + {
28002 + return q->q.qlen;
28003 + }
28004 +
28005 +-static inline int qdisc_qlen_sum(const struct Qdisc *q)
28006 ++static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
28007 + {
28008 +- __u32 qlen = q->qstats.qlen;
28009 +- int i;
28010 ++ u32 qlen = q->qstats.qlen;
28011 +
28012 +- if (q->flags & TCQ_F_NOLOCK) {
28013 +- for_each_possible_cpu(i)
28014 +- qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
28015 +- } else {
28016 ++ if (q->flags & TCQ_F_NOLOCK)
28017 ++ qlen += atomic_read(&q->q.atomic_qlen);
28018 ++ else
28019 + qlen += q->q.qlen;
28020 +- }
28021 +
28022 + return qlen;
28023 + }
28024 +@@ -825,14 +820,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
28025 + this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
28026 + }
28027 +
28028 +-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
28029 ++static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
28030 + {
28031 +- this_cpu_inc(sch->cpu_qstats->qlen);
28032 ++ atomic_inc(&sch->q.atomic_qlen);
28033 + }
28034 +
28035 +-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
28036 ++static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
28037 + {
28038 +- this_cpu_dec(sch->cpu_qstats->qlen);
28039 ++ atomic_dec(&sch->q.atomic_qlen);
28040 + }
28041 +
28042 + static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
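The hunks above collapse the per-CPU qlen counters into one atomic_t for lockless (TCQ_F_NOLOCK) qdiscs, so qdisc_qlen_sum() stays O(1) instead of walking every possible CPU. A minimal, illustrative sketch of how a NOLOCK qdisc's datapath would use the renamed helpers (the example_* names are not from the patch):

/* illustrative only */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* ... link skb into the lockless queue ... */
	qdisc_qstats_atomic_qlen_inc(sch);	/* was a per-cpu increment */
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = example_pop(sch);	/* hypothetical helper */

	if (skb)
		qdisc_qstats_atomic_qlen_dec(sch);
	return skb;
}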
28043 +diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
28044 +index 32ee65a30aff..1c6e6c0766ca 100644
28045 +--- a/include/net/sctp/checksum.h
28046 ++++ b/include/net/sctp/checksum.h
28047 +@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
28048 + static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
28049 + unsigned int offset)
28050 + {
28051 +- struct sctphdr *sh = sctp_hdr(skb);
28052 ++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
28053 + const struct skb_checksum_ops ops = {
28054 + .update = sctp_csum_update,
28055 + .combine = sctp_csum_combine,
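For context on the sctp_compute_cksum() change: sctp_hdr() resolves through skb->transport_header, which is not guaranteed to point at the SCTP header on every path that computes the checksum, while the caller-supplied offset always is. In short:

struct sctphdr *a = sctp_hdr(skb);                          /* trusts transport_header */
struct sctphdr *b = (struct sctphdr *)(skb->data + offset); /* trusts only the offset */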
28056 +diff --git a/include/net/sock.h b/include/net/sock.h
28057 +index f43f935cb113..89d0d94d5db2 100644
28058 +--- a/include/net/sock.h
28059 ++++ b/include/net/sock.h
28060 +@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
28061 + hlist_add_head_rcu(&sk->sk_node, list);
28062 + }
28063 +
28064 ++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
28065 ++{
28066 ++ sock_hold(sk);
28067 ++ hlist_add_tail_rcu(&sk->sk_node, list);
28068 ++}
28069 ++
28070 + static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
28071 + {
28072 + hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
28073 +diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
28074 +index cb8a273732cf..bb8092fa1e36 100644
28075 +--- a/include/scsi/libfcoe.h
28076 ++++ b/include/scsi/libfcoe.h
28077 +@@ -79,7 +79,7 @@ enum fip_state {
28078 + * It must not change after fcoe_ctlr_init() sets it.
28079 + */
28080 + enum fip_mode {
28081 +- FIP_MODE_AUTO = FIP_ST_AUTO,
28082 ++ FIP_MODE_AUTO,
28083 + FIP_MODE_NON_FIP,
28084 + FIP_MODE_FABRIC,
28085 + FIP_MODE_VN2VN,
28086 +@@ -250,7 +250,7 @@ struct fcoe_rport {
28087 + };
28088 +
28089 + /* FIP API functions */
28090 +-void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state);
28091 ++void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_mode);
28092 + void fcoe_ctlr_destroy(struct fcoe_ctlr *);
28093 + void fcoe_ctlr_link_up(struct fcoe_ctlr *);
28094 + int fcoe_ctlr_link_down(struct fcoe_ctlr *);
28095 +diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
28096 +index b9ba520f7e4b..2832134e5397 100644
28097 +--- a/include/uapi/linux/android/binder.h
28098 ++++ b/include/uapi/linux/android/binder.h
28099 +@@ -41,6 +41,14 @@ enum {
28100 + enum {
28101 + FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
28102 + FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
28103 ++
28104 ++ /**
28105 ++ * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
28106 ++ *
28107 ++ * Only when set, causes senders to include their security
28108 ++ * context
28109 ++ */
28110 ++ FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
28111 + };
28112 +
28113 + #ifdef BINDER_IPC_32BIT
28114 +@@ -218,6 +226,7 @@ struct binder_node_info_for_ref {
28115 + #define BINDER_VERSION _IOWR('b', 9, struct binder_version)
28116 + #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
28117 + #define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
28118 ++#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
28119 +
28120 + /*
28121 + * NOTE: Two special error codes you should check for when calling
28122 +@@ -276,6 +285,11 @@ struct binder_transaction_data {
28123 + } data;
28124 + };
28125 +
28126 ++struct binder_transaction_data_secctx {
28127 ++ struct binder_transaction_data transaction_data;
28128 ++ binder_uintptr_t secctx;
28129 ++};
28130 ++
28131 + struct binder_transaction_data_sg {
28132 + struct binder_transaction_data transaction_data;
28133 + binder_size_t buffers_size;
28134 +@@ -311,6 +325,11 @@ enum binder_driver_return_protocol {
28135 + BR_OK = _IO('r', 1),
28136 + /* No parameters! */
28137 +
28138 ++ BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
28139 ++ struct binder_transaction_data_secctx),
28140 ++ /*
28141 ++ * binder_transaction_data_secctx: the received command.
28142 ++ */
28143 + BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
28144 + BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
28145 + /*
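A note on why BR_TRANSACTION_SEC_CTX can share command number 2 with BR_TRANSACTION: _IOR() encodes sizeof(type) into the constant, and the two payload structs differ in size, so the resulting values are distinct.

/* _IOR('r', 2, struct binder_transaction_data_secctx) !=
 * _IOR('r', 2, struct binder_transaction_data): the encoded size
 * field (the secctx variant carries an extra binder_uintptr_t)
 * tells userspace which layout follows. */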
28146 +diff --git a/kernel/audit.h b/kernel/audit.h
28147 +index 91421679a168..6ffb70575082 100644
28148 +--- a/kernel/audit.h
28149 ++++ b/kernel/audit.h
28150 +@@ -314,7 +314,7 @@ extern void audit_trim_trees(void);
28151 + extern int audit_tag_tree(char *old, char *new);
28152 + extern const char *audit_tree_path(struct audit_tree *tree);
28153 + extern void audit_put_tree(struct audit_tree *tree);
28154 +-extern void audit_kill_trees(struct list_head *list);
28155 ++extern void audit_kill_trees(struct audit_context *context);
28156 + #else
28157 + #define audit_remove_tree_rule(rule) BUG()
28158 + #define audit_add_tree_rule(rule) -EINVAL
28159 +@@ -323,7 +323,7 @@ extern void audit_kill_trees(struct list_head *list);
28160 + #define audit_put_tree(tree) (void)0
28161 + #define audit_tag_tree(old, new) -EINVAL
28162 + #define audit_tree_path(rule) "" /* never called */
28163 +-#define audit_kill_trees(list) BUG()
28164 ++#define audit_kill_trees(context) BUG()
28165 + #endif
28166 +
28167 + extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
28168 +diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
28169 +index d4af4d97f847..abfb112f26aa 100644
28170 +--- a/kernel/audit_tree.c
28171 ++++ b/kernel/audit_tree.c
28172 +@@ -524,13 +524,14 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
28173 + return 0;
28174 + }
28175 +
28176 +-static void audit_tree_log_remove_rule(struct audit_krule *rule)
28177 ++static void audit_tree_log_remove_rule(struct audit_context *context,
28178 ++ struct audit_krule *rule)
28179 + {
28180 + struct audit_buffer *ab;
28181 +
28182 + if (!audit_enabled)
28183 + return;
28184 +- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
28185 ++ ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
28186 + if (unlikely(!ab))
28187 + return;
28188 + audit_log_format(ab, "op=remove_rule dir=");
28189 +@@ -540,7 +541,7 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
28190 + audit_log_end(ab);
28191 + }
28192 +
28193 +-static void kill_rules(struct audit_tree *tree)
28194 ++static void kill_rules(struct audit_context *context, struct audit_tree *tree)
28195 + {
28196 + struct audit_krule *rule, *next;
28197 + struct audit_entry *entry;
28198 +@@ -551,7 +552,7 @@ static void kill_rules(struct audit_tree *tree)
28199 + list_del_init(&rule->rlist);
28200 + if (rule->tree) {
28201 + /* not a half-baked one */
28202 +- audit_tree_log_remove_rule(rule);
28203 ++ audit_tree_log_remove_rule(context, rule);
28204 + if (entry->rule.exe)
28205 + audit_remove_mark(entry->rule.exe);
28206 + rule->tree = NULL;
28207 +@@ -633,7 +634,7 @@ static void trim_marked(struct audit_tree *tree)
28208 + tree->goner = 1;
28209 + spin_unlock(&hash_lock);
28210 + mutex_lock(&audit_filter_mutex);
28211 +- kill_rules(tree);
28212 ++ kill_rules(audit_context(), tree);
28213 + list_del_init(&tree->list);
28214 + mutex_unlock(&audit_filter_mutex);
28215 + prune_one(tree);
28216 +@@ -973,8 +974,10 @@ static void audit_schedule_prune(void)
28217 + * ... and that one is done if evict_chunk() decides to delay until the end
28218 + * of syscall. Runs synchronously.
28219 + */
28220 +-void audit_kill_trees(struct list_head *list)
28221 ++void audit_kill_trees(struct audit_context *context)
28222 + {
28223 ++ struct list_head *list = &context->killed_trees;
28224 ++
28225 + audit_ctl_lock();
28226 + mutex_lock(&audit_filter_mutex);
28227 +
28228 +@@ -982,7 +985,7 @@ void audit_kill_trees(struct list_head *list)
28229 + struct audit_tree *victim;
28230 +
28231 + victim = list_entry(list->next, struct audit_tree, list);
28232 +- kill_rules(victim);
28233 ++ kill_rules(context, victim);
28234 + list_del_init(&victim->list);
28235 +
28236 + mutex_unlock(&audit_filter_mutex);
28237 +@@ -1017,7 +1020,7 @@ static void evict_chunk(struct audit_chunk *chunk)
28238 + list_del_init(&owner->same_root);
28239 + spin_unlock(&hash_lock);
28240 + if (!postponed) {
28241 +- kill_rules(owner);
28242 ++ kill_rules(audit_context(), owner);
28243 + list_move(&owner->list, &prune_list);
28244 + need_prune = 1;
28245 + } else {
28246 +diff --git a/kernel/auditsc.c b/kernel/auditsc.c
28247 +index 6593a5207fb0..b585ceb2f7a2 100644
28248 +--- a/kernel/auditsc.c
28249 ++++ b/kernel/auditsc.c
28250 +@@ -1444,6 +1444,9 @@ void __audit_free(struct task_struct *tsk)
28251 + if (!context)
28252 + return;
28253 +
28254 ++ if (!list_empty(&context->killed_trees))
28255 ++ audit_kill_trees(context);
28256 ++
28257 + /* We are called either by do_exit() or the fork() error handling code;
28258 + * in the former case tsk == current and in the latter tsk is a
28259 + * random task_struct that doesn't have any meaningful data we
28260 +@@ -1460,9 +1463,6 @@ void __audit_free(struct task_struct *tsk)
28261 + audit_log_exit();
28262 + }
28263 +
28264 +- if (!list_empty(&context->killed_trees))
28265 +- audit_kill_trees(&context->killed_trees);
28266 +-
28267 + audit_set_context(tsk, NULL);
28268 + audit_free_context(context);
28269 + }
28270 +@@ -1537,6 +1537,9 @@ void __audit_syscall_exit(int success, long return_code)
28271 + if (!context)
28272 + return;
28273 +
28274 ++ if (!list_empty(&context->killed_trees))
28275 ++ audit_kill_trees(context);
28276 ++
28277 + if (!context->dummy && context->in_syscall) {
28278 + if (success)
28279 + context->return_valid = AUDITSC_SUCCESS;
28280 +@@ -1571,9 +1574,6 @@ void __audit_syscall_exit(int success, long return_code)
28281 + context->in_syscall = 0;
28282 + context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
28283 +
28284 +- if (!list_empty(&context->killed_trees))
28285 +- audit_kill_trees(&context->killed_trees);
28286 +-
28287 + audit_free_names(context);
28288 + unroll_tree_refs(context, NULL, 0);
28289 + audit_free_aux(context);
28290 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
28291 +index 5fcce2f4209d..d53825b6fcd9 100644
28292 +--- a/kernel/bpf/verifier.c
28293 ++++ b/kernel/bpf/verifier.c
28294 +@@ -3187,7 +3187,7 @@ do_sim:
28295 + *dst_reg = *ptr_reg;
28296 + }
28297 + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
28298 +- if (!ptr_is_dst_reg)
28299 ++ if (!ptr_is_dst_reg && ret)
28300 + *dst_reg = tmp;
28301 + return !ret ? -EFAULT : 0;
28302 + }
28303 +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
28304 +index f31bd61c9466..f84bf28f36ba 100644
28305 +--- a/kernel/cgroup/cgroup.c
28306 ++++ b/kernel/cgroup/cgroup.c
28307 +@@ -197,7 +197,7 @@ static u64 css_serial_nr_next = 1;
28308 + */
28309 + static u16 have_fork_callback __read_mostly;
28310 + static u16 have_exit_callback __read_mostly;
28311 +-static u16 have_free_callback __read_mostly;
28312 ++static u16 have_release_callback __read_mostly;
28313 + static u16 have_canfork_callback __read_mostly;
28314 +
28315 + /* cgroup namespace for init task */
28316 +@@ -2033,7 +2033,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
28317 + struct cgroup_namespace *ns)
28318 + {
28319 + struct dentry *dentry;
28320 +- bool new_sb;
28321 ++ bool new_sb = false;
28322 +
28323 + dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
28324 +
28325 +@@ -2043,6 +2043,7 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
28326 + */
28327 + if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
28328 + struct dentry *nsdentry;
28329 ++ struct super_block *sb = dentry->d_sb;
28330 + struct cgroup *cgrp;
28331 +
28332 + mutex_lock(&cgroup_mutex);
28333 +@@ -2053,12 +2054,14 @@ struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
28334 + spin_unlock_irq(&css_set_lock);
28335 + mutex_unlock(&cgroup_mutex);
28336 +
28337 +- nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
28338 ++ nsdentry = kernfs_node_dentry(cgrp->kn, sb);
28339 + dput(dentry);
28340 ++ if (IS_ERR(nsdentry))
28341 ++ deactivate_locked_super(sb);
28342 + dentry = nsdentry;
28343 + }
28344 +
28345 +- if (IS_ERR(dentry) || !new_sb)
28346 ++ if (!new_sb)
28347 + cgroup_put(&root->cgrp);
28348 +
28349 + return dentry;
28350 +@@ -5313,7 +5316,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
28351 +
28352 + have_fork_callback |= (bool)ss->fork << ss->id;
28353 + have_exit_callback |= (bool)ss->exit << ss->id;
28354 +- have_free_callback |= (bool)ss->free << ss->id;
28355 ++ have_release_callback |= (bool)ss->release << ss->id;
28356 + have_canfork_callback |= (bool)ss->can_fork << ss->id;
28357 +
28358 + /* At system boot, before all subsystems have been
28359 +@@ -5749,16 +5752,19 @@ void cgroup_exit(struct task_struct *tsk)
28360 + } while_each_subsys_mask();
28361 + }
28362 +
28363 +-void cgroup_free(struct task_struct *task)
28364 ++void cgroup_release(struct task_struct *task)
28365 + {
28366 +- struct css_set *cset = task_css_set(task);
28367 + struct cgroup_subsys *ss;
28368 + int ssid;
28369 +
28370 +- do_each_subsys_mask(ss, ssid, have_free_callback) {
28371 +- ss->free(task);
28372 ++ do_each_subsys_mask(ss, ssid, have_release_callback) {
28373 ++ ss->release(task);
28374 + } while_each_subsys_mask();
28375 ++}
28376 +
28377 ++void cgroup_free(struct task_struct *task)
28378 ++{
28379 ++ struct css_set *cset = task_css_set(task);
28380 + put_css_set(cset);
28381 + }
28382 +
28383 +diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
28384 +index 9829c67ebc0a..c9960baaa14f 100644
28385 +--- a/kernel/cgroup/pids.c
28386 ++++ b/kernel/cgroup/pids.c
28387 +@@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
28388 + pids_uncharge(pids, 1);
28389 + }
28390 +
28391 +-static void pids_free(struct task_struct *task)
28392 ++static void pids_release(struct task_struct *task)
28393 + {
28394 + struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
28395 +
28396 +@@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
28397 + .cancel_attach = pids_cancel_attach,
28398 + .can_fork = pids_can_fork,
28399 + .cancel_fork = pids_cancel_fork,
28400 +- .free = pids_free,
28401 ++ .release = pids_release,
28402 + .legacy_cftypes = pids_files,
28403 + .dfl_cftypes = pids_files,
28404 + .threaded = true,
28405 +diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
28406 +index d503d1a9007c..bb95a35e8c2d 100644
28407 +--- a/kernel/cgroup/rstat.c
28408 ++++ b/kernel/cgroup/rstat.c
28409 +@@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
28410 + struct cgroup *root, int cpu)
28411 + {
28412 + struct cgroup_rstat_cpu *rstatc;
28413 +- struct cgroup *parent;
28414 +
28415 + if (pos == root)
28416 + return NULL;
28417 +@@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
28418 + * However, due to the way we traverse, @pos will be the first
28419 + * child in most cases. The only exception is @root.
28420 + */
28421 +- parent = cgroup_parent(pos);
28422 +- if (parent && rstatc->updated_next) {
28423 ++ if (rstatc->updated_next) {
28424 ++ struct cgroup *parent = cgroup_parent(pos);
28425 + struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
28426 + struct cgroup_rstat_cpu *nrstatc;
28427 + struct cgroup **nextp;
28428 +@@ -140,9 +139,12 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
28429 + * updated stat.
28430 + */
28431 + smp_mb();
28432 ++
28433 ++ return pos;
28434 + }
28435 +
28436 +- return pos;
28437 ++ /* only happens for @root */
28438 ++ return NULL;
28439 + }
28440 +
28441 + /* see cgroup_rstat_flush() */
28442 +diff --git a/kernel/cpu.c b/kernel/cpu.c
28443 +index d1c6d152da89..6754f3ecfd94 100644
28444 +--- a/kernel/cpu.c
28445 ++++ b/kernel/cpu.c
28446 +@@ -313,6 +313,15 @@ void cpus_write_unlock(void)
28447 +
28448 + void lockdep_assert_cpus_held(void)
28449 + {
28450 ++ /*
28451 ++ * We can't have hotplug operations before userspace starts running,
28452 ++ * and some init codepaths will knowingly not take the hotplug lock.
28453 ++ * This is all valid, so mute lockdep until it makes sense to report
28454 ++ * unheld locks.
28455 ++ */
28456 ++ if (system_state < SYSTEM_RUNNING)
28457 ++ return;
28458 ++
28459 + percpu_rwsem_assert_held(&cpu_hotplug_lock);
28460 + }
28461 +
28462 +@@ -555,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
28463 + cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
28464 + }
28465 +
28466 ++static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
28467 ++{
28468 ++ if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
28469 ++ return true;
28470 ++ /*
28471 ++ * When CPU hotplug is disabled, then taking the CPU down is not
28472 ++ * possible because takedown_cpu() and the architecture and
28473 ++ * subsystem specific mechanisms are not available. So the CPU
28474 ++ * which would be completely unplugged again needs to stay around
28475 ++ * in the current state.
28476 ++ */
28477 ++ return st->state <= CPUHP_BRINGUP_CPU;
28478 ++}
28479 ++
28480 + static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
28481 + enum cpuhp_state target)
28482 + {
28483 +@@ -565,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
28484 + st->state++;
28485 + ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
28486 + if (ret) {
28487 +- st->target = prev_state;
28488 +- undo_cpu_up(cpu, st);
28489 ++ if (can_rollback_cpu(st)) {
28490 ++ st->target = prev_state;
28491 ++ undo_cpu_up(cpu, st);
28492 ++ }
28493 + break;
28494 + }
28495 + }
28496 +diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
28497 +index 355d16acee6d..6310ad01f915 100644
28498 +--- a/kernel/dma/direct.c
28499 ++++ b/kernel/dma/direct.c
28500 +@@ -380,3 +380,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
28501 + */
28502 + return mask >= __phys_to_dma(dev, min_mask);
28503 + }
28504 ++
28505 ++size_t dma_direct_max_mapping_size(struct device *dev)
28506 ++{
28507 ++ size_t size = SIZE_MAX;
28508 ++
28509 ++ /* If SWIOTLB is active, use its maximum mapping size */
28510 ++ if (is_swiotlb_active())
28511 ++ size = swiotlb_max_mapping_size(dev);
28512 ++
28513 ++ return size;
28514 ++}
28515 +diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
28516 +index a11006b6d8e8..5753008ab286 100644
28517 +--- a/kernel/dma/mapping.c
28518 ++++ b/kernel/dma/mapping.c
28519 +@@ -357,3 +357,17 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
28520 + ops->cache_sync(dev, vaddr, size, dir);
28521 + }
28522 + EXPORT_SYMBOL(dma_cache_sync);
28523 ++
28524 ++size_t dma_max_mapping_size(struct device *dev)
28525 ++{
28526 ++ const struct dma_map_ops *ops = get_dma_ops(dev);
28527 ++ size_t size = SIZE_MAX;
28528 ++
28529 ++ if (dma_is_direct(ops))
28530 ++ size = dma_direct_max_mapping_size(dev);
28531 ++ else if (ops && ops->max_mapping_size)
28532 ++ size = ops->max_mapping_size(dev);
28533 ++
28534 ++ return size;
28535 ++}
28536 ++EXPORT_SYMBOL_GPL(dma_max_mapping_size);
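dma_max_mapping_size() dispatches either to dma-direct (which defers to SWIOTLB, next hunk) or to the new optional ->max_mapping_size op. A hedged caller-side sketch of the intended use, with illustrative variable names:

/* clamp one dma_map_single()/dma_map_page() call to what the
 * DMA layer can actually map; SIZE_MAX means no limit */
size_t max_len = dma_max_mapping_size(dev);

if (req_len > max_len)
	req_len = max_len;	/* map the remainder in further calls */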
28537 +diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
28538 +index 1fb6fd68b9c7..c873f9cc2146 100644
28539 +--- a/kernel/dma/swiotlb.c
28540 ++++ b/kernel/dma/swiotlb.c
28541 +@@ -662,3 +662,17 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
28542 + {
28543 + return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
28544 + }
28545 ++
28546 ++size_t swiotlb_max_mapping_size(struct device *dev)
28547 ++{
28548 ++ return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
28549 ++}
28550 ++
28551 ++bool is_swiotlb_active(void)
28552 ++{
28553 ++ /*
28554 ++ * When SWIOTLB is initialized, even if io_tlb_start points to physical
28555 ++ * address zero, io_tlb_end surely doesn't.
28556 ++ */
28557 ++ return io_tlb_end != 0;
28558 ++}
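Plugging in the usual constants (IO_TLB_SHIFT = 11 and IO_TLB_SEGSIZE = 128 in this era's swiotlb headers, quoted from memory, so treat as an assumption), the new limit works out to:

((size_t)1 << 11) * 128 = 2048 * 128 = 262144 bytes, i.e. a 256 KiB cap per mapping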
28559 +diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
28560 +index 5ab4fe3b1dcc..878c62ec0190 100644
28561 +--- a/kernel/events/ring_buffer.c
28562 ++++ b/kernel/events/ring_buffer.c
28563 +@@ -658,7 +658,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
28564 + goto out;
28565 + }
28566 +
28567 +- rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
28568 ++ rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
28569 + overwrite);
28570 + if (!rb->aux_priv)
28571 + goto out;
28572 +diff --git a/kernel/exit.c b/kernel/exit.c
28573 +index 2639a30a8aa5..2166c2d92ddc 100644
28574 +--- a/kernel/exit.c
28575 ++++ b/kernel/exit.c
28576 +@@ -219,6 +219,7 @@ repeat:
28577 + }
28578 +
28579 + write_unlock_irq(&tasklist_lock);
28580 ++ cgroup_release(p);
28581 + release_thread(p);
28582 + call_rcu(&p->rcu, delayed_put_task_struct);
28583 +
28584 +diff --git a/kernel/futex.c b/kernel/futex.c
28585 +index a0514e01c3eb..52668d44e07b 100644
28586 +--- a/kernel/futex.c
28587 ++++ b/kernel/futex.c
28588 +@@ -3440,6 +3440,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int p
28589 + {
28590 + u32 uval, uninitialized_var(nval), mval;
28591 +
28592 ++ /* Futex address must be 32bit aligned */
28593 ++ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
28594 ++ return -1;
28595 ++
28596 + retry:
28597 + if (get_user(uval, uaddr))
28598 + return -1;
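Worked example for the new alignment guard (the address is illustrative):

/* a corrupted robust-list entry: uaddr = 0x1002
 *   ((unsigned long)0x1002) % sizeof(u32) == 2  -> return -1
 * so handle_futex_death() fails fast instead of running
 * get_user()/cmpxchg on a misaligned futex word. */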
28599 +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
28600 +index 34e969069488..b07a2acc4eec 100644
28601 +--- a/kernel/irq/chip.c
28602 ++++ b/kernel/irq/chip.c
28603 +@@ -855,7 +855,11 @@ void handle_percpu_irq(struct irq_desc *desc)
28604 + {
28605 + struct irq_chip *chip = irq_desc_get_chip(desc);
28606 +
28607 +- kstat_incr_irqs_this_cpu(desc);
28608 ++ /*
28609 ++ * PER CPU interrupts are not serialized. Do not touch
28610 ++ * desc->tot_count.
28611 ++ */
28612 ++ __kstat_incr_irqs_this_cpu(desc);
28613 +
28614 + if (chip->irq_ack)
28615 + chip->irq_ack(&desc->irq_data);
28616 +@@ -884,7 +888,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
28617 + unsigned int irq = irq_desc_get_irq(desc);
28618 + irqreturn_t res;
28619 +
28620 +- kstat_incr_irqs_this_cpu(desc);
28621 ++ /*
28622 ++ * PER CPU interrupts are not serialized. Do not touch
28623 ++ * desc->tot_count.
28624 ++ */
28625 ++ __kstat_incr_irqs_this_cpu(desc);
28626 +
28627 + if (chip->irq_ack)
28628 + chip->irq_ack(&desc->irq_data);
28629 +@@ -1376,6 +1384,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
28630 + int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
28631 + {
28632 + data = data->parent_data;
28633 ++
28634 ++ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
28635 ++ return 0;
28636 ++
28637 + if (data->chip->irq_set_wake)
28638 + return data->chip->irq_set_wake(data, on);
28639 +
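irq_chip_set_wake_parent() now honours IRQCHIP_SKIP_SET_WAKE on the parent chip instead of requiring every parent to supply an ->irq_set_wake callback. A minimal sketch of a parent chip relying on this, with an illustrative name:

static struct irq_chip example_parent_chip = {
	.name	= "example-parent",
	/* no .irq_set_wake callback: wake configuration is a no-op here */
	.flags	= IRQCHIP_SKIP_SET_WAKE,	/* child's set_wake now returns 0 */
};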
28640 +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
28641 +index ca6afa267070..e74e7eea76cf 100644
28642 +--- a/kernel/irq/internals.h
28643 ++++ b/kernel/irq/internals.h
28644 +@@ -242,12 +242,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
28645 +
28646 + #undef __irqd_to_state
28647 +
28648 +-static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
28649 ++static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
28650 + {
28651 + __this_cpu_inc(*desc->kstat_irqs);
28652 + __this_cpu_inc(kstat.irqs_sum);
28653 + }
28654 +
28655 ++static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
28656 ++{
28657 ++ __kstat_incr_irqs_this_cpu(desc);
28658 ++ desc->tot_count++;
28659 ++}
28660 ++
28661 + static inline int irq_desc_get_node(struct irq_desc *desc)
28662 + {
28663 + return irq_common_data_get_node(&desc->irq_common_data);
28664 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
28665 +index ef8ad36cadcf..e16e022eae09 100644
28666 +--- a/kernel/irq/irqdesc.c
28667 ++++ b/kernel/irq/irqdesc.c
28668 +@@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
28669 + desc->depth = 1;
28670 + desc->irq_count = 0;
28671 + desc->irqs_unhandled = 0;
28672 ++ desc->tot_count = 0;
28673 + desc->name = NULL;
28674 + desc->owner = owner;
28675 + for_each_possible_cpu(cpu)
28676 +@@ -557,6 +558,7 @@ int __init early_irq_init(void)
28677 + alloc_masks(&desc[i], node);
28678 + raw_spin_lock_init(&desc[i].lock);
28679 + lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
28680 ++ mutex_init(&desc[i].request_mutex);
28681 + desc_set_defaults(i, &desc[i], node, NULL, NULL);
28682 + }
28683 + return arch_early_irq_init();
28684 +@@ -919,11 +921,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
28685 + unsigned int kstat_irqs(unsigned int irq)
28686 + {
28687 + struct irq_desc *desc = irq_to_desc(irq);
28688 +- int cpu;
28689 + unsigned int sum = 0;
28690 ++ int cpu;
28691 +
28692 + if (!desc || !desc->kstat_irqs)
28693 + return 0;
28694 ++ if (!irq_settings_is_per_cpu_devid(desc) &&
28695 ++ !irq_settings_is_per_cpu(desc))
28696 ++ return desc->tot_count;
28697 ++
28698 + for_each_possible_cpu(cpu)
28699 + sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
28700 + return sum;
28701 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
28702 +index 95932333a48b..e805fe3bf87f 100644
28703 +--- a/kernel/locking/lockdep.c
28704 ++++ b/kernel/locking/lockdep.c
28705 +@@ -3535,6 +3535,9 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
28706 + unsigned int depth;
28707 + int i;
28708 +
28709 ++ if (unlikely(!debug_locks))
28710 ++ return 0;
28711 ++
28712 + depth = curr->lockdep_depth;
28713 + /*
28714 + * This function is about (re)setting the class of a held lock,
28715 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
28716 +index 9180158756d2..38d44d27e37a 100644
28717 +--- a/kernel/rcu/tree.c
28718 ++++ b/kernel/rcu/tree.c
28719 +@@ -1557,14 +1557,23 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
28720 + }
28721 +
28722 + /*
28723 +- * Awaken the grace-period kthread. Don't do a self-awaken, and don't
28724 +- * bother awakening when there is nothing for the grace-period kthread
28725 +- * to do (as in several CPUs raced to awaken, and we lost), and finally
28726 +- * don't try to awaken a kthread that has not yet been created.
28727 ++ * Awaken the grace-period kthread. Don't do a self-awaken (unless in
28728 ++ * an interrupt or softirq handler), and don't bother awakening when there
28729 ++ * is nothing for the grace-period kthread to do (as in several CPUs raced
28730 ++ * to awaken, and we lost), and finally don't try to awaken a kthread that
28731 ++ * has not yet been created. If all those checks are passed, track some
28732 ++ * debug information and awaken.
28733 ++ *
28734 ++ * So why do the self-wakeup when in an interrupt or softirq handler
28735 ++ * in the grace-period kthread's context? Because the kthread might have
28736 ++ * been interrupted just as it was going to sleep, and just after the final
28737 ++ * pre-sleep check of the awaken condition. In this case, a wakeup really
28738 ++ * is required, and is therefore supplied.
28739 + */
28740 + static void rcu_gp_kthread_wake(void)
28741 + {
28742 +- if (current == rcu_state.gp_kthread ||
28743 ++ if ((current == rcu_state.gp_kthread &&
28744 ++ !in_interrupt() && !in_serving_softirq()) ||
28745 + !READ_ONCE(rcu_state.gp_flags) ||
28746 + !rcu_state.gp_kthread)
28747 + return;
28748 +diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
28749 +index 1971869c4072..f4ca36d92138 100644
28750 +--- a/kernel/rcu/update.c
28751 ++++ b/kernel/rcu/update.c
28752 +@@ -52,6 +52,7 @@
28753 + #include <linux/tick.h>
28754 + #include <linux/rcupdate_wait.h>
28755 + #include <linux/sched/isolation.h>
28756 ++#include <linux/kprobes.h>
28757 +
28758 + #define CREATE_TRACE_POINTS
28759 +
28760 +@@ -249,6 +250,7 @@ int notrace debug_lockdep_rcu_enabled(void)
28761 + current->lockdep_recursion == 0;
28762 + }
28763 + EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
28764 ++NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
28765 +
28766 + /**
28767 + * rcu_read_lock_held() - might we be in RCU read-side critical section?
28768 +diff --git a/kernel/resource.c b/kernel/resource.c
28769 +index 915c02e8e5dd..ca7ed5158cff 100644
28770 +--- a/kernel/resource.c
28771 ++++ b/kernel/resource.c
28772 +@@ -382,7 +382,7 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
28773 + int (*func)(struct resource *, void *))
28774 + {
28775 + struct resource res;
28776 +- int ret = -1;
28777 ++ int ret = -EINVAL;
28778 +
28779 + while (start < end &&
28780 + !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
28781 +@@ -462,7 +462,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
28782 + unsigned long flags;
28783 + struct resource res;
28784 + unsigned long pfn, end_pfn;
28785 +- int ret = -1;
28786 ++ int ret = -EINVAL;
28787 +
28788 + start = (u64) start_pfn << PAGE_SHIFT;
28789 + end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
28790 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
28791 +index d8d76a65cfdd..01a2489de94e 100644
28792 +--- a/kernel/sched/core.c
28793 ++++ b/kernel/sched/core.c
28794 +@@ -107,11 +107,12 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
28795 + * [L] ->on_rq
28796 + * RELEASE (rq->lock)
28797 + *
28798 +- * If we observe the old CPU in task_rq_lock, the acquire of
28799 ++ * If we observe the old CPU in task_rq_lock(), the acquire of
28800 + * the old rq->lock will fully serialize against the stores.
28801 + *
28802 +- * If we observe the new CPU in task_rq_lock, the acquire will
28803 +- * pair with the WMB to ensure we must then also see migrating.
28804 ++ * If we observe the new CPU in task_rq_lock(), the address
28805 ++ * dependency headed by '[L] rq = task_rq()' and the acquire
28806 ++ * will pair with the WMB to ensure we then also see migrating.
28807 + */
28808 + if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
28809 + rq_pin_lock(rq, rf);
28810 +@@ -928,7 +929,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
28811 + {
28812 + lockdep_assert_held(&rq->lock);
28813 +
28814 +- p->on_rq = TASK_ON_RQ_MIGRATING;
28815 ++ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
28816 + dequeue_task(rq, p, DEQUEUE_NOCLOCK);
28817 + set_task_cpu(p, new_cpu);
28818 + rq_unlock(rq, rf);
28819 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
28820 +index de3de997e245..8039d62ae36e 100644
28821 +--- a/kernel/sched/debug.c
28822 ++++ b/kernel/sched/debug.c
28823 +@@ -315,6 +315,7 @@ void register_sched_domain_sysctl(void)
28824 + {
28825 + static struct ctl_table *cpu_entries;
28826 + static struct ctl_table **cpu_idx;
28827 ++ static bool init_done = false;
28828 + char buf[32];
28829 + int i;
28830 +
28831 +@@ -344,7 +345,10 @@ void register_sched_domain_sysctl(void)
28832 + if (!cpumask_available(sd_sysctl_cpus)) {
28833 + if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
28834 + return;
28835 ++ }
28836 +
28837 ++ if (!init_done) {
28838 ++ init_done = true;
28839 + /* init to possible to not have holes in @cpu_entries */
28840 + cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
28841 + }
28842 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
28843 +index 310d0637fe4b..5e61a1a99e38 100644
28844 +--- a/kernel/sched/fair.c
28845 ++++ b/kernel/sched/fair.c
28846 +@@ -7713,10 +7713,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
28847 + if (cfs_rq->last_h_load_update == now)
28848 + return;
28849 +
28850 +- cfs_rq->h_load_next = NULL;
28851 ++ WRITE_ONCE(cfs_rq->h_load_next, NULL);
28852 + for_each_sched_entity(se) {
28853 + cfs_rq = cfs_rq_of(se);
28854 +- cfs_rq->h_load_next = se;
28855 ++ WRITE_ONCE(cfs_rq->h_load_next, se);
28856 + if (cfs_rq->last_h_load_update == now)
28857 + break;
28858 + }
28859 +@@ -7726,7 +7726,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
28860 + cfs_rq->last_h_load_update = now;
28861 + }
28862 +
28863 +- while ((se = cfs_rq->h_load_next) != NULL) {
28864 ++ while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
28865 + load = cfs_rq->h_load;
28866 + load = div64_ul(load * se->avg.load_avg,
28867 + cfs_rq_load_avg(cfs_rq) + 1);
28868 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
28869 +index d04530bf251f..425a5589e5f6 100644
28870 +--- a/kernel/sched/sched.h
28871 ++++ b/kernel/sched/sched.h
28872 +@@ -1460,9 +1460,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
28873 + */
28874 + smp_wmb();
28875 + #ifdef CONFIG_THREAD_INFO_IN_TASK
28876 +- p->cpu = cpu;
28877 ++ WRITE_ONCE(p->cpu, cpu);
28878 + #else
28879 +- task_thread_info(p)->cpu = cpu;
28880 ++ WRITE_ONCE(task_thread_info(p)->cpu, cpu);
28881 + #endif
28882 + p->wake_cpu = cpu;
28883 + #endif
28884 +@@ -1563,7 +1563,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
28885 +
28886 + static inline int task_on_rq_migrating(struct task_struct *p)
28887 + {
28888 +- return p->on_rq == TASK_ON_RQ_MIGRATING;
28889 ++ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
28890 + }
28891 +
28892 + /*
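These WRITE_ONCE()/READ_ONCE() conversions pair with the ones added to task_cpu() and task_on_rq_migrating() in <linux/sched.h> earlier in this patch: ->cpu and ->on_rq are read locklessly from other CPUs, so both sides of each access must be marked to stop the compiler from tearing, caching, or refetching the values.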
28893 +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
28894 +index 3f35ba1d8fde..efca2489d881 100644
28895 +--- a/kernel/sched/topology.c
28896 ++++ b/kernel/sched/topology.c
28897 +@@ -676,7 +676,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
28898 + }
28899 +
28900 + struct s_data {
28901 +- struct sched_domain ** __percpu sd;
28902 ++ struct sched_domain * __percpu *sd;
28903 + struct root_domain *rd;
28904 + };
28905 +
28906 +diff --git a/kernel/sysctl.c b/kernel/sysctl.c
28907 +index ba4d9e85feb8..28ec71d914c7 100644
28908 +--- a/kernel/sysctl.c
28909 ++++ b/kernel/sysctl.c
28910 +@@ -127,6 +127,7 @@ static int __maybe_unused one = 1;
28911 + static int __maybe_unused two = 2;
28912 + static int __maybe_unused four = 4;
28913 + static unsigned long one_ul = 1;
28914 ++static unsigned long long_max = LONG_MAX;
28915 + static int one_hundred = 100;
28916 + static int one_thousand = 1000;
28917 + #ifdef CONFIG_PRINTK
28918 +@@ -1722,6 +1723,8 @@ static struct ctl_table fs_table[] = {
28919 + .maxlen = sizeof(files_stat.max_files),
28920 + .mode = 0644,
28921 + .proc_handler = proc_doulongvec_minmax,
28922 ++ .extra1 = &zero,
28923 ++ .extra2 = &long_max,
28924 + },
28925 + {
28926 + .procname = "nr_open",
28927 +@@ -2579,7 +2582,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
28928 + {
28929 + struct do_proc_dointvec_minmax_conv_param *param = data;
28930 + if (write) {
28931 +- int val = *negp ? -*lvalp : *lvalp;
28932 ++ int val;
28933 ++ if (*negp) {
28934 ++ if (*lvalp > (unsigned long) INT_MAX + 1)
28935 ++ return -EINVAL;
28936 ++ val = -*lvalp;
28937 ++ } else {
28938 ++ if (*lvalp > (unsigned long) INT_MAX)
28939 ++ return -EINVAL;
28940 ++ val = *lvalp;
28941 ++ }
28942 + if ((param->min && *param->min > val) ||
28943 + (param->max && *param->max < val))
28944 + return -EINVAL;
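Worked examples for the new range checks (64-bit unsigned long assumed):

/* echo 2147483648  (INT_MAX + 1, positive):
 *   old: val wrapped silently when truncated to int
 *   new: *lvalp > INT_MAX            -> -EINVAL
 * echo -2147483648 (INT_MIN):
 *   *lvalp == INT_MAX + 1, still allowed -> val = INT_MIN
 * echo -2147483649:
 *   *lvalp > INT_MAX + 1             -> -EINVAL
 */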
28945 +diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
28946 +index 2c97e8c2d29f..0519a8805aab 100644
28947 +--- a/kernel/time/alarmtimer.c
28948 ++++ b/kernel/time/alarmtimer.c
28949 +@@ -594,7 +594,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
28950 + {
28951 + struct alarm *alarm = &timr->it.alarm.alarmtimer;
28952 +
28953 +- return ktime_sub(now, alarm->node.expires);
28954 ++ return ktime_sub(alarm->node.expires, now);
28955 + }
28956 +
28957 + /**
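A quick sanity check of the operand swap in alarm_timer_remaining(): with expires = 5s and now = 2s, the old ktime_sub(now, expires) reported -3s "remaining", while the fixed ktime_sub(expires, now) yields the expected 3s.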
28958 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
28959 +index 06e864a334bb..b49affb4666b 100644
28960 +--- a/kernel/trace/ring_buffer.c
28961 ++++ b/kernel/trace/ring_buffer.c
28962 +@@ -4205,6 +4205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
28963 + * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
28964 + * @buffer: The ring buffer to read from
28965 + * @cpu: The cpu buffer to iterate over
28966 ++ * @flags: gfp flags to use for memory allocation
28967 + *
28968 + * This performs the initial preparations necessary to iterate
28969 + * through the buffer. Memory is allocated, buffer recording
28970 +@@ -4222,7 +4223,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
28971 + * This overall must be paired with ring_buffer_read_finish.
28972 + */
28973 + struct ring_buffer_iter *
28974 +-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
28975 ++ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
28976 + {
28977 + struct ring_buffer_per_cpu *cpu_buffer;
28978 + struct ring_buffer_iter *iter;
28979 +@@ -4230,7 +4231,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
28980 + if (!cpumask_test_cpu(cpu, buffer->cpumask))
28981 + return NULL;
28982 +
28983 +- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
28984 ++ iter = kmalloc(sizeof(*iter), flags);
28985 + if (!iter)
28986 + return NULL;
28987 +
28988 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
28989 +index c4238b441624..89158aa93fa6 100644
28990 +--- a/kernel/trace/trace.c
28991 ++++ b/kernel/trace/trace.c
28992 +@@ -3904,7 +3904,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
28993 + if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
28994 + for_each_tracing_cpu(cpu) {
28995 + iter->buffer_iter[cpu] =
28996 +- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
28997 ++ ring_buffer_read_prepare(iter->trace_buffer->buffer,
28998 ++ cpu, GFP_KERNEL);
28999 + }
29000 + ring_buffer_read_prepare_sync();
29001 + for_each_tracing_cpu(cpu) {
29002 +@@ -3914,7 +3915,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
29003 + } else {
29004 + cpu = iter->cpu_file;
29005 + iter->buffer_iter[cpu] =
29006 +- ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
29007 ++ ring_buffer_read_prepare(iter->trace_buffer->buffer,
29008 ++ cpu, GFP_KERNEL);
29009 + ring_buffer_read_prepare_sync();
29010 + ring_buffer_read_start(iter->buffer_iter[cpu]);
29011 + tracing_iter_reset(iter, cpu);
29012 +@@ -5626,7 +5628,6 @@ out:
29013 + return ret;
29014 +
29015 + fail:
29016 +- kfree(iter->trace);
29017 + kfree(iter);
29018 + __trace_array_put(tr);
29019 + mutex_unlock(&trace_types_lock);
29020 +diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
29021 +index dd1f43588d70..fa100ed3b4de 100644
29022 +--- a/kernel/trace/trace_dynevent.c
29023 ++++ b/kernel/trace/trace_dynevent.c
29024 +@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
29025 + static int create_dyn_event(int argc, char **argv)
29026 + {
29027 + struct dyn_event_operations *ops;
29028 +- int ret;
29029 ++ int ret = -ENODEV;
29030 +
29031 + if (argv[0][0] == '-' || argv[0][0] == '!')
29032 + return dyn_event_release(argc, argv, NULL);
29033 +diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
29034 +index 76217bbef815..4629a6104474 100644
29035 +--- a/kernel/trace/trace_event_perf.c
29036 ++++ b/kernel/trace/trace_event_perf.c
29037 +@@ -299,15 +299,13 @@ int perf_uprobe_init(struct perf_event *p_event,
29038 +
29039 + if (!p_event->attr.uprobe_path)
29040 + return -EINVAL;
29041 +- path = kzalloc(PATH_MAX, GFP_KERNEL);
29042 +- if (!path)
29043 +- return -ENOMEM;
29044 +- ret = strncpy_from_user(
29045 +- path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
29046 +- if (ret == PATH_MAX)
29047 +- return -E2BIG;
29048 +- if (ret < 0)
29049 +- goto out;
29050 ++
29051 ++ path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
29052 ++ PATH_MAX);
29053 ++ if (IS_ERR(path)) {
29054 ++ ret = PTR_ERR(path);
29055 ++ return (ret == -EINVAL) ? -E2BIG : ret;
29056 ++ }
29057 + if (path[0] == '\0') {
29058 + ret = -EINVAL;
29059 + goto out;
29060 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
29061 +index 27821480105e..217ef481fbbb 100644
29062 +--- a/kernel/trace/trace_events_filter.c
29063 ++++ b/kernel/trace/trace_events_filter.c
29064 +@@ -1301,7 +1301,7 @@ static int parse_pred(const char *str, void *data,
29065 + /* go past the last quote */
29066 + i++;
29067 +
29068 +- } else if (isdigit(str[i])) {
29069 ++ } else if (isdigit(str[i]) || str[i] == '-') {
29070 +
29071 + /* Make sure the field is not a string */
29072 + if (is_string_field(field)) {
29073 +@@ -1314,6 +1314,9 @@ static int parse_pred(const char *str, void *data,
29074 + goto err_free;
29075 + }
29076 +
29077 ++ if (str[i] == '-')
29078 ++ i++;
29079 ++
29080 + /* We allow 0xDEADBEEF */
29081 + while (isalnum(str[i]))
29082 + i++;
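[Editor's note] The filter fix above admits a leading '-' before re-using the existing alphanumeric scan (which already tolerates hex like 0xDEADBEEF). The scanning rule as a standalone sketch, with an illustrative scan_number() helper:

    #include <ctype.h>
    #include <stddef.h>

    /* Return the index one past a literal starting at str[i]: an optional
     * minus sign, then alphanumerics so hex such as 0xDEADBEEF passes. */
    static size_t scan_number(const char *str, size_t i)
    {
        if (str[i] == '-')
            i++;
        while (isalnum((unsigned char)str[i]))
            i++;
        return i;
    }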
29083 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
29084 +index 449d90cfa151..55b72b1c63a0 100644
29085 +--- a/kernel/trace/trace_events_hist.c
29086 ++++ b/kernel/trace/trace_events_hist.c
29087 +@@ -4695,9 +4695,10 @@ static inline void add_to_key(char *compound_key, void *key,
29088 + /* ensure NULL-termination */
29089 + if (size > key_field->size - 1)
29090 + size = key_field->size - 1;
29091 +- }
29092 +
29093 +- memcpy(compound_key + key_field->offset, key, size);
29094 ++ strncpy(compound_key + key_field->offset, (char *)key, size);
29095 ++ } else
29096 ++ memcpy(compound_key + key_field->offset, key, size);
29097 + }
29098 +
29099 + static void
29100 +diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
29101 +index d953c163a079..810d78a8d14c 100644
29102 +--- a/kernel/trace/trace_kdb.c
29103 ++++ b/kernel/trace/trace_kdb.c
29104 +@@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
29105 + if (cpu_file == RING_BUFFER_ALL_CPUS) {
29106 + for_each_tracing_cpu(cpu) {
29107 + iter.buffer_iter[cpu] =
29108 +- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
29109 ++ ring_buffer_read_prepare(iter.trace_buffer->buffer,
29110 ++ cpu, GFP_ATOMIC);
29111 + ring_buffer_read_start(iter.buffer_iter[cpu]);
29112 + tracing_iter_reset(&iter, cpu);
29113 + }
29114 + } else {
29115 + iter.cpu_file = cpu_file;
29116 + iter.buffer_iter[cpu_file] =
29117 +- ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
29118 ++ ring_buffer_read_prepare(iter.trace_buffer->buffer,
29119 ++ cpu_file, GFP_ATOMIC);
29120 + ring_buffer_read_start(iter.buffer_iter[cpu_file]);
29121 + tracing_iter_reset(&iter, cpu_file);
29122 + }
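[Editor's note] The ring-buffer, trace, and kdb hunks above are one logical change: ring_buffer_read_prepare() gains a gfp_t argument, so the normal open path keeps GFP_KERNEL while kdb, running in a context that must not sleep, passes GFP_ATOMIC. The general pattern — pushing the allocation-context decision up to the caller — sketched in plain C with illustrative names (malloc() stands in for kmalloc()):

    #include <stdlib.h>

    enum alloc_ctx { CTX_MAY_SLEEP, CTX_ATOMIC };  /* GFP_KERNEL/GFP_ATOMIC stand-ins */

    struct iter { int cpu; };

    /* The helper no longer assumes it may sleep; the caller, who knows
     * its own execution context, says so explicitly. */
    static struct iter *iter_prepare(int cpu, enum alloc_ctx ctx)
    {
        struct iter *it = malloc(sizeof(*it));

        if (!it)
            return NULL;
        it->cpu = cpu;
        (void)ctx;  /* a real allocator would pick its path from this */
        return it;
    }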
29123 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
29124 +index 977918d5d350..bbc4940f21af 100644
29125 +--- a/kernel/watchdog.c
29126 ++++ b/kernel/watchdog.c
29127 +@@ -547,13 +547,15 @@ static void softlockup_start_all(void)
29128 +
29129 + int lockup_detector_online_cpu(unsigned int cpu)
29130 + {
29131 +- watchdog_enable(cpu);
29132 ++ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
29133 ++ watchdog_enable(cpu);
29134 + return 0;
29135 + }
29136 +
29137 + int lockup_detector_offline_cpu(unsigned int cpu)
29138 + {
29139 +- watchdog_disable(cpu);
29140 ++ if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
29141 ++ watchdog_disable(cpu);
29142 + return 0;
29143 + }
29144 +
29145 +diff --git a/lib/bsearch.c b/lib/bsearch.c
29146 +index 18b445b010c3..82512fe7b33c 100644
29147 +--- a/lib/bsearch.c
29148 ++++ b/lib/bsearch.c
29149 +@@ -11,6 +11,7 @@
29150 +
29151 + #include <linux/export.h>
29152 + #include <linux/bsearch.h>
29153 ++#include <linux/kprobes.h>
29154 +
29155 + /*
29156 + * bsearch - binary search an array of elements
29157 +@@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
29158 + return NULL;
29159 + }
29160 + EXPORT_SYMBOL(bsearch);
29161 ++NOKPROBE_SYMBOL(bsearch);
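[Editor's note] bsearch() can be reached from the kprobes handling path itself, so allowing a probe on it would presumably re-trigger the handler; NOKPROBE_SYMBOL() blacklists it. A hypothetical kernel helper that may run from probe context would be annotated the same way (my_cmp is illustrative):

    #include <linux/kprobes.h>

    static int my_cmp(const void *a, const void *b)
    {
        return *(const int *)a - *(const int *)b;
    }
    /* Keep the helper off-limits to kprobes so a probe handler that
     * reaches it cannot recurse into itself. */
    NOKPROBE_SYMBOL(my_cmp);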
29162 +diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
29163 +index 4e90d443d1b0..e723eacf7868 100644
29164 +--- a/lib/raid6/Makefile
29165 ++++ b/lib/raid6/Makefile
29166 +@@ -39,7 +39,7 @@ endif
29167 + ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
29168 + NEON_FLAGS := -ffreestanding
29169 + ifeq ($(ARCH),arm)
29170 +-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
29171 ++NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
29172 + endif
29173 + CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
29174 + ifeq ($(ARCH),arm64)
29175 +diff --git a/lib/rhashtable.c b/lib/rhashtable.c
29176 +index 852ffa5160f1..4edcf3310513 100644
29177 +--- a/lib/rhashtable.c
29178 ++++ b/lib/rhashtable.c
29179 +@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
29180 + else if (tbl->nest)
29181 + err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
29182 +
29183 +- if (!err)
29184 +- err = rhashtable_rehash_table(ht);
29185 ++ if (!err || err == -EEXIST) {
29186 ++ int nerr;
29187 ++
29188 ++ nerr = rhashtable_rehash_table(ht);
29189 ++ err = err ?: nerr;
29190 ++ }
29191 +
29192 + mutex_unlock(&ht->mutex);
29193 +
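[Editor's note] The rhashtable hunk still runs the deferred rehash when table allocation reported -EEXIST, and keeps the first error using GCC's `a ?: b` extension (a if non-zero, else b). The same error-combining rule spelled portably:

    /* err = err ?: nerr;  -- keep the earlier error if there was one,
     * otherwise adopt the rehash result. Without the GNU extension: */
    static int combine_errors(int err, int nerr)
    {
        return err ? err : nerr;
    }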
29194 +diff --git a/lib/string.c b/lib/string.c
29195 +index 38e4ca08e757..3ab861c1a857 100644
29196 +--- a/lib/string.c
29197 ++++ b/lib/string.c
29198 +@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
29199 + EXPORT_SYMBOL(memcmp);
29200 + #endif
29201 +
29202 ++#ifndef __HAVE_ARCH_BCMP
29203 ++/**
29204 ++ * bcmp - returns 0 if and only if the buffers have identical contents.
29205 ++ * @a: pointer to first buffer.
29206 ++ * @b: pointer to second buffer.
29207 ++ * @len: size of buffers.
29208 ++ *
29209 ++ * The sign or magnitude of a non-zero return value has no particular
29210 ++ * meaning, and architectures may implement their own more efficient bcmp(). So
29211 ++ * while this particular implementation is a simple (tail) call to memcmp, do
29212 ++ * not rely on anything but whether the return value is zero or non-zero.
29213 ++ */
29214 ++#undef bcmp
29215 ++int bcmp(const void *a, const void *b, size_t len)
29216 ++{
29217 ++ return memcmp(a, b, len);
29218 ++}
29219 ++EXPORT_SYMBOL(bcmp);
29220 ++#endif
29221 ++
29222 + #ifndef __HAVE_ARCH_MEMSCAN
29223 + /**
29224 + * memscan - Find a character in an area of memory.
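[Editor's note] The new bcmp() exists because compilers (LLVM in particular) may lower an equality-only memcmp() into a bcmp() call, so the kernel has to provide the symbol. As the kernel-doc above stresses, callers may only test the result against zero; a minimal example:

    #include <string.h>

    static int buffers_equal(const void *a, const void *b, size_t len)
    {
        /* Only the zero/non-zero distinction is meaningful; the
         * compiler is free to emit this as a call to bcmp(). */
        return memcmp(a, b, len) == 0;
    }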
29225 +diff --git a/mm/cma.c b/mm/cma.c
29226 +index c7b39dd3b4f6..f4f3a8a57d86 100644
29227 +--- a/mm/cma.c
29228 ++++ b/mm/cma.c
29229 +@@ -353,12 +353,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
29230 +
29231 + ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
29232 + if (ret)
29233 +- goto err;
29234 ++ goto free_mem;
29235 +
29236 + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
29237 + &base);
29238 + return 0;
29239 +
29240 ++free_mem:
29241 ++ memblock_free(base, size);
29242 + err:
29243 + pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
29244 + return ret;
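[Editor's note] The cma fix adds a label so the reserved memblock is released when cma_init_reserved_mem() fails; previously that error path leaked the reservation. The general goto-unwind shape as a self-contained sketch (setup()/initialise() are illustrative):

    #include <stdlib.h>

    static int initialise(void *mem) { (void)mem; return 0; }

    static int setup(void)
    {
        void *mem = malloc(64);

        if (!mem)
            goto err;
        if (initialise(mem) != 0)
            goto free_mem;
        return 0;

    free_mem:
        free(mem);      /* undo exactly what succeeded before the failure */
    err:
        return -1;
    }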
29245 +diff --git a/mm/debug.c b/mm/debug.c
29246 +index 1611cf00a137..854d5f84047d 100644
29247 +--- a/mm/debug.c
29248 ++++ b/mm/debug.c
29249 +@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
29250 + pr_warn("ksm ");
29251 + else if (mapping) {
29252 + pr_warn("%ps ", mapping->a_ops);
29253 +- if (mapping->host->i_dentry.first) {
29254 ++ if (mapping->host && mapping->host->i_dentry.first) {
29255 + struct dentry *dentry;
29256 + dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
29257 + pr_warn("name:\"%pd\" ", dentry);
29258 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
29259 +index faf357eaf0ce..8b03c698f86e 100644
29260 +--- a/mm/huge_memory.c
29261 ++++ b/mm/huge_memory.c
29262 +@@ -753,6 +753,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
29263 + spinlock_t *ptl;
29264 +
29265 + ptl = pmd_lock(mm, pmd);
29266 ++ if (!pmd_none(*pmd)) {
29267 ++ if (write) {
29268 ++ if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
29269 ++ WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
29270 ++ goto out_unlock;
29271 ++ }
29272 ++ entry = pmd_mkyoung(*pmd);
29273 ++ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
29274 ++ if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
29275 ++ update_mmu_cache_pmd(vma, addr, pmd);
29276 ++ }
29277 ++
29278 ++ goto out_unlock;
29279 ++ }
29280 ++
29281 + entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
29282 + if (pfn_t_devmap(pfn))
29283 + entry = pmd_mkdevmap(entry);
29284 +@@ -764,11 +779,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
29285 + if (pgtable) {
29286 + pgtable_trans_huge_deposit(mm, pmd, pgtable);
29287 + mm_inc_nr_ptes(mm);
29288 ++ pgtable = NULL;
29289 + }
29290 +
29291 + set_pmd_at(mm, addr, pmd, entry);
29292 + update_mmu_cache_pmd(vma, addr, pmd);
29293 ++
29294 ++out_unlock:
29295 + spin_unlock(ptl);
29296 ++ if (pgtable)
29297 ++ pte_free(mm, pgtable);
29298 + }
29299 +
29300 + vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
29301 +@@ -819,6 +839,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
29302 + spinlock_t *ptl;
29303 +
29304 + ptl = pud_lock(mm, pud);
29305 ++ if (!pud_none(*pud)) {
29306 ++ if (write) {
29307 ++ if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
29308 ++ WARN_ON_ONCE(!is_huge_zero_pud(*pud));
29309 ++ goto out_unlock;
29310 ++ }
29311 ++ entry = pud_mkyoung(*pud);
29312 ++ entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
29313 ++ if (pudp_set_access_flags(vma, addr, pud, entry, 1))
29314 ++ update_mmu_cache_pud(vma, addr, pud);
29315 ++ }
29316 ++ goto out_unlock;
29317 ++ }
29318 ++
29319 + entry = pud_mkhuge(pfn_t_pud(pfn, prot));
29320 + if (pfn_t_devmap(pfn))
29321 + entry = pud_mkdevmap(entry);
29322 +@@ -828,6 +862,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
29323 + }
29324 + set_pud_at(mm, addr, pud, entry);
29325 + update_mmu_cache_pud(vma, addr, pud);
29326 ++
29327 ++out_unlock:
29328 + spin_unlock(ptl);
29329 + }
29330 +
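[Editor's note] Besides making a second fault on an already-populated pmd/pud a no-op (upgrading access flags at most), the hunk stops leaking the preallocated page table: it is NULLed once deposited, and freed after the lock drops otherwise. That consume-or-free pattern in a runnable sketch (install()/slot_used are illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int slot_used;

    static void install(void)
    {
        void *prealloc = malloc(128);   /* allocate before taking the lock */

        pthread_mutex_lock(&lock);
        if (!slot_used && prealloc) {
            slot_used = 1;              /* ownership transferred */
            prealloc = NULL;
        }
        pthread_mutex_unlock(&lock);

        free(prealloc);                 /* no-op if it was consumed */
    }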
29331 +diff --git a/mm/kasan/common.c b/mm/kasan/common.c
29332 +index 09b534fbba17..80bbe62b16cd 100644
29333 +--- a/mm/kasan/common.c
29334 ++++ b/mm/kasan/common.c
29335 +@@ -14,6 +14,8 @@
29336 + *
29337 + */
29338 +
29339 ++#define __KASAN_INTERNAL
29340 ++
29341 + #include <linux/export.h>
29342 + #include <linux/interrupt.h>
29343 + #include <linux/init.h>
29344 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
29345 +index af7f18b32389..5bbf2de02a0f 100644
29346 +--- a/mm/memcontrol.c
29347 ++++ b/mm/memcontrol.c
29348 +@@ -248,6 +248,12 @@ enum res_type {
29349 + iter != NULL; \
29350 + iter = mem_cgroup_iter(NULL, iter, NULL))
29351 +
29352 ++static inline bool should_force_charge(void)
29353 ++{
29354 ++ return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
29355 ++ (current->flags & PF_EXITING);
29356 ++}
29357 ++
29358 + /* Some nice accessors for the vmpressure. */
29359 + struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
29360 + {
29361 +@@ -1389,8 +1395,13 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
29362 + };
29363 + bool ret;
29364 +
29365 +- mutex_lock(&oom_lock);
29366 +- ret = out_of_memory(&oc);
29367 ++ if (mutex_lock_killable(&oom_lock))
29368 ++ return true;
29369 ++ /*
29370 ++ * A few threads which were not waiting at mutex_lock_killable() can
29371 ++ * fail to bail out. Therefore, check again after holding oom_lock.
29372 ++ */
29373 ++ ret = should_force_charge() || out_of_memory(&oc);
29374 + mutex_unlock(&oom_lock);
29375 + return ret;
29376 + }
29377 +@@ -2209,9 +2220,7 @@ retry:
29378 + * bypass the last charges so that they can exit quickly and
29379 + * free their memory.
29380 + */
29381 +- if (unlikely(tsk_is_oom_victim(current) ||
29382 +- fatal_signal_pending(current) ||
29383 +- current->flags & PF_EXITING))
29384 ++ if (unlikely(should_force_charge()))
29385 + goto force;
29386 +
29387 + /*
29388 +@@ -3873,6 +3882,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
29389 + return &memcg->cgwb_domain;
29390 + }
29391 +
29392 ++/*
29393 ++ * idx can be of type enum memcg_stat_item or node_stat_item.
29394 ++ * Keep in sync with memcg_exact_page().
29395 ++ */
29396 ++static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
29397 ++{
29398 ++ long x = atomic_long_read(&memcg->stat[idx]);
29399 ++ int cpu;
29400 ++
29401 ++ for_each_online_cpu(cpu)
29402 ++ x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
29403 ++ if (x < 0)
29404 ++ x = 0;
29405 ++ return x;
29406 ++}
29407 ++
29408 + /**
29409 + * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
29410 + * @wb: bdi_writeback in question
29411 +@@ -3898,10 +3923,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
29412 + struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
29413 + struct mem_cgroup *parent;
29414 +
29415 +- *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
29416 ++ *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
29417 +
29418 + /* this should eventually include NR_UNSTABLE_NFS */
29419 +- *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
29420 ++ *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
29421 + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
29422 + (1 << LRU_ACTIVE_FILE));
29423 + *pheadroom = PAGE_COUNTER_MAX;
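[Editor's note] Two things happen in the memcg OOM hunks: the bail-out conditions are factored into should_force_charge(), and out_of_memory() is only attempted after a killable lock acquisition plus a re-check, because a thread can become an OOM victim while queued on the lock. The recheck-under-lock shape, sketched with pthreads (pthread mutexes are not killable — that part is kernel-specific):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t oom_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool resolved;               /* stands in for should_force_charge() */

    static bool do_resolution(void) { resolved = true; return true; }

    static bool try_resolve(void)
    {
        bool ret;

        pthread_mutex_lock(&oom_lock);
        /* We may have been resolved by whoever held the lock before
         * us, so test the condition again now that we hold it. */
        ret = resolved || do_resolution();
        pthread_mutex_unlock(&oom_lock);
        return ret;
    }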
29424 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
29425 +index 831be5ff5f4d..fc8b51744579 100644
29426 +--- a/mm/memory-failure.c
29427 ++++ b/mm/memory-failure.c
29428 +@@ -1825,19 +1825,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
29429 + struct page *hpage = compound_head(page);
29430 +
29431 + if (!PageHuge(page) && PageTransHuge(hpage)) {
29432 +- lock_page(hpage);
29433 +- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
29434 +- unlock_page(hpage);
29435 +- if (!PageAnon(hpage))
29436 ++ lock_page(page);
29437 ++ if (!PageAnon(page) || unlikely(split_huge_page(page))) {
29438 ++ unlock_page(page);
29439 ++ if (!PageAnon(page))
29440 + pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
29441 + else
29442 + pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
29443 +- put_hwpoison_page(hpage);
29444 ++ put_hwpoison_page(page);
29445 + return -EBUSY;
29446 + }
29447 +- unlock_page(hpage);
29448 +- get_hwpoison_page(page);
29449 +- put_hwpoison_page(hpage);
29450 ++ unlock_page(page);
29451 + }
29452 +
29453 + /*
29454 +diff --git a/mm/memory.c b/mm/memory.c
29455 +index e11ca9dd823f..8d3f38fa530d 100644
29456 +--- a/mm/memory.c
29457 ++++ b/mm/memory.c
29458 +@@ -1546,10 +1546,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
29459 + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
29460 + goto out_unlock;
29461 + }
29462 +- entry = *pte;
29463 +- goto out_mkwrite;
29464 +- } else
29465 +- goto out_unlock;
29466 ++ entry = pte_mkyoung(*pte);
29467 ++ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
29468 ++ if (ptep_set_access_flags(vma, addr, pte, entry, 1))
29469 ++ update_mmu_cache(vma, addr, pte);
29470 ++ }
29471 ++ goto out_unlock;
29472 + }
29473 +
29474 + /* Ok, finally just insert the thing.. */
29475 +@@ -1558,7 +1560,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
29476 + else
29477 + entry = pte_mkspecial(pfn_t_pte(pfn, prot));
29478 +
29479 +-out_mkwrite:
29480 + if (mkwrite) {
29481 + entry = pte_mkyoung(entry);
29482 + entry = maybe_mkwrite(pte_mkdirty(entry), vma);
29483 +@@ -3517,10 +3518,13 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
29484 + * but allow concurrent faults).
29485 + * The mmap_sem may have been released depending on flags and our
29486 + * return value. See filemap_fault() and __lock_page_or_retry().
29487 ++ * If mmap_sem is released, vma may become invalid (for example
29488 ++ * by other thread calling munmap()).
29489 + */
29490 + static vm_fault_t do_fault(struct vm_fault *vmf)
29491 + {
29492 + struct vm_area_struct *vma = vmf->vma;
29493 ++ struct mm_struct *vm_mm = vma->vm_mm;
29494 + vm_fault_t ret;
29495 +
29496 + /*
29497 +@@ -3561,7 +3565,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
29498 +
29499 + /* preallocated pagetable is unused: free it */
29500 + if (vmf->prealloc_pte) {
29501 +- pte_free(vma->vm_mm, vmf->prealloc_pte);
29502 ++ pte_free(vm_mm, vmf->prealloc_pte);
29503 + vmf->prealloc_pte = NULL;
29504 + }
29505 + return ret;
29506 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
29507 +index 1ad28323fb9f..11593a03c051 100644
29508 +--- a/mm/memory_hotplug.c
29509 ++++ b/mm/memory_hotplug.c
29510 +@@ -1560,7 +1560,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
29511 + {
29512 + unsigned long pfn, nr_pages;
29513 + long offlined_pages;
29514 +- int ret, node;
29515 ++ int ret, node, nr_isolate_pageblock;
29516 + unsigned long flags;
29517 + unsigned long valid_start, valid_end;
29518 + struct zone *zone;
29519 +@@ -1586,10 +1586,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
29520 + ret = start_isolate_page_range(start_pfn, end_pfn,
29521 + MIGRATE_MOVABLE,
29522 + SKIP_HWPOISON | REPORT_FAILURE);
29523 +- if (ret) {
29524 ++ if (ret < 0) {
29525 + reason = "failure to isolate range";
29526 + goto failed_removal;
29527 + }
29528 ++ nr_isolate_pageblock = ret;
29529 +
29530 + arg.start_pfn = start_pfn;
29531 + arg.nr_pages = nr_pages;
29532 +@@ -1642,8 +1643,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
29533 + /* Ok, all of our target is isolated.
29534 + We cannot do rollback at this point. */
29535 + offline_isolated_pages(start_pfn, end_pfn);
29536 +- /* reset pagetype flags and makes migrate type to be MOVABLE */
29537 +- undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
29538 ++
29539 ++ /*
29540 ++ * Onlining will reset pagetype flags and make the migrate type
29541 ++ * MOVABLE, so we just need to decrease the zone counter of
29542 ++ * isolated pageblocks here.
29543 ++ */
29544 ++ spin_lock_irqsave(&zone->lock, flags);
29545 ++ zone->nr_isolate_pageblock -= nr_isolate_pageblock;
29546 ++ spin_unlock_irqrestore(&zone->lock, flags);
29547 ++
29548 + /* removal success */
29549 + adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
29550 + zone->present_pages -= offlined_pages;
29551 +@@ -1675,12 +1684,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
29552 +
29553 + failed_removal_isolated:
29554 + undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
29555 ++ memory_notify(MEM_CANCEL_OFFLINE, &arg);
29556 + failed_removal:
29557 + pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
29558 + (unsigned long long) start_pfn << PAGE_SHIFT,
29559 + ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
29560 + reason);
29561 +- memory_notify(MEM_CANCEL_OFFLINE, &arg);
29562 + /* pushback to free area */
29563 + mem_hotplug_done();
29564 + return ret;
29565 +diff --git a/mm/mempolicy.c b/mm/mempolicy.c
29566 +index ee2bce59d2bf..c2275c1e6d2a 100644
29567 +--- a/mm/mempolicy.c
29568 ++++ b/mm/mempolicy.c
29569 +@@ -350,7 +350,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
29570 + {
29571 + if (!pol)
29572 + return;
29573 +- if (!mpol_store_user_nodemask(pol) &&
29574 ++ if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
29575 + nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
29576 + return;
29577 +
29578 +@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
29579 + return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
29580 + }
29581 +
29582 ++/*
29583 ++ * queue_pages_pmd() has three possible return values:
29584 ++ * 1 - pages are placed on the right node or queued successfully.
29585 ++ * 0 - THP was split.
29586 ++ * -EIO - a migration entry was found, or MPOL_MF_STRICT was specified
29587 ++ * and an existing page was already on a node that does not follow the policy.
29588 ++ */
29589 + static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
29590 + unsigned long end, struct mm_walk *walk)
29591 + {
29592 +@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
29593 + unsigned long flags;
29594 +
29595 + if (unlikely(is_pmd_migration_entry(*pmd))) {
29596 +- ret = 1;
29597 ++ ret = -EIO;
29598 + goto unlock;
29599 + }
29600 + page = pmd_page(*pmd);
29601 +@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
29602 + ret = 1;
29603 + flags = qp->flags;
29604 + /* go to thp migration */
29605 +- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
29606 ++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
29607 ++ if (!vma_migratable(walk->vma)) {
29608 ++ ret = -EIO;
29609 ++ goto unlock;
29610 ++ }
29611 ++
29612 + migrate_page_add(page, qp->pagelist, flags);
29613 ++ } else
29614 ++ ret = -EIO;
29615 + unlock:
29616 + spin_unlock(ptl);
29617 + out:
29618 +@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
29619 + ptl = pmd_trans_huge_lock(pmd, vma);
29620 + if (ptl) {
29621 + ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
29622 +- if (ret)
29623 ++ if (ret > 0)
29624 + return 0;
29625 ++ else if (ret < 0)
29626 ++ return ret;
29627 + }
29628 +
29629 + if (pmd_trans_unstable(pmd))
29630 +@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
29631 + continue;
29632 + if (!queue_pages_required(page, qp))
29633 + continue;
29634 +- migrate_page_add(page, qp->pagelist, flags);
29635 ++ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
29636 ++ if (!vma_migratable(vma))
29637 ++ break;
29638 ++ migrate_page_add(page, qp->pagelist, flags);
29639 ++ } else
29640 ++ break;
29641 + }
29642 + pte_unmap_unlock(pte - 1, ptl);
29643 + cond_resched();
29644 +- return 0;
29645 ++ return addr != end ? -EIO : 0;
29646 + }
29647 +
29648 + static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
29649 +@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
29650 + unsigned long endvma = vma->vm_end;
29651 + unsigned long flags = qp->flags;
29652 +
29653 +- if (!vma_migratable(vma))
29654 ++ /*
29655 ++ * Need to check MPOL_MF_STRICT to return -EIO if possible,
29656 ++ * regardless of vma_migratable
29657 ++ */
29658 ++ if (!vma_migratable(vma) &&
29659 ++ !(flags & MPOL_MF_STRICT))
29660 + return 1;
29661 +
29662 + if (endvma > end)
29663 +@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
29664 + }
29665 +
29666 + /* queue pages from current vma */
29667 +- if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
29668 ++ if (flags & MPOL_MF_VALID)
29669 + return 0;
29670 + return 1;
29671 + }
29672 +diff --git a/mm/migrate.c b/mm/migrate.c
29673 +index 181f5d2718a9..76e237b4610c 100644
29674 +--- a/mm/migrate.c
29675 ++++ b/mm/migrate.c
29676 +@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
29677 + pte = swp_entry_to_pte(entry);
29678 + } else if (is_device_public_page(new)) {
29679 + pte = pte_mkdevmap(pte);
29680 +- flush_dcache_page(new);
29681 + }
29682 +- } else
29683 +- flush_dcache_page(new);
29684 ++ }
29685 +
29686 + #ifdef CONFIG_HUGETLB_PAGE
29687 + if (PageHuge(new)) {
29688 +@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
29689 + */
29690 + if (!PageMappingFlags(page))
29691 + page->mapping = NULL;
29692 ++
29693 ++ if (unlikely(is_zone_device_page(newpage))) {
29694 ++ if (is_device_public_page(newpage))
29695 ++ flush_dcache_page(newpage);
29696 ++ } else
29697 ++ flush_dcache_page(newpage);
29698 ++
29699 + }
29700 + out:
29701 + return rc;
29702 +diff --git a/mm/oom_kill.c b/mm/oom_kill.c
29703 +index 26ea8636758f..da0e44914085 100644
29704 +--- a/mm/oom_kill.c
29705 ++++ b/mm/oom_kill.c
29706 +@@ -928,7 +928,8 @@ static void __oom_kill_process(struct task_struct *victim)
29707 + */
29708 + static int oom_kill_memcg_member(struct task_struct *task, void *unused)
29709 + {
29710 +- if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
29711 ++ if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
29712 ++ !is_global_init(task)) {
29713 + get_task_struct(task);
29714 + __oom_kill_process(task);
29715 + }
29716 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
29717 +index 0b9f577b1a2a..20dd3283bb1b 100644
29718 +--- a/mm/page_alloc.c
29719 ++++ b/mm/page_alloc.c
29720 +@@ -1945,8 +1945,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
29721 +
29722 + arch_alloc_page(page, order);
29723 + kernel_map_pages(page, 1 << order, 1);
29724 +- kernel_poison_pages(page, 1 << order, 1);
29725 + kasan_alloc_pages(page, order);
29726 ++ kernel_poison_pages(page, 1 << order, 1);
29727 + set_page_owner(page, order, gfp_flags);
29728 + }
29729 +
29730 +@@ -8160,7 +8160,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
29731 +
29732 + ret = start_isolate_page_range(pfn_max_align_down(start),
29733 + pfn_max_align_up(end), migratetype, 0);
29734 +- if (ret)
29735 ++ if (ret < 0)
29736 + return ret;
29737 +
29738 + /*
29739 +diff --git a/mm/page_ext.c b/mm/page_ext.c
29740 +index 8c78b8d45117..f116431c3dee 100644
29741 +--- a/mm/page_ext.c
29742 ++++ b/mm/page_ext.c
29743 +@@ -273,6 +273,7 @@ static void free_page_ext(void *addr)
29744 + table_size = get_entry_size() * PAGES_PER_SECTION;
29745 +
29746 + BUG_ON(PageReserved(page));
29747 ++ kmemleak_free(addr);
29748 + free_pages_exact(addr, table_size);
29749 + }
29750 + }
29751 +diff --git a/mm/page_isolation.c b/mm/page_isolation.c
29752 +index ce323e56b34d..019280712e1b 100644
29753 +--- a/mm/page_isolation.c
29754 ++++ b/mm/page_isolation.c
29755 +@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
29756 + * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
29757 + * We just check MOVABLE pages.
29758 + */
29759 +- if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
29760 ++ if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
29761 ++ isol_flags))
29762 + ret = 0;
29763 +
29764 + /*
29765 +@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
29766 + return NULL;
29767 + }
29768 +
29769 +-/*
29770 +- * start_isolate_page_range() -- make page-allocation-type of range of pages
29771 +- * to be MIGRATE_ISOLATE.
29772 +- * @start_pfn: The lower PFN of the range to be isolated.
29773 +- * @end_pfn: The upper PFN of the range to be isolated.
29774 +- * @migratetype: migrate type to set in error recovery.
29775 ++/**
29776 ++ * start_isolate_page_range() - make page-allocation-type of range of pages to
29777 ++ * be MIGRATE_ISOLATE.
29778 ++ * @start_pfn: The lower PFN of the range to be isolated.
29779 ++ * @end_pfn: The upper PFN of the range to be isolated.
29780 ++ * start_pfn/end_pfn must be aligned to pageblock_order.
29781 ++ * @migratetype: Migrate type to set in error recovery.
29782 ++ * @flags: The following flags are allowed (they can be combined in
29783 ++ * a bit mask)
29784 ++ * SKIP_HWPOISON - ignore hwpoison pages
29785 ++ * REPORT_FAILURE - report details about the failure to
29786 ++ * isolate the range
29787 + *
29788 + * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
29789 + * the range will never be allocated. Any free pages and pages freed in the
29790 +- * future will not be allocated again.
29791 +- *
29792 +- * start_pfn/end_pfn must be aligned to pageblock_order.
29793 +- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
29794 ++ * future will not be allocated again. If the specified range includes
29795 ++ * migrate types other than MOVABLE or CMA, this will fail with -EBUSY.
29796 ++ * To finally isolate all pages in the range, the caller has to free all
29797 ++ * pages in the range; test_page_isolated() can be used to test that.
29798 + *
29799 + * There is no high level synchronization mechanism that prevents two threads
29800 +- * from trying to isolate overlapping ranges. If this happens, one thread
29801 ++ * from trying to isolate overlapping ranges. If this happens, one thread
29802 + * will notice pageblocks in the overlapping range already set to isolate.
29803 + * This happens in set_migratetype_isolate, and set_migratetype_isolate
29804 +- * returns an error. We then clean up by restoring the migration type on
29805 +- * pageblocks we may have modified and return -EBUSY to caller. This
29806 ++ * returns an error. We then clean up by restoring the migration type on
29807 ++ * pageblocks we may have modified and return -EBUSY to caller. This
29808 + * prevents two threads from simultaneously working on overlapping ranges.
29809 ++ *
29810 ++ * Return: the number of isolated pageblocks on success and -EBUSY if any part
29811 ++ * of range cannot be isolated.
29812 + */
29813 + int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
29814 + unsigned migratetype, int flags)
29815 +@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
29816 + unsigned long pfn;
29817 + unsigned long undo_pfn;
29818 + struct page *page;
29819 ++ int nr_isolate_pageblock = 0;
29820 +
29821 + BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
29822 + BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
29823 +@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
29824 + pfn < end_pfn;
29825 + pfn += pageblock_nr_pages) {
29826 + page = __first_valid_page(pfn, pageblock_nr_pages);
29827 +- if (page &&
29828 +- set_migratetype_isolate(page, migratetype, flags)) {
29829 +- undo_pfn = pfn;
29830 +- goto undo;
29831 ++ if (page) {
29832 ++ if (set_migratetype_isolate(page, migratetype, flags)) {
29833 ++ undo_pfn = pfn;
29834 ++ goto undo;
29835 ++ }
29836 ++ nr_isolate_pageblock++;
29837 + }
29838 + }
29839 +- return 0;
29840 ++ return nr_isolate_pageblock;
29841 + undo:
29842 + for (pfn = start_pfn;
29843 + pfn < undo_pfn;
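[Editor's note] start_isolate_page_range() now reports how many pageblocks it actually isolated instead of a bare 0, which is what lets __offline_pages() earlier in this patch decrement zone->nr_isolate_pageblock by exactly the right amount. The shape of a count-returning, self-undoing operation (try_isolate()/unisolate() are illustrative):

    #include <errno.h>

    static int try_isolate(int *b) { if (*b) return -1; *b = 1; return 0; }
    static void unisolate(int *b) { *b = 0; }

    static int isolate_blocks(int *blocks, int n)
    {
        int i, nr_isolated = 0;

        for (i = 0; i < n; i++) {
            if (try_isolate(&blocks[i]) != 0)
                goto undo;
            nr_isolated++;
        }
        return nr_isolated;     /* >= 0: the caller knows exactly how many */

    undo:
        while (i--)
            unisolate(&blocks[i]);
        return -EBUSY;
    }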
29844 +diff --git a/mm/page_poison.c b/mm/page_poison.c
29845 +index f0c15e9017c0..21d4f97cb49b 100644
29846 +--- a/mm/page_poison.c
29847 ++++ b/mm/page_poison.c
29848 +@@ -6,6 +6,7 @@
29849 + #include <linux/page_ext.h>
29850 + #include <linux/poison.h>
29851 + #include <linux/ratelimit.h>
29852 ++#include <linux/kasan.h>
29853 +
29854 + static bool want_page_poisoning __read_mostly;
29855 +
29856 +@@ -40,7 +41,10 @@ static void poison_page(struct page *page)
29857 + {
29858 + void *addr = kmap_atomic(page);
29859 +
29860 ++ /* KASAN still thinks the page is in-use, so skip it. */
29861 ++ kasan_disable_current();
29862 + memset(addr, PAGE_POISON, PAGE_SIZE);
29863 ++ kasan_enable_current();
29864 + kunmap_atomic(addr);
29865 + }
29866 +
29867 +diff --git a/mm/slab.c b/mm/slab.c
29868 +index 91c1863df93d..2f2aa8eaf7d9 100644
29869 +--- a/mm/slab.c
29870 ++++ b/mm/slab.c
29871 +@@ -550,14 +550,6 @@ static void start_cpu_timer(int cpu)
29872 +
29873 + static void init_arraycache(struct array_cache *ac, int limit, int batch)
29874 + {
29875 +- /*
29876 +- * The array_cache structures contain pointers to free object.
29877 +- * However, when such objects are allocated or transferred to another
29878 +- * cache the pointers are not cleared and they could be counted as
29879 +- * valid references during a kmemleak scan. Therefore, kmemleak must
29880 +- * not scan such objects.
29881 +- */
29882 +- kmemleak_no_scan(ac);
29883 + if (ac) {
29884 + ac->avail = 0;
29885 + ac->limit = limit;
29886 +@@ -573,6 +565,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
29887 + struct array_cache *ac = NULL;
29888 +
29889 + ac = kmalloc_node(memsize, gfp, node);
29890 ++ /*
29891 ++ * The array_cache structures contain pointers to free object.
29892 ++ * However, when such objects are allocated or transferred to another
29893 ++ * cache the pointers are not cleared and they could be counted as
29894 ++ * valid references during a kmemleak scan. Therefore, kmemleak must
29895 ++ * not scan such objects.
29896 ++ */
29897 ++ kmemleak_no_scan(ac);
29898 + init_arraycache(ac, entries, batchcount);
29899 + return ac;
29900 + }
29901 +@@ -667,6 +667,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
29902 +
29903 + alc = kmalloc_node(memsize, gfp, node);
29904 + if (alc) {
29905 ++ kmemleak_no_scan(alc);
29906 + init_arraycache(&alc->ac, entries, batch);
29907 + spin_lock_init(&alc->lock);
29908 + }
29909 +@@ -2111,6 +2112,8 @@ done:
29910 + cachep->allocflags = __GFP_COMP;
29911 + if (flags & SLAB_CACHE_DMA)
29912 + cachep->allocflags |= GFP_DMA;
29913 ++ if (flags & SLAB_CACHE_DMA32)
29914 ++ cachep->allocflags |= GFP_DMA32;
29915 + if (flags & SLAB_RECLAIM_ACCOUNT)
29916 + cachep->allocflags |= __GFP_RECLAIMABLE;
29917 + cachep->size = size;
29918 +diff --git a/mm/slab.h b/mm/slab.h
29919 +index 384105318779..27834ead5f14 100644
29920 +--- a/mm/slab.h
29921 ++++ b/mm/slab.h
29922 +@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
29923 +
29924 +
29925 + /* Legal flag mask for kmem_cache_create(), for various configurations */
29926 +-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
29927 ++#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
29928 ++ SLAB_CACHE_DMA32 | SLAB_PANIC | \
29929 + SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
29930 +
29931 + #if defined(CONFIG_DEBUG_SLAB)
29932 +diff --git a/mm/slab_common.c b/mm/slab_common.c
29933 +index f9d89c1b5977..333618231f8d 100644
29934 +--- a/mm/slab_common.c
29935 ++++ b/mm/slab_common.c
29936 +@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
29937 + SLAB_FAILSLAB | SLAB_KASAN)
29938 +
29939 + #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
29940 +- SLAB_ACCOUNT)
29941 ++ SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
29942 +
29943 + /*
29944 + * Merge control. If this is set then no merging of slab caches will occur.
29945 +diff --git a/mm/slub.c b/mm/slub.c
29946 +index dc777761b6b7..1673100fd534 100644
29947 +--- a/mm/slub.c
29948 ++++ b/mm/slub.c
29949 +@@ -3591,6 +3591,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
29950 + if (s->flags & SLAB_CACHE_DMA)
29951 + s->allocflags |= GFP_DMA;
29952 +
29953 ++ if (s->flags & SLAB_CACHE_DMA32)
29954 ++ s->allocflags |= GFP_DMA32;
29955 ++
29956 + if (s->flags & SLAB_RECLAIM_ACCOUNT)
29957 + s->allocflags |= __GFP_RECLAIMABLE;
29958 +
29959 +@@ -5681,6 +5684,8 @@ static char *create_unique_id(struct kmem_cache *s)
29960 + */
29961 + if (s->flags & SLAB_CACHE_DMA)
29962 + *p++ = 'd';
29963 ++ if (s->flags & SLAB_CACHE_DMA32)
29964 ++ *p++ = 'D';
29965 + if (s->flags & SLAB_RECLAIM_ACCOUNT)
29966 + *p++ = 'a';
29967 + if (s->flags & SLAB_CONSISTENCY_CHECKS)
29968 +diff --git a/mm/sparse.c b/mm/sparse.c
29969 +index 7ea5dc6c6b19..b3771f35a0ed 100644
29970 +--- a/mm/sparse.c
29971 ++++ b/mm/sparse.c
29972 +@@ -197,7 +197,7 @@ static inline int next_present_section_nr(int section_nr)
29973 + }
29974 + #define for_each_present_section_nr(start, section_nr) \
29975 + for (section_nr = next_present_section_nr(start-1); \
29976 +- ((section_nr >= 0) && \
29977 ++ ((section_nr != -1) && \
29978 + (section_nr <= __highest_present_section_nr)); \
29979 + section_nr = next_present_section_nr(section_nr))
29980 +
29981 +@@ -556,7 +556,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
29982 + }
29983 +
29984 + #ifdef CONFIG_MEMORY_HOTREMOVE
29985 +-/* Mark all memory sections within the pfn range as online */
29986 ++/* Mark all memory sections within the pfn range as offline */
29987 + void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
29988 + {
29989 + unsigned long pfn;
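[Editor's note] One plausible reading of the `(section_nr >= 0)` to `(section_nr != -1)` change (the hunk gives no rationale): `>= 0` is vacuously true once the variable is, or later becomes, an unsigned type, while a comparison against -1 still catches the sentinel after wraparound. Demonstrated:

    #include <stdio.h>

    int main(void)
    {
        unsigned long v = (unsigned long)-1;    /* the -1 sentinel, wrapped */

        /* For unsigned v, "v >= 0" is always 1; "v != -1" still works
         * because -1 converts to ULONG_MAX in the comparison. */
        printf("%d %d\n", v >= 0, v != (unsigned long)-1);
        return 0;
    }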
29990 +diff --git a/mm/swapfile.c b/mm/swapfile.c
29991 +index dbac1d49469d..67f60e051814 100644
29992 +--- a/mm/swapfile.c
29993 ++++ b/mm/swapfile.c
29994 +@@ -98,6 +98,15 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
29995 +
29996 + atomic_t nr_rotate_swap = ATOMIC_INIT(0);
29997 +
29998 ++static struct swap_info_struct *swap_type_to_swap_info(int type)
29999 ++{
30000 ++ if (type >= READ_ONCE(nr_swapfiles))
30001 ++ return NULL;
30002 ++
30003 ++ smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
30004 ++ return READ_ONCE(swap_info[type]);
30005 ++}
30006 ++
30007 + static inline unsigned char swap_count(unsigned char ent)
30008 + {
30009 + return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
30010 +@@ -1044,12 +1053,14 @@ noswap:
30011 + /* The only caller of this function is now suspend routine */
30012 + swp_entry_t get_swap_page_of_type(int type)
30013 + {
30014 +- struct swap_info_struct *si;
30015 ++ struct swap_info_struct *si = swap_type_to_swap_info(type);
30016 + pgoff_t offset;
30017 +
30018 +- si = swap_info[type];
30019 ++ if (!si)
30020 ++ goto fail;
30021 ++
30022 + spin_lock(&si->lock);
30023 +- if (si && (si->flags & SWP_WRITEOK)) {
30024 ++ if (si->flags & SWP_WRITEOK) {
30025 + atomic_long_dec(&nr_swap_pages);
30026 + /* This is called for allocating swap entry, not cache */
30027 + offset = scan_swap_map(si, 1);
30028 +@@ -1060,6 +1071,7 @@ swp_entry_t get_swap_page_of_type(int type)
30029 + atomic_long_inc(&nr_swap_pages);
30030 + }
30031 + spin_unlock(&si->lock);
30032 ++fail:
30033 + return (swp_entry_t) {0};
30034 + }
30035 +
30036 +@@ -1071,9 +1083,9 @@ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
30037 + if (!entry.val)
30038 + goto out;
30039 + type = swp_type(entry);
30040 +- if (type >= nr_swapfiles)
30041 ++ p = swap_type_to_swap_info(type);
30042 ++ if (!p)
30043 + goto bad_nofile;
30044 +- p = swap_info[type];
30045 + if (!(p->flags & SWP_USED))
30046 + goto bad_device;
30047 + offset = swp_offset(entry);
30048 +@@ -1697,10 +1709,9 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
30049 + sector_t swapdev_block(int type, pgoff_t offset)
30050 + {
30051 + struct block_device *bdev;
30052 ++ struct swap_info_struct *si = swap_type_to_swap_info(type);
30053 +
30054 +- if ((unsigned int)type >= nr_swapfiles)
30055 +- return 0;
30056 +- if (!(swap_info[type]->flags & SWP_WRITEOK))
30057 ++ if (!si || !(si->flags & SWP_WRITEOK))
30058 + return 0;
30059 + return map_swap_entry(swp_entry(type, offset), &bdev);
30060 + }
30061 +@@ -2258,7 +2269,7 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
30062 + struct swap_extent *se;
30063 + pgoff_t offset;
30064 +
30065 +- sis = swap_info[swp_type(entry)];
30066 ++ sis = swp_swap_info(entry);
30067 + *bdev = sis->bdev;
30068 +
30069 + offset = swp_offset(entry);
30070 +@@ -2700,9 +2711,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
30071 + if (!l)
30072 + return SEQ_START_TOKEN;
30073 +
30074 +- for (type = 0; type < nr_swapfiles; type++) {
30075 +- smp_rmb(); /* read nr_swapfiles before swap_info[type] */
30076 +- si = swap_info[type];
30077 ++ for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
30078 + if (!(si->flags & SWP_USED) || !si->swap_map)
30079 + continue;
30080 + if (!--l)
30081 +@@ -2722,9 +2731,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
30082 + else
30083 + type = si->type + 1;
30084 +
30085 +- for (; type < nr_swapfiles; type++) {
30086 +- smp_rmb(); /* read nr_swapfiles before swap_info[type] */
30087 +- si = swap_info[type];
30088 ++ for (; (si = swap_type_to_swap_info(type)); type++) {
30089 + if (!(si->flags & SWP_USED) || !si->swap_map)
30090 + continue;
30091 + ++*pos;
30092 +@@ -2831,14 +2838,14 @@ static struct swap_info_struct *alloc_swap_info(void)
30093 + }
30094 + if (type >= nr_swapfiles) {
30095 + p->type = type;
30096 +- swap_info[type] = p;
30097 ++ WRITE_ONCE(swap_info[type], p);
30098 + /*
30099 + * Write swap_info[type] before nr_swapfiles, in case a
30100 + * racing procfs swap_start() or swap_next() is reading them.
30101 + * (We never shrink nr_swapfiles, we never free this entry.)
30102 + */
30103 + smp_wmb();
30104 +- nr_swapfiles++;
30105 ++ WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
30106 + } else {
30107 + kvfree(p);
30108 + p = swap_info[type];
30109 +@@ -3358,7 +3365,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
30110 + {
30111 + struct swap_info_struct *p;
30112 + struct swap_cluster_info *ci;
30113 +- unsigned long offset, type;
30114 ++ unsigned long offset;
30115 + unsigned char count;
30116 + unsigned char has_cache;
30117 + int err = -EINVAL;
30118 +@@ -3366,10 +3373,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
30119 + if (non_swap_entry(entry))
30120 + goto out;
30121 +
30122 +- type = swp_type(entry);
30123 +- if (type >= nr_swapfiles)
30124 ++ p = swp_swap_info(entry);
30125 ++ if (!p)
30126 + goto bad_file;
30127 +- p = swap_info[type];
30128 ++
30129 + offset = swp_offset(entry);
30130 + if (unlikely(offset >= p->max))
30131 + goto out;
30132 +@@ -3466,7 +3473,7 @@ int swapcache_prepare(swp_entry_t entry)
30133 +
30134 + struct swap_info_struct *swp_swap_info(swp_entry_t entry)
30135 + {
30136 +- return swap_info[swp_type(entry)];
30137 ++ return swap_type_to_swap_info(swp_type(entry));
30138 + }
30139 +
30140 + struct swap_info_struct *page_swap_info(struct page *page)
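[Editor's note] All the swapfile hunks funnel reads through swap_type_to_swap_info(), which pairs a READ_ONCE of nr_swapfiles plus smp_rmb() against the writer's WRITE_ONCE plus smp_wmb() in alloc_swap_info(): the pointer is published before the count, so any reader that observes the count also observes the pointer. The same ordering expressed with C11 atomics, as a sketch with illustrative names:

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_SLOTS 32

    static int *_Atomic slots[MAX_SLOTS];
    static atomic_int nr_slots;

    /* Writer: store the pointer first, then publish the count (release). */
    static void publish(int type, int *p)
    {
        atomic_store_explicit(&slots[type], p, memory_order_relaxed);
        atomic_store_explicit(&nr_slots, type + 1, memory_order_release);
    }

    /* Reader: an acquire load of the count guarantees the pointer store
     * is visible -- the smp_wmb()/smp_rmb() pairing, in atomics form. */
    static int *lookup(int type)
    {
        if (type >= atomic_load_explicit(&nr_slots, memory_order_acquire))
            return NULL;
        return atomic_load_explicit(&slots[type], memory_order_relaxed);
    }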
30141 +diff --git a/mm/vmalloc.c b/mm/vmalloc.c
30142 +index 871e41c55e23..583630bf247d 100644
30143 +--- a/mm/vmalloc.c
30144 ++++ b/mm/vmalloc.c
30145 +@@ -498,7 +498,11 @@ nocache:
30146 + }
30147 +
30148 + found:
30149 +- if (addr + size > vend)
30150 ++ /*
30151 ++ * Also check the calculated address against vstart,
30152 ++ * because it can be 0 due to a big align request.
30153 ++ */
30154 ++ if (addr + size > vend || addr < vstart)
30155 + goto overflow;
30156 +
30157 + va->va_start = addr;
30158 +@@ -2248,7 +2252,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
30159 + if (!(area->flags & VM_USERMAP))
30160 + return -EINVAL;
30161 +
30162 +- if (kaddr + size > area->addr + area->size)
30163 ++ if (kaddr + size > area->addr + get_vm_area_size(area))
30164 + return -EINVAL;
30165 +
30166 + do {
30167 +diff --git a/net/9p/client.c b/net/9p/client.c
30168 +index 357214a51f13..b85d51f4b8eb 100644
30169 +--- a/net/9p/client.c
30170 ++++ b/net/9p/client.c
30171 +@@ -1061,7 +1061,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
30172 + p9_debug(P9_DEBUG_ERROR,
30173 + "Please specify a msize of at least 4k\n");
30174 + err = -EINVAL;
30175 +- goto free_client;
30176 ++ goto close_trans;
30177 + }
30178 +
30179 + err = p9_client_version(clnt);
30180 +diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
30181 +index deacc52d7ff1..8d12198eaa94 100644
30182 +--- a/net/bluetooth/af_bluetooth.c
30183 ++++ b/net/bluetooth/af_bluetooth.c
30184 +@@ -154,15 +154,25 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
30185 + }
30186 + EXPORT_SYMBOL(bt_sock_unlink);
30187 +
30188 +-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
30189 ++void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
30190 + {
30191 + BT_DBG("parent %p, sk %p", parent, sk);
30192 +
30193 + sock_hold(sk);
30194 +- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
30195 ++
30196 ++ if (bh)
30197 ++ bh_lock_sock_nested(sk);
30198 ++ else
30199 ++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
30200 ++
30201 + list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
30202 + bt_sk(sk)->parent = parent;
30203 +- release_sock(sk);
30204 ++
30205 ++ if (bh)
30206 ++ bh_unlock_sock(sk);
30207 ++ else
30208 ++ release_sock(sk);
30209 ++
30210 + parent->sk_ack_backlog++;
30211 + }
30212 + EXPORT_SYMBOL(bt_accept_enqueue);
30213 +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
30214 +index 1506e1632394..d4e2a166ae17 100644
30215 +--- a/net/bluetooth/hci_sock.c
30216 ++++ b/net/bluetooth/hci_sock.c
30217 +@@ -831,8 +831,6 @@ static int hci_sock_release(struct socket *sock)
30218 + if (!sk)
30219 + return 0;
30220 +
30221 +- hdev = hci_pi(sk)->hdev;
30222 +-
30223 + switch (hci_pi(sk)->channel) {
30224 + case HCI_CHANNEL_MONITOR:
30225 + atomic_dec(&monitor_promisc);
30226 +@@ -854,6 +852,7 @@ static int hci_sock_release(struct socket *sock)
30227 +
30228 + bt_sock_unlink(&hci_sk_list, sk);
30229 +
30230 ++ hdev = hci_pi(sk)->hdev;
30231 + if (hdev) {
30232 + if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
30233 + /* When releasing a user channel exclusive access,
30234 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
30235 +index 2a7fb517d460..ccdc5c67d22a 100644
30236 +--- a/net/bluetooth/l2cap_core.c
30237 ++++ b/net/bluetooth/l2cap_core.c
30238 +@@ -3337,16 +3337,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
30239 +
30240 + while (len >= L2CAP_CONF_OPT_SIZE) {
30241 + len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
30242 ++ if (len < 0)
30243 ++ break;
30244 +
30245 + hint = type & L2CAP_CONF_HINT;
30246 + type &= L2CAP_CONF_MASK;
30247 +
30248 + switch (type) {
30249 + case L2CAP_CONF_MTU:
30250 ++ if (olen != 2)
30251 ++ break;
30252 + mtu = val;
30253 + break;
30254 +
30255 + case L2CAP_CONF_FLUSH_TO:
30256 ++ if (olen != 2)
30257 ++ break;
30258 + chan->flush_to = val;
30259 + break;
30260 +
30261 +@@ -3354,26 +3360,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
30262 + break;
30263 +
30264 + case L2CAP_CONF_RFC:
30265 +- if (olen == sizeof(rfc))
30266 +- memcpy(&rfc, (void *) val, olen);
30267 ++ if (olen != sizeof(rfc))
30268 ++ break;
30269 ++ memcpy(&rfc, (void *) val, olen);
30270 + break;
30271 +
30272 + case L2CAP_CONF_FCS:
30273 ++ if (olen != 1)
30274 ++ break;
30275 + if (val == L2CAP_FCS_NONE)
30276 + set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
30277 + break;
30278 +
30279 + case L2CAP_CONF_EFS:
30280 +- if (olen == sizeof(efs)) {
30281 +- remote_efs = 1;
30282 +- memcpy(&efs, (void *) val, olen);
30283 +- }
30284 ++ if (olen != sizeof(efs))
30285 ++ break;
30286 ++ remote_efs = 1;
30287 ++ memcpy(&efs, (void *) val, olen);
30288 + break;
30289 +
30290 + case L2CAP_CONF_EWS:
30291 ++ if (olen != 2)
30292 ++ break;
30293 + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
30294 + return -ECONNREFUSED;
30295 +-
30296 + set_bit(FLAG_EXT_CTRL, &chan->flags);
30297 + set_bit(CONF_EWS_RECV, &chan->conf_state);
30298 + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
30299 +@@ -3383,7 +3393,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
30300 + default:
30301 + if (hint)
30302 + break;
30303 +-
30304 + result = L2CAP_CONF_UNKNOWN;
30305 + *((u8 *) ptr++) = type;
30306 + break;
30307 +@@ -3548,58 +3557,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
30308 +
30309 + while (len >= L2CAP_CONF_OPT_SIZE) {
30310 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
30311 ++ if (len < 0)
30312 ++ break;
30313 +
30314 + switch (type) {
30315 + case L2CAP_CONF_MTU:
30316 ++ if (olen != 2)
30317 ++ break;
30318 + if (val < L2CAP_DEFAULT_MIN_MTU) {
30319 + *result = L2CAP_CONF_UNACCEPT;
30320 + chan->imtu = L2CAP_DEFAULT_MIN_MTU;
30321 + } else
30322 + chan->imtu = val;
30323 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
30324 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
30325 ++ endptr - ptr);
30326 + break;
30327 +
30328 + case L2CAP_CONF_FLUSH_TO:
30329 ++ if (olen != 2)
30330 ++ break;
30331 + chan->flush_to = val;
30332 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
30333 +- 2, chan->flush_to, endptr - ptr);
30334 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
30335 ++ chan->flush_to, endptr - ptr);
30336 + break;
30337 +
30338 + case L2CAP_CONF_RFC:
30339 +- if (olen == sizeof(rfc))
30340 +- memcpy(&rfc, (void *)val, olen);
30341 +-
30342 ++ if (olen != sizeof(rfc))
30343 ++ break;
30344 ++ memcpy(&rfc, (void *)val, olen);
30345 + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
30346 + rfc.mode != chan->mode)
30347 + return -ECONNREFUSED;
30348 +-
30349 + chan->fcs = 0;
30350 +-
30351 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
30352 +- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
30353 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
30354 ++ (unsigned long) &rfc, endptr - ptr);
30355 + break;
30356 +
30357 + case L2CAP_CONF_EWS:
30358 ++ if (olen != 2)
30359 ++ break;
30360 + chan->ack_win = min_t(u16, val, chan->ack_win);
30361 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
30362 + chan->tx_win, endptr - ptr);
30363 + break;
30364 +
30365 + case L2CAP_CONF_EFS:
30366 +- if (olen == sizeof(efs)) {
30367 +- memcpy(&efs, (void *)val, olen);
30368 +-
30369 +- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
30370 +- efs.stype != L2CAP_SERV_NOTRAFIC &&
30371 +- efs.stype != chan->local_stype)
30372 +- return -ECONNREFUSED;
30373 +-
30374 +- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
30375 +- (unsigned long) &efs, endptr - ptr);
30376 +- }
30377 ++ if (olen != sizeof(efs))
30378 ++ break;
30379 ++ memcpy(&efs, (void *)val, olen);
30380 ++ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
30381 ++ efs.stype != L2CAP_SERV_NOTRAFIC &&
30382 ++ efs.stype != chan->local_stype)
30383 ++ return -ECONNREFUSED;
30384 ++ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
30385 ++ (unsigned long) &efs, endptr - ptr);
30386 + break;
30387 +
30388 + case L2CAP_CONF_FCS:
30389 ++ if (olen != 1)
30390 ++ break;
30391 + if (*result == L2CAP_CONF_PENDING)
30392 + if (val == L2CAP_FCS_NONE)
30393 + set_bit(CONF_RECV_NO_FCS,
30394 +@@ -3728,13 +3744,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
30395 +
30396 + while (len >= L2CAP_CONF_OPT_SIZE) {
30397 + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
30398 ++ if (len < 0)
30399 ++ break;
30400 +
30401 + switch (type) {
30402 + case L2CAP_CONF_RFC:
30403 +- if (olen == sizeof(rfc))
30404 +- memcpy(&rfc, (void *)val, olen);
30405 ++ if (olen != sizeof(rfc))
30406 ++ break;
30407 ++ memcpy(&rfc, (void *)val, olen);
30408 + break;
30409 + case L2CAP_CONF_EWS:
30410 ++ if (olen != 2)
30411 ++ break;
30412 + txwin_ext = val;
30413 + break;
30414 + }
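[Editor's note] The l2cap changes validate each configuration option's length twice: the return of l2cap_get_conf_opt() is checked so a malformed option can no longer drive `len` negative, and every case now rejects an `olen` that does not match its type before anything is copied. A minimal TLV walk in the same spirit (struct opt and the 0x01/MTU type are illustrative, not the L2CAP wire format):

    #include <stdint.h>
    #include <string.h>

    struct opt { uint8_t type, len; };

    static int parse_opts(const uint8_t *buf, size_t len)
    {
        uint16_t mtu = 0;

        while (len >= sizeof(struct opt)) {
            struct opt o;

            memcpy(&o, buf, sizeof(o));
            if (sizeof(o) + o.len > len)    /* option overruns the buffer */
                return -1;

            switch (o.type) {
            case 0x01:                      /* "MTU": payload must be 2 bytes */
                if (o.len == 2)
                    memcpy(&mtu, buf + sizeof(o), sizeof(mtu));
                break;                      /* wrong olen: option is ignored */
            }
            buf += sizeof(o) + o.len;
            len -= sizeof(o) + o.len;
        }
        return mtu;
    }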
30415 +diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
30416 +index 686bdc6b35b0..a3a2cd55e23a 100644
30417 +--- a/net/bluetooth/l2cap_sock.c
30418 ++++ b/net/bluetooth/l2cap_sock.c
30419 +@@ -1252,7 +1252,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
30420 +
30421 + l2cap_sock_init(sk, parent);
30422 +
30423 +- bt_accept_enqueue(parent, sk);
30424 ++ bt_accept_enqueue(parent, sk, false);
30425 +
30426 + release_sock(parent);
30427 +
30428 +diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
30429 +index aa0db1d1bd9b..b1f49fcc0478 100644
30430 +--- a/net/bluetooth/rfcomm/sock.c
30431 ++++ b/net/bluetooth/rfcomm/sock.c
30432 +@@ -988,7 +988,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
30433 + rfcomm_pi(sk)->channel = channel;
30434 +
30435 + sk->sk_state = BT_CONFIG;
30436 +- bt_accept_enqueue(parent, sk);
30437 ++ bt_accept_enqueue(parent, sk, true);
30438 +
30439 + /* Accept connection and return socket DLC */
30440 + *d = rfcomm_pi(sk)->dlc;
30441 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
30442 +index 529b38996d8b..9a580999ca57 100644
30443 +--- a/net/bluetooth/sco.c
30444 ++++ b/net/bluetooth/sco.c
30445 +@@ -193,7 +193,7 @@ static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
30446 + conn->sk = sk;
30447 +
30448 + if (parent)
30449 +- bt_accept_enqueue(parent, sk);
30450 ++ bt_accept_enqueue(parent, sk, true);
30451 + }
30452 +
30453 + static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
30454 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
30455 +index ac92b2eb32b1..e4777614a8a0 100644
30456 +--- a/net/bridge/br_multicast.c
30457 ++++ b/net/bridge/br_multicast.c
30458 +@@ -599,6 +599,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
30459 + if (ipv4_is_local_multicast(group))
30460 + return 0;
30461 +
30462 ++ memset(&br_group, 0, sizeof(br_group));
30463 + br_group.u.ip4 = group;
30464 + br_group.proto = htons(ETH_P_IP);
30465 + br_group.vid = vid;
30466 +@@ -1489,6 +1490,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
30467 +
30468 + own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
30469 +
30470 ++ memset(&br_group, 0, sizeof(br_group));
30471 + br_group.u.ip4 = group;
30472 + br_group.proto = htons(ETH_P_IP);
30473 + br_group.vid = vid;
30474 +@@ -1512,6 +1514,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
30475 +
30476 + own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
30477 +
30478 ++ memset(&br_group, 0, sizeof(br_group));
30479 + br_group.u.ip6 = *group;
30480 + br_group.proto = htons(ETH_P_IPV6);
30481 + br_group.vid = vid;
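[Editor's note] The bridge hunks zero br_group before filling only the IPv4 or IPv6 member: the struct contains a 16-byte address union plus padding, and lookups that hash or compare the whole structure would otherwise read stale stack bytes. In miniature (struct key/make_v4_key are illustrative):

    #include <string.h>

    struct key {
        union { unsigned int ip4; unsigned char ip6[16]; } u;
        unsigned short proto, vid;
    };

    static void make_v4_key(struct key *k, unsigned int addr)
    {
        memset(k, 0, sizeof(*k));   /* every byte defined, padding included */
        k->u.ip4 = addr;
        k->proto = 0x0800;          /* ETH_P_IP */
        /* k->vid stays 0 unless the caller sets it */
    }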
30482 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
30483 +index c93c35bb73dd..40d058378b52 100644
30484 +--- a/net/bridge/br_netfilter_hooks.c
30485 ++++ b/net/bridge/br_netfilter_hooks.c
30486 +@@ -881,11 +881,6 @@ static const struct nf_br_ops br_ops = {
30487 + .br_dev_xmit_hook = br_nf_dev_xmit,
30488 + };
30489 +
30490 +-void br_netfilter_enable(void)
30491 +-{
30492 +-}
30493 +-EXPORT_SYMBOL_GPL(br_netfilter_enable);
30494 +-
30495 + /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
30496 + * br_dev_queue_push_xmit is called afterwards */
30497 + static const struct nf_hook_ops br_nf_ops[] = {
30498 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
30499 +index 6693e209efe8..f77888ec93f1 100644
30500 +--- a/net/bridge/netfilter/ebtables.c
30501 ++++ b/net/bridge/netfilter/ebtables.c
30502 +@@ -31,10 +31,6 @@
30503 + /* needed for logical [in,out]-dev filtering */
30504 + #include "../br_private.h"
30505 +
30506 +-#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
30507 +- "report to author: "format, ## args)
30508 +-/* #define BUGPRINT(format, args...) */
30509 +-
30510 + /* Each cpu has its own set of counters, so there is no need for write_lock in
30511 + * the softirq
30512 + * For reading or updating the counters, the user context needs to
30513 +@@ -466,8 +462,6 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
30514 + /* we make userspace set this right,
30515 + * so there is no misunderstanding
30516 + */
30517 +- BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
30518 +- "in distinguisher\n");
30519 + return -EINVAL;
30520 + }
30521 + if (i != NF_BR_NUMHOOKS)
30522 +@@ -485,18 +479,14 @@ static int ebt_verify_pointers(const struct ebt_replace *repl,
30523 + offset += e->next_offset;
30524 + }
30525 + }
30526 +- if (offset != limit) {
30527 +- BUGPRINT("entries_size too small\n");
30528 ++ if (offset != limit)
30529 + return -EINVAL;
30530 +- }
30531 +
30532 + /* check if all valid hooks have a chain */
30533 + for (i = 0; i < NF_BR_NUMHOOKS; i++) {
30534 + if (!newinfo->hook_entry[i] &&
30535 +- (valid_hooks & (1 << i))) {
30536 +- BUGPRINT("Valid hook without chain\n");
30537 ++ (valid_hooks & (1 << i)))
30538 + return -EINVAL;
30539 +- }
30540 + }
30541 + return 0;
30542 + }
30543 +@@ -523,26 +513,20 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
30544 + /* this checks if the previous chain has as many entries
30545 + * as it said it has
30546 + */
30547 +- if (*n != *cnt) {
30548 +- BUGPRINT("nentries does not equal the nr of entries "
30549 +- "in the chain\n");
30550 ++ if (*n != *cnt)
30551 + return -EINVAL;
30552 +- }
30553 ++
30554 + if (((struct ebt_entries *)e)->policy != EBT_DROP &&
30555 + ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
30556 + /* only RETURN from udc */
30557 + if (i != NF_BR_NUMHOOKS ||
30558 +- ((struct ebt_entries *)e)->policy != EBT_RETURN) {
30559 +- BUGPRINT("bad policy\n");
30560 ++ ((struct ebt_entries *)e)->policy != EBT_RETURN)
30561 + return -EINVAL;
30562 +- }
30563 + }
30564 + if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
30565 + (*udc_cnt)++;
30566 +- if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
30567 +- BUGPRINT("counter_offset != totalcnt");
30568 ++ if (((struct ebt_entries *)e)->counter_offset != *totalcnt)
30569 + return -EINVAL;
30570 +- }
30571 + *n = ((struct ebt_entries *)e)->nentries;
30572 + *cnt = 0;
30573 + return 0;
30574 +@@ -550,15 +534,13 @@ ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
30575 + /* a plain old entry, heh */
30576 + if (sizeof(struct ebt_entry) > e->watchers_offset ||
30577 + e->watchers_offset > e->target_offset ||
30578 +- e->target_offset >= e->next_offset) {
30579 +- BUGPRINT("entry offsets not in right order\n");
30580 ++ e->target_offset >= e->next_offset)
30581 + return -EINVAL;
30582 +- }
30583 ++
30584 + /* this is not checked anywhere else */
30585 +- if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
30586 +- BUGPRINT("target size too small\n");
30587 ++ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target))
30588 + return -EINVAL;
30589 +- }
30590 ++
30591 + (*cnt)++;
30592 + (*totalcnt)++;
30593 + return 0;
30594 +@@ -678,18 +660,15 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
30595 + if (e->bitmask == 0)
30596 + return 0;
30597 +
30598 +- if (e->bitmask & ~EBT_F_MASK) {
30599 +- BUGPRINT("Unknown flag for bitmask\n");
30600 ++ if (e->bitmask & ~EBT_F_MASK)
30601 + return -EINVAL;
30602 +- }
30603 +- if (e->invflags & ~EBT_INV_MASK) {
30604 +- BUGPRINT("Unknown flag for inv bitmask\n");
30605 ++
30606 ++ if (e->invflags & ~EBT_INV_MASK)
30607 + return -EINVAL;
30608 +- }
30609 +- if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
30610 +- BUGPRINT("NOPROTO & 802_3 not allowed\n");
30611 ++
30612 ++ if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3))
30613 + return -EINVAL;
30614 +- }
30615 ++
30616 + /* what hook do we belong to? */
30617 + for (i = 0; i < NF_BR_NUMHOOKS; i++) {
30618 + if (!newinfo->hook_entry[i])
30619 +@@ -748,13 +727,11 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
30620 + t->u.target = target;
30621 + if (t->u.target == &ebt_standard_target) {
30622 + if (gap < sizeof(struct ebt_standard_target)) {
30623 +- BUGPRINT("Standard target size too big\n");
30624 + ret = -EFAULT;
30625 + goto cleanup_watchers;
30626 + }
30627 + if (((struct ebt_standard_target *)t)->verdict <
30628 + -NUM_STANDARD_TARGETS) {
30629 +- BUGPRINT("Invalid standard target\n");
30630 + ret = -EFAULT;
30631 + goto cleanup_watchers;
30632 + }
30633 +@@ -813,10 +790,9 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
30634 + if (strcmp(t->u.name, EBT_STANDARD_TARGET))
30635 + goto letscontinue;
30636 + if (e->target_offset + sizeof(struct ebt_standard_target) >
30637 +- e->next_offset) {
30638 +- BUGPRINT("Standard target size too big\n");
30639 ++ e->next_offset)
30640 + return -1;
30641 +- }
30642 ++
30643 + verdict = ((struct ebt_standard_target *)t)->verdict;
30644 + if (verdict >= 0) { /* jump to another chain */
30645 + struct ebt_entries *hlp2 =
30646 +@@ -825,14 +801,12 @@ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack
30647 + if (hlp2 == cl_s[i].cs.chaininfo)
30648 + break;
30649 + /* bad destination or loop */
30650 +- if (i == udc_cnt) {
30651 +- BUGPRINT("bad destination\n");
30652 ++ if (i == udc_cnt)
30653 + return -1;
30654 +- }
30655 +- if (cl_s[i].cs.n) {
30656 +- BUGPRINT("loop\n");
30657 ++
30658 ++ if (cl_s[i].cs.n)
30659 + return -1;
30660 +- }
30661 ++
30662 + if (cl_s[i].hookmask & (1 << hooknr))
30663 + goto letscontinue;
30664 + /* this can't be 0, so the loop test is correct */
30665 +@@ -865,24 +839,21 @@ static int translate_table(struct net *net, const char *name,
30666 + i = 0;
30667 + while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
30668 + i++;
30669 +- if (i == NF_BR_NUMHOOKS) {
30670 +- BUGPRINT("No valid hooks specified\n");
30671 ++ if (i == NF_BR_NUMHOOKS)
30672 + return -EINVAL;
30673 +- }
30674 +- if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
30675 +- BUGPRINT("Chains don't start at beginning\n");
30676 ++
30677 ++ if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)
30678 + return -EINVAL;
30679 +- }
30680 ++
30681 + /* make sure chains are ordered after each other in same order
30682 + * as their corresponding hooks
30683 + */
30684 + for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
30685 + if (!newinfo->hook_entry[j])
30686 + continue;
30687 +- if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
30688 +- BUGPRINT("Hook order must be followed\n");
30689 ++ if (newinfo->hook_entry[j] <= newinfo->hook_entry[i])
30690 + return -EINVAL;
30691 +- }
30692 ++
30693 + i = j;
30694 + }
30695 +
30696 +@@ -900,15 +871,11 @@ static int translate_table(struct net *net, const char *name,
30697 + if (ret != 0)
30698 + return ret;
30699 +
30700 +- if (i != j) {
30701 +- BUGPRINT("nentries does not equal the nr of entries in the "
30702 +- "(last) chain\n");
30703 ++ if (i != j)
30704 + return -EINVAL;
30705 +- }
30706 +- if (k != newinfo->nentries) {
30707 +- BUGPRINT("Total nentries is wrong\n");
30708 ++
30709 ++ if (k != newinfo->nentries)
30710 + return -EINVAL;
30711 +- }
30712 +
30713 + /* get the location of the udc, put them in an array
30714 + * while we're at it, allocate the chainstack
30715 +@@ -942,7 +909,6 @@ static int translate_table(struct net *net, const char *name,
30716 + ebt_get_udc_positions, newinfo, &i, cl_s);
30717 + /* sanity check */
30718 + if (i != udc_cnt) {
30719 +- BUGPRINT("i != udc_cnt\n");
30720 + vfree(cl_s);
30721 + return -EFAULT;
30722 + }
30723 +@@ -1042,7 +1008,6 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
30724 + goto free_unlock;
30725 +
30726 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
30727 +- BUGPRINT("Wrong nr. of counters requested\n");
30728 + ret = -EINVAL;
30729 + goto free_unlock;
30730 + }
30731 +@@ -1118,15 +1083,12 @@ static int do_replace(struct net *net, const void __user *user,
30732 + if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
30733 + return -EFAULT;
30734 +
30735 +- if (len != sizeof(tmp) + tmp.entries_size) {
30736 +- BUGPRINT("Wrong len argument\n");
30737 ++ if (len != sizeof(tmp) + tmp.entries_size)
30738 + return -EINVAL;
30739 +- }
30740 +
30741 +- if (tmp.entries_size == 0) {
30742 +- BUGPRINT("Entries_size never zero\n");
30743 ++ if (tmp.entries_size == 0)
30744 + return -EINVAL;
30745 +- }
30746 ++
30747 + /* overflow check */
30748 + if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
30749 + NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
30750 +@@ -1153,7 +1115,6 @@ static int do_replace(struct net *net, const void __user *user,
30751 + }
30752 + if (copy_from_user(
30753 + newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
30754 +- BUGPRINT("Couldn't copy entries from userspace\n");
30755 + ret = -EFAULT;
30756 + goto free_entries;
30757 + }
30758 +@@ -1194,10 +1155,8 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
30759 +
30760 + if (input_table == NULL || (repl = input_table->table) == NULL ||
30761 + repl->entries == NULL || repl->entries_size == 0 ||
30762 +- repl->counters != NULL || input_table->private != NULL) {
30763 +- BUGPRINT("Bad table data for ebt_register_table!!!\n");
30764 ++ repl->counters != NULL || input_table->private != NULL)
30765 + return -EINVAL;
30766 +- }
30767 +
30768 + /* Don't add one table to multiple lists. */
30769 + table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
30770 +@@ -1235,13 +1194,10 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
30771 + ((char *)repl->hook_entry[i] - repl->entries);
30772 + }
30773 + ret = translate_table(net, repl->name, newinfo);
30774 +- if (ret != 0) {
30775 +- BUGPRINT("Translate_table failed\n");
30776 ++ if (ret != 0)
30777 + goto free_chainstack;
30778 +- }
30779 +
30780 + if (table->check && table->check(newinfo, table->valid_hooks)) {
30781 +- BUGPRINT("The table doesn't like its own initial data, lol\n");
30782 + ret = -EINVAL;
30783 + goto free_chainstack;
30784 + }
30785 +@@ -1252,7 +1208,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
30786 + list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
30787 + if (strcmp(t->name, table->name) == 0) {
30788 + ret = -EEXIST;
30789 +- BUGPRINT("Table name already exists\n");
30790 + goto free_unlock;
30791 + }
30792 + }
30793 +@@ -1320,7 +1275,6 @@ static int do_update_counters(struct net *net, const char *name,
30794 + goto free_tmp;
30795 +
30796 + if (num_counters != t->private->nentries) {
30797 +- BUGPRINT("Wrong nr of counters\n");
30798 + ret = -EINVAL;
30799 + goto unlock_mutex;
30800 + }
30801 +@@ -1447,10 +1401,8 @@ static int copy_counters_to_user(struct ebt_table *t,
30802 + if (num_counters == 0)
30803 + return 0;
30804 +
30805 +- if (num_counters != nentries) {
30806 +- BUGPRINT("Num_counters wrong\n");
30807 ++ if (num_counters != nentries)
30808 + return -EINVAL;
30809 +- }
30810 +
30811 + counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp)));
30812 + if (!counterstmp)
30813 +@@ -1496,15 +1448,11 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
30814 + (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
30815 + return -EINVAL;
30816 +
30817 +- if (tmp.nentries != nentries) {
30818 +- BUGPRINT("Nentries wrong\n");
30819 ++ if (tmp.nentries != nentries)
30820 + return -EINVAL;
30821 +- }
30822 +
30823 +- if (tmp.entries_size != entries_size) {
30824 +- BUGPRINT("Wrong size\n");
30825 ++ if (tmp.entries_size != entries_size)
30826 + return -EINVAL;
30827 +- }
30828 +
30829 + ret = copy_counters_to_user(t, oldcounters, tmp.counters,
30830 + tmp.num_counters, nentries);
30831 +@@ -1576,7 +1524,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
30832 + }
30833 + mutex_unlock(&ebt_mutex);
30834 + if (copy_to_user(user, &tmp, *len) != 0) {
30835 +- BUGPRINT("c2u Didn't work\n");
30836 + ret = -EFAULT;
30837 + break;
30838 + }
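
The ebtables hunks above delete the BUGPRINT macro and return -EINVAL/-EFAULT silently: a malformed ruleset from userspace is an expected error condition, not a kernel bug worth an unratelimited printk. A minimal userspace sketch of the resulting validation pattern (names are illustrative, not the kernel's):

  #include <errno.h>
  #include <stdio.h>

  struct entry { unsigned int watchers_offset, target_offset, next_offset; };

  /* After the patch: reject bad input quietly with an errno-style code;
   * the caller decides whether and how to report it. */
  static int check_offsets(const struct entry *e)
  {
          if (e->watchers_offset > e->target_offset ||
              e->target_offset >= e->next_offset)
                  return -EINVAL; /* no printk: bad user input, not a bug */
          return 0;
  }

  int main(void)
  {
          struct entry bad = { .watchers_offset = 8, .target_offset = 4,
                               .next_offset = 2 };
          if (check_offsets(&bad) == -EINVAL)
                  fprintf(stderr, "ruleset rejected: invalid offsets\n");
          return 0;
  }
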
30839 +diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
30840 +index 9cab80207ced..79eac465ec65 100644
30841 +--- a/net/ceph/ceph_common.c
30842 ++++ b/net/ceph/ceph_common.c
30843 +@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
30844 + }
30845 + EXPORT_SYMBOL(__ceph_open_session);
30846 +
30847 +-
30848 + int ceph_open_session(struct ceph_client *client)
30849 + {
30850 + int ret;
30851 +@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
30852 + }
30853 + EXPORT_SYMBOL(ceph_open_session);
30854 +
30855 ++int ceph_wait_for_latest_osdmap(struct ceph_client *client,
30856 ++ unsigned long timeout)
30857 ++{
30858 ++ u64 newest_epoch;
30859 ++ int ret;
30860 ++
30861 ++ ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
30862 ++ if (ret)
30863 ++ return ret;
30864 ++
30865 ++ if (client->osdc.osdmap->epoch >= newest_epoch)
30866 ++ return 0;
30867 ++
30868 ++ ceph_osdc_maybe_request_map(&client->osdc);
30869 ++ return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
30870 ++}
30871 ++EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
30872 +
30873 + static int __init init_ceph_lib(void)
30874 + {
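
ceph_wait_for_latest_osdmap() above asks the monitor for the newest osdmap epoch and only blocks when the local map is behind. A rough userspace sketch of that control flow, with every call stubbed out (hypothetical names, not the libceph API):

  #include <stdio.h>

  typedef unsigned long long u64;

  /* Stubs standing in for monitor/OSD client calls (illustrative only). */
  static u64 local_epoch = 40;
  static int get_newest_epoch(u64 *epoch) { *epoch = 42; return 0; }
  static void request_map(void) { puts("requesting newer osdmap"); }
  static int wait_for_epoch(u64 e)
  {
          printf("waiting for epoch %llu\n", e);
          local_epoch = e;
          return 0;
  }

  static int wait_for_latest_osdmap(void)
  {
          u64 newest;
          int ret = get_newest_epoch(&newest);

          if (ret)
                  return ret;
          if (local_epoch >= newest)      /* already current: nothing to do */
                  return 0;
          request_map();                  /* kick off the fetch ... */
          return wait_for_epoch(newest);  /* ... and block until it lands */
  }

  int main(void) { return wait_for_latest_osdmap(); }
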
30875 +diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
30876 +index 18deb3d889c4..a53e4fbb6319 100644
30877 +--- a/net/ceph/mon_client.c
30878 ++++ b/net/ceph/mon_client.c
30879 +@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
30880 + mutex_unlock(&monc->mutex);
30881 +
30882 + ret = wait_generic_request(req);
30883 ++ if (!ret)
30884 ++ /*
30885 ++ * Make sure we have the osdmap that includes the blacklist
30886 ++ * entry. This is needed to ensure that the OSDs pick up the
30887 ++ * new blacklist before processing any future requests from
30888 ++ * this client.
30889 ++ */
30890 ++ ret = ceph_wait_for_latest_osdmap(monc->client, 0);
30891 ++
30892 + out:
30893 + put_generic_request(req);
30894 + return ret;
30895 +diff --git a/net/core/datagram.c b/net/core/datagram.c
30896 +index b2651bb6d2a3..e657289db4ac 100644
30897 +--- a/net/core/datagram.c
30898 ++++ b/net/core/datagram.c
30899 +@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
30900 + break;
30901 +
30902 + sk_busy_loop(sk, flags & MSG_DONTWAIT);
30903 +- } while (!skb_queue_empty(&sk->sk_receive_queue));
30904 ++ } while (sk->sk_receive_queue.prev != *last);
30905 +
30906 + error = -EAGAIN;
30907 +
30908 +diff --git a/net/core/dev.c b/net/core/dev.c
30909 +index 5d03889502eb..12824e007e06 100644
30910 +--- a/net/core/dev.c
30911 ++++ b/net/core/dev.c
30912 +@@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
30913 + if (pt_prev->list_func != NULL)
30914 + pt_prev->list_func(head, pt_prev, orig_dev);
30915 + else
30916 +- list_for_each_entry_safe(skb, next, head, list)
30917 ++ list_for_each_entry_safe(skb, next, head, list) {
30918 ++ skb_list_del_init(skb);
30919 + pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
30920 ++ }
30921 + }
30922 +
30923 + static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
30924 +diff --git a/net/core/ethtool.c b/net/core/ethtool.c
30925 +index 158264f7cfaf..3a7f19a61768 100644
30926 +--- a/net/core/ethtool.c
30927 ++++ b/net/core/ethtool.c
30928 +@@ -1794,11 +1794,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
30929 + WARN_ON_ONCE(!ret);
30930 +
30931 + gstrings.len = ret;
30932 +- data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
30933 +- if (gstrings.len && !data)
30934 +- return -ENOMEM;
30935 +
30936 +- __ethtool_get_strings(dev, gstrings.string_set, data);
30937 ++ if (gstrings.len) {
30938 ++ data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
30939 ++ if (!data)
30940 ++ return -ENOMEM;
30941 ++
30942 ++ __ethtool_get_strings(dev, gstrings.string_set, data);
30943 ++ } else {
30944 ++ data = NULL;
30945 ++ }
30946 +
30947 + ret = -EFAULT;
30948 + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
30949 +@@ -1894,11 +1899,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
30950 + return -EFAULT;
30951 +
30952 + stats.n_stats = n_stats;
30953 +- data = vzalloc(array_size(n_stats, sizeof(u64)));
30954 +- if (n_stats && !data)
30955 +- return -ENOMEM;
30956 +
30957 +- ops->get_ethtool_stats(dev, &stats, data);
30958 ++ if (n_stats) {
30959 ++ data = vzalloc(array_size(n_stats, sizeof(u64)));
30960 ++ if (!data)
30961 ++ return -ENOMEM;
30962 ++ ops->get_ethtool_stats(dev, &stats, data);
30963 ++ } else {
30964 ++ data = NULL;
30965 ++ }
30966 +
30967 + ret = -EFAULT;
30968 + if (copy_to_user(useraddr, &stats, sizeof(stats)))
30969 +@@ -1938,16 +1947,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
30970 + return -EFAULT;
30971 +
30972 + stats.n_stats = n_stats;
30973 +- data = vzalloc(array_size(n_stats, sizeof(u64)));
30974 +- if (n_stats && !data)
30975 +- return -ENOMEM;
30976 +
30977 +- if (dev->phydev && !ops->get_ethtool_phy_stats) {
30978 +- ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
30979 +- if (ret < 0)
30980 +- return ret;
30981 ++ if (n_stats) {
30982 ++ data = vzalloc(array_size(n_stats, sizeof(u64)));
30983 ++ if (!data)
30984 ++ return -ENOMEM;
30985 ++
30986 ++ if (dev->phydev && !ops->get_ethtool_phy_stats) {
30987 ++ ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
30988 ++ if (ret < 0)
30989 ++ goto out;
30990 ++ } else {
30991 ++ ops->get_ethtool_phy_stats(dev, &stats, data);
30992 ++ }
30993 + } else {
30994 +- ops->get_ethtool_phy_stats(dev, &stats, data);
30995 ++ data = NULL;
30996 + }
30997 +
30998 + ret = -EFAULT;
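
The three ethtool hunks share one fix: when the driver reports zero strings/stats, skip both the allocation and the ->get_* callback instead of handing the driver a NULL buffer. A small sketch of that pattern using plain malloc (hypothetical names, not the ethtool API):

  #include <stdlib.h>
  #include <string.h>

  /* Fill callback that must never see a NULL buffer (mirrors driver ops). */
  static void fill_stats(unsigned long long *data, int n)
  {
          memset(data, 0, n * sizeof(*data));
  }

  static int get_stats(int n_stats, unsigned long long **out)
  {
          unsigned long long *data = NULL;

          if (n_stats) {          /* only allocate and call the driver when
                                   * there is actually something to fill */
                  data = calloc(n_stats, sizeof(*data));
                  if (!data)
                          return -1;      /* -ENOMEM in the kernel */
                  fill_stats(data, n_stats);
          }
          *out = data;            /* NULL is fine when n_stats == 0 */
          return 0;
  }

  int main(void)
  {
          unsigned long long *d;
          int ret = get_stats(0, &d); /* zero stats: no alloc, no callback */

          free(d);
          return ret;
  }
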
30999 +diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
31000 +index 9bf1b9ad1780..ac679f74ba47 100644
31001 +--- a/net/core/gen_stats.c
31002 ++++ b/net/core/gen_stats.c
31003 +@@ -291,7 +291,6 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
31004 + for_each_possible_cpu(i) {
31005 + const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
31006 +
31007 +- qstats->qlen = 0;
31008 + qstats->backlog += qcpu->backlog;
31009 + qstats->drops += qcpu->drops;
31010 + qstats->requeues += qcpu->requeues;
31011 +@@ -307,7 +306,6 @@ void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
31012 + if (cpu) {
31013 + __gnet_stats_copy_queue_cpu(qstats, cpu);
31014 + } else {
31015 +- qstats->qlen = q->qlen;
31016 + qstats->backlog = q->backlog;
31017 + qstats->drops = q->drops;
31018 + qstats->requeues = q->requeues;
31019 +diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
31020 +index acf45ddbe924..e095fb871d91 100644
31021 +--- a/net/core/gro_cells.c
31022 ++++ b/net/core/gro_cells.c
31023 +@@ -13,22 +13,36 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
31024 + {
31025 + struct net_device *dev = skb->dev;
31026 + struct gro_cell *cell;
31027 ++ int res;
31028 +
31029 +- if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
31030 +- return netif_rx(skb);
31031 ++ rcu_read_lock();
31032 ++ if (unlikely(!(dev->flags & IFF_UP)))
31033 ++ goto drop;
31034 ++
31035 ++ if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
31036 ++ res = netif_rx(skb);
31037 ++ goto unlock;
31038 ++ }
31039 +
31040 + cell = this_cpu_ptr(gcells->cells);
31041 +
31042 + if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
31043 ++drop:
31044 + atomic_long_inc(&dev->rx_dropped);
31045 + kfree_skb(skb);
31046 +- return NET_RX_DROP;
31047 ++ res = NET_RX_DROP;
31048 ++ goto unlock;
31049 + }
31050 +
31051 + __skb_queue_tail(&cell->napi_skbs, skb);
31052 + if (skb_queue_len(&cell->napi_skbs) == 1)
31053 + napi_schedule(&cell->napi);
31054 +- return NET_RX_SUCCESS;
31055 ++
31056 ++ res = NET_RX_SUCCESS;
31057 ++
31058 ++unlock:
31059 ++ rcu_read_unlock();
31060 ++ return res;
31061 + }
31062 + EXPORT_SYMBOL(gro_cells_receive);
31063 +
31064 +diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
31065 +index ff9fd2bb4ce4..aec26584f0ca 100644
31066 +--- a/net/core/net-sysfs.c
31067 ++++ b/net/core/net-sysfs.c
31068 +@@ -934,6 +934,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
31069 + if (error)
31070 + return error;
31071 +
31072 ++ dev_hold(queue->dev);
31073 ++
31074 + if (dev->sysfs_rx_queue_group) {
31075 + error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
31076 + if (error) {
31077 +@@ -943,7 +945,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
31078 + }
31079 +
31080 + kobject_uevent(kobj, KOBJ_ADD);
31081 +- dev_hold(queue->dev);
31082 +
31083 + return error;
31084 + }
31085 +@@ -1472,6 +1473,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
31086 + if (error)
31087 + return error;
31088 +
31089 ++ dev_hold(queue->dev);
31090 ++
31091 + #ifdef CONFIG_BQL
31092 + error = sysfs_create_group(kobj, &dql_group);
31093 + if (error) {
31094 +@@ -1481,7 +1484,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
31095 + #endif
31096 +
31097 + kobject_uevent(kobj, KOBJ_ADD);
31098 +- dev_hold(queue->dev);
31099 +
31100 + return 0;
31101 + }
31102 +@@ -1547,6 +1549,9 @@ static int register_queue_kobjects(struct net_device *dev)
31103 + error:
31104 + netdev_queue_update_kobjects(dev, txq, 0);
31105 + net_rx_queue_update_kobjects(dev, rxq, 0);
31106 ++#ifdef CONFIG_SYSFS
31107 ++ kset_unregister(dev->queues_kset);
31108 ++#endif
31109 + return error;
31110 + }
31111 +
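
Both net-sysfs hunks move dev_hold() ahead of the first failure branch, because the shared error path (kobject_put and its release handler) unconditionally drops a device reference; taking the reference late meant an early failure dropped a reference that was never taken. A toy refcount sketch of the rule (illustrative, not the kernel API):

  #include <assert.h>
  #include <stdio.h>

  static int refs = 1;
  static void hold(void) { refs++; }
  static void put(void)  { refs--; }

  /* The release path always does a put(), so the register path must do
   * its hold() before any branch that can reach the release path. */
  static int register_queue(int fail_group)
  {
          hold();         /* take the reference first (the fix) */
          if (fail_group) {
                  put();  /* error path: cleanup drops the ref we took */
                  return -1;
          }
          return 0;
  }

  int main(void)
  {
          register_queue(1);      /* a failing registration ... */
          assert(refs == 1);      /* ... leaves the refcount balanced */
          puts("refcount balanced");
          return 0;
  }
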
31112 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
31113 +index b02fb19df2cc..40c249c574c1 100644
31114 +--- a/net/core/net_namespace.c
31115 ++++ b/net/core/net_namespace.c
31116 +@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
31117 +
31118 + refcount_set(&net->count, 1);
31119 + refcount_set(&net->passive, 1);
31120 ++ get_random_bytes(&net->hash_mix, sizeof(u32));
31121 + net->dev_base_seq = 1;
31122 + net->user_ns = user_ns;
31123 + idr_init(&net->netns_ids);
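
Seeding net->hash_mix per namespace makes hash-bucket placement differ between namespaces, so collisions learned in one namespace cannot be replayed against another. A tiny sketch of salting a hash with a per-instance random value (toy multiplicative hash, not the kernel's jhash):

  #include <stdio.h>
  #include <stdlib.h>
  #include <time.h>

  struct ns { unsigned int hash_mix; };

  /* Toy hash: the per-namespace salt perturbs every lookup. */
  static unsigned int hash_key(const struct ns *ns, unsigned int key)
  {
          unsigned int h = key ^ ns->hash_mix;

          h *= 2654435761u;       /* Knuth multiplicative step */
          return h >> 24;         /* 256 buckets */
  }

  int main(void)
  {
          struct ns a, b;

          srand((unsigned)time(NULL));
          a.hash_mix = (unsigned)rand();
          b.hash_mix = (unsigned)rand();

          /* The same key usually lands in different buckets per namespace. */
          printf("ns a: %u, ns b: %u\n",
                 hash_key(&a, 12345), hash_key(&b, 12345));
          return 0;
  }
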
31124 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
31125 +index 2415d9cb9b89..ef2cd5712098 100644
31126 +--- a/net/core/skbuff.c
31127 ++++ b/net/core/skbuff.c
31128 +@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
31129 + unsigned int delta_truesize;
31130 + struct sk_buff *lp;
31131 +
31132 +- if (unlikely(p->len + len >= 65536))
31133 ++ if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
31134 + return -E2BIG;
31135 +
31136 + lp = NAPI_GRO_CB(p)->last;
31137 +diff --git a/net/core/skmsg.c b/net/core/skmsg.c
31138 +index 8c826603bf36..8bc0ba1ebabe 100644
31139 +--- a/net/core/skmsg.c
31140 ++++ b/net/core/skmsg.c
31141 +@@ -545,6 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
31142 + struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
31143 +
31144 + /* No sk_callback_lock since already detached. */
31145 ++ strp_stop(&psock->parser.strp);
31146 + strp_done(&psock->parser.strp);
31147 +
31148 + cancel_work_sync(&psock->work);
31149 +diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
31150 +index d5740bad5b18..57d84e9b7b6f 100644
31151 +--- a/net/dccp/ipv6.c
31152 ++++ b/net/dccp/ipv6.c
31153 +@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
31154 + newnp->ipv6_mc_list = NULL;
31155 + newnp->ipv6_ac_list = NULL;
31156 + newnp->ipv6_fl_list = NULL;
31157 +- newnp->mcast_oif = inet6_iif(skb);
31158 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
31159 ++ newnp->mcast_oif = inet_iif(skb);
31160 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
31161 +
31162 + /*
31163 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
31164 +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
31165 +index b8cd43c9ed5b..a97bf326b231 100644
31166 +--- a/net/hsr/hsr_device.c
31167 ++++ b/net/hsr/hsr_device.c
31168 +@@ -94,9 +94,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
31169 + && (old_operstate != IF_OPER_UP)) {
31170 + /* Went up */
31171 + hsr->announce_count = 0;
31172 +- hsr->announce_timer.expires = jiffies +
31173 +- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
31174 +- add_timer(&hsr->announce_timer);
31175 ++ mod_timer(&hsr->announce_timer,
31176 ++ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
31177 + }
31178 +
31179 + if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
31180 +@@ -332,6 +331,7 @@ static void hsr_announce(struct timer_list *t)
31181 + {
31182 + struct hsr_priv *hsr;
31183 + struct hsr_port *master;
31184 ++ unsigned long interval;
31185 +
31186 + hsr = from_timer(hsr, t, announce_timer);
31187 +
31188 +@@ -343,18 +343,16 @@ static void hsr_announce(struct timer_list *t)
31189 + hsr->protVersion);
31190 + hsr->announce_count++;
31191 +
31192 +- hsr->announce_timer.expires = jiffies +
31193 +- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
31194 ++ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
31195 + } else {
31196 + send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
31197 + hsr->protVersion);
31198 +
31199 +- hsr->announce_timer.expires = jiffies +
31200 +- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
31201 ++ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
31202 + }
31203 +
31204 + if (is_admin_up(master->dev))
31205 +- add_timer(&hsr->announce_timer);
31206 ++ mod_timer(&hsr->announce_timer, jiffies + interval);
31207 +
31208 + rcu_read_unlock();
31209 + }
31210 +@@ -486,7 +484,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
31211 +
31212 + res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
31213 + if (res)
31214 +- return res;
31215 ++ goto err_add_port;
31216 +
31217 + res = register_netdevice(hsr_dev);
31218 + if (res)
31219 +@@ -506,6 +504,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
31220 + fail:
31221 + hsr_for_each_port(hsr, port)
31222 + hsr_del_port(port);
31223 ++err_add_port:
31224 ++ hsr_del_node(&hsr->self_node_db);
31225 +
31226 + return res;
31227 + }
31228 +diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
31229 +index 286ceb41ac0c..9af16cb68f76 100644
31230 +--- a/net/hsr/hsr_framereg.c
31231 ++++ b/net/hsr/hsr_framereg.c
31232 +@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
31233 + return 0;
31234 + }
31235 +
31236 ++void hsr_del_node(struct list_head *self_node_db)
31237 ++{
31238 ++ struct hsr_node *node;
31239 ++
31240 ++ rcu_read_lock();
31241 ++ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
31242 ++ rcu_read_unlock();
31243 ++ if (node) {
31244 ++ list_del_rcu(&node->mac_list);
31245 ++ kfree(node);
31246 ++ }
31247 ++}
31248 +
31249 + /* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
31250 + * seq_out is used to initialize filtering of outgoing duplicate frames
31251 +diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
31252 +index 370b45998121..531fd3dfcac1 100644
31253 +--- a/net/hsr/hsr_framereg.h
31254 ++++ b/net/hsr/hsr_framereg.h
31255 +@@ -16,6 +16,7 @@
31256 +
31257 + struct hsr_node;
31258 +
31259 ++void hsr_del_node(struct list_head *self_node_db);
31260 + struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
31261 + u16 seq_out);
31262 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
31263 +diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
31264 +index 437070d1ffb1..79e98e21cdd7 100644
31265 +--- a/net/ipv4/fou.c
31266 ++++ b/net/ipv4/fou.c
31267 +@@ -1024,7 +1024,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
31268 + int ret;
31269 +
31270 + len = sizeof(struct udphdr) + sizeof(struct guehdr);
31271 +- if (!pskb_may_pull(skb, len))
31272 ++ if (!pskb_may_pull(skb, transport_offset + len))
31273 + return -EINVAL;
31274 +
31275 + guehdr = (struct guehdr *)&udp_hdr(skb)[1];
31276 +@@ -1059,7 +1059,7 @@ static int gue_err(struct sk_buff *skb, u32 info)
31277 +
31278 + optlen = guehdr->hlen << 2;
31279 +
31280 +- if (!pskb_may_pull(skb, len + optlen))
31281 ++ if (!pskb_may_pull(skb, transport_offset + len + optlen))
31282 + return -EINVAL;
31283 +
31284 + guehdr = (struct guehdr *)&udp_hdr(skb)[1];
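
The gue_err() hunks (and the matching fou6 ones further down) fix an under-length check: the GUE header sits transport_offset bytes into the packet, so the pull must cover transport_offset + len bytes, not len alone. A plain bounds-check sketch with made-up sizes (not skb internals):

  #include <stdio.h>

  /* may_pull: ensure at least 'need' bytes from the start of the buffer. */
  static int may_pull(size_t buf_len, size_t need)
  {
          return buf_len >= need;
  }

  int main(void)
  {
          size_t pkt_len = 20, transport_offset = 14, hdr_len = 16;

          /* Old check: hdr_len alone passes even though the header starts
           * at transport_offset and runs past the end of the packet. */
          printf("old check: %s\n",
                 may_pull(pkt_len, hdr_len) ? "pass (wrong)" : "fail");

          /* Fixed check accounts for where the header begins. */
          printf("new check: %s\n",
                 may_pull(pkt_len, transport_offset + hdr_len) ?
                 "pass" : "fail (correct)");
          return 0;
  }
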
31285 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
31286 +index 6ae89f2b541b..2d5734079e6b 100644
31287 +--- a/net/ipv4/ip_gre.c
31288 ++++ b/net/ipv4/ip_gre.c
31289 +@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
31290 + struct net *net = dev_net(skb->dev);
31291 + struct metadata_dst *tun_dst = NULL;
31292 + struct erspan_base_hdr *ershdr;
31293 +- struct erspan_metadata *pkt_md;
31294 + struct ip_tunnel_net *itn;
31295 + struct ip_tunnel *tunnel;
31296 + const struct iphdr *iph;
31297 +@@ -282,9 +281,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
31298 + if (unlikely(!pskb_may_pull(skb, len)))
31299 + return PACKET_REJECT;
31300 +
31301 +- ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
31302 +- pkt_md = (struct erspan_metadata *)(ershdr + 1);
31303 +-
31304 + if (__iptunnel_pull_header(skb,
31305 + len,
31306 + htons(ETH_P_TEB),
31307 +@@ -292,8 +288,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
31308 + goto drop;
31309 +
31310 + if (tunnel->collect_md) {
31311 ++ struct erspan_metadata *pkt_md, *md;
31312 + struct ip_tunnel_info *info;
31313 +- struct erspan_metadata *md;
31314 ++ unsigned char *gh;
31315 + __be64 tun_id;
31316 + __be16 flags;
31317 +
31318 +@@ -306,6 +303,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
31319 + if (!tun_dst)
31320 + return PACKET_REJECT;
31321 +
31322 ++ /* skb can be uncloned in __iptunnel_pull_header, so
31323 ++ * old pkt_md is no longer valid and we need to reset
31324 ++ * it
31325 ++ */
31326 ++ gh = skb_network_header(skb) +
31327 ++ skb_network_header_len(skb);
31328 ++ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
31329 ++ sizeof(*ershdr));
31330 + md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
31331 + md->version = ver;
31332 + md2 = &md->u.md2;
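
The erspan_rcv() hunk recomputes pkt_md only after __iptunnel_pull_header(), because the pull may unclone (reallocate) the skb head, leaving any pointer taken earlier dangling. The ip6erspan_rcv(), ip6_fragment() and ipip6_rcv() hunks further down fix the same stale-pointer class the same way: keep an offset, re-derive the pointer. The bug class in plain C, with realloc standing in for the head reallocation:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(void)
  {
          char *buf = malloc(16), *nbuf;
          char *payload;
          size_t off;

          if (!buf)
                  return 1;
          strcpy(buf, "hdr+payload");
          payload = buf + 4;      /* pointer into the buffer */

          /* Like skb uncloning: the buffer may move, invalidating
           * 'payload'; keep an offset instead. */
          off = payload - buf;
          nbuf = realloc(buf, 4096);
          if (!nbuf) {
                  free(buf);
                  return 1;
          }
          buf = nbuf;

          payload = buf + off;    /* recompute after the move (the fix) */
          printf("%s\n", payload);
          free(buf);
          return 0;
  }
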
31333 +diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
31334 +index 1f4737b77067..ccf0d31b6ce5 100644
31335 +--- a/net/ipv4/ip_input.c
31336 ++++ b/net/ipv4/ip_input.c
31337 +@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
31338 + ip_local_deliver_finish);
31339 + }
31340 +
31341 +-static inline bool ip_rcv_options(struct sk_buff *skb)
31342 ++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
31343 + {
31344 + struct ip_options *opt;
31345 + const struct iphdr *iph;
31346 +- struct net_device *dev = skb->dev;
31347 +
31348 + /* It looks as overkill, because not all
31349 + IP options require packet mangling.
31350 +@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
31351 + }
31352 + }
31353 +
31354 +- if (ip_options_rcv_srr(skb))
31355 ++ if (ip_options_rcv_srr(skb, dev))
31356 + goto drop;
31357 + }
31358 +
31359 +@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
31360 + }
31361 + #endif
31362 +
31363 +- if (iph->ihl > 5 && ip_rcv_options(skb))
31364 ++ if (iph->ihl > 5 && ip_rcv_options(skb, dev))
31365 + goto drop;
31366 +
31367 + rt = skb_rtable(skb);
31368 +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
31369 +index 32a35043c9f5..3db31bb9df50 100644
31370 +--- a/net/ipv4/ip_options.c
31371 ++++ b/net/ipv4/ip_options.c
31372 +@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
31373 + }
31374 + }
31375 +
31376 +-int ip_options_rcv_srr(struct sk_buff *skb)
31377 ++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
31378 + {
31379 + struct ip_options *opt = &(IPCB(skb)->opt);
31380 + int srrspace, srrptr;
31381 +@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
31382 +
31383 + orefdst = skb->_skb_refdst;
31384 + skb_dst_set(skb, NULL);
31385 +- err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
31386 ++ err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
31387 + rt2 = skb_rtable(skb);
31388 + if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
31389 + skb_dst_drop(skb);
31390 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
31391 +index 7bb9128c8363..e04cdb58a602 100644
31392 +--- a/net/ipv4/route.c
31393 ++++ b/net/ipv4/route.c
31394 +@@ -1303,6 +1303,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
31395 + if (fnhe->fnhe_daddr == daddr) {
31396 + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
31397 + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
31398 ++ /* set fnhe_daddr to 0 to ensure it won't bind with
31399 ++ * new dsts in rt_bind_exception().
31400 ++ */
31401 ++ fnhe->fnhe_daddr = 0;
31402 + fnhe_flush_routes(fnhe);
31403 + kfree_rcu(fnhe, rcu);
31404 + break;
31405 +@@ -2144,12 +2148,13 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
31406 + int our = 0;
31407 + int err = -EINVAL;
31408 +
31409 +- if (in_dev)
31410 +- our = ip_check_mc_rcu(in_dev, daddr, saddr,
31411 +- ip_hdr(skb)->protocol);
31412 ++ if (!in_dev)
31413 ++ return err;
31414 ++ our = ip_check_mc_rcu(in_dev, daddr, saddr,
31415 ++ ip_hdr(skb)->protocol);
31416 +
31417 + /* check l3 master if no match yet */
31418 +- if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
31419 ++ if (!our && netif_is_l3_slave(dev)) {
31420 + struct in_device *l3_in_dev;
31421 +
31422 + l3_in_dev = __in_dev_get_rcu(skb->dev);
31423 +diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
31424 +index 606f868d9f3f..e531344611a0 100644
31425 +--- a/net/ipv4/syncookies.c
31426 ++++ b/net/ipv4/syncookies.c
31427 +@@ -216,7 +216,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
31428 + refcount_set(&req->rsk_refcnt, 1);
31429 + tcp_sk(child)->tsoffset = tsoff;
31430 + sock_rps_save_rxhash(child, skb);
31431 +- inet_csk_reqsk_queue_add(sk, req, child);
31432 ++ if (!inet_csk_reqsk_queue_add(sk, req, child)) {
31433 ++ bh_unlock_sock(child);
31434 ++ sock_put(child);
31435 ++ child = NULL;
31436 ++ reqsk_put(req);
31437 ++ }
31438 + } else {
31439 + reqsk_free(req);
31440 + }
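
inet_csk_reqsk_queue_add() can now fail (the listener may be gone), and this caller, like the fastopen path in tcp_input.c below, must undo everything it set up for the child socket. A generic sketch of the "queue add may fail, caller owns cleanup" shape (toy queue, hypothetical names):

  #include <stdio.h>
  #include <stdlib.h>

  struct req { int id; };

  /* A bounded queue whose add can fail, as reqsk_queue_add now can. */
  static int queue_add(struct req *r, int queue_full)
  {
          (void)r;                        /* toy: drop rather than enqueue */
          return queue_full ? 0 : 1;      /* 0 == failed to enqueue */
  }

  int main(void)
  {
          struct req *child = malloc(sizeof(*child));

          if (!child)
                  return 1;
          child->id = 1;

          if (!queue_add(child, /*queue_full=*/1)) {
                  /* The fix: on failure, release what was prepared for the
                   * child (sock_put/reqsk_put in the kernel) instead of
                   * leaking it. */
                  free(child);
                  child = NULL;
                  fprintf(stderr, "child dropped: accept queue unavailable\n");
          }
          free(child);            /* free(NULL) is a no-op */
          return 0;
  }
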
31441 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
31442 +index cf3c5095c10e..ce365cbba1d1 100644
31443 +--- a/net/ipv4/tcp.c
31444 ++++ b/net/ipv4/tcp.c
31445 +@@ -1914,6 +1914,11 @@ static int tcp_inq_hint(struct sock *sk)
31446 + inq = tp->rcv_nxt - tp->copied_seq;
31447 + release_sock(sk);
31448 + }
31449 ++ /* After receiving a FIN, tell the user-space to continue reading
31450 ++ * by returning a non-zero inq.
31451 ++ */
31452 ++ if (inq == 0 && sock_flag(sk, SOCK_DONE))
31453 ++ inq = 1;
31454 + return inq;
31455 + }
31456 +
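
tcp_inq_hint() backs the TCP_INQ hint to userspace; reporting 0 after a FIN could stall readers that only call recv() while inq > 0, so the hunk reports 1 once SOCK_DONE is set to push them into one last read, which then observes EOF. A sketch of the consumer loop this protects (stand-in helpers, not the socket API):

  #include <stdio.h>

  /* Stand-ins for the kernel-side state and a reader using the hint. */
  static int bytes_queued = 0, fin_received = 1;

  static int inq_hint(void)
  {
          int inq = bytes_queued;

          if (inq == 0 && fin_received)
                  return 1;       /* the fix: nudge the reader once more */
          return inq;
  }

  static int do_recv(void) { return bytes_queued; } /* 0 == EOF after FIN */

  int main(void)
  {
          while (inq_hint() > 0) {        /* without the fix this loop never
                                           * runs again and misses the EOF */
                  if (do_recv() == 0) {
                          puts("EOF observed, closing");
                          break;
                  }
          }
          return 0;
  }
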
31457 +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
31458 +index cd4814f7e962..359da68d7c06 100644
31459 +--- a/net/ipv4/tcp_dctcp.c
31460 ++++ b/net/ipv4/tcp_dctcp.c
31461 +@@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
31462 + module_param(dctcp_alpha_on_init, uint, 0644);
31463 + MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
31464 +
31465 +-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
31466 +-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
31467 +-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
31468 +- "parameter for clamping alpha on loss");
31469 +-
31470 + static struct tcp_congestion_ops dctcp_reno;
31471 +
31472 + static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
31473 +@@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
31474 + }
31475 + }
31476 +
31477 +-static void dctcp_state(struct sock *sk, u8 new_state)
31478 ++static void dctcp_react_to_loss(struct sock *sk)
31479 + {
31480 +- if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
31481 +- struct dctcp *ca = inet_csk_ca(sk);
31482 ++ struct dctcp *ca = inet_csk_ca(sk);
31483 ++ struct tcp_sock *tp = tcp_sk(sk);
31484 +
31485 +- /* If this extension is enabled, we clamp dctcp_alpha to
31486 +- * max on packet loss; the motivation is that dctcp_alpha
31487 +- * is an indicator to the extend of congestion and packet
31488 +- * loss is an indicator of extreme congestion; setting
31489 +- * this in practice turned out to be beneficial, and
31490 +- * effectively assumes total congestion which reduces the
31491 +- * window by half.
31492 +- */
31493 +- ca->dctcp_alpha = DCTCP_MAX_ALPHA;
31494 +- }
31495 ++ ca->loss_cwnd = tp->snd_cwnd;
31496 ++ tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
31497 ++}
31498 ++
31499 ++static void dctcp_state(struct sock *sk, u8 new_state)
31500 ++{
31501 ++ if (new_state == TCP_CA_Recovery &&
31502 ++ new_state != inet_csk(sk)->icsk_ca_state)
31503 ++ dctcp_react_to_loss(sk);
31504 ++ /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
31505 ++ * one loss-adjustment per RTT.
31506 ++ */
31507 + }
31508 +
31509 + static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
31510 +@@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
31511 + case CA_EVENT_ECN_NO_CE:
31512 + dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
31513 + break;
31514 ++ case CA_EVENT_LOSS:
31515 ++ dctcp_react_to_loss(sk);
31516 ++ break;
31517 + default:
31518 + /* Don't care for the rest. */
31519 + break;
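
dctcp_react_to_loss() above halves ssthresh with a floor of two segments, and is now invoked once per RTT on loss or entry into recovery, rather than optionally clamping alpha via the removed module parameter. The window arithmetic in isolation:

  #include <stdio.h>

  /* snd_ssthresh = max(snd_cwnd >> 1, 2U), as in dctcp_react_to_loss(). */
  static unsigned int react_to_loss(unsigned int snd_cwnd)
  {
          unsigned int half = snd_cwnd >> 1;

          return half > 2u ? half : 2u;   /* never below two segments */
  }

  int main(void)
  {
          printf("cwnd 10 -> ssthresh %u\n", react_to_loss(10)); /* 5 */
          printf("cwnd 3  -> ssthresh %u\n", react_to_loss(3));  /* floor: 2 */
          printf("cwnd 1  -> ssthresh %u\n", react_to_loss(1));  /* floor: 2 */
          return 0;
  }
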
31520 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
31521 +index 76858b14ebe9..7b1ef897b398 100644
31522 +--- a/net/ipv4/tcp_input.c
31523 ++++ b/net/ipv4/tcp_input.c
31524 +@@ -6519,7 +6519,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
31525 + af_ops->send_synack(fastopen_sk, dst, &fl, req,
31526 + &foc, TCP_SYNACK_FASTOPEN);
31527 + /* Add the child socket directly into the accept queue */
31528 +- inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
31529 ++ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
31530 ++ reqsk_fastopen_remove(fastopen_sk, req, false);
31531 ++ bh_unlock_sock(fastopen_sk);
31532 ++ sock_put(fastopen_sk);
31533 ++ reqsk_put(req);
31534 ++ goto drop;
31535 ++ }
31536 + sk->sk_data_ready(sk);
31537 + bh_unlock_sock(fastopen_sk);
31538 + sock_put(fastopen_sk);
31539 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
31540 +index ec3cea9d6828..00852f47a73d 100644
31541 +--- a/net/ipv4/tcp_ipv4.c
31542 ++++ b/net/ipv4/tcp_ipv4.c
31543 +@@ -1734,15 +1734,8 @@ EXPORT_SYMBOL(tcp_add_backlog);
31544 + int tcp_filter(struct sock *sk, struct sk_buff *skb)
31545 + {
31546 + struct tcphdr *th = (struct tcphdr *)skb->data;
31547 +- unsigned int eaten = skb->len;
31548 +- int err;
31549 +
31550 +- err = sk_filter_trim_cap(sk, skb, th->doff * 4);
31551 +- if (!err) {
31552 +- eaten -= skb->len;
31553 +- TCP_SKB_CB(skb)->end_seq -= eaten;
31554 +- }
31555 +- return err;
31556 ++ return sk_filter_trim_cap(sk, skb, th->doff * 4);
31557 + }
31558 + EXPORT_SYMBOL(tcp_filter);
31559 +
31560 +@@ -2585,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
31561 + {
31562 + int cpu;
31563 +
31564 +- module_put(net->ipv4.tcp_congestion_control->owner);
31565 ++ if (net->ipv4.tcp_congestion_control)
31566 ++ module_put(net->ipv4.tcp_congestion_control->owner);
31567 +
31568 + for_each_possible_cpu(cpu)
31569 + inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
31570 +diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
31571 +index 867474abe269..ec4e2ed95f36 100644
31572 +--- a/net/ipv6/fou6.c
31573 ++++ b/net/ipv6/fou6.c
31574 +@@ -94,7 +94,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
31575 + int ret;
31576 +
31577 + len = sizeof(struct udphdr) + sizeof(struct guehdr);
31578 +- if (!pskb_may_pull(skb, len))
31579 ++ if (!pskb_may_pull(skb, transport_offset + len))
31580 + return -EINVAL;
31581 +
31582 + guehdr = (struct guehdr *)&udp_hdr(skb)[1];
31583 +@@ -129,7 +129,7 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
31584 +
31585 + optlen = guehdr->hlen << 2;
31586 +
31587 +- if (!pskb_may_pull(skb, len + optlen))
31588 ++ if (!pskb_may_pull(skb, transport_offset + len + optlen))
31589 + return -EINVAL;
31590 +
31591 + guehdr = (struct guehdr *)&udp_hdr(skb)[1];
31592 +diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
31593 +index 17c455ff69ff..7858fa9ea103 100644
31594 +--- a/net/ipv6/ila/ila_xlat.c
31595 ++++ b/net/ipv6/ila/ila_xlat.c
31596 +@@ -420,6 +420,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
31597 +
31598 + done:
31599 + rhashtable_walk_stop(&iter);
31600 ++ rhashtable_walk_exit(&iter);
31601 + return ret;
31602 + }
31603 +
31604 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
31605 +index 26f25b6e2833..438f1a5fd19a 100644
31606 +--- a/net/ipv6/ip6_gre.c
31607 ++++ b/net/ipv6/ip6_gre.c
31608 +@@ -524,11 +524,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
31609 + return PACKET_REJECT;
31610 + }
31611 +
31612 +-static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
31613 +- struct tnl_ptk_info *tpi)
31614 ++static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
31615 ++ int gre_hdr_len)
31616 + {
31617 + struct erspan_base_hdr *ershdr;
31618 +- struct erspan_metadata *pkt_md;
31619 + const struct ipv6hdr *ipv6h;
31620 + struct erspan_md2 *md2;
31621 + struct ip6_tnl *tunnel;
31622 +@@ -547,18 +546,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
31623 + if (unlikely(!pskb_may_pull(skb, len)))
31624 + return PACKET_REJECT;
31625 +
31626 +- ershdr = (struct erspan_base_hdr *)skb->data;
31627 +- pkt_md = (struct erspan_metadata *)(ershdr + 1);
31628 +-
31629 + if (__iptunnel_pull_header(skb, len,
31630 + htons(ETH_P_TEB),
31631 + false, false) < 0)
31632 + return PACKET_REJECT;
31633 +
31634 + if (tunnel->parms.collect_md) {
31635 ++ struct erspan_metadata *pkt_md, *md;
31636 + struct metadata_dst *tun_dst;
31637 + struct ip_tunnel_info *info;
31638 +- struct erspan_metadata *md;
31639 ++ unsigned char *gh;
31640 + __be64 tun_id;
31641 + __be16 flags;
31642 +
31643 +@@ -571,6 +568,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
31644 + if (!tun_dst)
31645 + return PACKET_REJECT;
31646 +
31647 ++ /* skb can be uncloned in __iptunnel_pull_header, so
31648 ++ * old pkt_md is no longer valid and we need to reset
31649 ++ * it
31650 ++ */
31651 ++ gh = skb_network_header(skb) +
31652 ++ skb_network_header_len(skb);
31653 ++ pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
31654 ++ sizeof(*ershdr));
31655 + info = &tun_dst->u.tun_info;
31656 + md = ip_tunnel_info_opts(info);
31657 + md->version = ver;
31658 +@@ -607,7 +612,7 @@ static int gre_rcv(struct sk_buff *skb)
31659 +
31660 + if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
31661 + tpi.proto == htons(ETH_P_ERSPAN2))) {
31662 +- if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
31663 ++ if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
31664 + return 0;
31665 + goto out;
31666 + }
31667 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
31668 +index 5f9fa0302b5a..e71227390bec 100644
31669 +--- a/net/ipv6/ip6_output.c
31670 ++++ b/net/ipv6/ip6_output.c
31671 +@@ -595,7 +595,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
31672 + inet6_sk(skb->sk) : NULL;
31673 + struct ipv6hdr *tmp_hdr;
31674 + struct frag_hdr *fh;
31675 +- unsigned int mtu, hlen, left, len;
31676 ++ unsigned int mtu, hlen, left, len, nexthdr_offset;
31677 + int hroom, troom;
31678 + __be32 frag_id;
31679 + int ptr, offset = 0, err = 0;
31680 +@@ -606,6 +606,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
31681 + goto fail;
31682 + hlen = err;
31683 + nexthdr = *prevhdr;
31684 ++ nexthdr_offset = prevhdr - skb_network_header(skb);
31685 +
31686 + mtu = ip6_skb_dst_mtu(skb);
31687 +
31688 +@@ -640,6 +641,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
31689 + (err = skb_checksum_help(skb)))
31690 + goto fail;
31691 +
31692 ++ prevhdr = skb_network_header(skb) + nexthdr_offset;
31693 + hroom = LL_RESERVED_SPACE(rt->dst.dev);
31694 + if (skb_has_frag_list(skb)) {
31695 + unsigned int first_len = skb_pagelen(skb);
31696 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
31697 +index 0c6403cf8b52..ade1390c6348 100644
31698 +--- a/net/ipv6/ip6_tunnel.c
31699 ++++ b/net/ipv6/ip6_tunnel.c
31700 +@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
31701 + rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
31702 + eiph->daddr, eiph->saddr, 0, 0,
31703 + IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
31704 +- if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
31705 ++ if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
31706 + if (!IS_ERR(rt))
31707 + ip_rt_put(rt);
31708 + goto out;
31709 +@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
31710 + } else {
31711 + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
31712 + skb2->dev) ||
31713 +- skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
31714 ++ skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
31715 + goto out;
31716 + }
31717 +
31718 +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
31719 +index cc01aa3f2b5e..af91a1a402f1 100644
31720 +--- a/net/ipv6/ip6mr.c
31721 ++++ b/net/ipv6/ip6mr.c
31722 +@@ -1964,10 +1964,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
31723 +
31724 + static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
31725 + {
31726 +- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
31727 +- IPSTATS_MIB_OUTFORWDATAGRAMS);
31728 +- __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
31729 +- IPSTATS_MIB_OUTOCTETS, skb->len);
31730 ++ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
31731 ++ IPSTATS_MIB_OUTFORWDATAGRAMS);
31732 ++ IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
31733 ++ IPSTATS_MIB_OUTOCTETS, skb->len);
31734 + return dst_output(net, sk, skb);
31735 + }
31736 +
31737 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
31738 +index 8dad1d690b78..0086acc16f3c 100644
31739 +--- a/net/ipv6/route.c
31740 ++++ b/net/ipv6/route.c
31741 +@@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
31742 + struct rt6_info *nrt;
31743 +
31744 + if (!fib6_info_hold_safe(rt))
31745 +- return NULL;
31746 ++ goto fallback;
31747 +
31748 + nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
31749 +- if (nrt)
31750 +- ip6_rt_copy_init(nrt, rt);
31751 +- else
31752 ++ if (!nrt) {
31753 + fib6_info_release(rt);
31754 ++ goto fallback;
31755 ++ }
31756 +
31757 ++ ip6_rt_copy_init(nrt, rt);
31758 ++ return nrt;
31759 ++
31760 ++fallback:
31761 ++ nrt = dev_net(dev)->ipv6.ip6_null_entry;
31762 ++ dst_hold(&nrt->dst);
31763 + return nrt;
31764 + }
31765 +
31766 +@@ -1096,10 +1102,6 @@ restart:
31767 + dst_hold(&rt->dst);
31768 + } else {
31769 + rt = ip6_create_rt_rcu(f6i);
31770 +- if (!rt) {
31771 +- rt = net->ipv6.ip6_null_entry;
31772 +- dst_hold(&rt->dst);
31773 +- }
31774 + }
31775 +
31776 + rcu_read_unlock();
31777 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
31778 +index 09e440e8dfae..b2109b74857d 100644
31779 +--- a/net/ipv6/sit.c
31780 ++++ b/net/ipv6/sit.c
31781 +@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
31782 + !net_eq(tunnel->net, dev_net(tunnel->dev))))
31783 + goto out;
31784 +
31785 ++ /* skb can be uncloned in iptunnel_pull_header, so
31786 ++ * old iph is no longer valid
31787 ++ */
31788 ++ iph = (const struct iphdr *)skb_mac_header(skb);
31789 + err = IP_ECN_decapsulate(iph, skb);
31790 + if (unlikely(err)) {
31791 + if (log_ecn_error)
31792 +@@ -778,8 +782,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
31793 + pbw0 = tunnel->ip6rd.prefixlen >> 5;
31794 + pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
31795 +
31796 +- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
31797 +- tunnel->ip6rd.relay_prefixlen;
31798 ++ d = tunnel->ip6rd.relay_prefixlen < 32 ?
31799 ++ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
31800 ++ tunnel->ip6rd.relay_prefixlen : 0;
31801 +
31802 + pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
31803 + if (pbi1 > 0)
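
In check_6rd() above, relay_prefixlen can legitimately be 32, and shifting a 32-bit value right by 32 is undefined behaviour in C (on x86 the shift count is masked, so the value comes back unchanged instead of becoming 0). The hunk short-circuits that case. A standalone demonstration of the guard:

  #include <stdio.h>
  #include <stdint.h>

  /* Guarded version, mirroring the sit.c fix: a shift count equal to the
   * type width is undefined, so handle prefixlen == 32 explicitly. */
  static uint32_t relay_bits(uint32_t v, unsigned int prefixlen)
  {
          return prefixlen < 32 ? v >> prefixlen : 0;
  }

  int main(void)
  {
          uint32_t v = 0xdeadbeef;

          /* 'v >> 32' here would be undefined behaviour: on x86 you would
           * typically get 0xdeadbeef back rather than the intended 0. */
          printf("guarded: 0x%08x\n", relay_bits(v, 32)); /* 0x00000000 */
          printf("guarded: 0x%08x\n", relay_bits(v, 16)); /* 0x0000dead */
          return 0;
  }
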
31804 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
31805 +index b81eb7cb815e..8505d96483d5 100644
31806 +--- a/net/ipv6/tcp_ipv6.c
31807 ++++ b/net/ipv6/tcp_ipv6.c
31808 +@@ -1112,11 +1112,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
31809 + newnp->ipv6_fl_list = NULL;
31810 + newnp->pktoptions = NULL;
31811 + newnp->opt = NULL;
31812 +- newnp->mcast_oif = tcp_v6_iif(skb);
31813 +- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
31814 +- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
31815 ++ newnp->mcast_oif = inet_iif(skb);
31816 ++ newnp->mcast_hops = ip_hdr(skb)->ttl;
31817 ++ newnp->rcv_flowinfo = 0;
31818 + if (np->repflow)
31819 +- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
31820 ++ newnp->flow_label = 0;
31821 +
31822 + /*
31823 + * No need to charge this sock to the relevant IPv6 refcnt debug socks count
31824 +diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
31825 +index 571d824e4e24..b919db02c7f9 100644
31826 +--- a/net/kcm/kcmsock.c
31827 ++++ b/net/kcm/kcmsock.c
31828 +@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
31829 + if (err)
31830 + goto fail;
31831 +
31832 +- err = sock_register(&kcm_family_ops);
31833 +- if (err)
31834 +- goto sock_register_fail;
31835 +-
31836 + err = register_pernet_device(&kcm_net_ops);
31837 + if (err)
31838 + goto net_ops_fail;
31839 +
31840 ++ err = sock_register(&kcm_family_ops);
31841 ++ if (err)
31842 ++ goto sock_register_fail;
31843 ++
31844 + err = kcm_proc_init();
31845 + if (err)
31846 + goto proc_init_fail;
31847 +@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
31848 + return 0;
31849 +
31850 + proc_init_fail:
31851 +- unregister_pernet_device(&kcm_net_ops);
31852 +-
31853 +-net_ops_fail:
31854 + sock_unregister(PF_KCM);
31855 +
31856 + sock_register_fail:
31857 ++ unregister_pernet_device(&kcm_net_ops);
31858 ++
31859 ++net_ops_fail:
31860 + proto_unregister(&kcm_proto);
31861 +
31862 + fail:
31863 +@@ -2090,8 +2090,8 @@ fail:
31864 + static void __exit kcm_exit(void)
31865 + {
31866 + kcm_proc_exit();
31867 +- unregister_pernet_device(&kcm_net_ops);
31868 + sock_unregister(PF_KCM);
31869 ++ unregister_pernet_device(&kcm_net_ops);
31870 + proto_unregister(&kcm_proto);
31871 + destroy_workqueue(kcm_wq);
31872 +
31873 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
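
The kcm hunks reorder init so that sock_register() (which makes PF_KCM reachable from userspace) happens only after the pernet state it depends on exists, with teardown strictly in reverse. The canonical goto-unwind shape (hypothetical subsystem names):

  #include <stdio.h>

  static int init_pernet(void)  { puts("pernet up");   return 0; }
  static int init_socket(void)  { puts("socket up");   return -1; } /* fail */
  static void exit_pernet(void) { puts("pernet down"); }

  /* Register the externally visible interface last, unwind in reverse:
   * nothing can reach the socket family before its dependencies exist. */
  static int kcm_like_init(void)
  {
          int err = init_pernet();

          if (err)
                  goto fail;
          err = init_socket();
          if (err)
                  goto sock_fail;
          return 0;

  sock_fail:
          exit_pernet();
  fail:
          return err;
  }

  int main(void) { return kcm_like_init() ? 1 : 0; }
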
31874 +index 0ae6899edac0..37a69df17cab 100644
31875 +--- a/net/l2tp/l2tp_ip6.c
31876 ++++ b/net/l2tp/l2tp_ip6.c
31877 +@@ -674,9 +674,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
31878 + if (flags & MSG_OOB)
31879 + goto out;
31880 +
31881 +- if (addr_len)
31882 +- *addr_len = sizeof(*lsa);
31883 +-
31884 + if (flags & MSG_ERRQUEUE)
31885 + return ipv6_recv_error(sk, msg, len, addr_len);
31886 +
31887 +@@ -706,6 +703,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
31888 + lsa->l2tp_conn_id = 0;
31889 + if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
31890 + lsa->l2tp_scope_id = inet6_iif(skb);
31891 ++ *addr_len = sizeof(*lsa);
31892 + }
31893 +
31894 + if (np->rxopt.all)
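
The l2tp_ip6_recvmsg() hunk moves the *addr_len assignment inside the branch that actually fills the sockaddr, so the reported length is set together with the data rather than up front on paths that never populate it. A sketch of that out-parameter discipline (toy types, not the socket API):

  #include <stdio.h>
  #include <string.h>

  struct sockaddr_toy { char addr[8]; };

  /* Report a length only when the address was actually written, mirroring
   * the l2tp_ip6_recvmsg() fix. */
  static int recv_like(struct sockaddr_toy *name, int *addr_len, int have_name)
  {
          if (have_name && name) {
                  strcpy(name->addr, "peer");
                  *addr_len = sizeof(*name); /* set together with the data */
          }
          return 0;
  }

  int main(void)
  {
          struct sockaddr_toy sa;
          int len = 0;

          recv_like(NULL, &len, 0);       /* no name requested */
          printf("len without name: %d\n", len);  /* stays 0, not stale */

          recv_like(&sa, &len, 1);
          printf("len with name: %d (%s)\n", len, sa.addr);
          return 0;
  }
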
31895 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
31896 +index db4d46332e86..9dd4c2048a2b 100644
31897 +--- a/net/netfilter/nf_conntrack_core.c
31898 ++++ b/net/netfilter/nf_conntrack_core.c
31899 +@@ -901,10 +901,18 @@ __nf_conntrack_confirm(struct sk_buff *skb)
31900 + * REJECT will give spurious warnings here.
31901 + */
31902 +
31903 +- /* No external references means no one else could have
31904 +- * confirmed us.
31905 ++ /* Another skb with the same unconfirmed conntrack may
31906 ++ * win the race. This may happen for bridge(br_flood)
31907 ++ * or broadcast/multicast packets do skb_clone with
31908 ++ * unconfirmed conntrack.
31909 + */
31910 +- WARN_ON(nf_ct_is_confirmed(ct));
31911 ++ if (unlikely(nf_ct_is_confirmed(ct))) {
31912 ++ WARN_ON_ONCE(1);
31913 ++ nf_conntrack_double_unlock(hash, reply_hash);
31914 ++ local_bh_enable();
31915 ++ return NF_DROP;
31916 ++ }
31917 ++
31918 + pr_debug("Confirming conntrack %p\n", ct);
31919 + /* We have to check the DYING flag after unlink to prevent
31920 + * a race against nf_ct_get_next_corpse() possibly called from
31921 +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
31922 +index 4dcbd51a8e97..74fb3fa34db4 100644
31923 +--- a/net/netfilter/nf_conntrack_proto_tcp.c
31924 ++++ b/net/netfilter/nf_conntrack_proto_tcp.c
31925 +@@ -828,6 +828,12 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
31926 + return true;
31927 + }
31928 +
31929 ++static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
31930 ++{
31931 ++ return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
31932 ++ test_bit(IPS_ASSURED_BIT, &ct->status);
31933 ++}
31934 ++
31935 + /* Returns verdict for packet, or -1 for invalid. */
31936 + static int tcp_packet(struct nf_conn *ct,
31937 + struct sk_buff *skb,
31938 +@@ -1030,16 +1036,38 @@ static int tcp_packet(struct nf_conn *ct,
31939 + new_state = TCP_CONNTRACK_ESTABLISHED;
31940 + break;
31941 + case TCP_CONNTRACK_CLOSE:
31942 +- if (index == TCP_RST_SET
31943 +- && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
31944 +- && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
31945 +- /* Invalid RST */
31946 +- spin_unlock_bh(&ct->lock);
31947 +- nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
31948 +- return -NF_ACCEPT;
31949 ++ if (index != TCP_RST_SET)
31950 ++ break;
31951 ++
31952 ++ if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
31953 ++ u32 seq = ntohl(th->seq);
31954 ++
31955 ++ if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
31956 ++ /* Invalid RST */
31957 ++ spin_unlock_bh(&ct->lock);
31958 ++ nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
31959 ++ return -NF_ACCEPT;
31960 ++ }
31961 ++
31962 ++ if (!nf_conntrack_tcp_established(ct) ||
31963 ++ seq == ct->proto.tcp.seen[!dir].td_maxack)
31964 ++ break;
31965 ++
31966 ++ /* Check if rst is part of train, such as
31967 ++ * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
31968 ++ * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
31969 ++ */
31970 ++ if (ct->proto.tcp.last_index == TCP_ACK_SET &&
31971 ++ ct->proto.tcp.last_dir == dir &&
31972 ++ seq == ct->proto.tcp.last_end)
31973 ++ break;
31974 ++
31975 ++ /* ... RST sequence number doesn't match exactly, keep
31976 ++ * established state to allow a possible challenge ACK.
31977 ++ */
31978 ++ new_state = old_state;
31979 + }
31980 +- if (index == TCP_RST_SET
31981 +- && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
31982 ++ if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
31983 + && ct->proto.tcp.last_index == TCP_SYN_SET)
31984 + || (!test_bit(IPS_ASSURED_BIT, &ct->status)
31985 + && ct->proto.tcp.last_index == TCP_ACK_SET))
31986 +@@ -1055,7 +1083,7 @@ static int tcp_packet(struct nf_conn *ct,
31987 + * segments we ignored. */
31988 + goto in_window;
31989 + }
31990 +- /* Just fall through */
31991 ++ break;
31992 + default:
31993 + /* Keep compilers happy. */
31994 + break;
31995 +@@ -1090,6 +1118,8 @@ static int tcp_packet(struct nf_conn *ct,
31996 + if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
31997 + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
31998 + timeout = timeouts[TCP_CONNTRACK_RETRANS];
31999 ++ else if (unlikely(index == TCP_RST_SET))
32000 ++ timeout = timeouts[TCP_CONNTRACK_CLOSE];
32001 + else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
32002 + IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
32003 + timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
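
The RST handling above leans on before(), the kernel's wrap-safe 32-bit sequence comparison, so "seq below td_maxack" stays correct when sequence numbers wrap around zero. Its standard form, demonstrated standalone:

  #include <stdio.h>
  #include <stdint.h>

  /* Wrap-safe "a is before b" for 32-bit TCP sequence numbers: the signed
   * interpretation of the difference handles wraparound correctly. */
  static int before(uint32_t a, uint32_t b)
  {
          return (int32_t)(a - b) < 0;
  }

  int main(void)
  {
          /* Plain '<' gets the wrapped case backwards. */
          uint32_t old_seq = 0xfffffff0u;
          uint32_t new_seq = 0x00000010u;  /* just after the wrap */

          printf("naive  <  : %d\n", new_seq < old_seq);      /* 1, wrong */
          printf("before(): %d\n", before(new_seq, old_seq)); /* 0, correct */
          printf("before(): %d\n", before(old_seq, new_seq)); /* 1, correct */
          return 0;
  }
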
32004 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
32005 +index 4893f248dfdc..acb124ce92ec 100644
32006 +--- a/net/netfilter/nf_tables_api.c
32007 ++++ b/net/netfilter/nf_tables_api.c
32008 +@@ -127,7 +127,7 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
32009 + list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
32010 + if (trans->msg_type == NFT_MSG_NEWSET &&
32011 + nft_trans_set(trans) == set) {
32012 +- nft_trans_set_bound(trans) = true;
32013 ++ set->bound = true;
32014 + break;
32015 + }
32016 + }
32017 +@@ -2119,9 +2119,11 @@ err1:
32018 + static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
32019 + struct nft_expr *expr)
32020 + {
32021 ++ const struct nft_expr_type *type = expr->ops->type;
32022 ++
32023 + if (expr->ops->destroy)
32024 + expr->ops->destroy(ctx, expr);
32025 +- module_put(expr->ops->type->owner);
32026 ++ module_put(type->owner);
32027 + }
32028 +
32029 + struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
32030 +@@ -2129,6 +2131,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
32031 + {
32032 + struct nft_expr_info info;
32033 + struct nft_expr *expr;
32034 ++ struct module *owner;
32035 + int err;
32036 +
32037 + err = nf_tables_expr_parse(ctx, nla, &info);
32038 +@@ -2148,7 +2151,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
32039 + err3:
32040 + kfree(expr);
32041 + err2:
32042 +- module_put(info.ops->type->owner);
32043 ++ owner = info.ops->type->owner;
32044 ++ if (info.ops->type->release_ops)
32045 ++ info.ops->type->release_ops(info.ops);
32046 ++
32047 ++ module_put(owner);
32048 + err1:
32049 + return ERR_PTR(err);
32050 + }
32051 +@@ -2746,8 +2753,11 @@ err2:
32052 + nf_tables_rule_release(&ctx, rule);
32053 + err1:
32054 + for (i = 0; i < n; i++) {
32055 +- if (info[i].ops != NULL)
32056 ++ if (info[i].ops) {
32057 + module_put(info[i].ops->type->owner);
32058 ++ if (info[i].ops->type->release_ops)
32059 ++ info[i].ops->type->release_ops(info[i].ops);
32060 ++ }
32061 + }
32062 + kvfree(info);
32063 + return err;
32064 +@@ -6617,8 +6627,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
32065 + nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
32066 + break;
32067 + case NFT_MSG_NEWSET:
32068 +- if (!nft_trans_set_bound(trans))
32069 +- nft_set_destroy(nft_trans_set(trans));
32070 ++ nft_set_destroy(nft_trans_set(trans));
32071 + break;
32072 + case NFT_MSG_NEWSETELEM:
32073 + nft_set_elem_destroy(nft_trans_elem_set(trans),
32074 +@@ -6691,8 +6700,11 @@ static int __nf_tables_abort(struct net *net)
32075 + break;
32076 + case NFT_MSG_NEWSET:
32077 + trans->ctx.table->use--;
32078 +- if (!nft_trans_set_bound(trans))
32079 +- list_del_rcu(&nft_trans_set(trans)->list);
32080 ++ if (nft_trans_set(trans)->bound) {
32081 ++ nft_trans_destroy(trans);
32082 ++ break;
32083 ++ }
32084 ++ list_del_rcu(&nft_trans_set(trans)->list);
32085 + break;
32086 + case NFT_MSG_DELSET:
32087 + trans->ctx.table->use++;
32088 +@@ -6700,8 +6712,11 @@ static int __nf_tables_abort(struct net *net)
32089 + nft_trans_destroy(trans);
32090 + break;
32091 + case NFT_MSG_NEWSETELEM:
32092 ++ if (nft_trans_elem_set(trans)->bound) {
32093 ++ nft_trans_destroy(trans);
32094 ++ break;
32095 ++ }
32096 + te = (struct nft_trans_elem *)trans->data;
32097 +-
32098 + te->set->ops->remove(net, te->set, &te->elem);
32099 + atomic_dec(&te->set->nelems);
32100 + break;
32101 +diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
32102 +index a50500232b0a..7e8dae82ca52 100644
32103 +--- a/net/netfilter/nf_tables_core.c
32104 ++++ b/net/netfilter/nf_tables_core.c
32105 +@@ -98,21 +98,23 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
32106 + const struct nft_pktinfo *pkt)
32107 + {
32108 + struct nft_base_chain *base_chain;
32109 ++ struct nft_stats __percpu *pstats;
32110 + struct nft_stats *stats;
32111 +
32112 + base_chain = nft_base_chain(chain);
32113 +- if (!rcu_access_pointer(base_chain->stats))
32114 +- return;
32115 +
32116 +- local_bh_disable();
32117 +- stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
32118 +- if (stats) {
32119 ++ rcu_read_lock();
32120 ++ pstats = READ_ONCE(base_chain->stats);
32121 ++ if (pstats) {
32122 ++ local_bh_disable();
32123 ++ stats = this_cpu_ptr(pstats);
32124 + u64_stats_update_begin(&stats->syncp);
32125 + stats->pkts++;
32126 + stats->bytes += pkt->skb->len;
32127 + u64_stats_update_end(&stats->syncp);
32128 ++ local_bh_enable();
32129 + }
32130 +- local_bh_enable();
32131 ++ rcu_read_unlock();
32132 + }
32133 +
32134 + struct nft_jumpstack {
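The stats hunk above replaces the rcu_dereference()-under-BH pattern with a plain rcu_read_lock() around a single READ_ONCE() of the stats pointer. A minimal sketch of the resulting lockless per-CPU counter update, using illustrative demo_* names rather than the exact nf_tables structures:

	#include <linux/percpu.h>
	#include <linux/rcupdate.h>
	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64 pkts;
		u64 bytes;
		struct u64_stats_sync syncp;	/* lets 32-bit readers see consistent 64-bit values */
	};

	struct demo_chain {
		struct demo_stats __percpu *stats;	/* may be swapped by the control plane */
	};

	static void demo_count(struct demo_chain *chain, unsigned int len)
	{
		struct demo_stats __percpu *pstats;
		struct demo_stats *stats;

		rcu_read_lock();
		pstats = READ_ONCE(chain->stats);	/* sampled exactly once */
		if (pstats) {
			local_bh_disable();		/* keep this_cpu_ptr() on a stable CPU */
			stats = this_cpu_ptr(pstats);
			u64_stats_update_begin(&stats->syncp);
			stats->pkts++;
			stats->bytes += len;
			u64_stats_update_end(&stats->syncp);
			local_bh_enable();
		}
		rcu_read_unlock();
	}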
32135 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
32136 +index 0a4bad55a8aa..469f9da5073b 100644
32137 +--- a/net/netfilter/nft_compat.c
32138 ++++ b/net/netfilter/nft_compat.c
32139 +@@ -22,23 +22,6 @@
32140 + #include <linux/netfilter_bridge/ebtables.h>
32141 + #include <linux/netfilter_arp/arp_tables.h>
32142 + #include <net/netfilter/nf_tables.h>
32143 +-#include <net/netns/generic.h>
32144 +-
32145 +-struct nft_xt {
32146 +- struct list_head head;
32147 +- struct nft_expr_ops ops;
32148 +- refcount_t refcnt;
32149 +-
32150 +- /* used only when transaction mutex is locked */
32151 +- unsigned int listcnt;
32152 +-
32153 +- /* Unlike other expressions, ops doesn't have static storage duration.
32154 +- * nft core assumes they do. We use kfree_rcu so that nft core
32155 +- * can check expr->ops->size even after nft_compat->destroy() frees
32156 +- * the nft_xt struct that holds the ops structure.
32157 +- */
32158 +- struct rcu_head rcu_head;
32159 +-};
32160 +
32161 + /* Used for matches where *info is larger than X byte */
32162 + #define NFT_MATCH_LARGE_THRESH 192
32163 +@@ -47,46 +30,6 @@ struct nft_xt_match_priv {
32164 + void *info;
32165 + };
32166 +
32167 +-struct nft_compat_net {
32168 +- struct list_head nft_target_list;
32169 +- struct list_head nft_match_list;
32170 +-};
32171 +-
32172 +-static unsigned int nft_compat_net_id __read_mostly;
32173 +-static struct nft_expr_type nft_match_type;
32174 +-static struct nft_expr_type nft_target_type;
32175 +-
32176 +-static struct nft_compat_net *nft_compat_pernet(struct net *net)
32177 +-{
32178 +- return net_generic(net, nft_compat_net_id);
32179 +-}
32180 +-
32181 +-static void nft_xt_get(struct nft_xt *xt)
32182 +-{
32183 +- /* refcount_inc() warns on 0 -> 1 transition, but we can't
32184 +- * init the reference count to 1 in .select_ops -- we can't
32185 +- * undo such an increase when another expression inside the same
32186 +- * rule fails afterwards.
32187 +- */
32188 +- if (xt->listcnt == 0)
32189 +- refcount_set(&xt->refcnt, 1);
32190 +- else
32191 +- refcount_inc(&xt->refcnt);
32192 +-
32193 +- xt->listcnt++;
32194 +-}
32195 +-
32196 +-static bool nft_xt_put(struct nft_xt *xt)
32197 +-{
32198 +- if (refcount_dec_and_test(&xt->refcnt)) {
32199 +- WARN_ON_ONCE(!list_empty(&xt->head));
32200 +- kfree_rcu(xt, rcu_head);
32201 +- return true;
32202 +- }
32203 +-
32204 +- return false;
32205 +-}
32206 +-
32207 + static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
32208 + const char *tablename)
32209 + {
32210 +@@ -281,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
32211 + struct xt_target *target = expr->ops->data;
32212 + struct xt_tgchk_param par;
32213 + size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
32214 +- struct nft_xt *nft_xt;
32215 + u16 proto = 0;
32216 + bool inv = false;
32217 + union nft_entry e = {};
32218 +@@ -305,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
32219 + if (!target->target)
32220 + return -EINVAL;
32221 +
32222 +- nft_xt = container_of(expr->ops, struct nft_xt, ops);
32223 +- nft_xt_get(nft_xt);
32224 + return 0;
32225 + }
32226 +
32227 +@@ -325,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
32228 + if (par.target->destroy != NULL)
32229 + par.target->destroy(&par);
32230 +
32231 +- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
32232 +- module_put(me);
32233 ++ module_put(me);
32234 ++ kfree(expr->ops);
32235 + }
32236 +
32237 + static int nft_extension_dump_info(struct sk_buff *skb, int attr,
32238 +@@ -499,7 +439,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
32239 + struct xt_match *match = expr->ops->data;
32240 + struct xt_mtchk_param par;
32241 + size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
32242 +- struct nft_xt *nft_xt;
32243 + u16 proto = 0;
32244 + bool inv = false;
32245 + union nft_entry e = {};
32246 +@@ -515,13 +454,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
32247 +
32248 + nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
32249 +
32250 +- ret = xt_check_match(&par, size, proto, inv);
32251 +- if (ret < 0)
32252 +- return ret;
32253 +-
32254 +- nft_xt = container_of(expr->ops, struct nft_xt, ops);
32255 +- nft_xt_get(nft_xt);
32256 +- return 0;
32257 ++ return xt_check_match(&par, size, proto, inv);
32258 + }
32259 +
32260 + static int
32261 +@@ -564,8 +497,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
32262 + if (par.match->destroy != NULL)
32263 + par.match->destroy(&par);
32264 +
32265 +- if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
32266 +- module_put(me);
32267 ++ module_put(me);
32268 ++ kfree(expr->ops);
32269 + }
32270 +
32271 + static void
32272 +@@ -574,18 +507,6 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
32273 + __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
32274 + }
32275 +
32276 +-static void nft_compat_deactivate(const struct nft_ctx *ctx,
32277 +- const struct nft_expr *expr,
32278 +- enum nft_trans_phase phase)
32279 +-{
32280 +- struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
32281 +-
32282 +- if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
32283 +- if (--xt->listcnt == 0)
32284 +- list_del_init(&xt->head);
32285 +- }
32286 +-}
32287 +-
32288 + static void
32289 + nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
32290 + {
32291 +@@ -780,19 +701,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
32292 + .cb = nfnl_nft_compat_cb,
32293 + };
32294 +
32295 +-static bool nft_match_cmp(const struct xt_match *match,
32296 +- const char *name, u32 rev, u32 family)
32297 +-{
32298 +- return strcmp(match->name, name) == 0 && match->revision == rev &&
32299 +- (match->family == NFPROTO_UNSPEC || match->family == family);
32300 +-}
32301 ++static struct nft_expr_type nft_match_type;
32302 +
32303 + static const struct nft_expr_ops *
32304 + nft_match_select_ops(const struct nft_ctx *ctx,
32305 + const struct nlattr * const tb[])
32306 + {
32307 +- struct nft_compat_net *cn;
32308 +- struct nft_xt *nft_match;
32309 ++ struct nft_expr_ops *ops;
32310 + struct xt_match *match;
32311 + unsigned int matchsize;
32312 + char *mt_name;
32313 +@@ -808,16 +723,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
32314 + rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
32315 + family = ctx->family;
32316 +
32317 +- cn = nft_compat_pernet(ctx->net);
32318 +-
32319 +- /* Re-use the existing match if it's already loaded. */
32320 +- list_for_each_entry(nft_match, &cn->nft_match_list, head) {
32321 +- struct xt_match *match = nft_match->ops.data;
32322 +-
32323 +- if (nft_match_cmp(match, mt_name, rev, family))
32324 +- return &nft_match->ops;
32325 +- }
32326 +-
32327 + match = xt_request_find_match(family, mt_name, rev);
32328 + if (IS_ERR(match))
32329 + return ERR_PTR(-ENOENT);
32330 +@@ -827,65 +732,62 @@ nft_match_select_ops(const struct nft_ctx *ctx,
32331 + goto err;
32332 + }
32333 +
32334 +- /* This is the first time we use this match, allocate operations */
32335 +- nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
32336 +- if (nft_match == NULL) {
32337 ++ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
32338 ++ if (!ops) {
32339 + err = -ENOMEM;
32340 + goto err;
32341 + }
32342 +
32343 +- refcount_set(&nft_match->refcnt, 0);
32344 +- nft_match->ops.type = &nft_match_type;
32345 +- nft_match->ops.eval = nft_match_eval;
32346 +- nft_match->ops.init = nft_match_init;
32347 +- nft_match->ops.destroy = nft_match_destroy;
32348 +- nft_match->ops.deactivate = nft_compat_deactivate;
32349 +- nft_match->ops.dump = nft_match_dump;
32350 +- nft_match->ops.validate = nft_match_validate;
32351 +- nft_match->ops.data = match;
32352 ++ ops->type = &nft_match_type;
32353 ++ ops->eval = nft_match_eval;
32354 ++ ops->init = nft_match_init;
32355 ++ ops->destroy = nft_match_destroy;
32356 ++ ops->dump = nft_match_dump;
32357 ++ ops->validate = nft_match_validate;
32358 ++ ops->data = match;
32359 +
32360 + matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
32361 + if (matchsize > NFT_MATCH_LARGE_THRESH) {
32362 + matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
32363 +
32364 +- nft_match->ops.eval = nft_match_large_eval;
32365 +- nft_match->ops.init = nft_match_large_init;
32366 +- nft_match->ops.destroy = nft_match_large_destroy;
32367 +- nft_match->ops.dump = nft_match_large_dump;
32368 ++ ops->eval = nft_match_large_eval;
32369 ++ ops->init = nft_match_large_init;
32370 ++ ops->destroy = nft_match_large_destroy;
32371 ++ ops->dump = nft_match_large_dump;
32372 + }
32373 +
32374 +- nft_match->ops.size = matchsize;
32375 ++ ops->size = matchsize;
32376 +
32377 +- nft_match->listcnt = 0;
32378 +- list_add(&nft_match->head, &cn->nft_match_list);
32379 +-
32380 +- return &nft_match->ops;
32381 ++ return ops;
32382 + err:
32383 + module_put(match->me);
32384 + return ERR_PTR(err);
32385 + }
32386 +
32387 ++static void nft_match_release_ops(const struct nft_expr_ops *ops)
32388 ++{
32389 ++ struct xt_match *match = ops->data;
32390 ++
32391 ++ module_put(match->me);
32392 ++ kfree(ops);
32393 ++}
32394 ++
32395 + static struct nft_expr_type nft_match_type __read_mostly = {
32396 + .name = "match",
32397 + .select_ops = nft_match_select_ops,
32398 ++ .release_ops = nft_match_release_ops,
32399 + .policy = nft_match_policy,
32400 + .maxattr = NFTA_MATCH_MAX,
32401 + .owner = THIS_MODULE,
32402 + };
32403 +
32404 +-static bool nft_target_cmp(const struct xt_target *tg,
32405 +- const char *name, u32 rev, u32 family)
32406 +-{
32407 +- return strcmp(tg->name, name) == 0 && tg->revision == rev &&
32408 +- (tg->family == NFPROTO_UNSPEC || tg->family == family);
32409 +-}
32410 ++static struct nft_expr_type nft_target_type;
32411 +
32412 + static const struct nft_expr_ops *
32413 + nft_target_select_ops(const struct nft_ctx *ctx,
32414 + const struct nlattr * const tb[])
32415 + {
32416 +- struct nft_compat_net *cn;
32417 +- struct nft_xt *nft_target;
32418 ++ struct nft_expr_ops *ops;
32419 + struct xt_target *target;
32420 + char *tg_name;
32421 + u32 rev, family;
32422 +@@ -905,18 +807,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
32423 + strcmp(tg_name, "standard") == 0)
32424 + return ERR_PTR(-EINVAL);
32425 +
32426 +- cn = nft_compat_pernet(ctx->net);
32427 +- /* Re-use the existing target if it's already loaded. */
32428 +- list_for_each_entry(nft_target, &cn->nft_target_list, head) {
32429 +- struct xt_target *target = nft_target->ops.data;
32430 +-
32431 +- if (!target->target)
32432 +- continue;
32433 +-
32434 +- if (nft_target_cmp(target, tg_name, rev, family))
32435 +- return &nft_target->ops;
32436 +- }
32437 +-
32438 + target = xt_request_find_target(family, tg_name, rev);
32439 + if (IS_ERR(target))
32440 + return ERR_PTR(-ENOENT);
32441 +@@ -931,113 +821,55 @@ nft_target_select_ops(const struct nft_ctx *ctx,
32442 + goto err;
32443 + }
32444 +
32445 +- /* This is the first time we use this target, allocate operations */
32446 +- nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
32447 +- if (nft_target == NULL) {
32448 ++ ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
32449 ++ if (!ops) {
32450 + err = -ENOMEM;
32451 + goto err;
32452 + }
32453 +
32454 +- refcount_set(&nft_target->refcnt, 0);
32455 +- nft_target->ops.type = &nft_target_type;
32456 +- nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
32457 +- nft_target->ops.init = nft_target_init;
32458 +- nft_target->ops.destroy = nft_target_destroy;
32459 +- nft_target->ops.deactivate = nft_compat_deactivate;
32460 +- nft_target->ops.dump = nft_target_dump;
32461 +- nft_target->ops.validate = nft_target_validate;
32462 +- nft_target->ops.data = target;
32463 ++ ops->type = &nft_target_type;
32464 ++ ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
32465 ++ ops->init = nft_target_init;
32466 ++ ops->destroy = nft_target_destroy;
32467 ++ ops->dump = nft_target_dump;
32468 ++ ops->validate = nft_target_validate;
32469 ++ ops->data = target;
32470 +
32471 + if (family == NFPROTO_BRIDGE)
32472 +- nft_target->ops.eval = nft_target_eval_bridge;
32473 ++ ops->eval = nft_target_eval_bridge;
32474 + else
32475 +- nft_target->ops.eval = nft_target_eval_xt;
32476 +-
32477 +- nft_target->listcnt = 0;
32478 +- list_add(&nft_target->head, &cn->nft_target_list);
32479 ++ ops->eval = nft_target_eval_xt;
32480 +
32481 +- return &nft_target->ops;
32482 ++ return ops;
32483 + err:
32484 + module_put(target->me);
32485 + return ERR_PTR(err);
32486 + }
32487 +
32488 ++static void nft_target_release_ops(const struct nft_expr_ops *ops)
32489 ++{
32490 ++ struct xt_target *target = ops->data;
32491 ++
32492 ++ module_put(target->me);
32493 ++ kfree(ops);
32494 ++}
32495 ++
32496 + static struct nft_expr_type nft_target_type __read_mostly = {
32497 + .name = "target",
32498 + .select_ops = nft_target_select_ops,
32499 ++ .release_ops = nft_target_release_ops,
32500 + .policy = nft_target_policy,
32501 + .maxattr = NFTA_TARGET_MAX,
32502 + .owner = THIS_MODULE,
32503 + };
32504 +
32505 +-static int __net_init nft_compat_init_net(struct net *net)
32506 +-{
32507 +- struct nft_compat_net *cn = nft_compat_pernet(net);
32508 +-
32509 +- INIT_LIST_HEAD(&cn->nft_target_list);
32510 +- INIT_LIST_HEAD(&cn->nft_match_list);
32511 +-
32512 +- return 0;
32513 +-}
32514 +-
32515 +-static void __net_exit nft_compat_exit_net(struct net *net)
32516 +-{
32517 +- struct nft_compat_net *cn = nft_compat_pernet(net);
32518 +- struct nft_xt *xt, *next;
32519 +-
32520 +- if (list_empty(&cn->nft_match_list) &&
32521 +- list_empty(&cn->nft_target_list))
32522 +- return;
32523 +-
32524 +- /* If there was an error that caused nft_xt expr to not be initialized
32525 +- * fully and no one else requested the same expression later, the lists
32526 +- * contain 0-refcount entries that still hold module reference.
32527 +- *
32528 +- * Clean them here.
32529 +- */
32530 +- mutex_lock(&net->nft.commit_mutex);
32531 +- list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
32532 +- struct xt_target *target = xt->ops.data;
32533 +-
32534 +- list_del_init(&xt->head);
32535 +-
32536 +- if (refcount_read(&xt->refcnt))
32537 +- continue;
32538 +- module_put(target->me);
32539 +- kfree(xt);
32540 +- }
32541 +-
32542 +- list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
32543 +- struct xt_match *match = xt->ops.data;
32544 +-
32545 +- list_del_init(&xt->head);
32546 +-
32547 +- if (refcount_read(&xt->refcnt))
32548 +- continue;
32549 +- module_put(match->me);
32550 +- kfree(xt);
32551 +- }
32552 +- mutex_unlock(&net->nft.commit_mutex);
32553 +-}
32554 +-
32555 +-static struct pernet_operations nft_compat_net_ops = {
32556 +- .init = nft_compat_init_net,
32557 +- .exit = nft_compat_exit_net,
32558 +- .id = &nft_compat_net_id,
32559 +- .size = sizeof(struct nft_compat_net),
32560 +-};
32561 +-
32562 + static int __init nft_compat_module_init(void)
32563 + {
32564 + int ret;
32565 +
32566 +- ret = register_pernet_subsys(&nft_compat_net_ops);
32567 +- if (ret < 0)
32568 +- goto err_target;
32569 +-
32570 + ret = nft_register_expr(&nft_match_type);
32571 + if (ret < 0)
32572 +- goto err_pernet;
32573 ++ return ret;
32574 +
32575 + ret = nft_register_expr(&nft_target_type);
32576 + if (ret < 0)
32577 +@@ -1054,8 +886,6 @@ err_target:
32578 + nft_unregister_expr(&nft_target_type);
32579 + err_match:
32580 + nft_unregister_expr(&nft_match_type);
32581 +-err_pernet:
32582 +- unregister_pernet_subsys(&nft_compat_net_ops);
32583 + return ret;
32584 + }
32585 +
32586 +@@ -1064,7 +894,6 @@ static void __exit nft_compat_module_exit(void)
32587 + nfnetlink_subsys_unregister(&nfnl_compat_subsys);
32588 + nft_unregister_expr(&nft_target_type);
32589 + nft_unregister_expr(&nft_match_type);
32590 +- unregister_pernet_subsys(&nft_compat_net_ops);
32591 + }
32592 +
32593 + MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
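The nft_compat rework above drops the shared, refcounted nft_xt ops cache in favour of one heap-allocated nft_expr_ops per expression, freed either by ->destroy() on a live rule or by the new ->release_ops() hook when the expression never made it into one. A hedged sketch of that ownership rule; demo_select_ops()/demo_release_ops() and the explicit module argument are illustrative, not the exact nft_compat signatures:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <net/netfilter/nf_tables.h>

	static const struct nft_expr_ops *demo_select_ops(struct module *me)
	{
		struct nft_expr_ops *ops;

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);	/* one ops per rule now */
		if (!ops) {
			module_put(me);				/* undo the xt lookup's ref */
			return ERR_PTR(-ENOMEM);
		}
		/* ... fill in eval/init/destroy/dump/validate and ops->data ... */
		return ops;
	}

	static void demo_release_ops(const struct nft_expr_ops *ops, struct module *me)
	{
		module_put(me);		/* expression was never initialised */
		kfree(ops);
	}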
32594 +diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
32595 +index 4034d70bff39..b2e39cb6a590 100644
32596 +--- a/net/netfilter/xt_physdev.c
32597 ++++ b/net/netfilter/xt_physdev.c
32598 +@@ -96,8 +96,7 @@ match_outdev:
32599 + static int physdev_mt_check(const struct xt_mtchk_param *par)
32600 + {
32601 + const struct xt_physdev_info *info = par->matchinfo;
32602 +-
32603 +- br_netfilter_enable();
32604 ++ static bool brnf_probed __read_mostly;
32605 +
32606 + if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
32607 + info->bitmask & ~XT_PHYSDEV_OP_MASK)
32608 +@@ -111,6 +110,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
32609 + if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
32610 + return -EINVAL;
32611 + }
32612 ++
32613 ++ if (!brnf_probed) {
32614 ++ brnf_probed = true;
32615 ++ request_module("br_netfilter");
32616 ++ }
32617 ++
32618 + return 0;
32619 + }
32620 +
32621 +diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
32622 +index 25eeb6d2a75a..f0ec068e1d02 100644
32623 +--- a/net/netlink/genetlink.c
32624 ++++ b/net/netlink/genetlink.c
32625 +@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
32626 + start, end + 1, GFP_KERNEL);
32627 + if (family->id < 0) {
32628 + err = family->id;
32629 +- goto errout_locked;
32630 ++ goto errout_free;
32631 + }
32632 +
32633 + err = genl_validate_assign_mc_groups(family);
32634 +@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
32635 +
32636 + errout_remove:
32637 + idr_remove(&genl_fam_idr, family->id);
32638 ++errout_free:
32639 + kfree(family->attrbuf);
32640 + errout_locked:
32641 + genl_unlock_all();
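The genetlink fix above is a classic goto-ladder repair: the idr failure path jumped to errout_locked and leaked family->attrbuf, so a new errout_free label is threaded in between. A self-contained sketch of the pattern, with demo_idr and demo_validate() as hypothetical stand-ins:

	#include <linux/idr.h>
	#include <linux/slab.h>

	static DEFINE_IDR(demo_idr);

	/* hypothetical follow-up step that can fail */
	static int demo_validate(int id)
	{
		return id > 0 ? 0 : -EINVAL;
	}

	static int demo_register(void)
	{
		void *buf;
		int id, err;

		buf = kmalloc(64, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		id = idr_alloc(&demo_idr, buf, 1, 0, GFP_KERNEL);
		if (id < 0) {
			err = id;
			goto errout_free;	/* the step the bug skipped */
		}

		err = demo_validate(id);
		if (err)
			goto errout_remove;

		return 0;

	errout_remove:
		idr_remove(&demo_idr, id);
	errout_free:
		kfree(buf);
		return err;
	}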
32642 +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
32643 +index 691da853bef5..4bdf5e3ac208 100644
32644 +--- a/net/openvswitch/flow_netlink.c
32645 ++++ b/net/openvswitch/flow_netlink.c
32646 +@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
32647 +
32648 + struct sw_flow_actions *acts;
32649 + int new_acts_size;
32650 +- int req_size = NLA_ALIGN(attr_len);
32651 ++ size_t req_size = NLA_ALIGN(attr_len);
32652 + int next_offset = offsetof(struct sw_flow_actions, actions) +
32653 + (*sfa)->actions_len;
32654 +
32655 + if (req_size <= (ksize(*sfa) - next_offset))
32656 + goto out;
32657 +
32658 +- new_acts_size = ksize(*sfa) * 2;
32659 ++ new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
32660 +
32661 + if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
32662 + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
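The flow_netlink change above fixes the resize heuristic: doubling ksize() alone can still come up short of what the pending attribute needs, and the previously signed req_size could misbehave for huge inputs. The sizing rule it installs, in isolation:

	#include <linux/kernel.h>

	/* grow at least to what is needed, preferring a 2x growth step */
	static size_t demo_new_size(size_t cur_size, size_t next_offset, size_t req_size)
	{
		return max(next_offset + req_size, cur_size * 2);
	}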
32663 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
32664 +index 1cd1d83a4be0..8406bf11eef4 100644
32665 +--- a/net/packet/af_packet.c
32666 ++++ b/net/packet/af_packet.c
32667 +@@ -3245,7 +3245,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
32668 + }
32669 +
32670 + mutex_lock(&net->packet.sklist_lock);
32671 +- sk_add_node_rcu(sk, &net->packet.sklist);
32672 ++ sk_add_node_tail_rcu(sk, &net->packet.sklist);
32673 + mutex_unlock(&net->packet.sklist_lock);
32674 +
32675 + preempt_disable();
32676 +@@ -4211,7 +4211,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
32677 + struct pgv *pg_vec;
32678 + int i;
32679 +
32680 +- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
32681 ++ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
32682 + if (unlikely(!pg_vec))
32683 + goto out;
32684 +
32685 +diff --git a/net/rds/tcp.c b/net/rds/tcp.c
32686 +index c16f0a362c32..a729c47db781 100644
32687 +--- a/net/rds/tcp.c
32688 ++++ b/net/rds/tcp.c
32689 +@@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
32690 + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
32691 + struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
32692 +
32693 +- if (net != c_net || !tc->t_sock)
32694 ++ if (net != c_net)
32695 + continue;
32696 + if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
32697 + list_move_tail(&tc->t_tcp_node, &tmp_list);
32698 +diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
32699 +index 7ca57741b2fb..7849f286bb93 100644
32700 +--- a/net/rose/rose_subr.c
32701 ++++ b/net/rose/rose_subr.c
32702 +@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
32703 + struct sk_buff *skb;
32704 + unsigned char *dptr;
32705 + unsigned char lci1, lci2;
32706 +- char buffer[100];
32707 +- int len, faclen = 0;
32708 ++ int maxfaclen = 0;
32709 ++ int len, faclen;
32710 ++ int reserve;
32711 +
32712 +- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
32713 ++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
32714 ++ len = ROSE_MIN_LEN;
32715 +
32716 + switch (frametype) {
32717 + case ROSE_CALL_REQUEST:
32718 + len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
32719 +- faclen = rose_create_facilities(buffer, rose);
32720 +- len += faclen;
32721 ++ maxfaclen = 256;
32722 + break;
32723 + case ROSE_CALL_ACCEPTED:
32724 + case ROSE_CLEAR_REQUEST:
32725 +@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
32726 + break;
32727 + }
32728 +
32729 +- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
32730 ++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
32731 ++ if (!skb)
32732 + return;
32733 +
32734 + /*
32735 + * Space for AX.25 header and PID.
32736 + */
32737 +- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
32738 ++ skb_reserve(skb, reserve);
32739 +
32740 +- dptr = skb_put(skb, skb_tailroom(skb));
32741 ++ dptr = skb_put(skb, len);
32742 +
32743 + lci1 = (rose->lci >> 8) & 0x0F;
32744 + lci2 = (rose->lci >> 0) & 0xFF;
32745 +@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
32746 + dptr += ROSE_ADDR_LEN;
32747 + memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
32748 + dptr += ROSE_ADDR_LEN;
32749 +- memcpy(dptr, buffer, faclen);
32750 ++ faclen = rose_create_facilities(dptr, rose);
32751 ++ skb_put(skb, faclen);
32752 + dptr += faclen;
32753 + break;
32754 +
32755 +diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
32756 +index b2adfa825363..5cf6d9f4761d 100644
32757 +--- a/net/rxrpc/conn_client.c
32758 ++++ b/net/rxrpc/conn_client.c
32759 +@@ -353,7 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
32760 + * normally have to take channel_lock but we do this before anyone else
32761 + * can see the connection.
32762 + */
32763 +- list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
32764 ++ list_add(&call->chan_wait_link, &candidate->waiting_calls);
32765 +
32766 + if (cp->exclusive) {
32767 + call->conn = candidate;
32768 +@@ -432,7 +432,7 @@ found_extant_conn:
32769 + call->conn = conn;
32770 + call->security_ix = conn->security_ix;
32771 + call->service_id = conn->service_id;
32772 +- list_add(&call->chan_wait_link, &conn->waiting_calls);
32773 ++ list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
32774 + spin_unlock(&conn->channel_lock);
32775 + _leave(" = 0 [extant %d]", conn->debug_id);
32776 + return 0;
32777 +diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
32778 +index 1a0c682fd734..fd62fe6c8e73 100644
32779 +--- a/net/sched/act_sample.c
32780 ++++ b/net/sched/act_sample.c
32781 +@@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
32782 + struct tc_action_net *tn = net_generic(net, sample_net_id);
32783 + struct nlattr *tb[TCA_SAMPLE_MAX + 1];
32784 + struct psample_group *psample_group;
32785 ++ u32 psample_group_num, rate;
32786 + struct tc_sample *parm;
32787 +- u32 psample_group_num;
32788 + struct tcf_sample *s;
32789 + bool exists = false;
32790 + int ret, err;
32791 +@@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
32792 + return -EEXIST;
32793 + }
32794 +
32795 ++ rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
32796 ++ if (!rate) {
32797 ++ NL_SET_ERR_MSG(extack, "invalid sample rate");
32798 ++ tcf_idr_release(*a, bind);
32799 ++ return -EINVAL;
32800 ++ }
32801 + psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
32802 + psample_group = psample_group_get(net, psample_group_num);
32803 + if (!psample_group) {
32804 +@@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
32805 +
32806 + spin_lock_bh(&s->tcf_lock);
32807 + s->tcf_action = parm->action;
32808 +- s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
32809 ++ s->rate = rate;
32810 + s->psample_group_num = psample_group_num;
32811 + RCU_INIT_POINTER(s->psample_group, psample_group);
32812 +
32813 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
32814 +index 12ca9d13db83..bf67ae5ac1c3 100644
32815 +--- a/net/sched/cls_flower.c
32816 ++++ b/net/sched/cls_flower.c
32817 +@@ -1327,46 +1327,46 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
32818 + if (err < 0)
32819 + goto errout;
32820 +
32821 +- if (!handle) {
32822 +- handle = 1;
32823 +- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
32824 +- INT_MAX, GFP_KERNEL);
32825 +- } else if (!fold) {
32826 +- /* user specifies a handle and it doesn't exist */
32827 +- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
32828 +- handle, GFP_KERNEL);
32829 +- }
32830 +- if (err)
32831 +- goto errout;
32832 +- fnew->handle = handle;
32833 +-
32834 + if (tb[TCA_FLOWER_FLAGS]) {
32835 + fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
32836 +
32837 + if (!tc_flags_valid(fnew->flags)) {
32838 + err = -EINVAL;
32839 +- goto errout_idr;
32840 ++ goto errout;
32841 + }
32842 + }
32843 +
32844 + err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
32845 + tp->chain->tmplt_priv, extack);
32846 + if (err)
32847 +- goto errout_idr;
32848 ++ goto errout;
32849 +
32850 + err = fl_check_assign_mask(head, fnew, fold, mask);
32851 + if (err)
32852 +- goto errout_idr;
32853 ++ goto errout;
32854 ++
32855 ++ if (!handle) {
32856 ++ handle = 1;
32857 ++ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
32858 ++ INT_MAX, GFP_KERNEL);
32859 ++ } else if (!fold) {
32860 ++ /* user specifies a handle and it doesn't exist */
32861 ++ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
32862 ++ handle, GFP_KERNEL);
32863 ++ }
32864 ++ if (err)
32865 ++ goto errout_mask;
32866 ++ fnew->handle = handle;
32867 +
32868 + if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
32869 + err = -EEXIST;
32870 +- goto errout_mask;
32871 ++ goto errout_idr;
32872 + }
32873 +
32874 + err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
32875 + fnew->mask->filter_ht_params);
32876 + if (err)
32877 +- goto errout_mask;
32878 ++ goto errout_idr;
32879 +
32880 + if (!tc_skip_hw(fnew->flags)) {
32881 + err = fl_hw_replace_filter(tp, fnew, extack);
32882 +@@ -1405,12 +1405,13 @@ errout_mask_ht:
32883 + rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
32884 + fnew->mask->filter_ht_params);
32885 +
32886 +-errout_mask:
32887 +- fl_mask_put(head, fnew->mask, false);
32888 +-
32889 + errout_idr:
32890 + if (!fold)
32891 + idr_remove(&head->handle_idr, fnew->handle);
32892 ++
32893 ++errout_mask:
32894 ++ fl_mask_put(head, fnew->mask, false);
32895 ++
32896 + errout:
32897 + tcf_exts_destroy(&fnew->exts);
32898 + kfree(fnew);
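The fl_change() reorder above restores LIFO error unwinding: the handle is now allocated after the mask is assigned, so the errout_idr label can sit above errout_mask and each label releases exactly what was acquired after the previous one. A compact sketch of the shape, with demo_* helpers as hypothetical stand-ins for the flower-specific steps:

	/* hypothetical stand-ins for the flower-specific steps */
	static int demo_assign_mask(void) { return 0; }
	static int demo_alloc_handle(void) { return 0; }
	static int demo_ht_insert(void) { return 0; }
	static void demo_free_handle(void) { }
	static void demo_put_mask(void) { }

	static int demo_change(void)
	{
		int err;

		err = demo_assign_mask();	/* like fl_check_assign_mask() */
		if (err)
			goto errout;

		err = demo_alloc_handle();	/* like idr_alloc_u32(), now after the mask */
		if (err)
			goto errout_mask;

		err = demo_ht_insert();		/* like rhashtable_insert_fast() */
		if (err)
			goto errout_idr;

		return 0;

	errout_idr:
		demo_free_handle();		/* undo in reverse order of acquisition */
	errout_mask:
		demo_put_mask();
	errout:
		return err;
	}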
32899 +diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
32900 +index 0e408ee9dcec..5ba07cd11e31 100644
32901 +--- a/net/sched/cls_matchall.c
32902 ++++ b/net/sched/cls_matchall.c
32903 +@@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
32904 +
32905 + static void *mall_get(struct tcf_proto *tp, u32 handle)
32906 + {
32907 ++ struct cls_mall_head *head = rtnl_dereference(tp->root);
32908 ++
32909 ++ if (head && head->handle == handle)
32910 ++ return head;
32911 ++
32912 + return NULL;
32913 + }
32914 +
32915 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
32916 +index 968a85fe4d4a..de31f2f3b973 100644
32917 +--- a/net/sched/sch_generic.c
32918 ++++ b/net/sched/sch_generic.c
32919 +@@ -68,7 +68,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
32920 + skb = __skb_dequeue(&q->skb_bad_txq);
32921 + if (qdisc_is_percpu_stats(q)) {
32922 + qdisc_qstats_cpu_backlog_dec(q, skb);
32923 +- qdisc_qstats_cpu_qlen_dec(q);
32924 ++ qdisc_qstats_atomic_qlen_dec(q);
32925 + } else {
32926 + qdisc_qstats_backlog_dec(q, skb);
32927 + q->q.qlen--;
32928 +@@ -108,7 +108,7 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
32929 +
32930 + if (qdisc_is_percpu_stats(q)) {
32931 + qdisc_qstats_cpu_backlog_inc(q, skb);
32932 +- qdisc_qstats_cpu_qlen_inc(q);
32933 ++ qdisc_qstats_atomic_qlen_inc(q);
32934 + } else {
32935 + qdisc_qstats_backlog_inc(q, skb);
32936 + q->q.qlen++;
32937 +@@ -147,7 +147,7 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
32938 +
32939 + qdisc_qstats_cpu_requeues_inc(q);
32940 + qdisc_qstats_cpu_backlog_inc(q, skb);
32941 +- qdisc_qstats_cpu_qlen_inc(q);
32942 ++ qdisc_qstats_atomic_qlen_inc(q);
32943 +
32944 + skb = next;
32945 + }
32946 +@@ -252,7 +252,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
32947 + skb = __skb_dequeue(&q->gso_skb);
32948 + if (qdisc_is_percpu_stats(q)) {
32949 + qdisc_qstats_cpu_backlog_dec(q, skb);
32950 +- qdisc_qstats_cpu_qlen_dec(q);
32951 ++ qdisc_qstats_atomic_qlen_dec(q);
32952 + } else {
32953 + qdisc_qstats_backlog_dec(q, skb);
32954 + q->q.qlen--;
32955 +@@ -645,7 +645,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
32956 + if (unlikely(err))
32957 + return qdisc_drop_cpu(skb, qdisc, to_free);
32958 +
32959 +- qdisc_qstats_cpu_qlen_inc(qdisc);
32960 ++ qdisc_qstats_atomic_qlen_inc(qdisc);
32961 + /* Note: skb can not be used after skb_array_produce(),
32962 + * so we better not use qdisc_qstats_cpu_backlog_inc()
32963 + */
32964 +@@ -670,7 +670,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
32965 + if (likely(skb)) {
32966 + qdisc_qstats_cpu_backlog_dec(qdisc, skb);
32967 + qdisc_bstats_cpu_update(qdisc, skb);
32968 +- qdisc_qstats_cpu_qlen_dec(qdisc);
32969 ++ qdisc_qstats_atomic_qlen_dec(qdisc);
32970 + }
32971 +
32972 + return skb;
32973 +@@ -714,7 +714,6 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
32974 + struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
32975 +
32976 + q->backlog = 0;
32977 +- q->qlen = 0;
32978 + }
32979 + }
32980 +
32981 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
32982 +index 6abc8b274270..951afdeea5e9 100644
32983 +--- a/net/sctp/protocol.c
32984 ++++ b/net/sctp/protocol.c
32985 +@@ -600,6 +600,7 @@ out:
32986 + static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
32987 + {
32988 + /* No address mapping for V4 sockets */
32989 ++ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
32990 + return sizeof(struct sockaddr_in);
32991 + }
32992 +
32993 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
32994 +index 65d6d04546ae..5f68420b4b0d 100644
32995 +--- a/net/sctp/socket.c
32996 ++++ b/net/sctp/socket.c
32997 +@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
32998 + if (unlikely(addrs_size <= 0))
32999 + return -EINVAL;
33000 +
33001 +- kaddrs = vmemdup_user(addrs, addrs_size);
33002 ++ kaddrs = memdup_user(addrs, addrs_size);
33003 + if (unlikely(IS_ERR(kaddrs)))
33004 + return PTR_ERR(kaddrs);
33005 +
33006 +@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
33007 + addr_buf = kaddrs;
33008 + while (walk_size < addrs_size) {
33009 + if (walk_size + sizeof(sa_family_t) > addrs_size) {
33010 +- kvfree(kaddrs);
33011 ++ kfree(kaddrs);
33012 + return -EINVAL;
33013 + }
33014 +
33015 +@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
33016 + * causes the address buffer to overflow return EINVAL.
33017 + */
33018 + if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
33019 +- kvfree(kaddrs);
33020 ++ kfree(kaddrs);
33021 + return -EINVAL;
33022 + }
33023 + addrcnt++;
33024 +@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
33025 + }
33026 +
33027 + out:
33028 +- kvfree(kaddrs);
33029 ++ kfree(kaddrs);
33030 +
33031 + return err;
33032 + }
33033 +@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
33034 + if (unlikely(addrs_size <= 0))
33035 + return -EINVAL;
33036 +
33037 +- kaddrs = vmemdup_user(addrs, addrs_size);
33038 ++ kaddrs = memdup_user(addrs, addrs_size);
33039 + if (unlikely(IS_ERR(kaddrs)))
33040 + return PTR_ERR(kaddrs);
33041 +
33042 +@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
33043 + err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
33044 +
33045 + out_free:
33046 +- kvfree(kaddrs);
33047 ++ kfree(kaddrs);
33048 +
33049 + return err;
33050 + }
33051 +@@ -1866,6 +1866,7 @@ static int sctp_sendmsg_check_sflags(struct sctp_association *asoc,
33052 +
33053 + pr_debug("%s: aborting association:%p\n", __func__, asoc);
33054 + sctp_primitive_ABORT(net, asoc, chunk);
33055 ++ iov_iter_revert(&msg->msg_iter, msg_len);
33056 +
33057 + return 0;
33058 + }
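The sctp_setsockopt_bindx()/connectx() changes above swap vmemdup_user() for memdup_user(), and every matching free from kvfree() to kfree(): memdup_user() always kmalloc()s, so plain kfree() is the correct pair on each exit path. A minimal sketch, with demo_walk_addrs() as a hypothetical placeholder for the address validation loop:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	/* hypothetical placeholder for the bindx address walk */
	static int demo_walk_addrs(void *addrs, int size)
	{
		return (addrs && size > 0) ? 0 : -EINVAL;
	}

	static int demo_setsockopt(const void __user *uaddrs, int size)
	{
		void *kaddrs;
		int err;

		if (size <= 0)
			return -EINVAL;

		kaddrs = memdup_user(uaddrs, size);	/* kmalloc'd copy */
		if (IS_ERR(kaddrs))
			return PTR_ERR(kaddrs);

		err = demo_walk_addrs(kaddrs, size);

		kfree(kaddrs);				/* matches memdup_user() */
		return err;
	}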
33059 +diff --git a/net/sctp/stream.c b/net/sctp/stream.c
33060 +index 2936ed17bf9e..3b47457862cc 100644
33061 +--- a/net/sctp/stream.c
33062 ++++ b/net/sctp/stream.c
33063 +@@ -230,8 +230,6 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
33064 + for (i = 0; i < stream->outcnt; i++)
33065 + SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
33066 +
33067 +- sched->init(stream);
33068 +-
33069 + in:
33070 + sctp_stream_interleave_init(stream);
33071 + if (!incnt)
33072 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
33073 +index d7ec6132c046..d455537c8fc6 100644
33074 +--- a/net/sunrpc/clnt.c
33075 ++++ b/net/sunrpc/clnt.c
33076 +@@ -66,9 +66,6 @@ static void call_decode(struct rpc_task *task);
33077 + static void call_bind(struct rpc_task *task);
33078 + static void call_bind_status(struct rpc_task *task);
33079 + static void call_transmit(struct rpc_task *task);
33080 +-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
33081 +-static void call_bc_transmit(struct rpc_task *task);
33082 +-#endif /* CONFIG_SUNRPC_BACKCHANNEL */
33083 + static void call_status(struct rpc_task *task);
33084 + static void call_transmit_status(struct rpc_task *task);
33085 + static void call_refresh(struct rpc_task *task);
33086 +@@ -80,6 +77,7 @@ static void call_connect_status(struct rpc_task *task);
33087 + static __be32 *rpc_encode_header(struct rpc_task *task);
33088 + static __be32 *rpc_verify_header(struct rpc_task *task);
33089 + static int rpc_ping(struct rpc_clnt *clnt);
33090 ++static void rpc_check_timeout(struct rpc_task *task);
33091 +
33092 + static void rpc_register_client(struct rpc_clnt *clnt)
33093 + {
33094 +@@ -1131,6 +1129,8 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
33095 + EXPORT_SYMBOL_GPL(rpc_call_async);
33096 +
33097 + #if defined(CONFIG_SUNRPC_BACKCHANNEL)
33098 ++static void call_bc_encode(struct rpc_task *task);
33099 ++
33100 + /**
33101 + * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
33102 + * rpc_execute against it
33103 +@@ -1152,7 +1152,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
33104 + task = rpc_new_task(&task_setup_data);
33105 + xprt_init_bc_request(req, task);
33106 +
33107 +- task->tk_action = call_bc_transmit;
33108 ++ task->tk_action = call_bc_encode;
33109 + atomic_inc(&task->tk_count);
33110 + WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
33111 + rpc_execute(task);
33112 +@@ -1786,7 +1786,12 @@ call_encode(struct rpc_task *task)
33113 + xprt_request_enqueue_receive(task);
33114 + xprt_request_enqueue_transmit(task);
33115 + out:
33116 +- task->tk_action = call_bind;
33117 ++ task->tk_action = call_transmit;
33118 ++ /* Check that the connection is OK */
33119 ++ if (!xprt_bound(task->tk_xprt))
33120 ++ task->tk_action = call_bind;
33121 ++ else if (!xprt_connected(task->tk_xprt))
33122 ++ task->tk_action = call_connect;
33123 + }
33124 +
33125 + /*
33126 +@@ -1937,8 +1942,7 @@ call_connect_status(struct rpc_task *task)
33127 + break;
33128 + if (clnt->cl_autobind) {
33129 + rpc_force_rebind(clnt);
33130 +- task->tk_action = call_bind;
33131 +- return;
33132 ++ goto out_retry;
33133 + }
33134 + /* fall through */
33135 + case -ECONNRESET:
33136 +@@ -1958,16 +1962,19 @@ call_connect_status(struct rpc_task *task)
33137 + /* fall through */
33138 + case -ENOTCONN:
33139 + case -EAGAIN:
33140 +- /* Check for timeouts before looping back to call_bind */
33141 + case -ETIMEDOUT:
33142 +- task->tk_action = call_timeout;
33143 +- return;
33144 ++ goto out_retry;
33145 + case 0:
33146 + clnt->cl_stats->netreconn++;
33147 + task->tk_action = call_transmit;
33148 + return;
33149 + }
33150 + rpc_exit(task, status);
33151 ++ return;
33152 ++out_retry:
33153 ++ /* Check for timeouts before looping back to call_bind */
33154 ++ task->tk_action = call_bind;
33155 ++ rpc_check_timeout(task);
33156 + }
33157 +
33158 + /*
33159 +@@ -1978,13 +1985,19 @@ call_transmit(struct rpc_task *task)
33160 + {
33161 + dprint_status(task);
33162 +
33163 +- task->tk_status = 0;
33164 ++ task->tk_action = call_transmit_status;
33165 + if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
33166 + if (!xprt_prepare_transmit(task))
33167 + return;
33168 +- xprt_transmit(task);
33169 ++ task->tk_status = 0;
33170 ++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
33171 ++ if (!xprt_connected(task->tk_xprt)) {
33172 ++ task->tk_status = -ENOTCONN;
33173 ++ return;
33174 ++ }
33175 ++ xprt_transmit(task);
33176 ++ }
33177 + }
33178 +- task->tk_action = call_transmit_status;
33179 + xprt_end_transmit(task);
33180 + }
33181 +
33182 +@@ -2038,7 +2051,7 @@ call_transmit_status(struct rpc_task *task)
33183 + trace_xprt_ping(task->tk_xprt,
33184 + task->tk_status);
33185 + rpc_exit(task, task->tk_status);
33186 +- break;
33187 ++ return;
33188 + }
33189 + /* fall through */
33190 + case -ECONNRESET:
33191 +@@ -2046,11 +2059,24 @@ call_transmit_status(struct rpc_task *task)
33192 + case -EADDRINUSE:
33193 + case -ENOTCONN:
33194 + case -EPIPE:
33195 ++ task->tk_action = call_bind;
33196 ++ task->tk_status = 0;
33197 + break;
33198 + }
33199 ++ rpc_check_timeout(task);
33200 + }
33201 +
33202 + #if defined(CONFIG_SUNRPC_BACKCHANNEL)
33203 ++static void call_bc_transmit(struct rpc_task *task);
33204 ++static void call_bc_transmit_status(struct rpc_task *task);
33205 ++
33206 ++static void
33207 ++call_bc_encode(struct rpc_task *task)
33208 ++{
33209 ++ xprt_request_enqueue_transmit(task);
33210 ++ task->tk_action = call_bc_transmit;
33211 ++}
33212 ++
33213 + /*
33214 + * 5b. Send the backchannel RPC reply. On error, drop the reply. In
33215 + * addition, disconnect on connectivity errors.
33216 +@@ -2058,26 +2084,23 @@ call_transmit_status(struct rpc_task *task)
33217 + static void
33218 + call_bc_transmit(struct rpc_task *task)
33219 + {
33220 +- struct rpc_rqst *req = task->tk_rqstp;
33221 +-
33222 +- if (rpc_task_need_encode(task))
33223 +- xprt_request_enqueue_transmit(task);
33224 +- if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
33225 +- goto out_wakeup;
33226 +-
33227 +- if (!xprt_prepare_transmit(task))
33228 +- goto out_retry;
33229 +-
33230 +- if (task->tk_status < 0) {
33231 +- printk(KERN_NOTICE "RPC: Could not send backchannel reply "
33232 +- "error: %d\n", task->tk_status);
33233 +- goto out_done;
33234 ++ task->tk_action = call_bc_transmit_status;
33235 ++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
33236 ++ if (!xprt_prepare_transmit(task))
33237 ++ return;
33238 ++ task->tk_status = 0;
33239 ++ xprt_transmit(task);
33240 + }
33241 ++ xprt_end_transmit(task);
33242 ++}
33243 +
33244 +- xprt_transmit(task);
33245 ++static void
33246 ++call_bc_transmit_status(struct rpc_task *task)
33247 ++{
33248 ++ struct rpc_rqst *req = task->tk_rqstp;
33249 +
33250 +- xprt_end_transmit(task);
33251 + dprint_status(task);
33252 ++
33253 + switch (task->tk_status) {
33254 + case 0:
33255 + /* Success */
33256 +@@ -2091,8 +2114,14 @@ call_bc_transmit(struct rpc_task *task)
33257 + case -ENOTCONN:
33258 + case -EPIPE:
33259 + break;
33260 ++ case -ENOBUFS:
33261 ++ rpc_delay(task, HZ>>2);
33262 ++ /* fall through */
33263 ++ case -EBADSLT:
33264 + case -EAGAIN:
33265 +- goto out_retry;
33266 ++ task->tk_status = 0;
33267 ++ task->tk_action = call_bc_transmit;
33268 ++ return;
33269 + case -ETIMEDOUT:
33270 + /*
33271 + * Problem reaching the server. Disconnect and let the
33272 +@@ -2111,18 +2140,11 @@ call_bc_transmit(struct rpc_task *task)
33273 + * We were unable to reply and will have to drop the
33274 + * request. The server should reconnect and retransmit.
33275 + */
33276 +- WARN_ON_ONCE(task->tk_status == -EAGAIN);
33277 + printk(KERN_NOTICE "RPC: Could not send backchannel reply "
33278 + "error: %d\n", task->tk_status);
33279 + break;
33280 + }
33281 +-out_wakeup:
33282 +- rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
33283 +-out_done:
33284 + task->tk_action = rpc_exit_task;
33285 +- return;
33286 +-out_retry:
33287 +- task->tk_status = 0;
33288 + }
33289 + #endif /* CONFIG_SUNRPC_BACKCHANNEL */
33290 +
33291 +@@ -2178,7 +2200,7 @@ call_status(struct rpc_task *task)
33292 + case -EPIPE:
33293 + case -ENOTCONN:
33294 + case -EAGAIN:
33295 +- task->tk_action = call_encode;
33296 ++ task->tk_action = call_timeout;
33297 + break;
33298 + case -EIO:
33299 + /* shutdown or soft timeout */
33300 +@@ -2192,20 +2214,13 @@ call_status(struct rpc_task *task)
33301 + }
33302 + }
33303 +
33304 +-/*
33305 +- * 6a. Handle RPC timeout
33306 +- * We do not release the request slot, so we keep using the
33307 +- * same XID for all retransmits.
33308 +- */
33309 + static void
33310 +-call_timeout(struct rpc_task *task)
33311 ++rpc_check_timeout(struct rpc_task *task)
33312 + {
33313 + struct rpc_clnt *clnt = task->tk_client;
33314 +
33315 +- if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
33316 +- dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
33317 +- goto retry;
33318 +- }
33319 ++ if (xprt_adjust_timeout(task->tk_rqstp) == 0)
33320 ++ return;
33321 +
33322 + dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
33323 + task->tk_timeouts++;
33324 +@@ -2241,10 +2256,19 @@ call_timeout(struct rpc_task *task)
33325 + * event? RFC2203 requires the server to drop all such requests.
33326 + */
33327 + rpcauth_invalcred(task);
33328 ++}
33329 +
33330 +-retry:
33331 ++/*
33332 ++ * 6a. Handle RPC timeout
33333 ++ * We do not release the request slot, so we keep using the
33334 ++ * same XID for all retransmits.
33335 ++ */
33336 ++static void
33337 ++call_timeout(struct rpc_task *task)
33338 ++{
33339 + task->tk_action = call_encode;
33340 + task->tk_status = 0;
33341 ++ rpc_check_timeout(task);
33342 + }
33343 +
33344 + /*
33345 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
33346 +index a6a060925e5d..43590a968b73 100644
33347 +--- a/net/sunrpc/svcsock.c
33348 ++++ b/net/sunrpc/svcsock.c
33349 +@@ -349,12 +349,16 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
33350 + /*
33351 + * Set socket snd and rcv buffer lengths
33352 + */
33353 +-static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
33354 +- unsigned int rcv)
33355 ++static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
33356 + {
33357 ++ unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg;
33358 ++ struct socket *sock = svsk->sk_sock;
33359 ++
33360 ++ nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
33361 ++
33362 + lock_sock(sock->sk);
33363 +- sock->sk->sk_sndbuf = snd * 2;
33364 +- sock->sk->sk_rcvbuf = rcv * 2;
33365 ++ sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
33366 ++ sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
33367 + sock->sk->sk_write_space(sock->sk);
33368 + release_sock(sock->sk);
33369 + }
33370 +@@ -516,9 +520,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
33371 + * provides an upper bound on the number of threads
33372 + * which will access the socket.
33373 + */
33374 +- svc_sock_setbufsize(svsk->sk_sock,
33375 +- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
33376 +- (serv->sv_nrthreads+3) * serv->sv_max_mesg);
33377 ++ svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
33378 +
33379 + clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
33380 + skb = NULL;
33381 +@@ -681,9 +683,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
33382 + * receive and respond to one request.
33383 + * svc_udp_recvfrom will re-adjust if necessary
33384 + */
33385 +- svc_sock_setbufsize(svsk->sk_sock,
33386 +- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
33387 +- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
33388 ++ svc_sock_setbufsize(svsk, 3);
33389 +
33390 + /* data might have come in before data_ready set up */
33391 + set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
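The svc_sock_setbufsize() rewrite above centralises the sizing math and, more importantly, clamps the request count so the int-typed socket buffer sizes cannot overflow when multiplied out. The clamp in isolation (assuming max_mesg > 0, as sv_max_mesg always is):

	#include <linux/kernel.h>

	/* cap nreqs so that nreqs * max_mesg * 2 stays within an int sk_sndbuf */
	static unsigned int demo_bufsize(unsigned int nreqs, unsigned int max_mesg)
	{
		nreqs = min(nreqs, INT_MAX / 2 / max_mesg);
		return nreqs * max_mesg * 2;
	}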
33392 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
33393 +index 21113bfd4eca..a5ae9c036b9c 100644
33394 +--- a/net/sunrpc/xprtrdma/verbs.c
33395 ++++ b/net/sunrpc/xprtrdma/verbs.c
33396 +@@ -90,7 +90,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
33397 + /* Flush Receives, then wait for deferred Reply work
33398 + * to complete.
33399 + */
33400 +- ib_drain_qp(ia->ri_id->qp);
33401 ++ ib_drain_rq(ia->ri_id->qp);
33402 + drain_workqueue(buf->rb_completion_wq);
33403 +
33404 + /* Deferred Reply processing might have scheduled
33405 +diff --git a/net/tipc/net.c b/net/tipc/net.c
33406 +index f076edb74338..7ce1e86b024f 100644
33407 +--- a/net/tipc/net.c
33408 ++++ b/net/tipc/net.c
33409 +@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
33410 +
33411 + void tipc_net_stop(struct net *net)
33412 + {
33413 +- u32 self = tipc_own_addr(net);
33414 +-
33415 +- if (!self)
33416 ++ if (!tipc_own_id(net))
33417 + return;
33418 +
33419 +- tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
33420 + rtnl_lock();
33421 + tipc_bearer_stop(net);
33422 + tipc_node_stop(net);
33423 +diff --git a/net/tipc/socket.c b/net/tipc/socket.c
33424 +index 70343ac448b1..4dca9161f99b 100644
33425 +--- a/net/tipc/socket.c
33426 ++++ b/net/tipc/socket.c
33427 +@@ -1333,7 +1333,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
33428 +
33429 + if (unlikely(!dest)) {
33430 + dest = &tsk->peer;
33431 +- if (!syn || dest->family != AF_TIPC)
33432 ++ if (!syn && dest->family != AF_TIPC)
33433 + return -EDESTADDRREQ;
33434 + }
33435 +
33436 +@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
33437 + return 0;
33438 + }
33439 +
33440 ++static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
33441 ++{
33442 ++ if (addr->family != AF_TIPC)
33443 ++ return false;
33444 ++ if (addr->addrtype == TIPC_SERVICE_RANGE)
33445 ++ return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
33446 ++ return (addr->addrtype == TIPC_SERVICE_ADDR ||
33447 ++ addr->addrtype == TIPC_SOCKET_ADDR);
33448 ++}
33449 ++
33450 + /**
33451 + * tipc_connect - establish a connection to another TIPC port
33452 + * @sock: socket structure
33453 +@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
33454 + if (!tipc_sk_type_connectionless(sk))
33455 + res = -EINVAL;
33456 + goto exit;
33457 +- } else if (dst->family != AF_TIPC) {
33458 +- res = -EINVAL;
33459 + }
33460 +- if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
33461 ++ if (!tipc_sockaddr_is_sane(dst)) {
33462 + res = -EINVAL;
33463 +- if (res)
33464 + goto exit;
33465 +-
33466 ++ }
33467 + /* DGRAM/RDM connect(), just save the destaddr */
33468 + if (tipc_sk_type_connectionless(sk)) {
33469 + memcpy(&tsk->peer, dest, destlen);
33470 + goto exit;
33471 ++ } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
33472 ++ res = -EINVAL;
33473 ++ goto exit;
33474 + }
33475 +
33476 + previous = sk->sk_state;
33477 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
33478 +index a457c0fbbef1..f5edb213d760 100644
33479 +--- a/net/tipc/topsrv.c
33480 ++++ b/net/tipc/topsrv.c
33481 +@@ -365,6 +365,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
33482 + struct tipc_subscription *sub;
33483 +
33484 + if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
33485 ++ s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
33486 + tipc_conn_delete_sub(con, s);
33487 + return 0;
33488 + }
33489 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
33490 +index 3ae3a33da70b..602715fc9a75 100644
33491 +--- a/net/vmw_vsock/virtio_transport_common.c
33492 ++++ b/net/vmw_vsock/virtio_transport_common.c
33493 +@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
33494 + */
33495 + static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
33496 + {
33497 ++ const struct virtio_transport *t;
33498 ++ struct virtio_vsock_pkt *reply;
33499 + struct virtio_vsock_pkt_info info = {
33500 + .op = VIRTIO_VSOCK_OP_RST,
33501 + .type = le16_to_cpu(pkt->hdr.type),
33502 +@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
33503 + if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
33504 + return 0;
33505 +
33506 +- pkt = virtio_transport_alloc_pkt(&info, 0,
33507 +- le64_to_cpu(pkt->hdr.dst_cid),
33508 +- le32_to_cpu(pkt->hdr.dst_port),
33509 +- le64_to_cpu(pkt->hdr.src_cid),
33510 +- le32_to_cpu(pkt->hdr.src_port));
33511 +- if (!pkt)
33512 ++ reply = virtio_transport_alloc_pkt(&info, 0,
33513 ++ le64_to_cpu(pkt->hdr.dst_cid),
33514 ++ le32_to_cpu(pkt->hdr.dst_port),
33515 ++ le64_to_cpu(pkt->hdr.src_cid),
33516 ++ le32_to_cpu(pkt->hdr.src_port));
33517 ++ if (!reply)
33518 + return -ENOMEM;
33519 +
33520 +- return virtio_transport_get_ops()->send_pkt(pkt);
33521 ++ t = virtio_transport_get_ops();
33522 ++ if (!t) {
33523 ++ virtio_transport_free_pkt(reply);
33524 ++ return -ENOTCONN;
33525 ++ }
33526 ++
33527 ++ return t->send_pkt(reply);
33528 + }
33529 +
33530 + static void virtio_transport_wait_close(struct sock *sk, long timeout)
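The vsock change above fixes two problems in the no-socket RST path: the reply is built into its own pointer so the caller's pkt is not clobbered (the caller still owns and frees it), and the transport ops are NULL-checked because a packet can arrive while no transport is registered. A condensed sketch of the fixed control flow; the demo_* names mirror, but are not, the virtio_transport symbols:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct demo_pkt { u16 op; };

	struct demo_transport {
		int (*send_pkt)(struct demo_pkt *pkt);
	};

	/* hypothetical registry; the real code looks up the active transport */
	static const struct demo_transport *demo_transport_ops;

	static int demo_reset_no_sock(struct demo_pkt *pkt)
	{
		const struct demo_transport *t;
		struct demo_pkt *reply;

		reply = kzalloc(sizeof(*reply), GFP_ATOMIC);	/* separate from caller's pkt */
		if (!reply)
			return -ENOMEM;
		reply->op = 1;	/* stand-in for VIRTIO_VSOCK_OP_RST */

		t = demo_transport_ops;
		if (!t) {					/* no transport registered yet */
			kfree(reply);
			return -ENOTCONN;
		}

		return t->send_pkt(reply);
	}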
33531 +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
33532 +index eff31348e20b..20a511398389 100644
33533 +--- a/net/x25/af_x25.c
33534 ++++ b/net/x25/af_x25.c
33535 +@@ -820,8 +820,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
33536 + sock->state = SS_CONNECTED;
33537 + rc = 0;
33538 + out_put_neigh:
33539 +- if (rc)
33540 ++ if (rc) {
33541 ++ read_lock_bh(&x25_list_lock);
33542 + x25_neigh_put(x25->neighbour);
33543 ++ x25->neighbour = NULL;
33544 ++ read_unlock_bh(&x25_list_lock);
33545 ++ x25->state = X25_STATE_0;
33546 ++ }
33547 + out_put_route:
33548 + x25_route_put(rt);
33549 + out:
33550 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
33551 +index 85e4fe4f18cc..f3031c8907d9 100644
33552 +--- a/net/xdp/xsk.c
33553 ++++ b/net/xdp/xsk.c
33554 +@@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
33555 + if (sxdp->sxdp_family != AF_XDP)
33556 + return -EINVAL;
33557 +
33558 ++ flags = sxdp->sxdp_flags;
33559 ++ if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
33560 ++ return -EINVAL;
33561 ++
33562 + mutex_lock(&xs->mutex);
33563 + if (xs->dev) {
33564 + err = -EBUSY;
33565 +@@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
33566 + }
33567 +
33568 + qid = sxdp->sxdp_queue_id;
33569 +- flags = sxdp->sxdp_flags;
33570 +
33571 + if (flags & XDP_SHARED_UMEM) {
33572 + struct xdp_sock *umem_xs;
33573 +diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
33574 +index 7aad82406422..d3319a80788a 100644
33575 +--- a/scripts/gdb/linux/constants.py.in
33576 ++++ b/scripts/gdb/linux/constants.py.in
33577 +@@ -37,12 +37,12 @@
33578 + import gdb
33579 +
33580 + /* linux/fs.h */
33581 +-LX_VALUE(MS_RDONLY)
33582 +-LX_VALUE(MS_SYNCHRONOUS)
33583 +-LX_VALUE(MS_MANDLOCK)
33584 +-LX_VALUE(MS_DIRSYNC)
33585 +-LX_VALUE(MS_NOATIME)
33586 +-LX_VALUE(MS_NODIRATIME)
33587 ++LX_VALUE(SB_RDONLY)
33588 ++LX_VALUE(SB_SYNCHRONOUS)
33589 ++LX_VALUE(SB_MANDLOCK)
33590 ++LX_VALUE(SB_DIRSYNC)
33591 ++LX_VALUE(SB_NOATIME)
33592 ++LX_VALUE(SB_NODIRATIME)
33593 +
33594 + /* linux/mount.h */
33595 + LX_VALUE(MNT_NOSUID)
33596 +diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
33597 +index 0aebd7565b03..2f01a958eb22 100644
33598 +--- a/scripts/gdb/linux/proc.py
33599 ++++ b/scripts/gdb/linux/proc.py
33600 +@@ -114,11 +114,11 @@ def info_opts(lst, opt):
33601 + return opts
33602 +
33603 +
33604 +-FS_INFO = {constants.LX_MS_SYNCHRONOUS: ",sync",
33605 +- constants.LX_MS_MANDLOCK: ",mand",
33606 +- constants.LX_MS_DIRSYNC: ",dirsync",
33607 +- constants.LX_MS_NOATIME: ",noatime",
33608 +- constants.LX_MS_NODIRATIME: ",nodiratime"}
33609 ++FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync",
33610 ++ constants.LX_SB_MANDLOCK: ",mand",
33611 ++ constants.LX_SB_DIRSYNC: ",dirsync",
33612 ++ constants.LX_SB_NOATIME: ",noatime",
33613 ++ constants.LX_SB_NODIRATIME: ",nodiratime"}
33614 +
33615 + MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
33616 + constants.LX_MNT_NODEV: ",nodev",
33617 +@@ -184,7 +184,7 @@ values of that process namespace"""
33618 + fstype = superblock['s_type']['name'].string()
33619 + s_flags = int(superblock['s_flags'])
33620 + m_flags = int(vfs['mnt']['mnt_flags'])
33621 +- rd = "ro" if (s_flags & constants.LX_MS_RDONLY) else "rw"
33622 ++ rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw"
33623 +
33624 + gdb.write(
33625 + "{} {} {} {}{}{} 0 0\n"
33626 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
33627 +index 26bf886bd168..588a3bc29ecc 100644
33628 +--- a/scripts/mod/modpost.c
33629 ++++ b/scripts/mod/modpost.c
33630 +@@ -640,7 +640,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
33631 + info->sechdrs[sym->st_shndx].sh_offset -
33632 + (info->hdr->e_type != ET_REL ?
33633 + info->sechdrs[sym->st_shndx].sh_addr : 0);
33634 +- crc = *crcp;
33635 ++ crc = TO_NATIVE(*crcp);
33636 + }
33637 + sym_update_crc(symname + strlen("__crc_"), mod, crc,
33638 + export);
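The modpost change above is a cross-endian fix: modpost runs on the build host but reads CRCs out of the target ELF image, so the raw value must pass through TO_NATIVE() (a byteswap when host and target byte order differ) before use. A userspace sketch of the idea, with a simplified stand-in for modpost's TO_NATIVE macro:

	#include <byteswap.h>
	#include <stdint.h>

	/* simplified stand-in: the real TO_NATIVE() dispatches on the ELF
	 * image's byte order recorded in its header */
	static uint32_t demo_to_native(uint32_t v, int target_is_big_endian)
	{
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return target_is_big_endian ? v : bswap_32(v);
	#else
		return target_is_big_endian ? bswap_32(v) : v;
	#endif
	}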
33639 +diff --git a/scripts/package/Makefile b/scripts/package/Makefile
33640 +index 453fecee62f0..aa39c2b5e46a 100644
33641 +--- a/scripts/package/Makefile
33642 ++++ b/scripts/package/Makefile
33643 +@@ -59,7 +59,7 @@ rpm-pkg: FORCE
33644 + # binrpm-pkg
33645 + # ---------------------------------------------------------------------------
33646 + binrpm-pkg: FORCE
33647 +- $(MAKE) KBUILD_SRC=
33648 ++ $(MAKE) -f $(srctree)/Makefile
33649 + $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
33650 + +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
33651 + $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
33652 +@@ -102,7 +102,7 @@ clean-dirs += $(objtree)/snap/
33653 + # tarball targets
33654 + # ---------------------------------------------------------------------------
33655 + tar%pkg: FORCE
33656 +- $(MAKE) KBUILD_SRC=
33657 ++ $(MAKE) -f $(srctree)/Makefile
33658 + $(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
33659 +
33660 + clean-dirs += $(objtree)/tar-install/
33661 +diff --git a/scripts/package/builddeb b/scripts/package/builddeb
33662 +index f43a274f4f1d..8ac25d10a6ad 100755
33663 +--- a/scripts/package/builddeb
33664 ++++ b/scripts/package/builddeb
33665 +@@ -86,12 +86,12 @@ cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
33666 + if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
33667 + # Only some architectures with OF support have this target
33668 + if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
33669 +- $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
33670 ++ $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
33671 + fi
33672 + fi
33673 +
33674 + if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
33675 +- INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
33676 ++ INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install
33677 + rm -f "$tmpdir/lib/modules/$version/build"
33678 + rm -f "$tmpdir/lib/modules/$version/source"
33679 + if [ "$ARCH" = "um" ] ; then
33680 +@@ -113,14 +113,14 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
33681 + # resign stripped modules
33682 + MODULE_SIG_ALL="$(grep -s '^CONFIG_MODULE_SIG_ALL=y' $KCONFIG_CONFIG || true)"
33683 + if [ -n "$MODULE_SIG_ALL" ]; then
33684 +- INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_sign
33685 ++ INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign
33686 + fi
33687 + fi
33688 + fi
33689 +
33690 + if [ "$ARCH" != "um" ]; then
33691 +- $MAKE headers_check KBUILD_SRC=
33692 +- $MAKE headers_install KBUILD_SRC= INSTALL_HDR_PATH="$libc_headers_dir/usr"
33693 ++ $MAKE -f $srctree/Makefile headers_check
33694 ++ $MAKE -f $srctree/Makefile headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr"
33695 + fi
33696 +
33697 + # Install the maintainer scripts
33698 +diff --git a/scripts/package/buildtar b/scripts/package/buildtar
33699 +index d624a07a4e77..cfd2a4a3fe42 100755
33700 +--- a/scripts/package/buildtar
33701 ++++ b/scripts/package/buildtar
33702 +@@ -57,7 +57,7 @@ dirs=boot
33703 + # Try to install modules
33704 + #
33705 + if grep -q '^CONFIG_MODULES=y' "${KCONFIG_CONFIG}"; then
33706 +- make ARCH="${ARCH}" O="${objtree}" KBUILD_SRC= INSTALL_MOD_PATH="${tmpdir}" modules_install
33707 ++ make ARCH="${ARCH}" -f ${srctree}/Makefile INSTALL_MOD_PATH="${tmpdir}" modules_install
33708 + dirs="$dirs lib"
33709 + fi
33710 +
33711 +diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
33712 +index edcad61fe3cd..f030961c5165 100755
33713 +--- a/scripts/package/mkdebian
33714 ++++ b/scripts/package/mkdebian
33715 +@@ -205,13 +205,15 @@ EOF
33716 + cat <<EOF > debian/rules
33717 + #!$(command -v $MAKE) -f
33718 +
33719 ++srctree ?= .
33720 ++
33721 + build:
33722 + \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
33723 +- KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
33724 ++ KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile
33725 +
33726 + binary-arch:
33727 + \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
33728 +- KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
33729 ++ KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg
33730 +
33731 + clean:
33732 + rm -rf debian/*tmp debian/files
33733 +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
33734 +index 379682e2a8d5..f6c2bcb2ab14 100644
33735 +--- a/security/apparmor/policy_unpack.c
33736 ++++ b/security/apparmor/policy_unpack.c
33737 +@@ -579,6 +579,7 @@ fail:
33738 + kfree(profile->secmark[i].label);
33739 + kfree(profile->secmark);
33740 + profile->secmark_count = 0;
33741 ++ profile->secmark = NULL;
33742 + }
33743 +
33744 + e->pos = pos;
33745 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
33746 +index f0e36c3492ba..07b11b5aaf1f 100644
33747 +--- a/security/selinux/hooks.c
33748 ++++ b/security/selinux/hooks.c
33749 +@@ -959,8 +959,11 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
33750 + BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
33751 +
33752 + /* if fs is reusing a sb, make sure that the contexts match */
33753 +- if (newsbsec->flags & SE_SBINITIALIZED)
33754 ++ if (newsbsec->flags & SE_SBINITIALIZED) {
33755 ++ if ((kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context)
33756 ++ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
33757 + return selinux_cmp_sb_context(oldsb, newsb);
33758 ++ }
33759 +
33760 + mutex_lock(&newsbsec->lock);
33761 +
33762 +@@ -3241,12 +3244,16 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
33763 + const void *value, size_t size, int flags)
33764 + {
33765 + struct inode_security_struct *isec = inode_security_novalidate(inode);
33766 ++ struct superblock_security_struct *sbsec = inode->i_sb->s_security;
33767 + u32 newsid;
33768 + int rc;
33769 +
33770 + if (strcmp(name, XATTR_SELINUX_SUFFIX))
33771 + return -EOPNOTSUPP;
33772 +
33773 ++ if (!(sbsec->flags & SBLABEL_MNT))
33774 ++ return -EOPNOTSUPP;
33775 ++
33776 + if (!value || !size)
33777 + return -EACCES;
33778 +
33779 +@@ -5120,6 +5127,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
33780 + return -EINVAL;
33781 + }
33782 +
33783 ++ if (walk_size + len > addrlen)
33784 ++ return -EINVAL;
33785 ++
33786 + err = -EINVAL;
33787 + switch (optname) {
33788 + /* Bind checks */
33789 +@@ -6392,7 +6402,10 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
33790 + */
33791 + static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
33792 + {
33793 +- return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
33794 ++ int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
33795 ++ ctx, ctxlen, 0);
33796 ++ /* Do not return error when suppressing label (SBLABEL_MNT not set). */
33797 ++ return rc == -EOPNOTSUPP ? 0 : rc;
33798 + }
33799 +
33800 + /*
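
The notifysecctx change above turns -EOPNOTSUPP from selinux_inode_setsecurity() into success, so filesystems mounted without SBLABEL_MNT no longer propagate an error for a label they cannot store. A compact sketch of this error-suppression pattern at a layer boundary — set_label() and notify_label() are hypothetical names, only the pattern matches the hunk:

#include <errno.h>
#include <stdio.h>

/* The lower layer reports "labels unsupported" with -EOPNOTSUPP. */
static int set_label(int fs_supports_labels)
{
	return fs_supports_labels ? 0 : -EOPNOTSUPP;
}

/* The notification hook treats that one error, and only that one,
 * as success; any other failure still propagates. */
static int notify_label(int fs_supports_labels)
{
	int rc = set_label(fs_supports_labels);

	return rc == -EOPNOTSUPP ? 0 : rc;
}

int main(void)
{
	printf("labelled fs: %d, unlabelled fs: %d\n",
	       notify_label(1), notify_label(0));
	return 0;
}
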
33801 +diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
33802 +index 9f0c480489ef..9cbf6927abe9 100644
33803 +--- a/sound/ac97/bus.c
33804 ++++ b/sound/ac97/bus.c
33805 +@@ -84,7 +84,7 @@ ac97_of_get_child_device(struct ac97_controller *ac97_ctrl, int idx,
33806 + if ((idx != of_property_read_u32(node, "reg", &reg)) ||
33807 + !of_device_is_compatible(node, compat))
33808 + continue;
33809 +- return of_node_get(node);
33810 ++ return node;
33811 + }
33812 +
33813 + return NULL;
33814 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
33815 +index 467039b342b5..41abb8bd466a 100644
33816 +--- a/sound/core/oss/pcm_oss.c
33817 ++++ b/sound/core/oss/pcm_oss.c
33818 +@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
33819 + oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
33820 + params_channels(params) / 8;
33821 +
33822 ++ err = snd_pcm_oss_period_size(substream, params, sparams);
33823 ++ if (err < 0)
33824 ++ goto failure;
33825 ++
33826 ++ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
33827 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
33828 ++ if (err < 0)
33829 ++ goto failure;
33830 ++
33831 ++ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
33832 ++ runtime->oss.periods, NULL);
33833 ++ if (err < 0)
33834 ++ goto failure;
33835 ++
33836 ++ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
33837 ++
33838 ++ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
33839 ++ if (err < 0) {
33840 ++ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
33841 ++ goto failure;
33842 ++ }
33843 ++
33844 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
33845 + snd_pcm_oss_plugin_clear(substream);
33846 + if (!direct) {
33847 +@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
33848 + }
33849 + #endif
33850 +
33851 +- err = snd_pcm_oss_period_size(substream, params, sparams);
33852 +- if (err < 0)
33853 +- goto failure;
33854 +-
33855 +- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
33856 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
33857 +- if (err < 0)
33858 +- goto failure;
33859 +-
33860 +- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
33861 +- runtime->oss.periods, NULL);
33862 +- if (err < 0)
33863 +- goto failure;
33864 +-
33865 +- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
33866 +-
33867 +- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
33868 +- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
33869 +- goto failure;
33870 +- }
33871 +-
33872 + if (runtime->oss.trigger) {
33873 + sw_params->start_threshold = 1;
33874 + } else {
33875 +diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
33876 +index 818dff1de545..e08c6c6ca029 100644
33877 +--- a/sound/core/pcm_native.c
33878 ++++ b/sound/core/pcm_native.c
33879 +@@ -1426,8 +1426,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
33880 + static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
33881 + {
33882 + struct snd_pcm_runtime *runtime = substream->runtime;
33883 +- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
33884 ++ switch (runtime->status->state) {
33885 ++ case SNDRV_PCM_STATE_SUSPENDED:
33886 ++ return -EBUSY;
33887 ++ /* unresumable PCM state; return -EBUSY for skipping suspend */
33888 ++ case SNDRV_PCM_STATE_OPEN:
33889 ++ case SNDRV_PCM_STATE_SETUP:
33890 ++ case SNDRV_PCM_STATE_DISCONNECTED:
33891 + return -EBUSY;
33892 ++ }
33893 + runtime->trigger_master = substream;
33894 + return 0;
33895 + }
33896 +@@ -1506,6 +1513,14 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
33897 + /* FIXME: the open/close code should lock this as well */
33898 + if (substream->runtime == NULL)
33899 + continue;
33900 ++
33901 ++ /*
33902 ++ * Skip BE dai link PCMs that are internal and may
33903 ++ * not have their substream ops set.
33904 ++ */
33905 ++ if (!substream->ops)
33906 ++ continue;
33907 ++
33908 + err = snd_pcm_suspend(substream);
33909 + if (err < 0 && err != -EBUSY)
33910 + return err;
33911 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
33912 +index ee601d7f0926..c0690d1ecd55 100644
33913 +--- a/sound/core/rawmidi.c
33914 ++++ b/sound/core/rawmidi.c
33915 +@@ -30,6 +30,7 @@
33916 + #include <linux/module.h>
33917 + #include <linux/delay.h>
33918 + #include <linux/mm.h>
33919 ++#include <linux/nospec.h>
33920 + #include <sound/rawmidi.h>
33921 + #include <sound/info.h>
33922 + #include <sound/control.h>
33923 +@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
33924 + return -ENXIO;
33925 + if (info->stream < 0 || info->stream > 1)
33926 + return -EINVAL;
33927 ++ info->stream = array_index_nospec(info->stream, 2);
33928 + pstr = &rmidi->streams[info->stream];
33929 + if (pstr->substream_count == 0)
33930 + return -ENOENT;
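
The rawmidi hunk adds array_index_nospec() after the bounds check so a mispredicted branch cannot speculatively index past streams[] (Spectre v1). A userspace sketch of the pattern, where index_nospec() is a simplified stand-in for the kernel helper:

#include <stddef.h>
#include <stdio.h>

/* Yields index when index < size and 0 otherwise, using a branchless
 * mask like the kernel helper so there is no branch to mispredict.
 * Relies on arithmetic right shift of a negative long (gcc/clang). */
static size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)(~(long)(index | (size - 1 - index)) >>
			       (sizeof(long) * 8 - 1));
	return index & mask;
}

int main(void)
{
	int streams[2] = { 10, 20 };
	int requested = 1;                  /* e.g. copied from userspace */

	if (requested < 0 || requested > 1) /* architectural bounds check */
		return 1;
	requested = (int)index_nospec(requested, 2); /* speculation clamp */
	printf("stream value: %d\n", streams[requested]);
	return 0;
}
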
33931 +diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
33932 +index 278ebb993122..c93945917235 100644
33933 +--- a/sound/core/seq/oss/seq_oss_synth.c
33934 ++++ b/sound/core/seq/oss/seq_oss_synth.c
33935 +@@ -617,13 +617,14 @@ int
33936 + snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
33937 + {
33938 + struct seq_oss_synth *rec;
33939 ++ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
33940 +
33941 +- if (dev < 0 || dev >= dp->max_synthdev)
33942 ++ if (!info)
33943 + return -ENXIO;
33944 +
33945 +- if (dp->synths[dev].is_midi) {
33946 ++ if (info->is_midi) {
33947 + struct midi_info minf;
33948 +- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
33949 ++ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
33950 + inf->synth_type = SYNTH_TYPE_MIDI;
33951 + inf->synth_subtype = 0;
33952 + inf->nr_voices = 16;
33953 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
33954 +index 7d4640d1fe9f..38e7deab6384 100644
33955 +--- a/sound/core/seq/seq_clientmgr.c
33956 ++++ b/sound/core/seq/seq_clientmgr.c
33957 +@@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
33958 +
33959 + /* fill the info fields */
33960 + if (client_info->name[0])
33961 +- strlcpy(client->name, client_info->name, sizeof(client->name));
33962 ++ strscpy(client->name, client_info->name, sizeof(client->name));
33963 +
33964 + client->filter = client_info->filter;
33965 + client->event_lost = client_info->event_lost;
33966 +@@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
33967 + /* set queue name */
33968 + if (!info->name[0])
33969 + snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
33970 +- strlcpy(q->name, info->name, sizeof(q->name));
33971 ++ strscpy(q->name, info->name, sizeof(q->name));
33972 + snd_use_lock_free(&q->use_lock);
33973 +
33974 + return 0;
33975 +@@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
33976 + queuefree(q);
33977 + return -EPERM;
33978 + }
33979 +- strlcpy(q->name, info->name, sizeof(q->name));
33980 ++ strscpy(q->name, info->name, sizeof(q->name));
33981 + queuefree(q);
33982 +
33983 + return 0;
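
These seq hunks replace strlcpy() with strscpy(), which always NUL-terminates (for a non-zero size), never reads the source past what it copies, and reports truncation through its return value. A hedged userspace reimplementation of that contract — my_strscpy() is illustrative, not the kernel's code, and returns -1 where the kernel returns -E2BIG:

#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (size == 0)
		return -1;
	for (i = 0; i < size - 1 && src[i] != '\0'; i++)
		dst[i] = src[i];
	dst[i] = '\0';                    /* always terminated */
	return src[i] == '\0' ? (long)i : -1;
}

int main(void)
{
	char name[8];

	if (my_strscpy(name, "Queue-123456", sizeof(name)) < 0)
		printf("truncated to \"%s\"\n", name); /* "Queue-1" */
	return 0;
}
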
33984 +diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
33985 +index d91874275d2c..5b46e8dcc2dd 100644
33986 +--- a/sound/firewire/bebob/bebob.c
33987 ++++ b/sound/firewire/bebob/bebob.c
33988 +@@ -448,7 +448,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
33989 + /* Focusrite, SaffirePro 26 I/O */
33990 + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
33991 + /* Focusrite, SaffirePro 10 I/O */
33992 +- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
33993 ++ {
33994 ++ // The combination of vendor_id and model_id is the same as
33995 ++ // the one of Liquid Saffire 56.
33996 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
33997 ++ IEEE1394_MATCH_MODEL_ID |
33998 ++ IEEE1394_MATCH_SPECIFIER_ID |
33999 ++ IEEE1394_MATCH_VERSION,
34000 ++ .vendor_id = VEN_FOCUSRITE,
34001 ++ .model_id = 0x000006,
34002 ++ .specifier_id = 0x00a02d,
34003 ++ .version = 0x010001,
34004 ++ .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
34005 ++ },
34006 + /* Focusrite, Saffire(no label and LE) */
34007 + SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
34008 + &saffire_spec),
34009 +diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
34010 +index ed50b222d36e..eee184b05d93 100644
34011 +--- a/sound/firewire/dice/dice.c
34012 ++++ b/sound/firewire/dice/dice.c
34013 +@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL v2");
34014 + #define OUI_ALESIS 0x000595
34015 + #define OUI_MAUDIO 0x000d6c
34016 + #define OUI_MYTEK 0x001ee8
34017 ++#define OUI_SSL 0x0050c2 // Actually an ID reserved by the IEEE.
34018 +
34019 + #define DICE_CATEGORY_ID 0x04
34020 + #define WEISS_CATEGORY_ID 0x00
34021 +@@ -196,7 +197,7 @@ static int dice_probe(struct fw_unit *unit,
34022 + struct snd_dice *dice;
34023 + int err;
34024 +
34025 +- if (!entry->driver_data) {
34026 ++ if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
34027 + err = check_dice_category(unit);
34028 + if (err < 0)
34029 + return -ENODEV;
34030 +@@ -361,6 +362,15 @@ static const struct ieee1394_device_id dice_id_table[] = {
34031 + .model_id = 0x000002,
34032 + .driver_data = (kernel_ulong_t)snd_dice_detect_mytek_formats,
34033 + },
34034 ++ // Solid State Logic, Duende Classic and Mini.
34035 ++ // NOTE: each field of the GUID in the config ROM is not compliant
34036 ++ // with the standard DICE scheme.
34037 ++ {
34038 ++ .match_flags = IEEE1394_MATCH_VENDOR_ID |
34039 ++ IEEE1394_MATCH_MODEL_ID,
34040 ++ .vendor_id = OUI_SSL,
34041 ++ .model_id = 0x000070,
34042 ++ },
34043 + {
34044 + .match_flags = IEEE1394_MATCH_VERSION,
34045 + .version = DICE_INTERFACE,
34046 +diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
34047 +index f0555a24d90e..6c9b743ea74b 100644
34048 +--- a/sound/firewire/motu/amdtp-motu.c
34049 ++++ b/sound/firewire/motu/amdtp-motu.c
34050 +@@ -136,7 +136,9 @@ static void read_pcm_s32(struct amdtp_stream *s,
34051 + byte = (u8 *)buffer + p->pcm_byte_offset;
34052 +
34053 + for (c = 0; c < channels; ++c) {
34054 +- *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
34055 ++ *dst = (byte[0] << 24) |
34056 ++ (byte[1] << 16) |
34057 ++ (byte[2] << 8);
34058 + byte += 3;
34059 + dst++;
34060 + }
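
The read_pcm_s32() fix shifts the third byte left by 8 so a 24-bit big-endian sample fills bits 31..8 of the 32-bit PCM frame, instead of ORing it into the low byte. A small standalone sketch of the corrected conversion, using unsigned math to avoid signed-overflow pitfalls:

#include <stdint.h>
#include <stdio.h>

/* Assemble three big-endian bytes into the upper 24 bits of an s32. */
static int32_t s24be_to_s32(const uint8_t *byte)
{
	uint32_t v = ((uint32_t)byte[0] << 24) |
		     ((uint32_t)byte[1] << 16) |
		     ((uint32_t)byte[2] << 8);
	return (int32_t)v;
}

int main(void)
{
	const uint8_t sample[3] = { 0x80, 0x00, 0x01 };

	printf("0x%08x\n", (unsigned)s24be_to_s32(sample)); /* 0x80000100 */
	return 0;
}
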
34061 +diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
34062 +index 220e61926ea4..513291ba0ab0 100644
34063 +--- a/sound/firewire/motu/motu.c
34064 ++++ b/sound/firewire/motu/motu.c
34065 +@@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
34066 + fw_csr_iterator_init(&it, motu->unit->directory);
34067 + while (fw_csr_iterator_next(&it, &key, &val)) {
34068 + switch (key) {
34069 +- case CSR_VERSION:
34070 ++ case CSR_MODEL:
34071 + version = val;
34072 + break;
34073 + }
34074 +@@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
34075 + strcpy(motu->card->shortname, motu->spec->name);
34076 + strcpy(motu->card->mixername, motu->spec->name);
34077 + snprintf(motu->card->longname, sizeof(motu->card->longname),
34078 +- "MOTU %s (version:%d), GUID %08x%08x at %s, S%d",
34079 ++ "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
34080 + motu->spec->name, version,
34081 + fw_dev->config_rom[3], fw_dev->config_rom[4],
34082 + dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
34083 +@@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
34084 + #define SND_MOTU_DEV_ENTRY(model, data) \
34085 + { \
34086 + .match_flags = IEEE1394_MATCH_VENDOR_ID | \
34087 +- IEEE1394_MATCH_MODEL_ID | \
34088 +- IEEE1394_MATCH_SPECIFIER_ID, \
34089 ++ IEEE1394_MATCH_SPECIFIER_ID | \
34090 ++ IEEE1394_MATCH_VERSION, \
34091 + .vendor_id = OUI_MOTU, \
34092 +- .model_id = model, \
34093 + .specifier_id = OUI_MOTU, \
34094 ++ .version = model, \
34095 + .driver_data = (kernel_ulong_t)data, \
34096 + }
34097 +
34098 + static const struct ieee1394_device_id motu_id_table[] = {
34099 +- SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2),
34100 +- SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler),
34101 +- SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3), /* FireWire only. */
34102 +- SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3), /* Hybrid. */
34103 +- SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express),
34104 ++ SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
34105 ++ SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
34106 ++ SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
34107 ++ SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
34108 ++ SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
34109 + { }
34110 + };
34111 + MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
34112 +diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
34113 +index 617ff1aa818f..27eb0270a711 100644
34114 +--- a/sound/hda/hdac_i915.c
34115 ++++ b/sound/hda/hdac_i915.c
34116 +@@ -144,9 +144,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
34117 + return -ENODEV;
34118 + if (!acomp->ops) {
34119 + request_module("i915");
34120 +- /* 10s timeout */
34121 ++ /* 60s timeout */
34122 + wait_for_completion_timeout(&bind_complete,
34123 +- msecs_to_jiffies(10 * 1000));
34124 ++ msecs_to_jiffies(60 * 1000));
34125 + }
34126 + if (!acomp->ops) {
34127 + dev_info(bus->dev, "couldn't bind with audio component\n");
34128 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
34129 +index 9f8d59e7e89f..b238e903b9d7 100644
34130 +--- a/sound/pci/hda/hda_codec.c
34131 ++++ b/sound/pci/hda/hda_codec.c
34132 +@@ -2917,6 +2917,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
34133 + hda_jackpoll_work(&codec->jackpoll_work.work);
34134 + else
34135 + snd_hda_jack_report_sync(codec);
34136 ++ codec->core.dev.power.power_state = PMSG_ON;
34137 + snd_hdac_leave_pm(&codec->core);
34138 + }
34139 +
34140 +@@ -2950,10 +2951,62 @@ static int hda_codec_runtime_resume(struct device *dev)
34141 + }
34142 + #endif /* CONFIG_PM */
34143 +
34144 ++#ifdef CONFIG_PM_SLEEP
34145 ++static int hda_codec_force_resume(struct device *dev)
34146 ++{
34147 ++ int ret;
34148 ++
34149 ++ /* The get/put pair below forces the runtime resume even if the
34150 ++ * device hasn't been used at suspend time. This trick is needed to
34151 ++ * pick up any jack state change that happened during sleep.
34152 ++ */
34153 ++ pm_runtime_get_noresume(dev);
34154 ++ ret = pm_runtime_force_resume(dev);
34155 ++ pm_runtime_put(dev);
34156 ++ return ret;
34157 ++}
34158 ++
34159 ++static int hda_codec_pm_suspend(struct device *dev)
34160 ++{
34161 ++ dev->power.power_state = PMSG_SUSPEND;
34162 ++ return pm_runtime_force_suspend(dev);
34163 ++}
34164 ++
34165 ++static int hda_codec_pm_resume(struct device *dev)
34166 ++{
34167 ++ dev->power.power_state = PMSG_RESUME;
34168 ++ return hda_codec_force_resume(dev);
34169 ++}
34170 ++
34171 ++static int hda_codec_pm_freeze(struct device *dev)
34172 ++{
34173 ++ dev->power.power_state = PMSG_FREEZE;
34174 ++ return pm_runtime_force_suspend(dev);
34175 ++}
34176 ++
34177 ++static int hda_codec_pm_thaw(struct device *dev)
34178 ++{
34179 ++ dev->power.power_state = PMSG_THAW;
34180 ++ return hda_codec_force_resume(dev);
34181 ++}
34182 ++
34183 ++static int hda_codec_pm_restore(struct device *dev)
34184 ++{
34185 ++ dev->power.power_state = PMSG_RESTORE;
34186 ++ return hda_codec_force_resume(dev);
34187 ++}
34188 ++#endif /* CONFIG_PM_SLEEP */
34189 ++
34190 + /* referred in hda_bind.c */
34191 + const struct dev_pm_ops hda_codec_driver_pm = {
34192 +- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
34193 +- pm_runtime_force_resume)
34194 ++#ifdef CONFIG_PM_SLEEP
34195 ++ .suspend = hda_codec_pm_suspend,
34196 ++ .resume = hda_codec_pm_resume,
34197 ++ .freeze = hda_codec_pm_freeze,
34198 ++ .thaw = hda_codec_pm_thaw,
34199 ++ .poweroff = hda_codec_pm_suspend,
34200 ++ .restore = hda_codec_pm_restore,
34201 ++#endif /* CONFIG_PM_SLEEP */
34202 + SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
34203 + NULL)
34204 + };
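
The new callbacks give each system-sleep phase its own entry point that records the matching PMSG_* state before delegating to the shared force-suspend/force-resume helpers, so later code can tell the phases apart (e.g. suspend vs. freeze). A stripped-down model of that dispatch — struct and names are illustrative, not the kernel's dev_pm_ops:

#include <stdio.h>

enum pmsg { PMSG_ON, PMSG_SUSPEND, PMSG_FREEZE, PMSG_THAW };

struct dev { enum pmsg power_state; };

static int force_suspend(struct dev *d)
{
	printf("force suspend (phase %d)\n", (int)d->power_state);
	return 0;
}

static int force_resume(struct dev *d)
{
	printf("force resume (phase %d)\n", (int)d->power_state);
	return 0;
}

/* Each phase records its state first, then runs the shared helper. */
static int pm_suspend_cb(struct dev *d)
{
	d->power_state = PMSG_SUSPEND;
	return force_suspend(d);
}

static int pm_thaw_cb(struct dev *d)
{
	d->power_state = PMSG_THAW;
	return force_resume(d);
}

int main(void)
{
	struct dev d = { PMSG_ON };

	pm_suspend_cb(&d);
	pm_thaw_cb(&d);
	return 0;
}
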
34205 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
34206 +index e5c49003e75f..2ec91085fa3e 100644
34207 +--- a/sound/pci/hda/hda_intel.c
34208 ++++ b/sound/pci/hda/hda_intel.c
34209 +@@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
34210 + display_power(chip, false);
34211 + }
34212 +
34213 +-static void __azx_runtime_resume(struct azx *chip)
34214 ++static void __azx_runtime_resume(struct azx *chip, bool from_rt)
34215 + {
34216 + struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
34217 + struct hdac_bus *bus = azx_bus(chip);
34218 +@@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
34219 + azx_init_pci(chip);
34220 + hda_intel_init_chip(chip, true);
34221 +
34222 +- if (status) {
34223 ++ if (status && from_rt) {
34224 + list_for_each_codec(codec, &chip->bus)
34225 + if (status & (1 << codec->addr))
34226 + schedule_delayed_work(&codec->jackpoll_work,
34227 +@@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
34228 + chip->msi = 0;
34229 + if (azx_acquire_irq(chip, 1) < 0)
34230 + return -EIO;
34231 +- __azx_runtime_resume(chip);
34232 ++ __azx_runtime_resume(chip, false);
34233 + snd_power_change_state(card, SNDRV_CTL_POWER_D0);
34234 +
34235 + trace_azx_resume(chip);
34236 +@@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
34237 + chip = card->private_data;
34238 + if (!azx_has_pm_runtime(chip))
34239 + return 0;
34240 +- __azx_runtime_resume(chip);
34241 ++ __azx_runtime_resume(chip, true);
34242 +
34243 + /* disable controller Wake Up event*/
34244 + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
34245 +@@ -2142,12 +2142,18 @@ static struct snd_pci_quirk power_save_blacklist[] = {
34246 + SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
34247 + /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
34248 + SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
34249 ++ /* https://bugs.launchpad.net/bugs/1821663 */
34250 ++ SND_PCI_QUIRK(0x8086, 0x2064, "Intel SDP 8086:2064", 0),
34251 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
34252 + SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
34253 +- /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
34254 +- SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
34255 + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
34256 + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
34257 ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
34258 ++ SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
34259 ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
34260 ++ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
34261 ++ /* https://bugs.launchpad.net/bugs/1821663 */
34262 ++ SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0),
34263 + {}
34264 + };
34265 + #endif /* CONFIG_PM */
34266 +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
34267 +index a4ee7656d9ee..fb65ad31e86c 100644
34268 +--- a/sound/pci/hda/patch_conexant.c
34269 ++++ b/sound/pci/hda/patch_conexant.c
34270 +@@ -936,6 +936,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
34271 + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34272 + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34273 + SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34274 ++ SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34275 ++ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34276 ++ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
34277 + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
34278 + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
34279 + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
34280 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
34281 +index 1ffa36e987b4..84fae0df59e9 100644
34282 +--- a/sound/pci/hda/patch_realtek.c
34283 ++++ b/sound/pci/hda/patch_realtek.c
34284 +@@ -118,6 +118,7 @@ struct alc_spec {
34285 + unsigned int has_alc5505_dsp:1;
34286 + unsigned int no_depop_delay:1;
34287 + unsigned int done_hp_init:1;
34288 ++ unsigned int no_shutup_pins:1;
34289 +
34290 + /* for PLL fix */
34291 + hda_nid_t pll_nid;
34292 +@@ -476,6 +477,14 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
34293 + set_eapd(codec, *p, on);
34294 + }
34295 +
34296 ++static void alc_shutup_pins(struct hda_codec *codec)
34297 ++{
34298 ++ struct alc_spec *spec = codec->spec;
34299 ++
34300 ++ if (!spec->no_shutup_pins)
34301 ++ snd_hda_shutup_pins(codec);
34302 ++}
34303 ++
34304 + /* generic shutup callback;
34305 + * just turning off EAPD and a little pause for avoiding pop-noise
34306 + */
34307 +@@ -486,7 +495,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
34308 + alc_auto_setup_eapd(codec, false);
34309 + if (!spec->no_depop_delay)
34310 + msleep(200);
34311 +- snd_hda_shutup_pins(codec);
34312 ++ alc_shutup_pins(codec);
34313 + }
34314 +
34315 + /* generic EAPD initialization */
34316 +@@ -814,7 +823,7 @@ static inline void alc_shutup(struct hda_codec *codec)
34317 + if (spec && spec->shutup)
34318 + spec->shutup(codec);
34319 + else
34320 +- snd_hda_shutup_pins(codec);
34321 ++ alc_shutup_pins(codec);
34322 + }
34323 +
34324 + static void alc_reboot_notify(struct hda_codec *codec)
34325 +@@ -1855,8 +1864,8 @@ enum {
34326 + ALC887_FIXUP_BASS_CHMAP,
34327 + ALC1220_FIXUP_GB_DUAL_CODECS,
34328 + ALC1220_FIXUP_CLEVO_P950,
34329 +- ALC1220_FIXUP_SYSTEM76_ORYP5,
34330 +- ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
34331 ++ ALC1220_FIXUP_CLEVO_PB51ED,
34332 ++ ALC1220_FIXUP_CLEVO_PB51ED_PINS,
34333 + };
34334 +
34335 + static void alc889_fixup_coef(struct hda_codec *codec,
34336 +@@ -2061,7 +2070,7 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
34337 + static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
34338 + const struct hda_fixup *fix, int action);
34339 +
34340 +-static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
34341 ++static void alc1220_fixup_clevo_pb51ed(struct hda_codec *codec,
34342 + const struct hda_fixup *fix,
34343 + int action)
34344 + {
34345 +@@ -2313,18 +2322,18 @@ static const struct hda_fixup alc882_fixups[] = {
34346 + .type = HDA_FIXUP_FUNC,
34347 + .v.func = alc1220_fixup_clevo_p950,
34348 + },
34349 +- [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
34350 ++ [ALC1220_FIXUP_CLEVO_PB51ED] = {
34351 + .type = HDA_FIXUP_FUNC,
34352 +- .v.func = alc1220_fixup_system76_oryp5,
34353 ++ .v.func = alc1220_fixup_clevo_pb51ed,
34354 + },
34355 +- [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
34356 ++ [ALC1220_FIXUP_CLEVO_PB51ED_PINS] = {
34357 + .type = HDA_FIXUP_PINS,
34358 + .v.pins = (const struct hda_pintbl[]) {
34359 + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
34360 + {}
34361 + },
34362 + .chained = true,
34363 +- .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
34364 ++ .chain_id = ALC1220_FIXUP_CLEVO_PB51ED,
34365 + },
34366 + };
34367 +
34368 +@@ -2402,8 +2411,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
34369 + SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
34370 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
34371 + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
34372 +- SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
34373 +- SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
34374 ++ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
34375 ++ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
34376 ++ SND_PCI_QUIRK(0x1558, 0x65d1, "Tuxedo Book XC1509", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
34377 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
34378 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
34379 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
34380 +@@ -2950,7 +2960,7 @@ static void alc269_shutup(struct hda_codec *codec)
34381 + (alc_get_coef0(codec) & 0x00ff) == 0x018) {
34382 + msleep(150);
34383 + }
34384 +- snd_hda_shutup_pins(codec);
34385 ++ alc_shutup_pins(codec);
34386 + }
34387 +
34388 + static struct coef_fw alc282_coefs[] = {
34389 +@@ -3053,14 +3063,15 @@ static void alc282_shutup(struct hda_codec *codec)
34390 + if (hp_pin_sense)
34391 + msleep(85);
34392 +
34393 +- snd_hda_codec_write(codec, hp_pin, 0,
34394 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34395 ++ if (!spec->no_shutup_pins)
34396 ++ snd_hda_codec_write(codec, hp_pin, 0,
34397 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34398 +
34399 + if (hp_pin_sense)
34400 + msleep(100);
34401 +
34402 + alc_auto_setup_eapd(codec, false);
34403 +- snd_hda_shutup_pins(codec);
34404 ++ alc_shutup_pins(codec);
34405 + alc_write_coef_idx(codec, 0x78, coef78);
34406 + }
34407 +
34408 +@@ -3166,15 +3177,16 @@ static void alc283_shutup(struct hda_codec *codec)
34409 + if (hp_pin_sense)
34410 + msleep(100);
34411 +
34412 +- snd_hda_codec_write(codec, hp_pin, 0,
34413 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34414 ++ if (!spec->no_shutup_pins)
34415 ++ snd_hda_codec_write(codec, hp_pin, 0,
34416 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34417 +
34418 + alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
34419 +
34420 + if (hp_pin_sense)
34421 + msleep(100);
34422 + alc_auto_setup_eapd(codec, false);
34423 +- snd_hda_shutup_pins(codec);
34424 ++ alc_shutup_pins(codec);
34425 + alc_write_coef_idx(codec, 0x43, 0x9614);
34426 + }
34427 +
34428 +@@ -3240,14 +3252,15 @@ static void alc256_shutup(struct hda_codec *codec)
34429 + /* NOTE: call this before clearing the pin, otherwise codec stalls */
34430 + alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
34431 +
34432 +- snd_hda_codec_write(codec, hp_pin, 0,
34433 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34434 ++ if (!spec->no_shutup_pins)
34435 ++ snd_hda_codec_write(codec, hp_pin, 0,
34436 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34437 +
34438 + if (hp_pin_sense)
34439 + msleep(100);
34440 +
34441 + alc_auto_setup_eapd(codec, false);
34442 +- snd_hda_shutup_pins(codec);
34443 ++ alc_shutup_pins(codec);
34444 + }
34445 +
34446 + static void alc225_init(struct hda_codec *codec)
34447 +@@ -3334,7 +3347,7 @@ static void alc225_shutup(struct hda_codec *codec)
34448 + msleep(100);
34449 +
34450 + alc_auto_setup_eapd(codec, false);
34451 +- snd_hda_shutup_pins(codec);
34452 ++ alc_shutup_pins(codec);
34453 + }
34454 +
34455 + static void alc_default_init(struct hda_codec *codec)
34456 +@@ -3388,14 +3401,15 @@ static void alc_default_shutup(struct hda_codec *codec)
34457 + if (hp_pin_sense)
34458 + msleep(85);
34459 +
34460 +- snd_hda_codec_write(codec, hp_pin, 0,
34461 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34462 ++ if (!spec->no_shutup_pins)
34463 ++ snd_hda_codec_write(codec, hp_pin, 0,
34464 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34465 +
34466 + if (hp_pin_sense)
34467 + msleep(100);
34468 +
34469 + alc_auto_setup_eapd(codec, false);
34470 +- snd_hda_shutup_pins(codec);
34471 ++ alc_shutup_pins(codec);
34472 + }
34473 +
34474 + static void alc294_hp_init(struct hda_codec *codec)
34475 +@@ -3412,8 +3426,9 @@ static void alc294_hp_init(struct hda_codec *codec)
34476 +
34477 + msleep(100);
34478 +
34479 +- snd_hda_codec_write(codec, hp_pin, 0,
34480 +- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34481 ++ if (!spec->no_shutup_pins)
34482 ++ snd_hda_codec_write(codec, hp_pin, 0,
34483 ++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
34484 +
34485 + alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
34486 + alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
34487 +@@ -5007,16 +5022,12 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
34488 + }
34489 + }
34490 +
34491 +-static void alc_no_shutup(struct hda_codec *codec)
34492 +-{
34493 +-}
34494 +-
34495 + static void alc_fixup_no_shutup(struct hda_codec *codec,
34496 + const struct hda_fixup *fix, int action)
34497 + {
34498 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
34499 + struct alc_spec *spec = codec->spec;
34500 +- spec->shutup = alc_no_shutup;
34501 ++ spec->no_shutup_pins = 1;
34502 + }
34503 + }
34504 +
34505 +@@ -5479,7 +5490,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
34506 + jack->jack->button_state = report;
34507 + }
34508 +
34509 +-static void alc_fixup_headset_jack(struct hda_codec *codec,
34510 ++static void alc295_fixup_chromebook(struct hda_codec *codec,
34511 + const struct hda_fixup *fix, int action)
34512 + {
34513 +
34514 +@@ -5489,6 +5500,16 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
34515 + alc_headset_btn_callback);
34516 + snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
34517 + SND_JACK_HEADSET, alc_headset_btn_keymap);
34518 ++ switch (codec->core.vendor_id) {
34519 ++ case 0x10ec0295:
34520 ++ alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
34521 ++ alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
34522 ++ break;
34523 ++ case 0x10ec0236:
34524 ++ alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
34525 ++ alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
34526 ++ break;
34527 ++ }
34528 + break;
34529 + case HDA_FIXUP_ACT_INIT:
34530 + switch (codec->core.vendor_id) {
34531 +@@ -5641,6 +5662,7 @@ enum {
34532 + ALC233_FIXUP_ASUS_MIC_NO_PRESENCE,
34533 + ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
34534 + ALC233_FIXUP_LENOVO_MULTI_CODECS,
34535 ++ ALC233_FIXUP_ACER_HEADSET_MIC,
34536 + ALC294_FIXUP_LENOVO_MIC_LOCATION,
34537 + ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
34538 + ALC700_FIXUP_INTEL_REFERENCE,
34539 +@@ -5658,9 +5680,16 @@ enum {
34540 + ALC294_FIXUP_ASUS_MIC,
34541 + ALC294_FIXUP_ASUS_HEADSET_MIC,
34542 + ALC294_FIXUP_ASUS_SPK,
34543 +- ALC225_FIXUP_HEADSET_JACK,
34544 + ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
34545 + ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
34546 ++ ALC255_FIXUP_ACER_HEADSET_MIC,
34547 ++ ALC295_FIXUP_CHROME_BOOK,
34548 ++ ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
34549 ++ ALC225_FIXUP_WYSE_AUTO_MUTE,
34550 ++ ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
34551 ++ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
34552 ++ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
34553 ++ ALC299_FIXUP_PREDATOR_SPK,
34554 + };
34555 +
34556 + static const struct hda_fixup alc269_fixups[] = {
34557 +@@ -6461,6 +6490,16 @@ static const struct hda_fixup alc269_fixups[] = {
34558 + .type = HDA_FIXUP_FUNC,
34559 + .v.func = alc233_alc662_fixup_lenovo_dual_codecs,
34560 + },
34561 ++ [ALC233_FIXUP_ACER_HEADSET_MIC] = {
34562 ++ .type = HDA_FIXUP_VERBS,
34563 ++ .v.verbs = (const struct hda_verb[]) {
34564 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
34565 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
34566 ++ { }
34567 ++ },
34568 ++ .chained = true,
34569 ++ .chain_id = ALC233_FIXUP_ASUS_MIC_NO_PRESENCE
34570 ++ },
34571 + [ALC294_FIXUP_LENOVO_MIC_LOCATION] = {
34572 + .type = HDA_FIXUP_PINS,
34573 + .v.pins = (const struct hda_pintbl[]) {
34574 +@@ -6603,9 +6642,9 @@ static const struct hda_fixup alc269_fixups[] = {
34575 + .chained = true,
34576 + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
34577 + },
34578 +- [ALC225_FIXUP_HEADSET_JACK] = {
34579 ++ [ALC295_FIXUP_CHROME_BOOK] = {
34580 + .type = HDA_FIXUP_FUNC,
34581 +- .v.func = alc_fixup_headset_jack,
34582 ++ .v.func = alc295_fixup_chromebook,
34583 + },
34584 + [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
34585 + .type = HDA_FIXUP_PINS,
34586 +@@ -6627,6 +6666,64 @@ static const struct hda_fixup alc269_fixups[] = {
34587 + .chained = true,
34588 + .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
34589 + },
34590 ++ [ALC255_FIXUP_ACER_HEADSET_MIC] = {
34591 ++ .type = HDA_FIXUP_PINS,
34592 ++ .v.pins = (const struct hda_pintbl[]) {
34593 ++ { 0x19, 0x03a11130 },
34594 ++ { 0x1a, 0x90a60140 }, /* use as internal mic */
34595 ++ { }
34596 ++ },
34597 ++ .chained = true,
34598 ++ .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
34599 ++ },
34600 ++ [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = {
34601 ++ .type = HDA_FIXUP_PINS,
34602 ++ .v.pins = (const struct hda_pintbl[]) {
34603 ++ { 0x16, 0x01011020 }, /* Rear Line out */
34604 ++ { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */
34605 ++ { }
34606 ++ },
34607 ++ .chained = true,
34608 ++ .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE
34609 ++ },
34610 ++ [ALC225_FIXUP_WYSE_AUTO_MUTE] = {
34611 ++ .type = HDA_FIXUP_FUNC,
34612 ++ .v.func = alc_fixup_auto_mute_via_amp,
34613 ++ .chained = true,
34614 ++ .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF
34615 ++ },
34616 ++ [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = {
34617 ++ .type = HDA_FIXUP_FUNC,
34618 ++ .v.func = alc_fixup_disable_mic_vref,
34619 ++ .chained = true,
34620 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
34621 ++ },
34622 ++ [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
34623 ++ .type = HDA_FIXUP_VERBS,
34624 ++ .v.verbs = (const struct hda_verb[]) {
34625 ++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
34626 ++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
34627 ++ { }
34628 ++ },
34629 ++ .chained = true,
34630 ++ .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
34631 ++ },
34632 ++ [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
34633 ++ .type = HDA_FIXUP_PINS,
34634 ++ .v.pins = (const struct hda_pintbl[]) {
34635 ++ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
34636 ++ { }
34637 ++ },
34638 ++ .chained = true,
34639 ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
34640 ++ },
34641 ++ [ALC299_FIXUP_PREDATOR_SPK] = {
34642 ++ .type = HDA_FIXUP_PINS,
34643 ++ .v.pins = (const struct hda_pintbl[]) {
34644 ++ { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
34645 ++ { }
34646 ++ }
34647 ++ },
34648 + };
34649 +
34650 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34651 +@@ -6643,9 +6740,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34652 + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
34653 + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
34654 + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
34655 +- SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
34656 +- SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
34657 +- SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
34658 ++ SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
34659 ++ SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
34660 ++ SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
34661 ++ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
34662 ++ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
34663 ++ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
34664 ++ SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
34665 ++ SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC),
34666 ++ SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
34667 + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
34668 + SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
34669 + SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
34670 +@@ -6677,6 +6780,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34671 + SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
34672 + SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
34673 + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
34674 ++ SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
34675 + SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
34676 + SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
34677 + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
34678 +@@ -6689,6 +6793,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34679 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
34680 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
34681 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
34682 ++ SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
34683 ++ SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
34684 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
34685 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
34686 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
34687 +@@ -6751,11 +6857,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34688 + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
34689 + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
34690 + SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
34691 ++ SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34692 ++ SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34693 + SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
34694 + SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
34695 + SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
34696 +- SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34697 +- SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34698 ++ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34699 ++ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
34700 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
34701 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
34702 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
34703 +@@ -6771,7 +6879,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
34704 + SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
34705 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
34706 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
34707 +- SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
34708 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
34709 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
34710 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
34711 +@@ -7036,7 +7143,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
34712 + {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
34713 + {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
34714 + {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
34715 +- {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-sense-combo"},
34716 ++ {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
34717 ++ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
34718 + {}
34719 + };
34720 + #define ALC225_STANDARD_PINS \
34721 +@@ -7257,6 +7365,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
34722 + {0x14, 0x90170110},
34723 + {0x1b, 0x90a70130},
34724 + {0x21, 0x03211020}),
34725 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
34726 ++ {0x12, 0x90a60130},
34727 ++ {0x14, 0x90170110},
34728 ++ {0x21, 0x03211020}),
34729 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
34730 ++ {0x12, 0x90a60130},
34731 ++ {0x14, 0x90170110},
34732 ++ {0x21, 0x04211020}),
34733 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
34734 ++ {0x1a, 0x90a70130},
34735 ++ {0x1b, 0x90170110},
34736 ++ {0x21, 0x03211020}),
34737 + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
34738 + {0x12, 0xb7a60130},
34739 + {0x13, 0xb8a61140},
34740 +@@ -7388,6 +7508,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
34741 + {0x14, 0x90170110},
34742 + {0x1b, 0x90a70130},
34743 + {0x21, 0x04211020}),
34744 ++ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
34745 ++ {0x12, 0x90a60130},
34746 ++ {0x17, 0x90170110},
34747 ++ {0x21, 0x03211020}),
34748 + SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
34749 + {0x12, 0x90a60130},
34750 + {0x17, 0x90170110},
34751 +diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
34752 +index 809b7e9f03ca..c5fcc632f670 100644
34753 +--- a/sound/soc/codecs/pcm186x.c
34754 ++++ b/sound/soc/codecs/pcm186x.c
34755 +@@ -42,7 +42,7 @@ struct pcm186x_priv {
34756 + bool is_master_mode;
34757 + };
34758 +
34759 +-static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 4000, 50);
34760 ++static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
34761 +
34762 + static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
34763 + SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
34764 +@@ -158,7 +158,7 @@ static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
34765 + * Put the codec into SLEEP mode when not in use, allowing the
34766 + * Energysense mechanism to operate.
34767 + */
34768 +- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
34769 ++ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 1),
34770 + };
34771 +
34772 + static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
34773 +@@ -184,8 +184,8 @@ static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
34774 + * Put the codec into SLEEP mode when not in use, allowing the
34775 + * Energysense mechanism to operate.
34776 + */
34777 +- SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
34778 +- SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
34779 ++ SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 1),
34780 ++ SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 1),
34781 + };
34782 +
34783 + static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
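
The TLV fix above puts the macro arguments in their documented order: DECLARE_TLV_DB_SCALE() takes the minimum and the step in 0.01 dB units plus a mute flag, so the PGA range is now -12 dB upward in 0.5 dB steps. A sketch of that value-to-dB mapping — tlv_db_scale_cdb() is a hypothetical helper, not the ALSA macro:

#include <stdio.h>

/* Map a control value to centi-dB the way a TLV_DB_SCALE(min, step,
 * mute) declaration does: value 0 sits at min (or mute), and each
 * step adds `step`. All units are 0.01 dB. */
static int tlv_db_scale_cdb(int value, int min_cdb, int step_cdb)
{
	return min_cdb + value * step_cdb;
}

int main(void)
{
	/* With min = -1200 and step = 50, value 24 lands at 0 dB. */
	printf("%d cdB\n", tlv_db_scale_cdb(24, -1200, 50));
	return 0;
}
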
34784 +diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
34785 +index 81f2fe2c6d23..60f87a0d99f4 100644
34786 +--- a/sound/soc/fsl/fsl-asoc-card.c
34787 ++++ b/sound/soc/fsl/fsl-asoc-card.c
34788 +@@ -689,6 +689,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
34789 + asrc_fail:
34790 + of_node_put(asrc_np);
34791 + of_node_put(codec_np);
34792 ++ put_device(&cpu_pdev->dev);
34793 + fail:
34794 + of_node_put(cpu_np);
34795 +
34796 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
34797 +index 57b484768a58..3623aa9a6f2e 100644
34798 +--- a/sound/soc/fsl/fsl_esai.c
34799 ++++ b/sound/soc/fsl/fsl_esai.c
34800 +@@ -54,6 +54,8 @@ struct fsl_esai {
34801 + u32 fifo_depth;
34802 + u32 slot_width;
34803 + u32 slots;
34804 ++ u32 tx_mask;
34805 ++ u32 rx_mask;
34806 + u32 hck_rate[2];
34807 + u32 sck_rate[2];
34808 + bool hck_dir[2];
34809 +@@ -361,21 +363,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
34810 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
34811 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
34812 +
34813 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
34814 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
34815 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
34816 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
34817 +-
34818 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
34819 + ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
34820 +
34821 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
34822 +- ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
34823 +- regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
34824 +- ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
34825 +-
34826 + esai_priv->slot_width = slot_width;
34827 + esai_priv->slots = slots;
34828 ++ esai_priv->tx_mask = tx_mask;
34829 ++ esai_priv->rx_mask = rx_mask;
34830 +
34831 + return 0;
34832 + }
34833 +@@ -398,7 +392,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
34834 + break;
34835 + case SND_SOC_DAIFMT_RIGHT_J:
34836 + /* Data on rising edge of bclk, frame high, right aligned */
34837 +- xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
34838 ++ xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
34839 ++ xcr |= ESAI_xCR_xWA;
34840 + break;
34841 + case SND_SOC_DAIFMT_DSP_A:
34842 + /* Data on rising edge of bclk, frame high, 1clk before data */
34843 +@@ -455,12 +450,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
34844 + return -EINVAL;
34845 + }
34846 +
34847 +- mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
34848 ++ mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
34849 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
34850 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
34851 +
34852 + mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
34853 +- ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
34854 ++ ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
34855 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
34856 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
34857 +
34858 +@@ -595,6 +590,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
34859 + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
34860 + u8 i, channels = substream->runtime->channels;
34861 + u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
34862 ++ u32 mask;
34863 +
34864 + switch (cmd) {
34865 + case SNDRV_PCM_TRIGGER_START:
34866 +@@ -607,15 +603,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
34867 + for (i = 0; tx && i < channels; i++)
34868 + regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
34869 +
34870 ++ /*
34871 ++ * If TE/RE is set at the end of the enablement flow, there
34872 ++ * is a channel swap issue in the multi data line case.
34873 ++ * To work around this issue, we switch the bit
34874 ++ * enablement sequence to the sequence below:
34875 ++ * 1) clear the xSMB & xSMA: which is done in probe and
34876 ++ * stop state.
34877 ++ * 2) set TE/RE
34878 ++ * 3) set xSMB
34879 ++ * 4) set xSMA: xSMA is the last one in this flow, which
34880 ++ * will trigger esai to start.
34881 ++ */
34882 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
34883 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
34884 + tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
34885 ++ mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
34886 ++
34887 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
34888 ++ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
34889 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
34890 ++ ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
34891 ++
34892 + break;
34893 + case SNDRV_PCM_TRIGGER_SUSPEND:
34894 + case SNDRV_PCM_TRIGGER_STOP:
34895 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
34896 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
34897 + tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
34898 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
34899 ++ ESAI_xSMA_xS_MASK, 0);
34900 ++ regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
34901 ++ ESAI_xSMB_xS_MASK, 0);
34902 +
34903 + /* Disable and reset FIFO */
34904 + regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
34905 +@@ -905,6 +924,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
34906 + return ret;
34907 + }
34908 +
34909 ++ esai_priv->tx_mask = 0xFFFFFFFF;
34910 ++ esai_priv->rx_mask = 0xFFFFFFFF;
34911 ++
34912 ++ /* Clear the TSMA, TSMB, RSMA, RSMB */
34913 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
34914 ++ regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
34915 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
34916 ++ regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
34917 ++
34918 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
34919 + &fsl_esai_dai, 1);
34920 + if (ret) {
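
The trigger rework above defers the slot-mask writes until after TE/RE so that xSMA, written last, is what actually starts the ESAI, avoiding the channel swap on multi-data-line setups. A toy model of the ordering — register names and the write helper are illustrative, not the regmap API:

#include <stdint.h>
#include <stdio.h>

enum { REG_xCR, REG_xSMB, REG_xSMA, NUM_REGS };

static uint32_t regs[NUM_REGS];

static void reg_write(int reg, uint32_t val, const char *name)
{
	regs[reg] = val;
	printf("write %-4s = 0x%04x\n", name, (unsigned)val);
}

int main(void)
{
	uint32_t tx_mask = 0x0000ffff; /* saved at set_tdm_slot() time */

	reg_write(REG_xCR, 0x1, "TE");                 /* 2) enable TE  */
	reg_write(REG_xSMB, (tx_mask >> 16) & 0xffff, "xSMB"); /* 3)    */
	reg_write(REG_xSMA, tx_mask & 0xffff, "xSMA"); /* 4) starts ESAI */
	return 0;
}
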
34921 +diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
34922 +index c29200cf755a..9b9a7ec52905 100644
34923 +--- a/sound/soc/fsl/imx-sgtl5000.c
34924 ++++ b/sound/soc/fsl/imx-sgtl5000.c
34925 +@@ -108,6 +108,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
34926 + ret = -EPROBE_DEFER;
34927 + goto fail;
34928 + }
34929 ++ put_device(&ssi_pdev->dev);
34930 + codec_dev = of_find_i2c_device_by_node(codec_np);
34931 + if (!codec_dev) {
34932 + dev_err(&pdev->dev, "failed to find codec platform device\n");
34933 +diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
34934 +index b807a47515eb..336895f7fd1e 100644
34935 +--- a/sound/soc/generic/simple-card-utils.c
34936 ++++ b/sound/soc/generic/simple-card-utils.c
34937 +@@ -283,12 +283,20 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
34938 + /* use endpoint/port reg if exist */
34939 + ret = of_graph_parse_endpoint(ep, &info);
34940 + if (ret == 0) {
34941 +- if (info.id)
34942 ++ /*
34943 ++ * of_graph_parse_endpoint() counts the port/endpoint even when it
34944 ++ * has no "reg" property, so its result alone can't distinguish
34945 ++ * "no reg" from "reg = <0>".
34946 ++ * We need to check the "reg" property explicitly.
34947 ++ */
34948 ++ if (of_get_property(ep, "reg", NULL))
34949 + return info.id;
34950 +- if (info.port)
34951 ++
34952 ++ node = of_get_parent(ep);
34953 ++ of_node_put(node);
34954 ++ if (of_get_property(node, "reg", NULL))
34955 + return info.port;
34956 + }
34957 +-
34958 + node = of_graph_get_port_parent(ep);
34959 +
34960 + /*
34961 +diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
34962 +index 91a2436ce952..e9623da911d5 100644
34963 +--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
34964 ++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
34965 +@@ -711,9 +711,17 @@ static int sst_soc_probe(struct snd_soc_component *component)
34966 + return sst_dsp_init_v2_dpcm(component);
34967 + }
34968 +
34969 ++static void sst_soc_remove(struct snd_soc_component *component)
34970 ++{
34971 ++ struct sst_data *drv = dev_get_drvdata(component->dev);
34972 ++
34973 ++ drv->soc_card = NULL;
34974 ++}
34975 ++
34976 + static const struct snd_soc_component_driver sst_soc_platform_drv = {
34977 + .name = DRV_NAME,
34978 + .probe = sst_soc_probe,
34979 ++ .remove = sst_soc_remove,
34980 + .ops = &sst_platform_ops,
34981 + .compr_ops = &sst_platform_compr_ops,
34982 + .pcm_new = sst_pcm_new,
34983 +diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
34984 +index 4715527054e5..5661025e8cec 100644
34985 +--- a/sound/soc/qcom/common.c
34986 ++++ b/sound/soc/qcom/common.c
34987 +@@ -42,6 +42,9 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
34988 + link = card->dai_link;
34989 + for_each_child_of_node(dev->of_node, np) {
34990 + cpu = of_get_child_by_name(np, "cpu");
34991 ++ platform = of_get_child_by_name(np, "platform");
34992 ++ codec = of_get_child_by_name(np, "codec");
34993 ++
34994 + if (!cpu) {
34995 + dev_err(dev, "Can't find cpu DT node\n");
34996 + ret = -EINVAL;
34997 +@@ -63,8 +66,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
34998 + goto err;
34999 + }
35000 +
35001 +- platform = of_get_child_by_name(np, "platform");
35002 +- codec = of_get_child_by_name(np, "codec");
35003 + if (codec && platform) {
35004 + link->platform_of_node = of_parse_phandle(platform,
35005 + "sound-dai",
35006 +@@ -100,10 +101,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
35007 + link->dpcm_capture = 1;
35008 + link->stream_name = link->name;
35009 + link++;
35010 ++
35011 ++ of_node_put(cpu);
35012 ++ of_node_put(codec);
35013 ++ of_node_put(platform);
35014 + }
35015 +
35016 + return 0;
35017 + err:
35018 ++ of_node_put(np);
35019 + of_node_put(cpu);
35020 + of_node_put(codec);
35021 + of_node_put(platform);
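The qcom/common.c hunks follow one rule: every of_get_child_by_name() result is released at the end of each loop iteration, and the error path additionally drops the iterator node taken by for_each_child_of_node(). Fetching all three children up front is what lets the shared error path release them unconditionally. A compact sketch of that pattern, with plain counters for the references:

    #include <stdio.h>

    static int refs;

    static int get(void)  { refs++; return 1; }	/* like of_get_child_by_name() */
    static void put(void) { refs--; }		/* like of_node_put() */

    int main(void)
    {
    	for (int i = 0; i < 3; i++) {
    		int cpu = get(), codec = get(), platform = get();

    		if (!cpu || !codec || !platform)
    			goto err;
    		/* ... build one dai_link ... */
    		put(); put(); put();	/* release before the next iteration */
    	}
    	printf("leaked refs: %d\n", refs);
    	return 0;
    err:
    	put(); put(); put();	/* safe: all three were taken up front */
    	return 1;
    }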
35022 +diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
35023 +index a7f413cb704d..b14ab512c2ce 100644
35024 +--- a/sound/xen/xen_snd_front_alsa.c
35025 ++++ b/sound/xen/xen_snd_front_alsa.c
35026 +@@ -441,7 +441,7 @@ static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
35027 + {
35028 + int i;
35029 +
35030 +- stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
35031 ++ stream->buffer = alloc_pages_exact(buffer_sz, GFP_KERNEL);
35032 + if (!stream->buffer)
35033 + return -ENOMEM;
35034 +
35035 +diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
35036 +index 5467c6bf9ceb..bb9dca65eb5f 100644
35037 +--- a/tools/build/Makefile.feature
35038 ++++ b/tools/build/Makefile.feature
35039 +@@ -70,7 +70,6 @@ FEATURE_TESTS_BASIC := \
35040 + sched_getcpu \
35041 + sdt \
35042 + setns \
35043 +- libopencsd \
35044 + libaio
35045 +
35046 + # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
35047 +@@ -84,6 +83,7 @@ FEATURE_TESTS_EXTRA := \
35048 + libbabeltrace \
35049 + libbfd-liberty \
35050 + libbfd-liberty-z \
35051 ++ libopencsd \
35052 + libunwind-debug-frame \
35053 + libunwind-debug-frame-arm \
35054 + libunwind-debug-frame-aarch64 \
35055 +diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
35056 +index 20cdaa4fc112..e903b86b742f 100644
35057 +--- a/tools/build/feature/test-all.c
35058 ++++ b/tools/build/feature/test-all.c
35059 +@@ -170,14 +170,14 @@
35060 + # include "test-setns.c"
35061 + #undef main
35062 +
35063 +-#define main main_test_libopencsd
35064 +-# include "test-libopencsd.c"
35065 +-#undef main
35066 +-
35067 + #define main main_test_libaio
35068 + # include "test-libaio.c"
35069 + #undef main
35070 +
35071 ++#define main main_test_reallocarray
35072 ++# include "test-reallocarray.c"
35073 ++#undef main
35074 ++
35075 + int main(int argc, char *argv[])
35076 + {
35077 + main_test_libpython();
35078 +@@ -217,8 +217,8 @@ int main(int argc, char *argv[])
35079 + main_test_sched_getcpu();
35080 + main_test_sdt();
35081 + main_test_setns();
35082 +- main_test_libopencsd();
35083 + main_test_libaio();
35084 ++ main_test_reallocarray();
35085 +
35086 + return 0;
35087 + }
35088 +diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
35089 +index 8170de35150d..8f6743e31da7 100644
35090 +--- a/tools/build/feature/test-reallocarray.c
35091 ++++ b/tools/build/feature/test-reallocarray.c
35092 +@@ -6,3 +6,5 @@ int main(void)
35093 + {
35094 + return !!reallocarray(NULL, 1, 1);
35095 + }
35096 ++
35097 ++#undef _GNU_SOURCE
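test-all.c works by textually including each feature test after renaming its main() with the preprocessor; that is also why test-reallocarray.c now has to #undef _GNU_SOURCE, so its definition doesn't leak into the next included test. A self-contained sketch of the renaming trick (the real file uses #include of the test source where the function body sits inline here):

    #include <stdio.h>

    #define main main_test_feature_x
    /* in the real tree: #include "test-feature-x.c" */
    int main(void) { return 0; }
    #undef main

    int main(void)
    {
    	if (main_test_feature_x() == 0)
    		printf("feature-x present\n");
    	return 0;
    }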
35098 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
35099 +index 34d9c3619c96..78fd86b85087 100644
35100 +--- a/tools/lib/bpf/Makefile
35101 ++++ b/tools/lib/bpf/Makefile
35102 +@@ -162,7 +162,8 @@ endif
35103 +
35104 + TARGETS = $(CMD_TARGETS)
35105 +
35106 +-all: fixdep all_cmd
35107 ++all: fixdep
35108 ++ $(Q)$(MAKE) all_cmd
35109 +
35110 + all_cmd: $(CMD_TARGETS) check
35111 +
35112 +diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
35113 +index c8fbd0306960..11f425662b43 100755
35114 +--- a/tools/lib/lockdep/run_tests.sh
35115 ++++ b/tools/lib/lockdep/run_tests.sh
35116 +@@ -11,7 +11,7 @@ find tests -name '*.c' | sort | while read -r i; do
35117 + testname=$(basename "$i" .c)
35118 + echo -ne "$testname... "
35119 + if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
35120 +- timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
35121 ++ timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
35122 + echo "PASSED!"
35123 + else
35124 + echo "FAILED!"
35125 +@@ -24,7 +24,7 @@ find tests -name '*.c' | sort | while read -r i; do
35126 + echo -ne "(PRELOAD) $testname... "
35127 + if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
35128 + timeout 1 ./lockdep "tests/$testname" 2>&1 |
35129 +- "tests/${testname}.sh"; then
35130 ++ /bin/bash "tests/${testname}.sh"; then
35131 + echo "PASSED!"
35132 + else
35133 + echo "FAILED!"
35134 +@@ -37,7 +37,7 @@ find tests -name '*.c' | sort | while read -r i; do
35135 + echo -ne "(PRELOAD + Valgrind) $testname... "
35136 + if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
35137 + { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
35138 +- "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
35139 ++ /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
35140 + ! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
35141 + echo "PASSED!"
35142 + else
35143 +diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
35144 +index abd4fa5d3088..87494c7c619d 100644
35145 +--- a/tools/lib/traceevent/event-parse.c
35146 ++++ b/tools/lib/traceevent/event-parse.c
35147 +@@ -2457,7 +2457,7 @@ static int arg_num_eval(struct tep_print_arg *arg, long long *val)
35148 + static char *arg_eval (struct tep_print_arg *arg)
35149 + {
35150 + long long val;
35151 +- static char buf[20];
35152 ++ static char buf[24];
35153 +
35154 + switch (arg->type) {
35155 + case TEP_PRINT_ATOM:
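The event-parse.c fix is pure arithmetic: the most negative 64-bit value prints as 19 digits plus a sign, i.e. 20 characters, so with the terminating NUL a 20-byte buffer is one byte short. A quick check:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
    	char buf[24];
    	int n = snprintf(buf, sizeof(buf), "%lld", LLONG_MIN);

    	printf("\"%s\" needs %d chars + NUL\n", buf, n);	/* 20 + 1 */
    	return 0;
    }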
35156 +diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
35157 +index c9d038f91af6..53f8be0f4a1f 100644
35158 +--- a/tools/objtool/Makefile
35159 ++++ b/tools/objtool/Makefile
35160 +@@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
35161 + OBJTOOL := $(OUTPUT)objtool
35162 + OBJTOOL_IN := $(OBJTOOL)-in.o
35163 +
35164 ++LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
35165 ++LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
35166 ++
35167 + all: $(OBJTOOL)
35168 +
35169 + INCLUDES := -I$(srctree)/tools/include \
35170 + -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
35171 + -I$(srctree)/tools/objtool/arch/$(ARCH)/include
35172 + WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
35173 +-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
35174 +-LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
35175 ++CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
35176 ++LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
35177 +
35178 + # Allow old libelf to be used:
35179 + elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
35180 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
35181 +index 0414a0d52262..5dde107083c6 100644
35182 +--- a/tools/objtool/check.c
35183 ++++ b/tools/objtool/check.c
35184 +@@ -2184,9 +2184,10 @@ static void cleanup(struct objtool_file *file)
35185 + elf_close(file->elf);
35186 + }
35187 +
35188 ++static struct objtool_file file;
35189 ++
35190 + int check(const char *_objname, bool orc)
35191 + {
35192 +- struct objtool_file file;
35193 + int ret, warnings = 0;
35194 +
35195 + objname = _objname;
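Making objtool's struct objtool_file static moves a large object out of check()'s stack frame into static storage, where it is also zero-initialized. The same trade-off in miniature (the size below is illustrative):

    #include <stdio.h>

    static char big[1 << 20];	/* 1 MiB lives in .bss, not on the stack */

    int main(void)
    {
    	/* declaring 'char big[1 << 20];' here instead could overflow the stack */
    	big[0] = 42;
    	printf("%d, zeroed tail: %d\n", big[0], big[sizeof(big) - 1]);
    	return 0;
    }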
35196 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
35197 +index b441c88cafa1..cf4a8329c4c0 100644
35198 +--- a/tools/perf/Makefile.config
35199 ++++ b/tools/perf/Makefile.config
35200 +@@ -218,6 +218,8 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
35201 + FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
35202 + FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
35203 +
35204 ++FEATURE_CHECK_LDFLAGS-libaio = -lrt
35205 ++
35206 + CFLAGS += -fno-omit-frame-pointer
35207 + CFLAGS += -ggdb3
35208 + CFLAGS += -funwind-tables
35209 +@@ -386,7 +388,8 @@ ifeq ($(feature-setns), 1)
35210 + $(call detected,CONFIG_SETNS)
35211 + endif
35212 +
35213 +-ifndef NO_CORESIGHT
35214 ++ifdef CORESIGHT
35215 ++ $(call feature_check,libopencsd)
35216 + ifeq ($(feature-libopencsd), 1)
35217 + CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
35218 + LDFLAGS += $(LIBOPENCSD_LDFLAGS)
35219 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
35220 +index 0ee6795d82cc..77f8f069f1e7 100644
35221 +--- a/tools/perf/Makefile.perf
35222 ++++ b/tools/perf/Makefile.perf
35223 +@@ -102,7 +102,7 @@ include ../scripts/utilities.mak
35224 + # When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
35225 + # llvm-config is not in $PATH.
35226 + #
35227 +-# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
35228 ++# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
35229 + #
35230 + # Define NO_AIO if you do not want support of Posix AIO based trace
35231 + # streaming for record mode. Currently Posix AIO trace streaming is
35232 +diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
35233 +index d340d2e42776..13758a0b367b 100644
35234 +--- a/tools/perf/builtin-c2c.c
35235 ++++ b/tools/perf/builtin-c2c.c
35236 +@@ -2055,6 +2055,12 @@ static int setup_nodes(struct perf_session *session)
35237 + if (!set)
35238 + return -ENOMEM;
35239 +
35240 ++ nodes[node] = set;
35241 ++
35242 ++ /* empty node, skip */
35243 ++ if (cpu_map__empty(map))
35244 ++ continue;
35245 ++
35246 + for (cpu = 0; cpu < map->nr; cpu++) {
35247 + set_bit(map->map[cpu], set);
35248 +
35249 +@@ -2063,8 +2069,6 @@ static int setup_nodes(struct perf_session *session)
35250 +
35251 + cpu2node[map->map[cpu]] = node;
35252 + }
35253 +-
35254 +- nodes[node] = set;
35255 + }
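The builtin-c2c reorder stores each bitmap in nodes[] immediately after allocation, before the empty-node "continue", so the skip path neither leaks the allocation nor leaves a hole for later teardown. The shape of that pattern, sketched with calloc():

    #include <stdio.h>
    #include <stdlib.h>

    #define N 4

    int main(void)
    {
    	char *table[N] = { 0 };

    	for (int i = 0; i < N; i++) {
    		char *set = calloc(1, 16);

    		if (!set)
    			goto out;
    		table[i] = set;		/* record first ... */
    		if (i % 2)
    			continue;	/* ... so skipping can't leak it */
    		table[i][0] = 1;	/* populate non-empty entries */
    	}
    out:
    	for (int i = 0; i < N; i++)
    		free(table[i]);
    	printf("done\n");
    	return 0;
    }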
35256 +
35257 + setup_nodes_header();
35258 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
35259 +index ac221f137ed2..cff4d10daf49 100644
35260 +--- a/tools/perf/builtin-script.c
35261 ++++ b/tools/perf/builtin-script.c
35262 +@@ -148,6 +148,7 @@ static struct {
35263 + unsigned int print_ip_opts;
35264 + u64 fields;
35265 + u64 invalid_fields;
35266 ++ u64 user_set_fields;
35267 + } output[OUTPUT_TYPE_MAX] = {
35268 +
35269 + [PERF_TYPE_HARDWARE] = {
35270 +@@ -344,7 +345,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
35271 + if (attr->sample_type & sample_type)
35272 + return 0;
35273 +
35274 +- if (output[type].user_set) {
35275 ++ if (output[type].user_set_fields & field) {
35276 + if (allow_user_set)
35277 + return 0;
35278 + evname = perf_evsel__name(evsel);
35279 +@@ -2627,10 +2628,13 @@ parse:
35280 + pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
35281 + all_output_options[i].str, event_type(j));
35282 + } else {
35283 +- if (change == REMOVE)
35284 ++ if (change == REMOVE) {
35285 + output[j].fields &= ~all_output_options[i].field;
35286 +- else
35287 ++ output[j].user_set_fields &= ~all_output_options[i].field;
35288 ++ } else {
35289 + output[j].fields |= all_output_options[i].field;
35290 ++ output[j].user_set_fields |= all_output_options[i].field;
35291 ++ }
35292 + output[j].user_set = true;
35293 + output[j].wildcard_set = true;
35294 + }
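The builtin-script change keeps a second bitmask alongside the output fields: "fields" is what will be printed, "user_set_fields" records which bits the user explicitly toggled, so the missing-sample-data error fires only for fields the user actually asked for. In miniature:

    #include <stdio.h>
    #include <stdint.h>

    #define FIELD_IP  (1u << 0)
    #define FIELD_TID (1u << 1)

    int main(void)
    {
    	uint32_t fields = FIELD_IP;	/* default output */
    	uint32_t user_set = 0;

    	fields   |= FIELD_TID;		/* user passed "+tid" */
    	user_set |= FIELD_TID;

    	printf("warn about tid? %s\n", (user_set & FIELD_TID) ? "yes" : "no");
    	printf("warn about ip?  %s\n", (user_set & FIELD_IP)  ? "yes" : "no");
    	return 0;
    }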
35295 +diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
35296 +index b36061cd1ab8..91cdbf504535 100644
35297 +--- a/tools/perf/builtin-trace.c
35298 ++++ b/tools/perf/builtin-trace.c
35299 +@@ -1039,6 +1039,9 @@ static const size_t trace__entry_str_size = 2048;
35300 +
35301 + static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
35302 + {
35303 ++ if (fd < 0)
35304 ++ return NULL;
35305 ++
35306 + if (fd > ttrace->files.max) {
35307 + struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
35308 +
35309 +@@ -3865,7 +3868,8 @@ int cmd_trace(int argc, const char **argv)
35310 + goto init_augmented_syscall_tp;
35311 + }
35312 +
35313 +- if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
35314 ++ if (trace.syscalls.events.augmented->priv == NULL &&
35315 ++ strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
35316 + struct perf_evsel *augmented = trace.syscalls.events.augmented;
35317 + if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
35318 + perf_evsel__init_augmented_syscall_tp_args(augmented))
35319 +diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
35320 +index 5cbba70bcdd0..ea7acf403727 100644
35321 +--- a/tools/perf/tests/evsel-tp-sched.c
35322 ++++ b/tools/perf/tests/evsel-tp-sched.c
35323 +@@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
35324 + return -1;
35325 + }
35326 +
35327 +- if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
35328 ++ if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
35329 + ret = -1;
35330 +
35331 + if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
35332 +@@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
35333 + if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
35334 + ret = -1;
35335 +
35336 +- if (perf_evsel__test_field(evsel, "next_comm", 16, true))
35337 ++ if (perf_evsel__test_field(evsel, "next_comm", 16, false))
35338 + ret = -1;
35339 +
35340 + if (perf_evsel__test_field(evsel, "next_pid", 4, true))
35341 +@@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
35342 + return -1;
35343 + }
35344 +
35345 +- if (perf_evsel__test_field(evsel, "comm", 16, true))
35346 ++ if (perf_evsel__test_field(evsel, "comm", 16, false))
35347 + ret = -1;
35348 +
35349 + if (perf_evsel__test_field(evsel, "pid", 4, true))
35350 +diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
35351 +index d66c66315987..ea68db08b8e7 100644
35352 +--- a/tools/perf/trace/beauty/msg_flags.c
35353 ++++ b/tools/perf/trace/beauty/msg_flags.c
35354 +@@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
35355 + return scnprintf(bf, size, "NONE");
35356 + #define P_MSG_FLAG(n) \
35357 + if (flags & MSG_##n) { \
35358 +- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
35359 ++ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
35360 + flags &= ~MSG_##n; \
35361 + }
35362 +
35363 +diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
35364 +index 6897fab40dcc..d4d10b33ba0e 100644
35365 +--- a/tools/perf/trace/beauty/waitid_options.c
35366 ++++ b/tools/perf/trace/beauty/waitid_options.c
35367 +@@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
35368 +
35369 + #define P_OPTION(n) \
35370 + if (options & W##n) { \
35371 +- printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
35372 ++ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
35373 + options &= ~W##n; \
35374 + }
35375 +
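Both beauty fixes above are variants of the same bug: msg_flags passed three arguments to a format with only two %s conversions, and waitid_options had the name string absorbed into the ternary instead of being a third argument. Either way the flag name was silently dropped; compilers flag the first form with -Wformat. A minimal reproduction:

    #include <stdio.h>

    int main(void)
    {
    	const char *sep = "|", *prefix = "MSG_", *name = "DONTWAIT";

    	printf("broken: %s%s\n", sep, prefix, name);	/* name dropped */
    	printf("fixed:  %s%s%s\n", sep, prefix, name);
    	return 0;
    }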
35376 +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
35377 +index 70de8f6b3aee..9142fd294e76 100644
35378 +--- a/tools/perf/util/annotate.c
35379 ++++ b/tools/perf/util/annotate.c
35380 +@@ -1889,6 +1889,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
35381 + struct annotation_options *options,
35382 + struct arch **parch)
35383 + {
35384 ++ struct annotation *notes = symbol__annotation(sym);
35385 + struct annotate_args args = {
35386 + .privsize = privsize,
35387 + .evsel = evsel,
35388 +@@ -1919,6 +1920,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
35389 +
35390 + args.ms.map = map;
35391 + args.ms.sym = sym;
35392 ++ notes->start = map__rip_2objdump(map, sym->start);
35393 +
35394 + return symbol__disassemble(sym, &args);
35395 + }
35396 +@@ -2794,8 +2796,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
35397 +
35398 + symbol__calc_percent(sym, evsel);
35399 +
35400 +- notes->start = map__rip_2objdump(map, sym->start);
35401 +-
35402 + annotation__set_offsets(notes, size);
35403 + annotation__mark_jump_targets(notes, sym);
35404 + annotation__compute_ipc(notes, size);
35405 +diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
35406 +index f69961c4a4f3..2921ce08b198 100644
35407 +--- a/tools/perf/util/auxtrace.c
35408 ++++ b/tools/perf/util/auxtrace.c
35409 +@@ -1278,9 +1278,9 @@ static int __auxtrace_mmap__read(struct perf_mmap *map,
35410 + }
35411 +
35412 + /* padding must be written by fn() e.g. record__process_auxtrace() */
35413 +- padding = size & 7;
35414 ++ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
35415 + if (padding)
35416 +- padding = 8 - padding;
35417 ++ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
35418 +
35419 + memset(&ev, 0, sizeof(ev));
35420 + ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
35421 +diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
35422 +index 8e50f96d4b23..fac32482db61 100644
35423 +--- a/tools/perf/util/auxtrace.h
35424 ++++ b/tools/perf/util/auxtrace.h
35425 +@@ -40,6 +40,9 @@ struct record_opts;
35426 + struct auxtrace_info_event;
35427 + struct events_stats;
35428 +
35429 ++/* Auxtrace records must have the same alignment as perf event records */
35430 ++#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
35431 ++
35432 + enum auxtrace_type {
35433 + PERF_AUXTRACE_UNKNOWN,
35434 + PERF_AUXTRACE_INTEL_PT,
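The auxtrace change replaces the literal 8 with PERF_AUXTRACE_RECORD_ALIGNMENT but keeps the standard power-of-two padding computation: the remainder is size & (align - 1), and the pad is align minus that remainder, or zero. Worked out:

    #include <stdio.h>

    #define ALIGN 8		/* must be a power of two for the mask trick */

    static unsigned long pad_for(unsigned long size)
    {
    	unsigned long rem = size & (ALIGN - 1);

    	return rem ? ALIGN - rem : 0;
    }

    int main(void)
    {
    	for (unsigned long s = 5; s <= 9; s++)
    		printf("size %lu -> pad %lu\n", s, pad_for(s));
    	return 0;
    }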
35435 +diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
35436 +index 4503f3ca45ab..7c0b975dd2f0 100644
35437 +--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
35438 ++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
35439 +@@ -26,6 +26,7 @@
35440 +
35441 + #include "../cache.h"
35442 + #include "../util.h"
35443 ++#include "../auxtrace.h"
35444 +
35445 + #include "intel-pt-insn-decoder.h"
35446 + #include "intel-pt-pkt-decoder.h"
35447 +@@ -250,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
35448 + if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
35449 + decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
35450 + decoder->tsc_ctc_ratio_d;
35451 +-
35452 +- /*
35453 +- * Allow for timestamps appearing to backwards because a TSC
35454 +- * packet has slipped past a MTC packet, so allow 2 MTC ticks
35455 +- * or ...
35456 +- */
35457 +- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
35458 +- decoder->tsc_ctc_ratio_n,
35459 +- decoder->tsc_ctc_ratio_d);
35460 + }
35461 +- /* ... or 0x100 paranoia */
35462 +- if (decoder->tsc_slip < 0x100)
35463 +- decoder->tsc_slip = 0x100;
35464 ++
35465 ++ /*
35466 ++ * A TSC packet can slip past MTC packets so that the timestamp appears
35467 ++ * to go backwards. One estimate is that it can be up to about 40 CPU
35468 ++ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
35469 ++ * slippage an order of magnitude more to be on the safe side.
35470 ++ */
35471 ++ decoder->tsc_slip = 0x10000;
35472 +
35473 + intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
35474 + intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
35475 +@@ -1394,7 +1391,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
35476 + {
35477 + intel_pt_log("ERROR: Buffer overflow\n");
35478 + intel_pt_clear_tx_flags(decoder);
35479 +- decoder->cbr = 0;
35480 + decoder->timestamp_insn_cnt = 0;
35481 + decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
35482 + decoder->overflow = true;
35483 +@@ -2575,6 +2571,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
35484 + }
35485 + }
35486 +
35487 ++#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
35488 ++
35489 ++/**
35490 ++ * adj_for_padding - adjust overlap to account for padding.
35491 ++ * @buf_b: second buffer
35492 ++ * @buf_a: first buffer
35493 ++ * @len_a: size of first buffer
35494 ++ *
35495 ++ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
35496 ++ * accordingly.
35497 ++ *
35498 ++ * Return: A pointer into @buf_b from where non-overlapped data starts
35499 ++ */
35500 ++static unsigned char *adj_for_padding(unsigned char *buf_b,
35501 ++ unsigned char *buf_a, size_t len_a)
35502 ++{
35503 ++ unsigned char *p = buf_b - MAX_PADDING;
35504 ++ unsigned char *q = buf_a + len_a - MAX_PADDING;
35505 ++ int i;
35506 ++
35507 ++ for (i = MAX_PADDING; i; i--, p++, q++) {
35508 ++ if (*p != *q)
35509 ++ break;
35510 ++ }
35511 ++
35512 ++ return p;
35513 ++}
35514 ++
35515 + /**
35516 + * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
35517 + * using TSC.
35518 +@@ -2625,8 +2649,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
35519 +
35520 + /* Same TSC, so buffers are consecutive */
35521 + if (!cmp && rem_b >= rem_a) {
35522 ++ unsigned char *start;
35523 ++
35524 + *consecutive = true;
35525 +- return buf_b + len_b - (rem_b - rem_a);
35526 ++ start = buf_b + len_b - (rem_b - rem_a);
35527 ++ return adj_for_padding(start, buf_a, len_a);
35528 + }
35529 + if (cmp < 0)
35530 + return buf_b; /* tsc_a < tsc_b => no overlap */
35531 +@@ -2689,7 +2716,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
35532 + found = memmem(buf_a, len_a, buf_b, len_a);
35533 + if (found) {
35534 + *consecutive = true;
35535 +- return buf_b + len_a;
35536 ++ return adj_for_padding(buf_b + len_a, buf_a, len_a);
35537 + }
35538 +
35539 + /* Try again at next PSB in buffer 'a' */
35540 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
35541 +index 2e72373ec6df..4493fc13a6fa 100644
35542 +--- a/tools/perf/util/intel-pt.c
35543 ++++ b/tools/perf/util/intel-pt.c
35544 +@@ -2522,6 +2522,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
35545 + }
35546 +
35547 + pt->timeless_decoding = intel_pt_timeless_decoding(pt);
35548 ++ if (pt->timeless_decoding && !pt->tc.time_mult)
35549 ++ pt->tc.time_mult = 1;
35550 + pt->have_tsc = intel_pt_have_tsc(pt);
35551 + pt->sampling_mode = false;
35552 + pt->est_tsc = !pt->timeless_decoding;
35553 +diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
35554 +index 11a234740632..ccd3275feeaa 100644
35555 +--- a/tools/perf/util/pmu.c
35556 ++++ b/tools/perf/util/pmu.c
35557 +@@ -734,10 +734,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
35558 +
35559 + if (!is_arm_pmu_core(name)) {
35560 + pname = pe->pmu ? pe->pmu : "cpu";
35561 ++
35562 ++ /*
35563 ++ * An uncore alias may come from a different PMU
35564 ++ * that shares a common name prefix.
35565 ++ */
35566 ++ if (pmu_is_uncore(name) &&
35567 ++ !strncmp(pname, name, strlen(pname)))
35568 ++ goto new_alias;
35569 ++
35570 + if (strcmp(pname, name))
35571 + continue;
35572 + }
35573 +
35574 ++new_alias:
35575 + /* need type casts to override 'const' */
35576 + __perf_pmu__new_alias(head, NULL, (char *)pe->name,
35577 + (char *)pe->desc, (char *)pe->event,
35578 +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
35579 +index 18a59fba97ff..cc4773157b9b 100644
35580 +--- a/tools/perf/util/probe-event.c
35581 ++++ b/tools/perf/util/probe-event.c
35582 +@@ -157,8 +157,10 @@ static struct map *kernel_get_module_map(const char *module)
35583 + if (module && strchr(module, '/'))
35584 + return dso__new_map(module);
35585 +
35586 +- if (!module)
35587 +- module = "kernel";
35588 ++ if (!module) {
35589 ++ pos = machine__kernel_map(host_machine);
35590 ++ return map__get(pos);
35591 ++ }
35592 +
35593 + for (pos = maps__first(maps); pos; pos = map__next(pos)) {
35594 + /* short_name is "[module]" */
35595 +diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
35596 +index 68b2570304ec..08073a4d59a4 100644
35597 +--- a/tools/perf/util/s390-cpumsf.c
35598 ++++ b/tools/perf/util/s390-cpumsf.c
35599 +@@ -301,6 +301,11 @@ static bool s390_cpumsf_validate(int machine_type,
35600 + *dsdes = 85;
35601 + *bsdes = 32;
35602 + break;
35603 ++ case 2964:
35604 ++ case 2965:
35605 ++ *dsdes = 112;
35606 ++ *bsdes = 32;
35607 ++ break;
35608 + default:
35609 + /* Illegal trailer entry */
35610 + return false;
35611 +diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
35612 +index 87ef16a1b17e..7059d1be2d09 100644
35613 +--- a/tools/perf/util/scripting-engines/trace-event-python.c
35614 ++++ b/tools/perf/util/scripting-engines/trace-event-python.c
35615 +@@ -733,8 +733,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
35616 + Py_FatalError("couldn't create Python dictionary");
35617 +
35618 + pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
35619 +- pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
35620 +- (const char *)&evsel->attr, sizeof(evsel->attr)));
35621 ++ pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
35622 +
35623 + pydict_set_item_string_decref(dict_sample, "pid",
35624 + _PyLong_FromLong(sample->pid));
35625 +@@ -1494,34 +1493,40 @@ static void _free_command_line(wchar_t **command_line, int num)
35626 + static int python_start_script(const char *script, int argc, const char **argv)
35627 + {
35628 + struct tables *tables = &tables_global;
35629 ++ PyMODINIT_FUNC (*initfunc)(void);
35630 + #if PY_MAJOR_VERSION < 3
35631 + const char **command_line;
35632 + #else
35633 + wchar_t **command_line;
35634 + #endif
35635 +- char buf[PATH_MAX];
35636 ++ /*
35637 ++ * Use a non-const name variable to cope with python 2.6's
35638 ++ * PyImport_AppendInittab prototype
35639 ++ */
35640 ++ char buf[PATH_MAX], name[19] = "perf_trace_context";
35641 + int i, err = 0;
35642 + FILE *fp;
35643 +
35644 + #if PY_MAJOR_VERSION < 3
35645 ++ initfunc = initperf_trace_context;
35646 + command_line = malloc((argc + 1) * sizeof(const char *));
35647 + command_line[0] = script;
35648 + for (i = 1; i < argc + 1; i++)
35649 + command_line[i] = argv[i - 1];
35650 + #else
35651 ++ initfunc = PyInit_perf_trace_context;
35652 + command_line = malloc((argc + 1) * sizeof(wchar_t *));
35653 + command_line[0] = Py_DecodeLocale(script, NULL);
35654 + for (i = 1; i < argc + 1; i++)
35655 + command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
35656 + #endif
35657 +
35658 ++ PyImport_AppendInittab(name, initfunc);
35659 + Py_Initialize();
35660 +
35661 + #if PY_MAJOR_VERSION < 3
35662 +- initperf_trace_context();
35663 + PySys_SetArgv(argc + 1, (char **)command_line);
35664 + #else
35665 +- PyInit_perf_trace_context();
35666 + PySys_SetArgv(argc + 1, command_line);
35667 + #endif
35668 +
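The trace-event-python change applies the canonical Python 3 embedding rule: built-in extension modules must be registered with PyImport_AppendInittab() before Py_Initialize(); calling the module's init function afterwards, as the old code did, is no longer sufficient. A stand-alone sketch, assuming Python 3 development headers (build with e.g. cc demo.c $(python3-config --embed --cflags --ldflags) on 3.8+):

    #include <Python.h>

    static PyMethodDef demo_methods[] = {
    	{ NULL, NULL, 0, NULL }
    };

    static struct PyModuleDef demo_module = {
    	PyModuleDef_HEAD_INIT, "demo", NULL, -1, demo_methods,
    	NULL, NULL, NULL, NULL
    };

    static PyObject *PyInit_demo(void)
    {
    	return PyModule_Create(&demo_module);
    }

    int main(void)
    {
    	PyImport_AppendInittab("demo", PyInit_demo);	/* before Py_Initialize() */
    	Py_Initialize();
    	PyRun_SimpleString("import demo; print('demo registered')");
    	Py_Finalize();
    	return 0;
    }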
35669 +diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
35670 +index 6c1a83768eb0..d0334c33da54 100644
35671 +--- a/tools/perf/util/sort.c
35672 ++++ b/tools/perf/util/sort.c
35673 +@@ -230,8 +230,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
35674 + if (sym_l == sym_r)
35675 + return 0;
35676 +
35677 +- if (sym_l->inlined || sym_r->inlined)
35678 +- return strcmp(sym_l->name, sym_r->name);
35679 ++ if (sym_l->inlined || sym_r->inlined) {
35680 ++ int ret = strcmp(sym_l->name, sym_r->name);
35681 ++
35682 ++ if (ret)
35683 ++ return ret;
35684 ++ if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
35685 ++ return 0;
35686 ++ }
35687 +
35688 + if (sym_l->start != sym_r->start)
35689 + return (int64_t)(sym_r->start - sym_l->start);
35690 +diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
35691 +index dc86597d0cc4..ccf42c4e83f0 100644
35692 +--- a/tools/perf/util/srcline.c
35693 ++++ b/tools/perf/util/srcline.c
35694 +@@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
35695 + } else {
35696 + /* create a fake symbol for the inline frame */
35697 + inline_sym = symbol__new(base_sym ? base_sym->start : 0,
35698 +- base_sym ? base_sym->end : 0,
35699 ++ base_sym ? (base_sym->end - base_sym->start) : 0,
35700 + base_sym ? base_sym->binding : 0,
35701 + base_sym ? base_sym->type : 0,
35702 + funcname);
35703 +diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
35704 +index 48efad6d0f90..ca5f2e4796ea 100644
35705 +--- a/tools/perf/util/symbol.c
35706 ++++ b/tools/perf/util/symbol.c
35707 +@@ -710,6 +710,8 @@ static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct
35708 + }
35709 +
35710 + pos->start -= curr_map->start - curr_map->pgoff;
35711 ++ if (pos->end > curr_map->end)
35712 ++ pos->end = curr_map->end;
35713 + if (pos->end)
35714 + pos->end -= curr_map->start - curr_map->pgoff;
35715 + symbols__insert(&curr_map->dso->symbols, pos);
35716 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
35717 +index 41ab7a3668b3..936f726f7cd9 100644
35718 +--- a/tools/testing/selftests/bpf/Makefile
35719 ++++ b/tools/testing/selftests/bpf/Makefile
35720 +@@ -96,6 +96,7 @@ $(BPFOBJ): force
35721 + CLANG ?= clang
35722 + LLC ?= llc
35723 + LLVM_OBJCOPY ?= llvm-objcopy
35724 ++LLVM_READELF ?= llvm-readelf
35725 + BTF_PAHOLE ?= pahole
35726 +
35727 + PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
35728 +@@ -132,7 +133,7 @@ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
35729 + BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
35730 + BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
35731 + $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
35732 +- readelf -S ./llvm_btf_verify.o | grep BTF; \
35733 ++ $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
35734 + /bin/rm -f ./llvm_btf_verify.o)
35735 +
35736 + ifneq ($(BTF_LLVM_PROBE),)
35737 +diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/test_map_in_map.c
35738 +index ce923e67e08e..2985f262846e 100644
35739 +--- a/tools/testing/selftests/bpf/test_map_in_map.c
35740 ++++ b/tools/testing/selftests/bpf/test_map_in_map.c
35741 +@@ -27,6 +27,7 @@ SEC("xdp_mimtest")
35742 + int xdp_mimtest0(struct xdp_md *ctx)
35743 + {
35744 + int value = 123;
35745 ++ int *value_p;
35746 + int key = 0;
35747 + void *map;
35748 +
35749 +@@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx)
35750 + return XDP_DROP;
35751 +
35752 + bpf_map_update_elem(map, &key, &value, 0);
35753 ++ value_p = bpf_map_lookup_elem(map, &key);
35754 ++ if (!value_p || *value_p != 123)
35755 ++ return XDP_DROP;
35756 +
35757 + map = bpf_map_lookup_elem(&mim_hash, &key);
35758 + if (!map)
35759 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
35760 +index e2b9eee37187..6e05a22b346c 100644
35761 +--- a/tools/testing/selftests/bpf/test_maps.c
35762 ++++ b/tools/testing/selftests/bpf/test_maps.c
35763 +@@ -43,7 +43,7 @@ static int map_flags;
35764 + } \
35765 + })
35766 +
35767 +-static void test_hashmap(int task, void *data)
35768 ++static void test_hashmap(unsigned int task, void *data)
35769 + {
35770 + long long key, next_key, first_key, value;
35771 + int fd;
35772 +@@ -133,7 +133,7 @@ static void test_hashmap(int task, void *data)
35773 + close(fd);
35774 + }
35775 +
35776 +-static void test_hashmap_sizes(int task, void *data)
35777 ++static void test_hashmap_sizes(unsigned int task, void *data)
35778 + {
35779 + int fd, i, j;
35780 +
35781 +@@ -153,7 +153,7 @@ static void test_hashmap_sizes(int task, void *data)
35782 + }
35783 + }
35784 +
35785 +-static void test_hashmap_percpu(int task, void *data)
35786 ++static void test_hashmap_percpu(unsigned int task, void *data)
35787 + {
35788 + unsigned int nr_cpus = bpf_num_possible_cpus();
35789 + BPF_DECLARE_PERCPU(long, value);
35790 +@@ -280,7 +280,7 @@ static int helper_fill_hashmap(int max_entries)
35791 + return fd;
35792 + }
35793 +
35794 +-static void test_hashmap_walk(int task, void *data)
35795 ++static void test_hashmap_walk(unsigned int task, void *data)
35796 + {
35797 + int fd, i, max_entries = 1000;
35798 + long long key, value, next_key;
35799 +@@ -351,7 +351,7 @@ static void test_hashmap_zero_seed(void)
35800 + close(second);
35801 + }
35802 +
35803 +-static void test_arraymap(int task, void *data)
35804 ++static void test_arraymap(unsigned int task, void *data)
35805 + {
35806 + int key, next_key, fd;
35807 + long long value;
35808 +@@ -406,7 +406,7 @@ static void test_arraymap(int task, void *data)
35809 + close(fd);
35810 + }
35811 +
35812 +-static void test_arraymap_percpu(int task, void *data)
35813 ++static void test_arraymap_percpu(unsigned int task, void *data)
35814 + {
35815 + unsigned int nr_cpus = bpf_num_possible_cpus();
35816 + BPF_DECLARE_PERCPU(long, values);
35817 +@@ -502,7 +502,7 @@ static void test_arraymap_percpu_many_keys(void)
35818 + close(fd);
35819 + }
35820 +
35821 +-static void test_devmap(int task, void *data)
35822 ++static void test_devmap(unsigned int task, void *data)
35823 + {
35824 + int fd;
35825 + __u32 key, value;
35826 +@@ -517,7 +517,7 @@ static void test_devmap(int task, void *data)
35827 + close(fd);
35828 + }
35829 +
35830 +-static void test_queuemap(int task, void *data)
35831 ++static void test_queuemap(unsigned int task, void *data)
35832 + {
35833 + const int MAP_SIZE = 32;
35834 + __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
35835 +@@ -575,7 +575,7 @@ static void test_queuemap(int task, void *data)
35836 + close(fd);
35837 + }
35838 +
35839 +-static void test_stackmap(int task, void *data)
35840 ++static void test_stackmap(unsigned int task, void *data)
35841 + {
35842 + const int MAP_SIZE = 32;
35843 + __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
35844 +@@ -641,7 +641,7 @@ static void test_stackmap(int task, void *data)
35845 + #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
35846 + #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
35847 + #define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
35848 +-static void test_sockmap(int tasks, void *data)
35849 ++static void test_sockmap(unsigned int tasks, void *data)
35850 + {
35851 + struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
35852 + int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
35853 +@@ -1258,10 +1258,11 @@ static void test_map_large(void)
35854 + }
35855 +
35856 + #define run_parallel(N, FN, DATA) \
35857 +- printf("Fork %d tasks to '" #FN "'\n", N); \
35858 ++ printf("Fork %u tasks to '" #FN "'\n", N); \
35859 + __run_parallel(N, FN, DATA)
35860 +
35861 +-static void __run_parallel(int tasks, void (*fn)(int task, void *data),
35862 ++static void __run_parallel(unsigned int tasks,
35863 ++ void (*fn)(unsigned int task, void *data),
35864 + void *data)
35865 + {
35866 + pid_t pid[tasks];
35867 +@@ -1302,7 +1303,7 @@ static void test_map_stress(void)
35868 + #define DO_UPDATE 1
35869 + #define DO_DELETE 0
35870 +
35871 +-static void test_update_delete(int fn, void *data)
35872 ++static void test_update_delete(unsigned int fn, void *data)
35873 + {
35874 + int do_update = ((int *)data)[1];
35875 + int fd = ((int *)data)[0];
35876 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
35877 +index 2fd90d456892..9a967983abed 100644
35878 +--- a/tools/testing/selftests/bpf/test_verifier.c
35879 ++++ b/tools/testing/selftests/bpf/test_verifier.c
35880 +@@ -34,6 +34,7 @@
35881 + #include <linux/if_ether.h>
35882 +
35883 + #include <bpf/bpf.h>
35884 ++#include <bpf/libbpf.h>
35885 +
35886 + #ifdef HAVE_GENHDR
35887 + # include "autoconf.h"
35888 +@@ -59,6 +60,7 @@
35889 +
35890 + #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
35891 + static bool unpriv_disabled = false;
35892 ++static int skips;
35893 +
35894 + struct bpf_test {
35895 + const char *descr;
35896 +@@ -15946,6 +15948,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
35897 + pflags |= BPF_F_ANY_ALIGNMENT;
35898 + fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
35899 + "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
35900 ++ if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
35901 ++ printf("SKIP (unsupported program type %d)\n", prog_type);
35902 ++ skips++;
35903 ++ goto close_fds;
35904 ++ }
35905 +
35906 + expected_ret = unpriv && test->result_unpriv != UNDEF ?
35907 + test->result_unpriv : test->result;
35908 +@@ -16099,7 +16106,7 @@ static bool test_as_unpriv(struct bpf_test *test)
35909 +
35910 + static int do_test(bool unpriv, unsigned int from, unsigned int to)
35911 + {
35912 +- int i, passes = 0, errors = 0, skips = 0;
35913 ++ int i, passes = 0, errors = 0;
35914 +
35915 + for (i = from; i < to; i++) {
35916 + struct bpf_test *test = &tests[i];
35917 +diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
35918 +index 913a25a4a32b..bf634dda0720 100644
35919 +--- a/tools/testing/selftests/firmware/config
35920 ++++ b/tools/testing/selftests/firmware/config
35921 +@@ -1,6 +1,5 @@
35922 + CONFIG_TEST_FIRMWARE=y
35923 + CONFIG_FW_LOADER=y
35924 + CONFIG_FW_LOADER_USER_HELPER=y
35925 +-CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
35926 + CONFIG_IKCONFIG=y
35927 + CONFIG_IKCONFIG_PROC=y
35928 +diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
35929 +index 466cf2f91ba0..a4320c4b44dc 100755
35930 +--- a/tools/testing/selftests/firmware/fw_filesystem.sh
35931 ++++ b/tools/testing/selftests/firmware/fw_filesystem.sh
35932 +@@ -155,8 +155,11 @@ read_firmwares()
35933 + {
35934 + for i in $(seq 0 3); do
35935 + config_set_read_fw_idx $i
35936 +- # Verify the contents match
35937 +- if ! diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
35938 ++ # Verify the contents are what we expect.
35939 ++ # -Z is required for now -- check for yourself: md5sum
35940 ++ # on $FW and $DIR/read_firmware yields the same hash, and
35941 ++ # even cmp agrees, so something else is off.
35942 ++ if ! diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
35943 + echo "request #$i: firmware was not loaded" >&2
35944 + exit 1
35945 + fi
35946 +@@ -168,7 +171,7 @@ read_firmwares_expect_nofile()
35947 + for i in $(seq 0 3); do
35948 + config_set_read_fw_idx $i
35949 + # Ensures contents differ
35950 +- if diff -q "$FW" $DIR/read_firmware 2>/dev/null ; then
35951 ++ if diff -q -Z "$FW" $DIR/read_firmware 2>/dev/null ; then
35952 + echo "request $i: file was not expected to match" >&2
35953 + exit 1
35954 + fi
35955 +diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
35956 +index 6c5f1b2ffb74..1cbb12e284a6 100755
35957 +--- a/tools/testing/selftests/firmware/fw_lib.sh
35958 ++++ b/tools/testing/selftests/firmware/fw_lib.sh
35959 +@@ -91,7 +91,7 @@ verify_reqs()
35960 + if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
35961 + if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
35962 + echo "usermode helper disabled so ignoring test"
35963 +- exit $ksft_skip
35964 ++ exit 0
35965 + fi
35966 + fi
35967 + }
35968 +diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
35969 +index 858c19caf224..8cdf1b89ac9c 100644
35970 +--- a/tools/testing/selftests/ir/ir_loopback.c
35971 ++++ b/tools/testing/selftests/ir/ir_loopback.c
35972 +@@ -27,6 +27,8 @@
35973 +
35974 + #define TEST_SCANCODES 10
35975 + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
35976 ++#define SYSFS_PATH_MAX 256
35977 ++#define DNAME_PATH_MAX 256
35978 +
35979 + static const struct {
35980 + enum rc_proto proto;
35981 +@@ -56,7 +58,7 @@ static const struct {
35982 + int lirc_open(const char *rc)
35983 + {
35984 + struct dirent *dent;
35985 +- char buf[100];
35986 ++ char buf[SYSFS_PATH_MAX + DNAME_PATH_MAX];
35987 + DIR *d;
35988 + int fd;
35989 +
35990 +diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
35991 +index 7e632b465ab4..6d7a81306f8a 100644
35992 +--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
35993 ++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
35994 +@@ -2971,6 +2971,12 @@ TEST(get_metadata)
35995 + struct seccomp_metadata md;
35996 + long ret;
35997 +
35998 ++ /* Only real root can get metadata. */
35999 ++ if (geteuid()) {
36000 ++ XFAIL(return, "get_metadata requires real root");
36001 ++ return;
36002 ++ }
36003 ++
36004 + ASSERT_EQ(0, pipe(pipefd));
36005 +
36006 + pid = fork();
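The seccomp change follows the usual selftest convention: a test that needs a real-root capability detects the condition up front and reports a skip/xfail instead of failing. Outside the kselftest harness the same guard looks like:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	if (geteuid()) {
    		printf("SKIP: requires real root\n");
    		return 0;	/* harnesses often use a dedicated skip code */
    	}
    	printf("running privileged checks\n");
    	return 0;
    }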
36007 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
36008 +index 30251e288629..5cc22cdaa5ba 100644
36009 +--- a/virt/kvm/arm/mmu.c
36010 ++++ b/virt/kvm/arm/mmu.c
36011 +@@ -2353,7 +2353,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
36012 + return 0;
36013 + }
36014 +
36015 +-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
36016 ++void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
36017 + {
36018 + }
36019 +
36020 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
36021 +index 076bc38963bf..b4f2d892a1d3 100644
36022 +--- a/virt/kvm/kvm_main.c
36023 ++++ b/virt/kvm/kvm_main.c
36024 +@@ -874,6 +874,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
36025 + int as_id, struct kvm_memslots *slots)
36026 + {
36027 + struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
36028 ++ u64 gen;
36029 +
36030 + /*
36031 + * Set the low bit in the generation, which disables SPTE caching
36032 +@@ -896,9 +897,11 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
36033 + * space 0 will use generations 0, 4, 8, ... while * address space 1 will
36034 + * use generations 2, 6, 10, 14, ...
36035 + */
36036 +- slots->generation += KVM_ADDRESS_SPACE_NUM * 2 - 1;
36037 ++ gen = slots->generation + KVM_ADDRESS_SPACE_NUM * 2 - 1;
36038 +
36039 +- kvm_arch_memslots_updated(kvm, slots);
36040 ++ kvm_arch_memslots_updated(kvm, gen);
36041 ++
36042 ++ slots->generation = gen;
36043 +
36044 + return old_memslots;
36045 + }
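The hunk above changes an ordering: the new generation is computed into a local, handed to kvm_arch_memslots_updated() (which now takes the number rather than the slots), and only then stored, so arch code observes the new generation before it becomes visible through slots->generation. The shape of it (the +3 mirrors KVM_ADDRESS_SPACE_NUM * 2 - 1 with two address spaces; illustrative only):

    #include <stdio.h>

    struct slots { unsigned long long generation; };

    static void arch_updated(unsigned long long gen)
    {
    	/* e.g. invalidate caches tagged with an older generation */
    	printf("arch sees gen %llu first\n", gen);
    }

    int main(void)
    {
    	struct slots s = { .generation = 1 };
    	unsigned long long gen = s.generation + 3;	/* compute */

    	arch_updated(gen);				/* notify */
    	s.generation = gen;				/* publish */
    	printf("published gen %llu\n", s.generation);
    	return 0;
    }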
36046 +@@ -2899,6 +2902,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
36047 + {
36048 + struct kvm_device *dev = filp->private_data;
36049 +
36050 ++ if (dev->kvm->mm != current->mm)
36051 ++ return -EIO;
36052 ++
36053 + switch (ioctl) {
36054 + case KVM_SET_DEVICE_ATTR:
36055 + return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);