Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 21 Apr 2021 11:42:14 +0000
Message-Id: 1619005314.fdd5a720e6058faea8253988229ff8f5e367a006.mpagano@gentoo
1 commit: fdd5a720e6058faea8253988229ff8f5e367a006
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Wed Apr 21 11:41:54 2021 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Wed Apr 21 11:41:54 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fdd5a720
7
8 Linux patch 5.4.114
9
10 Signed-off-by: Mike Pagano <mpagano@gentoo.org>
11
12 0000_README | 4 +
13 1113_linux-5.4.114.patch | 2749 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2753 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 4792b10..95ce3e1 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -495,6 +495,10 @@ Patch: 1112_linux-5.4.113.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.113
23
24 +Patch: 1113_linux-5.4.114.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.114
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1113_linux-5.4.114.patch b/1113_linux-5.4.114.patch
33 new file mode 100644
34 index 0000000..4b1fb51
35 --- /dev/null
36 +++ b/1113_linux-5.4.114.patch
37 @@ -0,0 +1,2749 @@
38 +diff --git a/Makefile b/Makefile
39 +index 7fe00a93c870c..355e05ba065cb 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 4
46 +-SUBLEVEL = 113
47 ++SUBLEVEL = 114
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
52 +index 3d57ed0d85350..4045180510939 100644
53 +--- a/arch/arc/kernel/signal.c
54 ++++ b/arch/arc/kernel/signal.c
55 +@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
56 + sizeof(sf->uc.uc_mcontext.regs.scratch));
57 + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
58 +
59 +- return err;
60 ++ return err ? -EFAULT : 0;
61 + }
62 +
63 + static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
64 +@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
65 + &(sf->uc.uc_mcontext.regs.scratch),
66 + sizeof(sf->uc.uc_mcontext.regs.scratch));
67 + if (err)
68 +- return err;
69 ++ return -EFAULT;
70 +
71 + set_current_blocked(&set);
72 + regs->bta = uregs.scratch.bta;
73 +diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
74 +index 904852006b9b1..0a36b8fe3fa92 100644
75 +--- a/arch/arm/boot/dts/omap4.dtsi
76 ++++ b/arch/arm/boot/dts/omap4.dtsi
77 +@@ -22,6 +22,11 @@
78 + i2c1 = &i2c2;
79 + i2c2 = &i2c3;
80 + i2c3 = &i2c4;
81 ++ mmc0 = &mmc1;
82 ++ mmc1 = &mmc2;
83 ++ mmc2 = &mmc3;
84 ++ mmc3 = &mmc4;
85 ++ mmc4 = &mmc5;
86 + serial0 = &uart1;
87 + serial1 = &uart2;
88 + serial2 = &uart3;
89 +diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
90 +index e9d9c8460682c..68ab6a95f222d 100644
91 +--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
92 ++++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
93 +@@ -770,14 +770,6 @@
94 + ti,max-div = <2>;
95 + };
96 +
97 +- sha2md5_fck: sha2md5_fck@15c8 {
98 +- #clock-cells = <0>;
99 +- compatible = "ti,gate-clock";
100 +- clocks = <&l3_div_ck>;
101 +- ti,bit-shift = <1>;
102 +- reg = <0x15c8>;
103 +- };
104 +-
105 + usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
106 + #clock-cells = <0>;
107 + compatible = "ti,gate-clock";
108 +diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
109 +index 041646fabb2db..3b56e993326d0 100644
110 +--- a/arch/arm/boot/dts/omap5.dtsi
111 ++++ b/arch/arm/boot/dts/omap5.dtsi
112 +@@ -25,6 +25,11 @@
113 + i2c2 = &i2c3;
114 + i2c3 = &i2c4;
115 + i2c4 = &i2c5;
116 ++ mmc0 = &mmc1;
117 ++ mmc1 = &mmc2;
118 ++ mmc2 = &mmc3;
119 ++ mmc3 = &mmc4;
120 ++ mmc4 = &mmc5;
121 + serial0 = &uart1;
122 + serial1 = &uart2;
123 + serial2 = &uart3;
124 +diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
125 +index 0b2fd7e2e9b42..90b1e9be430e9 100644
126 +--- a/arch/arm/mach-footbridge/cats-pci.c
127 ++++ b/arch/arm/mach-footbridge/cats-pci.c
128 +@@ -15,14 +15,14 @@
129 + #include <asm/mach-types.h>
130 +
131 + /* cats host-specific stuff */
132 +-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
133 ++static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
134 +
135 + static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
136 + {
137 + return 0;
138 + }
139 +
140 +-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
141 ++static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
142 + {
143 + if (dev->irq >= 255)
144 + return -1; /* not a valid interrupt. */
145 +diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
146 +index 6f28aaa9ca79b..c3f280d08fa7f 100644
147 +--- a/arch/arm/mach-footbridge/ebsa285-pci.c
148 ++++ b/arch/arm/mach-footbridge/ebsa285-pci.c
149 +@@ -14,9 +14,9 @@
150 + #include <asm/mach/pci.h>
151 + #include <asm/mach-types.h>
152 +
153 +-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
154 ++static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
155 +
156 +-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
157 ++static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
158 + {
159 + if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
160 + dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
161 +diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
162 +index 9473aa0305e5f..e8304392074b8 100644
163 +--- a/arch/arm/mach-footbridge/netwinder-pci.c
164 ++++ b/arch/arm/mach-footbridge/netwinder-pci.c
165 +@@ -18,7 +18,7 @@
166 + * We now use the slot ID instead of the device identifiers to select
167 + * which interrupt is routed where.
168 + */
169 +-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
170 ++static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
171 + {
172 + switch (slot) {
173 + case 0: /* host bridge */
174 +diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
175 +index 4391e433a4b2f..9d19aa98a663e 100644
176 +--- a/arch/arm/mach-footbridge/personal-pci.c
177 ++++ b/arch/arm/mach-footbridge/personal-pci.c
178 +@@ -14,13 +14,12 @@
179 + #include <asm/mach/pci.h>
180 + #include <asm/mach-types.h>
181 +
182 +-static int irqmap_personal_server[] __initdata = {
183 ++static int irqmap_personal_server[] = {
184 + IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
185 + IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
186 + };
187 +
188 +-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
189 +- u8 pin)
190 ++static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
191 + {
192 + unsigned char line;
193 +
194 +diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
195 +index 638808c4e1224..697adedaced49 100644
196 +--- a/arch/arm/mach-keystone/keystone.c
197 ++++ b/arch/arm/mach-keystone/keystone.c
198 +@@ -62,7 +62,7 @@ static void __init keystone_init(void)
199 + static long long __init keystone_pv_fixup(void)
200 + {
201 + long long offset;
202 +- phys_addr_t mem_start, mem_end;
203 ++ u64 mem_start, mem_end;
204 +
205 + mem_start = memblock_start_of_DRAM();
206 + mem_end = memblock_end_of_DRAM();
207 +@@ -75,7 +75,7 @@ static long long __init keystone_pv_fixup(void)
208 + if (mem_start < KEYSTONE_HIGH_PHYS_START ||
209 + mem_end > KEYSTONE_HIGH_PHYS_END) {
210 + pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
211 +- (u64)mem_start, (u64)mem_end);
212 ++ mem_start, mem_end);
213 + return 0;
214 + }
215 +
216 +diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
217 +index 14a6c3eb32985..f745a65d3bd7a 100644
218 +--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
219 ++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
220 +@@ -15,6 +15,7 @@
221 + #include <linux/platform_data/gpio-omap.h>
222 +
223 + #include <asm/assembler.h>
224 ++#include <asm/irq.h>
225 +
226 + #include "ams-delta-fiq.h"
227 + #include "board-ams-delta.h"
228 +diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
229 +index c4b49b322e8a8..f5f790c6e5f89 100644
230 +--- a/arch/arm/probes/uprobes/core.c
231 ++++ b/arch/arm/probes/uprobes/core.c
232 +@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
233 + static struct undef_hook uprobes_arm_break_hook = {
234 + .instr_mask = 0x0fffffff,
235 + .instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
236 +- .cpsr_mask = MODE_MASK,
237 ++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
238 + .cpsr_val = USR_MODE,
239 + .fn = uprobe_trap_handler,
240 + };
241 +@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
242 + static struct undef_hook uprobes_arm_ss_hook = {
243 + .instr_mask = 0x0fffffff,
244 + .instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
245 +- .cpsr_mask = MODE_MASK,
246 ++ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
247 + .cpsr_val = USR_MODE,
248 + .fn = uprobe_trap_handler,
249 + };
250 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
251 +index 72d6961dc3128..8d15164f2a3c6 100644
252 +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
253 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
254 +@@ -11,3 +11,7 @@
255 + compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
256 + "allwinner,sun50i-a64";
257 + };
258 ++
259 ++&mmc0 {
260 ++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
261 ++};
262 +diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
263 +index d935e3028fcb6..19e5b7e298fdf 100644
264 +--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
265 ++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
266 +@@ -57,7 +57,7 @@
267 + vmmc-supply = <&reg_dcdc1>;
268 + disable-wp;
269 + bus-width = <4>;
270 +- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
271 ++ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
272 + status = "okay";
273 + };
274 +
275 +diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
276 +index 619db9b4c9d5c..3cb3c4ab3ea56 100644
277 +--- a/arch/arm64/include/asm/alternative.h
278 ++++ b/arch/arm64/include/asm/alternative.h
279 +@@ -119,9 +119,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
280 + .popsection
281 + .subsection 1
282 + 663: \insn2
283 +-664: .previous
284 +- .org . - (664b-663b) + (662b-661b)
285 ++664: .org . - (664b-663b) + (662b-661b)
286 + .org . - (662b-661b) + (664b-663b)
287 ++ .previous
288 + .endif
289 + .endm
290 +
291 +@@ -191,11 +191,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
292 + */
293 + .macro alternative_endif
294 + 664:
295 ++ .org . - (664b-663b) + (662b-661b)
296 ++ .org . - (662b-661b) + (664b-663b)
297 + .if .Lasm_alt_mode==0
298 + .previous
299 + .endif
300 +- .org . - (664b-663b) + (662b-661b)
301 +- .org . - (662b-661b) + (664b-663b)
302 + .endm
303 +
304 + /*
305 +diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
306 +index 3333950b59093..ea487218db790 100644
307 +--- a/arch/arm64/include/asm/word-at-a-time.h
308 ++++ b/arch/arm64/include/asm/word-at-a-time.h
309 +@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
310 + */
311 + static inline unsigned long load_unaligned_zeropad(const void *addr)
312 + {
313 +- unsigned long ret, offset;
314 ++ unsigned long ret, tmp;
315 +
316 + /* Load word from unaligned pointer addr */
317 + asm(
318 +@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
319 + "2:\n"
320 + " .pushsection .fixup,\"ax\"\n"
321 + " .align 2\n"
322 +- "3: and %1, %2, #0x7\n"
323 +- " bic %2, %2, #0x7\n"
324 +- " ldr %0, [%2]\n"
325 ++ "3: bic %1, %2, #0x7\n"
326 ++ " ldr %0, [%1]\n"
327 ++ " and %1, %2, #0x7\n"
328 + " lsl %1, %1, #0x3\n"
329 + #ifndef __AARCH64EB__
330 + " lsr %0, %0, %1\n"
331 +@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
332 + " b 2b\n"
333 + " .popsection\n"
334 + _ASM_EXTABLE(1b, 3b)
335 +- : "=&r" (ret), "=&r" (offset)
336 ++ : "=&r" (ret), "=&r" (tmp)
337 + : "r" (addr), "Q" (*(unsigned long *)addr));
338 +
339 + return ret;
340 +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
341 +index 6b23a0cb2b353..b21549a34447c 100644
342 +--- a/arch/riscv/Kconfig
343 ++++ b/arch/riscv/Kconfig
344 +@@ -101,7 +101,7 @@ config ARCH_FLATMEM_ENABLE
345 + config ARCH_SPARSEMEM_ENABLE
346 + def_bool y
347 + depends on MMU
348 +- select SPARSEMEM_STATIC if 32BIT && SPARSMEM
349 ++ select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
350 + select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
351 +
352 + config ARCH_SELECT_MEMORY_MODEL
353 +diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
354 +index e5162690de8f1..db25f9b7778c9 100644
355 +--- a/drivers/dma/dw/Kconfig
356 ++++ b/drivers/dma/dw/Kconfig
357 +@@ -10,6 +10,7 @@ config DW_DMAC_CORE
358 +
359 + config DW_DMAC
360 + tristate "Synopsys DesignWare AHB DMA platform driver"
361 ++ depends on HAS_IOMEM
362 + select DW_DMAC_CORE
363 + help
364 + Support the Synopsys DesignWare AHB DMA controller. This
365 +@@ -18,6 +19,7 @@ config DW_DMAC
366 + config DW_DMAC_PCI
367 + tristate "Synopsys DesignWare AHB DMA PCI driver"
368 + depends on PCI
369 ++ depends on HAS_IOMEM
370 + select DW_DMAC_CORE
371 + help
372 + Support the Synopsys DesignWare AHB DMA controller on the
373 +diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
374 +index fbf6b1a0a4fae..558cd900d3996 100644
375 +--- a/drivers/gpio/gpiolib-sysfs.c
376 ++++ b/drivers/gpio/gpiolib-sysfs.c
377 +@@ -457,6 +457,8 @@ static ssize_t export_store(struct class *class,
378 + long gpio;
379 + struct gpio_desc *desc;
380 + int status;
381 ++ struct gpio_chip *gc;
382 ++ int offset;
383 +
384 + status = kstrtol(buf, 0, &gpio);
385 + if (status < 0)
386 +@@ -468,6 +470,12 @@ static ssize_t export_store(struct class *class,
387 + pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
388 + return -EINVAL;
389 + }
390 ++ gc = desc->gdev->chip;
391 ++ offset = gpio_chip_hwgpio(desc);
392 ++ if (!gpiochip_line_is_valid(gc, offset)) {
393 ++ pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
394 ++ return -EINVAL;
395 ++ }
396 +
397 + /* No extra locking here; FLAG_SYSFS just signifies that the
398 + * request and export were done by on behalf of userspace, so
399 +diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
400 +index f84049119f1c1..e3579e5ffa146 100644
401 +--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
402 ++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
403 +@@ -1131,8 +1131,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
404 +
405 + static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
406 + {
407 +- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
408 +- REG_A5XX_RBBM_PERFCTR_CP_0_HI);
409 ++ *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
410 ++ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
411 +
412 + return 0;
413 + }
414 +diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
415 +index ab75f0309d4b6..df2656e579917 100644
416 +--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
417 ++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
418 +@@ -773,8 +773,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
419 + /* Force the GPU power on so we can read this register */
420 + a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
421 +
422 +- *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
423 +- REG_A6XX_RBBM_PERFCTR_CP_0_HI);
424 ++ *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
425 ++ REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
426 +
427 + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
428 + return 0;
429 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
430 +index f1928c1ac139c..bf0621e446199 100644
431 +--- a/drivers/hid/wacom_wac.c
432 ++++ b/drivers/hid/wacom_wac.c
433 +@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
434 + {
435 + struct wacom_features *features = &wacom_wac->features;
436 +
437 +- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
438 +-
439 + if (!(features->device_type & WACOM_DEVICETYPE_PEN))
440 + return -ENODEV;
441 +
442 +@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
443 + return 0;
444 + }
445 +
446 ++ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
447 + __set_bit(BTN_TOUCH, input_dev->keybit);
448 + __set_bit(ABS_MISC, input_dev->absbit);
449 +
450 +@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
451 + {
452 + struct wacom_features *features = &wacom_wac->features;
453 +
454 +- input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
455 +-
456 + if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
457 + return -ENODEV;
458 +
459 +@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
460 + /* setup has already been done */
461 + return 0;
462 +
463 ++ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
464 + __set_bit(BTN_TOUCH, input_dev->keybit);
465 +
466 + if (features->touch_max == 1) {
467 +diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
468 +index 63d5e488137dc..e9fa1423f1360 100644
469 +--- a/drivers/input/keyboard/nspire-keypad.c
470 ++++ b/drivers/input/keyboard/nspire-keypad.c
471 +@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
472 + return IRQ_HANDLED;
473 + }
474 +
475 +-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
476 ++static int nspire_keypad_open(struct input_dev *input)
477 + {
478 ++ struct nspire_keypad *keypad = input_get_drvdata(input);
479 + unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
480 ++ int error;
481 ++
482 ++ error = clk_prepare_enable(keypad->clk);
483 ++ if (error)
484 ++ return error;
485 +
486 + cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
487 + if (cycles_per_us == 0)
488 +@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
489 + keypad->int_mask = 1 << 1;
490 + writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
491 +
492 +- /* Disable GPIO interrupts to prevent hanging on touchpad */
493 +- /* Possibly used to detect touchpad events */
494 +- writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
495 +- /* Acknowledge existing interrupts */
496 +- writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
497 +-
498 +- return 0;
499 +-}
500 +-
501 +-static int nspire_keypad_open(struct input_dev *input)
502 +-{
503 +- struct nspire_keypad *keypad = input_get_drvdata(input);
504 +- int error;
505 +-
506 +- error = clk_prepare_enable(keypad->clk);
507 +- if (error)
508 +- return error;
509 +-
510 +- error = nspire_keypad_chip_init(keypad);
511 +- if (error) {
512 +- clk_disable_unprepare(keypad->clk);
513 +- return error;
514 +- }
515 +-
516 + return 0;
517 + }
518 +
519 +@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
520 + {
521 + struct nspire_keypad *keypad = input_get_drvdata(input);
522 +
523 ++ /* Disable interrupts */
524 ++ writel(0, keypad->reg_base + KEYPAD_INTMSK);
525 ++ /* Acknowledge existing interrupts */
526 ++ writel(~0, keypad->reg_base + KEYPAD_INT);
527 ++
528 + clk_disable_unprepare(keypad->clk);
529 + }
530 +
531 +@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
532 + return -ENOMEM;
533 + }
534 +
535 ++ error = clk_prepare_enable(keypad->clk);
536 ++ if (error) {
537 ++ dev_err(&pdev->dev, "failed to enable clock\n");
538 ++ return error;
539 ++ }
540 ++
541 ++ /* Disable interrupts */
542 ++ writel(0, keypad->reg_base + KEYPAD_INTMSK);
543 ++ /* Acknowledge existing interrupts */
544 ++ writel(~0, keypad->reg_base + KEYPAD_INT);
545 ++
546 ++ /* Disable GPIO interrupts to prevent hanging on touchpad */
547 ++ /* Possibly used to detect touchpad events */
548 ++ writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
549 ++ /* Acknowledge existing GPIO interrupts */
550 ++ writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
551 ++
552 ++ clk_disable_unprepare(keypad->clk);
553 ++
554 + input_set_drvdata(input, keypad);
555 +
556 + input->id.bustype = BUS_HOST;
557 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
558 +index e7346c5f4738a..23442a144b834 100644
559 +--- a/drivers/input/serio/i8042-x86ia64io.h
560 ++++ b/drivers/input/serio/i8042-x86ia64io.h
561 +@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
562 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
563 + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
564 + },
565 ++ }, {
566 + .matches = {
567 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
568 + DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
569 +diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
570 +index b63d7fdf0cd20..85a1f465c097e 100644
571 +--- a/drivers/input/touchscreen/s6sy761.c
572 ++++ b/drivers/input/touchscreen/s6sy761.c
573 +@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
574 + u8 major = event[4];
575 + u8 minor = event[5];
576 + u8 z = event[6] & S6SY761_MASK_Z;
577 +- u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
578 +- u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
579 ++ u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
580 ++ u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
581 +
582 + input_mt_slot(sdata->input, tid);
583 +
584 +diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
585 +index 66f4c6398f670..cea2b37897367 100644
586 +--- a/drivers/md/dm-verity-fec.c
587 ++++ b/drivers/md/dm-verity-fec.c
588 +@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
589 + u8 *res;
590 +
591 + position = (index + rsb) * v->fec->roots;
592 +- block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
593 ++ block = div64_u64_rem(position, v->fec->io_size, &rem);
594 + *offset = (unsigned)rem;
595 +
596 + res = dm_bufio_read(v->fec->bufio, block, buf);
597 +@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
598 +
599 + /* read the next block when we run out of parity bytes */
600 + offset += v->fec->roots;
601 +- if (offset >= v->fec->roots << SECTOR_SHIFT) {
602 ++ if (offset >= v->fec->io_size) {
603 + dm_bufio_release(buf);
604 +
605 + par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
606 +@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
607 + return -E2BIG;
608 + }
609 +
610 ++ if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
611 ++ f->io_size = 1 << v->data_dev_block_bits;
612 ++ else
613 ++ f->io_size = v->fec->roots << SECTOR_SHIFT;
614 ++
615 + f->bufio = dm_bufio_client_create(f->dev->bdev,
616 +- f->roots << SECTOR_SHIFT,
617 ++ f->io_size,
618 + 1, 0, NULL, NULL);
619 + if (IS_ERR(f->bufio)) {
620 + ti->error = "Cannot initialize FEC bufio client";
621 +diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
622 +index 42fbd3a7fc9f1..3c46c8d618833 100644
623 +--- a/drivers/md/dm-verity-fec.h
624 ++++ b/drivers/md/dm-verity-fec.h
625 +@@ -36,6 +36,7 @@ struct dm_verity_fec {
626 + struct dm_dev *dev; /* parity data device */
627 + struct dm_bufio_client *data_bufio; /* for data dev access */
628 + struct dm_bufio_client *bufio; /* for parity data access */
629 ++ size_t io_size; /* IO size for roots */
630 + sector_t start; /* parity data start in blocks */
631 + sector_t blocks; /* number of blocks covered */
632 + sector_t rounds; /* number of interleaving rounds */
633 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
634 +index 1af09fd3fed1c..446eb06e50b49 100644
635 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
636 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
637 +@@ -2766,10 +2766,17 @@ unlock:
638 + return err;
639 + }
640 +
641 ++/* prod_id for switch families which do not have a PHY model number */
642 ++static const u16 family_prod_id_table[] = {
643 ++ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
644 ++ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
645 ++};
646 ++
647 + static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
648 + {
649 + struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
650 + struct mv88e6xxx_chip *chip = mdio_bus->chip;
651 ++ u16 prod_id;
652 + u16 val;
653 + int err;
654 +
655 +@@ -2780,23 +2787,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
656 + err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
657 + mv88e6xxx_reg_unlock(chip);
658 +
659 +- if (reg == MII_PHYSID2) {
660 +- /* Some internal PHYs don't have a model number. */
661 +- if (chip->info->family != MV88E6XXX_FAMILY_6165)
662 +- /* Then there is the 6165 family. It gets is
663 +- * PHYs correct. But it can also have two
664 +- * SERDES interfaces in the PHY address
665 +- * space. And these don't have a model
666 +- * number. But they are not PHYs, so we don't
667 +- * want to give them something a PHY driver
668 +- * will recognise.
669 +- *
670 +- * Use the mv88e6390 family model number
671 +- * instead, for anything which really could be
672 +- * a PHY,
673 +- */
674 +- if (!(val & 0x3f0))
675 +- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
676 ++ /* Some internal PHYs don't have a model number. */
677 ++ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
678 ++ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
679 ++ prod_id = family_prod_id_table[chip->info->family];
680 ++ if (prod_id)
681 ++ val |= prod_id >> 4;
682 + }
683 +
684 + return err ? err : val;
685 +diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
686 +index f5ad12c109344..da84660ceae1f 100644
687 +--- a/drivers/net/ethernet/amd/pcnet32.c
688 ++++ b/drivers/net/ethernet/amd/pcnet32.c
689 +@@ -1548,8 +1548,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
690 + }
691 + pci_set_master(pdev);
692 +
693 +- ioaddr = pci_resource_start(pdev, 0);
694 +- if (!ioaddr) {
695 ++ if (!pci_resource_len(pdev, 0)) {
696 + if (pcnet32_debug & NETIF_MSG_PROBE)
697 + pr_err("card has no PCI IO resources, aborting\n");
698 + err = -ENODEV;
699 +@@ -1562,6 +1561,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
700 + pr_err("architecture does not support 32bit PCI busmaster DMA\n");
701 + goto err_disable_dev;
702 + }
703 ++
704 ++ ioaddr = pci_resource_start(pdev, 0);
705 + if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
706 + if (pcnet32_debug & NETIF_MSG_PROBE)
707 + pr_err("io address range already allocated\n");
708 +diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
709 +index bdef5b3dd848c..377668465535f 100644
710 +--- a/drivers/net/ethernet/cadence/macb_main.c
711 ++++ b/drivers/net/ethernet/cadence/macb_main.c
712 +@@ -3590,6 +3590,7 @@ static int macb_init(struct platform_device *pdev)
713 + reg = gem_readl(bp, DCFG8);
714 + bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
715 + GEM_BFEXT(T2SCR, reg));
716 ++ INIT_LIST_HEAD(&bp->rx_fs_list.list);
717 + if (bp->max_tuples > 0) {
718 + /* also needs one ethtype match to check IPv4 */
719 + if (GEM_BFEXT(SCR2ETH, reg) > 0) {
720 +@@ -3600,7 +3601,6 @@ static int macb_init(struct platform_device *pdev)
721 + /* Filtering is supported in hw but don't enable it in kernel now */
722 + dev->hw_features |= NETIF_F_NTUPLE;
723 + /* init Rx flow definitions */
724 +- INIT_LIST_HEAD(&bp->rx_fs_list.list);
725 + bp->rx_fs_list.count = 0;
726 + spin_lock_init(&bp->rx_fs_lock);
727 + } else
728 +diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
729 +index 4b958681d66e7..1d5d8984b49a3 100644
730 +--- a/drivers/net/ethernet/davicom/dm9000.c
731 ++++ b/drivers/net/ethernet/davicom/dm9000.c
732 +@@ -1476,8 +1476,10 @@ dm9000_probe(struct platform_device *pdev)
733 +
734 + /* Init network device */
735 + ndev = alloc_etherdev(sizeof(struct board_info));
736 +- if (!ndev)
737 +- return -ENOMEM;
738 ++ if (!ndev) {
739 ++ ret = -ENOMEM;
740 ++ goto out_regulator_disable;
741 ++ }
742 +
743 + SET_NETDEV_DEV(ndev, &pdev->dev);
744 +
745 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
746 +index 79b13750fa2d2..a2b7b982ee290 100644
747 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
748 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
749 +@@ -1081,19 +1081,13 @@ static int __ibmvnic_open(struct net_device *netdev)
750 +
751 + rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
752 + if (rc) {
753 +- for (i = 0; i < adapter->req_rx_queues; i++)
754 +- napi_disable(&adapter->napi[i]);
755 ++ ibmvnic_napi_disable(adapter);
756 + release_resources(adapter);
757 + return rc;
758 + }
759 +
760 + netif_tx_start_all_queues(netdev);
761 +
762 +- if (prev_state == VNIC_CLOSED) {
763 +- for (i = 0; i < adapter->req_rx_queues; i++)
764 +- napi_schedule(&adapter->napi[i]);
765 +- }
766 +-
767 + adapter->state = VNIC_OPEN;
768 + return rc;
769 + }
770 +@@ -1850,7 +1844,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
771 + u64 old_num_rx_queues, old_num_tx_queues;
772 + u64 old_num_rx_slots, old_num_tx_slots;
773 + struct net_device *netdev = adapter->netdev;
774 +- int i, rc;
775 ++ int rc;
776 +
777 + netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
778 + rwi->reset_reason);
779 +@@ -1995,10 +1989,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
780 + /* refresh device's multicast list */
781 + ibmvnic_set_multi(netdev);
782 +
783 +- /* kick napi */
784 +- for (i = 0; i < adapter->req_rx_queues; i++)
785 +- napi_schedule(&adapter->napi[i]);
786 +-
787 + if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
788 + adapter->reset_reason == VNIC_RESET_MOBILITY) {
789 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
790 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
791 +index a69aace057925..a1b4e995f2b7e 100644
792 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
793 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
794 +@@ -11872,6 +11872,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
795 + {
796 + int err = 0;
797 + int size;
798 ++ u16 pow;
799 +
800 + /* Set default capability flags */
801 + pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
802 +@@ -11890,6 +11891,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
803 + pf->rss_table_size = pf->hw.func_caps.rss_table_size;
804 + pf->rss_size_max = min_t(int, pf->rss_size_max,
805 + pf->hw.func_caps.num_tx_qp);
806 ++
807 ++ /* find the next higher power-of-2 of num cpus */
808 ++ pow = roundup_pow_of_two(num_online_cpus());
809 ++ pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
810 ++
811 + if (pf->hw.func_caps.rss) {
812 + pf->flags |= I40E_FLAG_RSS_ENABLED;
813 + pf->alloc_rss_size = min_t(int, pf->rss_size_max,
814 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
815 +index bd8decc54b871..8ff178fc2670c 100644
816 +--- a/drivers/net/ethernet/realtek/r8169_main.c
817 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
818 +@@ -742,12 +742,6 @@ static void rtl_unlock_config_regs(struct rtl8169_private *tp)
819 + RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
820 + }
821 +
822 +-static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
823 +-{
824 +- pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
825 +- PCI_EXP_DEVCTL_READRQ, force);
826 +-}
827 +-
828 + static bool rtl_is_8125(struct rtl8169_private *tp)
829 + {
830 + return tp->mac_version >= RTL_GIGA_MAC_VER_60;
831 +@@ -4057,14 +4051,12 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
832 + {
833 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
834 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
835 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
836 + }
837 +
838 + static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
839 + {
840 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
841 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
842 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
843 + }
844 +
845 + static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
846 +@@ -4082,7 +4074,6 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
847 + RTL_W8(tp, MaxTxPacketSize, 0x24);
848 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
849 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
850 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
851 + }
852 +
853 + static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
854 +@@ -4090,93 +4081,70 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
855 + RTL_W8(tp, MaxTxPacketSize, 0x3f);
856 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
857 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
858 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
859 +-}
860 +-
861 +-static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
862 +-{
863 +- rtl_tx_performance_tweak(tp,
864 +- PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
865 +-}
866 +-
867 +-static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
868 +-{
869 +- rtl_tx_performance_tweak(tp,
870 +- PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN);
871 + }
872 +
873 + static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
874 + {
875 +- r8168b_0_hw_jumbo_enable(tp);
876 +-
877 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
878 + }
879 +
880 + static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
881 + {
882 +- r8168b_0_hw_jumbo_disable(tp);
883 +-
884 + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
885 + }
886 +
887 +-static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
888 ++static void rtl_jumbo_config(struct rtl8169_private *tp)
889 + {
890 +- rtl_unlock_config_regs(tp);
891 +- switch (tp->mac_version) {
892 +- case RTL_GIGA_MAC_VER_11:
893 +- r8168b_0_hw_jumbo_enable(tp);
894 +- break;
895 +- case RTL_GIGA_MAC_VER_12:
896 +- case RTL_GIGA_MAC_VER_17:
897 +- r8168b_1_hw_jumbo_enable(tp);
898 +- break;
899 +- case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
900 +- r8168c_hw_jumbo_enable(tp);
901 +- break;
902 +- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
903 +- r8168dp_hw_jumbo_enable(tp);
904 +- break;
905 +- case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
906 +- r8168e_hw_jumbo_enable(tp);
907 +- break;
908 +- default:
909 +- break;
910 +- }
911 +- rtl_lock_config_regs(tp);
912 +-}
913 ++ bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
914 ++ int readrq = 4096;
915 +
916 +-static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
917 +-{
918 + rtl_unlock_config_regs(tp);
919 + switch (tp->mac_version) {
920 +- case RTL_GIGA_MAC_VER_11:
921 +- r8168b_0_hw_jumbo_disable(tp);
922 +- break;
923 + case RTL_GIGA_MAC_VER_12:
924 + case RTL_GIGA_MAC_VER_17:
925 +- r8168b_1_hw_jumbo_disable(tp);
926 ++ if (jumbo) {
927 ++ readrq = 512;
928 ++ r8168b_1_hw_jumbo_enable(tp);
929 ++ } else {
930 ++ r8168b_1_hw_jumbo_disable(tp);
931 ++ }
932 + break;
933 + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
934 +- r8168c_hw_jumbo_disable(tp);
935 ++ if (jumbo) {
936 ++ readrq = 512;
937 ++ r8168c_hw_jumbo_enable(tp);
938 ++ } else {
939 ++ r8168c_hw_jumbo_disable(tp);
940 ++ }
941 + break;
942 + case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
943 +- r8168dp_hw_jumbo_disable(tp);
944 ++ if (jumbo)
945 ++ r8168dp_hw_jumbo_enable(tp);
946 ++ else
947 ++ r8168dp_hw_jumbo_disable(tp);
948 + break;
949 + case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
950 +- r8168e_hw_jumbo_disable(tp);
951 ++ if (jumbo) {
952 ++ pcie_set_readrq(tp->pci_dev, 512);
953 ++ r8168e_hw_jumbo_enable(tp);
954 ++ } else {
955 ++ r8168e_hw_jumbo_disable(tp);
956 ++ }
957 + break;
958 + default:
959 + break;
960 + }
961 + rtl_lock_config_regs(tp);
962 +-}
963 +
964 +-static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
965 +-{
966 +- if (mtu > ETH_DATA_LEN)
967 +- rtl_hw_jumbo_enable(tp);
968 +- else
969 +- rtl_hw_jumbo_disable(tp);
970 ++ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
971 ++ pcie_set_readrq(tp->pci_dev, readrq);
972 ++
973 ++ /* Chip doesn't support pause in jumbo mode */
974 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
975 ++ tp->phydev->advertising, !jumbo);
976 ++ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
977 ++ tp->phydev->advertising, !jumbo);
978 ++ phy_start_aneg(tp->phydev);
979 + }
980 +
981 + DECLARE_RTL_COND(rtl_chipcmd_cond)
982 +@@ -4575,18 +4543,12 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
983 + rtl_set_def_aspm_entry_latency(tp);
984 +
985 + rtl_disable_clock_request(tp);
986 +-
987 +- if (tp->dev->mtu <= ETH_DATA_LEN)
988 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
989 + }
990 +
991 + static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
992 + {
993 + rtl_set_def_aspm_entry_latency(tp);
994 +
995 +- if (tp->dev->mtu <= ETH_DATA_LEN)
996 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
997 +-
998 + rtl_disable_clock_request(tp);
999 + }
1000 +
1001 +@@ -4601,8 +4563,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
1002 +
1003 + rtl_set_def_aspm_entry_latency(tp);
1004 +
1005 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1006 +-
1007 + rtl_ephy_init(tp, e_info_8168d_4);
1008 +
1009 + rtl_enable_clock_request(tp);
1010 +@@ -4677,8 +4637,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
1011 + {
1012 + rtl_set_def_aspm_entry_latency(tp);
1013 +
1014 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1015 +-
1016 + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
1017 + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
1018 + rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
1019 +@@ -4741,8 +4699,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
1020 +
1021 + rtl_set_def_aspm_entry_latency(tp);
1022 +
1023 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1024 +-
1025 + rtl_reset_packet_filter(tp);
1026 + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
1027 +
1028 +@@ -4979,8 +4935,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
1029 +
1030 + rtl_set_def_aspm_entry_latency(tp);
1031 +
1032 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1033 +-
1034 + rtl_reset_packet_filter(tp);
1035 +
1036 + rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
1037 +@@ -5038,8 +4992,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
1038 +
1039 + rtl_set_def_aspm_entry_latency(tp);
1040 +
1041 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1042 +-
1043 + rtl_reset_packet_filter(tp);
1044 +
1045 + rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
1046 +@@ -5142,8 +5094,6 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
1047 +
1048 + RTL_W8(tp, DBG_REG, FIX_NAK_1);
1049 +
1050 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1051 +-
1052 + RTL_W8(tp, Config1,
1053 + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
1054 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
1055 +@@ -5159,8 +5109,6 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
1056 + {
1057 + rtl_set_def_aspm_entry_latency(tp);
1058 +
1059 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1060 +-
1061 + RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
1062 + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
1063 + }
1064 +@@ -5221,8 +5169,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
1065 +
1066 + rtl_ephy_init(tp, e_info_8402);
1067 +
1068 +- rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
1069 +-
1070 + rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
1071 + rtl_reset_packet_filter(tp);
1072 + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
1073 +@@ -5438,10 +5384,18 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
1074 +
1075 + static void rtl_hw_start_8168(struct rtl8169_private *tp)
1076 + {
1077 +- if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
1078 +- tp->mac_version == RTL_GIGA_MAC_VER_16)
1079 ++ switch (tp->mac_version) {
1080 ++ case RTL_GIGA_MAC_VER_11:
1081 ++ case RTL_GIGA_MAC_VER_12:
1082 ++ case RTL_GIGA_MAC_VER_13:
1083 ++ case RTL_GIGA_MAC_VER_16:
1084 ++ case RTL_GIGA_MAC_VER_17:
1085 + pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL,
1086 + PCI_EXP_DEVCTL_NOSNOOP_EN);
1087 ++ break;
1088 ++ default:
1089 ++ break;
1090 ++ }
1091 +
1092 + if (rtl_is_8168evl_up(tp))
1093 + RTL_W8(tp, MaxTxPacketSize, EarlySize);
1094 +@@ -5498,7 +5452,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
1095 + rtl_set_rx_tx_desc_registers(tp);
1096 + rtl_lock_config_regs(tp);
1097 +
1098 +- rtl_jumbo_config(tp, tp->dev->mtu);
1099 ++ rtl_jumbo_config(tp);
1100 +
1101 + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
1102 + RTL_R16(tp, CPlusCmd);
1103 +@@ -5513,10 +5467,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1104 + {
1105 + struct rtl8169_private *tp = netdev_priv(dev);
1106 +
1107 +- rtl_jumbo_config(tp, new_mtu);
1108 +-
1109 + dev->mtu = new_mtu;
1110 + netdev_update_features(dev);
1111 ++ rtl_jumbo_config(tp);
1112 +
1113 + /* Reportedly at least Asus X453MA truncates packets otherwise */
1114 + if (tp->mac_version == RTL_GIGA_MAC_VER_37)
1115 +@@ -6368,8 +6321,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
1116 + if (!tp->supports_gmii)
1117 + phy_set_max_speed(phydev, SPEED_100);
1118 +
1119 +- phy_support_asym_pause(phydev);
1120 +-
1121 + phy_attached_info(phydev);
1122 +
1123 + return 0;
1124 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
1125 +index 91cf1d1672637..9dbe625ad4477 100644
1126 +--- a/drivers/net/phy/marvell.c
1127 ++++ b/drivers/net/phy/marvell.c
1128 +@@ -2401,9 +2401,31 @@ static struct phy_driver marvell_drivers[] = {
1129 + .get_stats = marvell_get_stats,
1130 + },
1131 + {
1132 +- .phy_id = MARVELL_PHY_ID_88E6390,
1133 ++ .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
1134 + .phy_id_mask = MARVELL_PHY_ID_MASK,
1135 +- .name = "Marvell 88E6390",
1136 ++ .name = "Marvell 88E6341 Family",
1137 ++ /* PHY_GBIT_FEATURES */
1138 ++ .probe = m88e1510_probe,
1139 ++ .config_init = &marvell_config_init,
1140 ++ .config_aneg = &m88e6390_config_aneg,
1141 ++ .read_status = &marvell_read_status,
1142 ++ .ack_interrupt = &marvell_ack_interrupt,
1143 ++ .config_intr = &marvell_config_intr,
1144 ++ .did_interrupt = &m88e1121_did_interrupt,
1145 ++ .resume = &genphy_resume,
1146 ++ .suspend = &genphy_suspend,
1147 ++ .read_page = marvell_read_page,
1148 ++ .write_page = marvell_write_page,
1149 ++ .get_sset_count = marvell_get_sset_count,
1150 ++ .get_strings = marvell_get_strings,
1151 ++ .get_stats = marvell_get_stats,
1152 ++ .get_tunable = m88e1540_get_tunable,
1153 ++ .set_tunable = m88e1540_set_tunable,
1154 ++ },
1155 ++ {
1156 ++ .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
1157 ++ .phy_id_mask = MARVELL_PHY_ID_MASK,
1158 ++ .name = "Marvell 88E6390 Family",
1159 + /* PHY_GBIT_FEATURES */
1160 + .probe = m88e6390_probe,
1161 + .config_init = &marvell_config_init,
1162 +@@ -2441,7 +2463,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
1163 + { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
1164 + { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
1165 + { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
1166 +- { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
1167 ++ { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
1168 ++ { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
1169 + { }
1170 + };
1171 +
1172 +diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
1173 +index 01305ba2d3aac..9d04ca53229b5 100644
1174 +--- a/drivers/net/wireless/virt_wifi.c
1175 ++++ b/drivers/net/wireless/virt_wifi.c
1176 +@@ -12,6 +12,7 @@
1177 + #include <net/cfg80211.h>
1178 + #include <net/rtnetlink.h>
1179 + #include <linux/etherdevice.h>
1180 ++#include <linux/math64.h>
1181 + #include <linux/module.h>
1182 +
1183 + static struct wiphy *common_wiphy;
1184 +@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
1185 + scan_result.work);
1186 + struct wiphy *wiphy = priv_to_wiphy(priv);
1187 + struct cfg80211_scan_info scan_info = { .aborted = false };
1188 ++ u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
1189 +
1190 + informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
1191 + CFG80211_BSS_FTYPE_PRESP,
1192 +- fake_router_bssid,
1193 +- ktime_get_boottime_ns(),
1194 ++ fake_router_bssid, tsf,
1195 + WLAN_CAPABILITY_ESS, 0,
1196 + (void *)&ssid, sizeof(ssid),
1197 + DBM_TO_MBM(-50), GFP_KERNEL);
1198 +diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
1199 +index ef423ba1a7116..b8236a9e8750d 100644
1200 +--- a/drivers/nvdimm/region_devs.c
1201 ++++ b/drivers/nvdimm/region_devs.c
1202 +@@ -1142,6 +1142,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
1203 + || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
1204 + return -ENXIO;
1205 +
1206 ++ /* Test if an explicit flush function is defined */
1207 ++ if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
1208 ++ return 1;
1209 ++
1210 ++ /* Test if any flush hints for the region are available */
1211 + for (i = 0; i < nd_region->ndr_mappings; i++) {
1212 + struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1213 + struct nvdimm *nvdimm = nd_mapping->nvdimm;
1214 +@@ -1152,8 +1157,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
1215 + }
1216 +
1217 + /*
1218 +- * The platform defines dimm devices without hints, assume
1219 +- * platform persistence mechanism like ADR
1220 ++ * The platform defines dimm devices without hints nor explicit flush,
1221 ++ * assume platform persistence mechanism like ADR
1222 + */
1223 + return 0;
1224 + }
1225 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
1226 +index dd755a56cf521..5d28bb7f2ca40 100644
1227 +--- a/drivers/scsi/libsas/sas_ata.c
1228 ++++ b/drivers/scsi/libsas/sas_ata.c
1229 +@@ -200,18 +200,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
1230 + memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
1231 + task->total_xfer_len = qc->nbytes;
1232 + task->num_scatter = qc->n_elem;
1233 ++ task->data_dir = qc->dma_dir;
1234 ++ } else if (qc->tf.protocol == ATA_PROT_NODATA) {
1235 ++ task->data_dir = DMA_NONE;
1236 + } else {
1237 + for_each_sg(qc->sg, sg, qc->n_elem, si)
1238 + xfer += sg_dma_len(sg);
1239 +
1240 + task->total_xfer_len = xfer;
1241 + task->num_scatter = si;
1242 +- }
1243 +-
1244 +- if (qc->tf.protocol == ATA_PROT_NODATA)
1245 +- task->data_dir = DMA_NONE;
1246 +- else
1247 + task->data_dir = qc->dma_dir;
1248 ++ }
1249 + task->scatter = qc->sg;
1250 + task->ata_task.retry_count = 1;
1251 + task->task_state_flags = SAS_TASK_STATE_PENDING;
1252 +diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
1253 +index 7bbff91f8883e..88a56e8480f71 100644
1254 +--- a/drivers/scsi/qla2xxx/qla_dbg.c
1255 ++++ b/drivers/scsi/qla2xxx/qla_dbg.c
1256 +@@ -18,7 +18,7 @@
1257 + * | Device Discovery | 0x2134 | 0x210e-0x2116 |
1258 + * | | | 0x211a |
1259 + * | | | 0x211c-0x2128 |
1260 +- * | | | 0x212a-0x2130 |
1261 ++ * | | | 0x212a-0x2134 |
1262 + * | Queue Command and IO tracing | 0x3074 | 0x300b |
1263 + * | | | 0x3027-0x3028 |
1264 + * | | | 0x303d-0x3041 |
1265 +diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
1266 +index c57b95a206888..7c22f8eea3ead 100644
1267 +--- a/drivers/scsi/qla2xxx/qla_def.h
1268 ++++ b/drivers/scsi/qla2xxx/qla_def.h
1269 +@@ -2281,7 +2281,7 @@ typedef struct {
1270 + uint8_t fabric_port_name[WWN_SIZE];
1271 + uint16_t fp_speed;
1272 + uint8_t fc4_type;
1273 +- uint8_t fc4f_nvme; /* nvme fc4 feature bits */
1274 ++ uint8_t fc4_features;
1275 + } sw_info_t;
1276 +
1277 + /* FCP-4 types */
1278 +@@ -2452,7 +2452,7 @@ typedef struct fc_port {
1279 + u32 supported_classes;
1280 +
1281 + uint8_t fc4_type;
1282 +- uint8_t fc4f_nvme;
1283 ++ uint8_t fc4_features;
1284 + uint8_t scan_state;
1285 +
1286 + unsigned long last_queue_full;
1287 +@@ -2466,6 +2466,7 @@ typedef struct fc_port {
1288 + struct qla_tgt_sess *tgt_session;
1289 + struct ct_sns_desc ct_desc;
1290 + enum discovery_state disc_state;
1291 ++ atomic_t shadow_disc_state;
1292 + enum discovery_state next_disc_state;
1293 + enum login_state fw_login_state;
1294 + unsigned long dm_login_expire;
1295 +@@ -2483,6 +2484,9 @@ typedef struct fc_port {
1296 + u16 n2n_chip_reset;
1297 + } fc_port_t;
1298 +
1299 ++#define FC4_PRIORITY_NVME 0
1300 ++#define FC4_PRIORITY_FCP 1
1301 ++
1302 + #define QLA_FCPORT_SCAN 1
1303 + #define QLA_FCPORT_FOUND 2
1304 +
1305 +@@ -2507,6 +2511,19 @@ struct event_arg {
1306 +
1307 + extern const char *const port_state_str[5];
1308 +
1309 ++static const char * const port_dstate_str[] = {
1310 ++ "DELETED",
1311 ++ "GNN_ID",
1312 ++ "GNL",
1313 ++ "LOGIN_PEND",
1314 ++ "LOGIN_FAILED",
1315 ++ "GPDB",
1316 ++ "UPD_FCPORT",
1317 ++ "LOGIN_COMPLETE",
1318 ++ "ADISC",
1319 ++ "DELETE_PEND"
1320 ++};
1321 ++
1322 + /*
1323 + * FC port flags.
1324 + */
1325 +@@ -4298,6 +4315,8 @@ struct qla_hw_data {
1326 + atomic_t nvme_active_aen_cnt;
1327 + uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
1328 +
1329 ++ uint8_t fc4_type_priority;
1330 ++
1331 + atomic_t zio_threshold;
1332 + uint16_t last_zio_threshold;
1333 +
1334 +@@ -4823,6 +4842,23 @@ struct sff_8247_a0 {
1335 + ha->current_topology == ISP_CFG_N || \
1336 + !ha->current_topology)
1337 +
1338 ++#define NVME_TYPE(fcport) \
1339 ++ (fcport->fc4_type & FS_FC4TYPE_NVME) \
1340 ++
1341 ++#define FCP_TYPE(fcport) \
1342 ++ (fcport->fc4_type & FS_FC4TYPE_FCP) \
1343 ++
1344 ++#define NVME_ONLY_TARGET(fcport) \
1345 ++ (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \
1346 ++
1347 ++#define NVME_FCP_TARGET(fcport) \
1348 ++ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \
1349 ++
1350 ++#define NVME_TARGET(ha, fcport) \
1351 ++ ((NVME_FCP_TARGET(fcport) && \
1352 ++ (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
1353 ++ NVME_ONLY_TARGET(fcport)) \
1354 ++
1355 + #define PRLI_PHASE(_cls) \
1356 + ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
1357 +
1358 +diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
1359 +index dc2366a296654..9dc09c1174169 100644
1360 +--- a/drivers/scsi/qla2xxx/qla_fw.h
1361 ++++ b/drivers/scsi/qla2xxx/qla_fw.h
1362 +@@ -2105,4 +2105,6 @@ struct qla_fcp_prio_cfg {
1363 + #define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
1364 + #define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
1365 +
1366 ++#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196
1367 ++
1368 + #endif
1369 +diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
1370 +index d11416dcee4ef..7aa233771ec86 100644
1371 +--- a/drivers/scsi/qla2xxx/qla_gbl.h
1372 ++++ b/drivers/scsi/qla2xxx/qla_gbl.h
1373 +@@ -80,6 +80,7 @@ extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
1374 + int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
1375 + extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
1376 + extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
1377 ++extern int qla24xx_async_abort_cmd(srb_t *, bool);
1378 +
1379 + extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
1380 + extern fc_port_t *
1381 +@@ -255,6 +256,7 @@ extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
1382 +
1383 + extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
1384 + extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
1385 ++extern int qla24xx_async_abort_cmd(srb_t *, bool);
1386 +
1387 + extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
1388 +
1389 +@@ -917,4 +919,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
1390 +
1391 + /* nvme.c */
1392 + void qla_nvme_unregister_remote_port(struct fc_port *fcport);
1393 ++void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
1394 + #endif /* _QLA_GBL_H */
1395 +diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
1396 +index fc6e12fb7d77b..d9b5ea77fde99 100644
1397 +--- a/drivers/scsi/qla2xxx/qla_gs.c
1398 ++++ b/drivers/scsi/qla2xxx/qla_gs.c
1399 +@@ -248,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1400 + WWN_SIZE);
1401 +
1402 + fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
1403 +- FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
1404 ++ FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
1405 +
1406 + if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
1407 + ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
1408 +@@ -2887,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1409 + struct ct_sns_req *ct_req;
1410 + struct ct_sns_rsp *ct_rsp;
1411 + struct qla_hw_data *ha = vha->hw;
1412 +- uint8_t fcp_scsi_features = 0;
1413 ++ uint8_t fcp_scsi_features = 0, nvme_features = 0;
1414 + struct ct_arg arg;
1415 +
1416 + for (i = 0; i < ha->max_fibre_devices; i++) {
1417 +@@ -2933,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1418 + ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
1419 + fcp_scsi_features &= 0x0f;
1420 +
1421 +- if (fcp_scsi_features)
1422 +- list[i].fc4_type = FC4_TYPE_FCP_SCSI;
1423 +- else
1424 +- list[i].fc4_type = FC4_TYPE_OTHER;
1425 ++ if (fcp_scsi_features) {
1426 ++ list[i].fc4_type = FS_FC4TYPE_FCP;
1427 ++ list[i].fc4_features = fcp_scsi_features;
1428 ++ }
1429 +
1430 +- list[i].fc4f_nvme =
1431 ++ nvme_features =
1432 + ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
1433 +- list[i].fc4f_nvme &= 0xf;
1434 ++ nvme_features &= 0xf;
1435 ++
1436 ++ if (nvme_features) {
1437 ++ list[i].fc4_type |= FS_FC4TYPE_NVME;
1438 ++ list[i].fc4_features = nvme_features;
1439 ++ }
1440 + }
1441 +
1442 + /* Last device exit. */
1443 +@@ -3435,6 +3440,8 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
1444 + fc_port_t *fcport = sp->fcport;
1445 + struct ct_sns_rsp *ct_rsp;
1446 + struct event_arg ea;
1447 ++ uint8_t fc4_scsi_feat;
1448 ++ uint8_t fc4_nvme_feat;
1449 +
1450 + ql_dbg(ql_dbg_disc, vha, 0x2133,
1451 + "Async done-%s res %x ID %x. %8phC\n",
1452 +@@ -3442,24 +3449,25 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
1453 +
1454 + fcport->flags &= ~FCF_ASYNC_SENT;
1455 + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
1456 ++ fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
1457 ++ fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
1458 ++
1459 + /*
1460 + * FC-GS-7, 5.2.3.12 FC-4 Features - format
1461 + * The format of the FC-4 Features object, as defined by the FC-4,
1462 + * Shall be an array of 4-bit values, one for each type code value
1463 + */
1464 + if (!res) {
1465 +- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
1466 ++ if (fc4_scsi_feat & 0xf) {
1467 + /* w1 b00:03 */
1468 +- fcport->fc4_type =
1469 +- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
1470 +- fcport->fc4_type &= 0xf;
1471 +- }
1472 ++ fcport->fc4_type = FS_FC4TYPE_FCP;
1473 ++ fcport->fc4_features = fc4_scsi_feat & 0xf;
1474 ++ }
1475 +
1476 +- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
1477 ++ if (fc4_nvme_feat & 0xf) {
1478 + /* w5 [00:03]/28h */
1479 +- fcport->fc4f_nvme =
1480 +- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
1481 +- fcport->fc4f_nvme &= 0xf;
1482 ++ fcport->fc4_type |= FS_FC4TYPE_NVME;
1483 ++ fcport->fc4_features = fc4_nvme_feat & 0xf;
1484 + }
1485 + }
1486 +
1487 +@@ -4282,7 +4290,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
1488 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1489 + return rval;
1490 +
1491 +- fcport->disc_state = DSC_GNN_ID;
1492 ++ qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
1493 + sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
1494 + if (!sp)
1495 + goto done;
1496 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1497 +index b300e11095828..643b8ae36cbeb 100644
1498 +--- a/drivers/scsi/qla2xxx/qla_init.c
1499 ++++ b/drivers/scsi/qla2xxx/qla_init.c
1500 +@@ -50,16 +50,9 @@ qla2x00_sp_timeout(struct timer_list *t)
1501 + {
1502 + srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
1503 + struct srb_iocb *iocb;
1504 +- struct req_que *req;
1505 +- unsigned long flags;
1506 +- struct qla_hw_data *ha = sp->vha->hw;
1507 +
1508 +- WARN_ON_ONCE(irqs_disabled());
1509 +- spin_lock_irqsave(&ha->hardware_lock, flags);
1510 +- req = sp->qpair->req;
1511 +- req->outstanding_cmds[sp->handle] = NULL;
1512 ++ WARN_ON(irqs_disabled());
1513 + iocb = &sp->u.iocb_cmd;
1514 +- spin_unlock_irqrestore(&ha->hardware_lock, flags);
1515 + iocb->timeout(sp);
1516 + }
1517 +
1518 +@@ -153,7 +146,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
1519 + sp->free(sp);
1520 + }
1521 +
1522 +-static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
1523 ++int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
1524 + {
1525 + scsi_qla_host_t *vha = cmd_sp->vha;
1526 + struct srb_iocb *abt_iocb;
1527 +@@ -253,6 +246,7 @@ qla2x00_async_iocb_timeout(void *data)
1528 + case SRB_NACK_PRLI:
1529 + case SRB_NACK_LOGO:
1530 + case SRB_CTRL_VP:
1531 ++ default:
1532 + rc = qla24xx_async_abort_cmd(sp, false);
1533 + if (rc) {
1534 + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1535 +@@ -269,10 +263,6 @@ qla2x00_async_iocb_timeout(void *data)
1536 + sp->done(sp, QLA_FUNCTION_TIMEOUT);
1537 + }
1538 + break;
1539 +- default:
1540 +- WARN_ON_ONCE(true);
1541 +- sp->done(sp, QLA_FUNCTION_TIMEOUT);
1542 +- break;
1543 + }
1544 + }
1545 +
1546 +@@ -337,10 +327,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
1547 + if (!sp)
1548 + goto done;
1549 +
1550 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
1551 + fcport->flags |= FCF_ASYNC_SENT;
1552 + fcport->logout_completed = 0;
1553 +
1554 +- fcport->disc_state = DSC_LOGIN_PEND;
1555 + sp->type = SRB_LOGIN_CMD;
1556 + sp->name = "login";
1557 + sp->gen1 = fcport->rscn_gen;
1558 +@@ -356,7 +346,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
1559 + else
1560 + lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
1561 +
1562 +- if (fcport->fc4f_nvme)
1563 ++ if (NVME_TARGET(vha->hw, fcport))
1564 + lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
1565 +
1566 + ql_dbg(ql_dbg_disc, vha, 0x2072,
1567 +@@ -544,7 +534,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1568 +
1569 + e->u.fcport.fcport = fcport;
1570 + fcport->flags |= FCF_ASYNC_ACTIVE;
1571 +- fcport->disc_state = DSC_LOGIN_PEND;
1572 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
1573 + return qla2x00_post_work(vha, e);
1574 + }
1575 +
1576 +@@ -767,14 +757,12 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
1577 + fcport->fc4_type &= ~FS_FC4TYPE_NVME;
1578 + }
1579 +
1580 +-
1581 + ql_dbg(ql_dbg_disc, vha, 0x20e2,
1582 +- "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
1583 ++ "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
1584 + __func__, fcport->port_name,
1585 + e->current_login_state, fcport->fw_login_state,
1586 +- fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
1587 +- fcport->d_id.b.domain, fcport->d_id.b.area,
1588 +- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
1589 ++ fcport->fc4_type, id.b24, fcport->d_id.b24,
1590 ++ loop_id, fcport->loop_id);
1591 +
1592 + switch (fcport->disc_state) {
1593 + case DSC_DELETE_PEND:
1594 +@@ -856,7 +844,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
1595 + * with GNL. Push disc_state back to DELETED
1596 + * so GNL can go out again
1597 + */
1598 +- fcport->disc_state = DSC_DELETED;
1599 ++ qla2x00_set_fcport_disc_state(fcport,
1600 ++ DSC_DELETED);
1601 + break;
1602 + case DSC_LS_PRLI_COMP:
1603 + if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
1604 +@@ -932,7 +921,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
1605 + qla24xx_fcport_handle_login(vha, fcport);
1606 + break;
1607 + case ISP_CFG_N:
1608 +- fcport->disc_state = DSC_DELETED;
1609 ++ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
1610 + if (time_after_eq(jiffies, fcport->dm_login_expire)) {
1611 + if (fcport->n2n_link_reset_cnt < 2) {
1612 + fcport->n2n_link_reset_cnt++;
1613 +@@ -1102,7 +1091,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1614 +
1615 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1616 + fcport->flags |= FCF_ASYNC_SENT;
1617 +- fcport->disc_state = DSC_GNL;
1618 ++ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1619 + fcport->last_rscn_gen = fcport->rscn_gen;
1620 + fcport->last_login_gen = fcport->login_gen;
1621 +
1622 +@@ -1277,13 +1266,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1623 + sp->done = qla2x00_async_prli_sp_done;
1624 + lio->u.logio.flags = 0;
1625 +
1626 +- if (fcport->fc4f_nvme)
1627 ++ if (NVME_TARGET(vha->hw, fcport))
1628 + lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1629 +
1630 + ql_dbg(ql_dbg_disc, vha, 0x211b,
1631 + "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
1632 + fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1633 +- fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");
1634 ++ fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc");
1635 +
1636 + rval = qla2x00_start_sp(sp);
1637 + if (rval != QLA_SUCCESS) {
1638 +@@ -1332,12 +1321,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1639 + return rval;
1640 + }
1641 +
1642 +- fcport->disc_state = DSC_GPDB;
1643 +-
1644 + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1645 + if (!sp)
1646 + goto done;
1647 +
1648 ++ qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
1649 ++
1650 + fcport->flags |= FCF_ASYNC_SENT;
1651 + sp->type = SRB_MB_IOCB;
1652 + sp->name = "gpdb";
1653 +@@ -1416,7 +1405,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1654 + ql_dbg(ql_dbg_disc, vha, 0x20d6,
1655 + "%s %d %8phC session revalidate success\n",
1656 + __func__, __LINE__, ea->fcport->port_name);
1657 +- ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
1658 ++ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
1659 + }
1660 + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1661 + }
1662 +@@ -1434,14 +1423,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1663 + fcport->flags &= ~FCF_ASYNC_SENT;
1664 +
1665 + ql_dbg(ql_dbg_disc, vha, 0x20d2,
1666 +- "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
1667 +- fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
1668 +- ea->rc);
1669 ++ "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__,
1670 ++ fcport->port_name, fcport->disc_state, pd->current_login_state,
1671 ++ fcport->fc4_type, ea->rc);
1672 +
1673 + if (fcport->disc_state == DSC_DELETE_PEND)
1674 + return;
1675 +
1676 +- if (fcport->fc4f_nvme)
1677 ++ if (NVME_TARGET(vha->hw, fcport))
1678 + ls = pd->current_login_state >> 4;
1679 + else
1680 + ls = pd->current_login_state & 0xf;
1681 +@@ -1470,7 +1459,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1682 + /* Set discovery state back to GNL to Relogin attempt */
1683 + if (qla_dual_mode_enabled(vha) ||
1684 + qla_ini_mode_enabled(vha)) {
1685 +- fcport->disc_state = DSC_GNL;
1686 ++ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1687 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1688 + }
1689 + return;
1690 +@@ -1630,7 +1619,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1691 + ql_dbg(ql_dbg_disc, vha, 0x2118,
1692 + "%s %d %8phC post %s PRLI\n",
1693 + __func__, __LINE__, fcport->port_name,
1694 +- fcport->fc4f_nvme ? "NVME" : "FC");
1695 ++ NVME_TARGET(vha->hw, fcport) ? "NVME" :
1696 ++ "FC");
1697 + qla24xx_post_prli_work(vha, fcport);
1698 + }
1699 + break;
1700 +@@ -1757,6 +1747,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1701 + qla24xx_fcport_handle_login(vha, fcport);
1702 + }
1703 +
1704 ++void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1705 ++ struct event_arg *ea)
1706 ++{
1707 ++ ql_dbg(ql_dbg_disc, vha, 0x2118,
1708 ++ "%s %d %8phC post PRLI\n",
1709 ++ __func__, __LINE__, ea->fcport->port_name);
1710 ++ qla24xx_post_prli_work(vha, ea->fcport);
1711 ++}
1712 ++
1713 + /*
1714 + * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1715 + * to be consumed by the fcport
1716 +@@ -1785,9 +1784,23 @@ qla2x00_tmf_iocb_timeout(void *data)
1717 + {
1718 + srb_t *sp = data;
1719 + struct srb_iocb *tmf = &sp->u.iocb_cmd;
1720 ++ int rc, h;
1721 ++ unsigned long flags;
1722 +
1723 +- tmf->u.tmf.comp_status = CS_TIMEOUT;
1724 +- complete(&tmf->u.tmf.comp);
1725 ++ rc = qla24xx_async_abort_cmd(sp, false);
1726 ++ if (rc) {
1727 ++ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1728 ++ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1729 ++ if (sp->qpair->req->outstanding_cmds[h] == sp) {
1730 ++ sp->qpair->req->outstanding_cmds[h] = NULL;
1731 ++ break;
1732 ++ }
1733 ++ }
1734 ++ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
1735 ++ tmf->u.tmf.comp_status = CS_TIMEOUT;
1736 ++ tmf->u.tmf.data = QLA_FUNCTION_FAILED;
1737 ++ complete(&tmf->u.tmf.comp);
1738 ++ }
1739 + }
1740 +
1741 + static void qla2x00_tmf_sp_done(srb_t *sp, int res)
1742 +@@ -1916,12 +1929,20 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1743 + break;
1744 + }
1745 +
1746 +- if (ea->fcport->fc4f_nvme) {
1747 ++ /*
1748 ++ * Retry PRLI with other FC-4 type if failure occurred on dual
1749 ++ * FCP/NVMe port
1750 ++ */
1751 ++ if (NVME_FCP_TARGET(ea->fcport)) {
1752 + ql_dbg(ql_dbg_disc, vha, 0x2118,
1753 +- "%s %d %8phC post fc4 prli\n",
1754 +- __func__, __LINE__, ea->fcport->port_name);
1755 +- ea->fcport->fc4f_nvme = 0;
1756 +- return;
1757 ++ "%s %d %8phC post %s prli\n",
1758 ++ __func__, __LINE__, ea->fcport->port_name,
1759 ++ (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ?
1760 ++ "NVMe" : "FCP");
1761 ++ if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME)
1762 ++ ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME;
1763 ++ else
1764 ++ ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP;
1765 + }
1766 +
1767 + ea->fcport->flags &= ~FCF_ASYNC_SENT;
1768 +@@ -1988,7 +2009,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1769 + * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
1770 + * requests.
1771 + */
1772 +- if (ea->fcport->fc4f_nvme) {
1773 ++ if (NVME_TARGET(vha->hw, ea->fcport)) {
1774 + ql_dbg(ql_dbg_disc, vha, 0x2117,
1775 + "%s %d %8phC post prli\n",
1776 + __func__, __LINE__, ea->fcport->port_name);
1777 +@@ -2015,7 +2036,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1778 + __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
1779 +
1780 + ea->fcport->flags &= ~FCF_ASYNC_SENT;
1781 +- ea->fcport->disc_state = DSC_LOGIN_FAILED;
1782 ++ qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
1783 + if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
1784 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1785 + else
1786 +@@ -5395,7 +5416,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
1787 + ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
1788 + __func__, fcport->port_name);
1789 +
1790 +- fcport->disc_state = DSC_UPD_FCPORT;
1791 ++ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
1792 + fcport->login_retry = vha->hw->login_retry_count;
1793 + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
1794 + fcport->deleted = 0;
1795 +@@ -5413,9 +5434,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
1796 +
1797 + qla2x00_iidma_fcport(vha, fcport);
1798 +
1799 +- if (fcport->fc4f_nvme) {
1800 ++ if (NVME_TARGET(vha->hw, fcport)) {
1801 + qla_nvme_register_remote(vha, fcport);
1802 +- fcport->disc_state = DSC_LOGIN_COMPLETE;
1803 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
1804 + qla2x00_set_fcport_state(fcport, FCS_ONLINE);
1805 + return;
1806 + }
1807 +@@ -5460,7 +5481,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
1808 + }
1809 + }
1810 +
1811 +- fcport->disc_state = DSC_LOGIN_COMPLETE;
1812 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
1813 + }
1814 +
1815 + void qla_register_fcport_fn(struct work_struct *work)
1816 +@@ -5741,11 +5762,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1817 + new_fcport->fc4_type = swl[swl_idx].fc4_type;
1818 +
1819 + new_fcport->nvme_flag = 0;
1820 +- new_fcport->fc4f_nvme = 0;
1821 + if (vha->flags.nvme_enabled &&
1822 +- swl[swl_idx].fc4f_nvme) {
1823 +- new_fcport->fc4f_nvme =
1824 +- swl[swl_idx].fc4f_nvme;
1825 ++ swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
1826 + ql_log(ql_log_info, vha, 0x2131,
1827 + "FOUND: NVME port %8phC as FC Type 28h\n",
1828 + new_fcport->port_name);
1829 +@@ -5801,7 +5819,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1830 +
1831 + /* Bypass ports whose FCP-4 type is not FCP_SCSI */
1832 + if (ql2xgffidenable &&
1833 +- (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
1834 ++ (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
1835 + new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
1836 + continue;
1837 +
1838 +@@ -5870,9 +5888,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
1839 + break;
1840 + }
1841 +
1842 +- if (fcport->fc4f_nvme) {
1843 ++ if (NVME_TARGET(vha->hw, fcport)) {
1844 + if (fcport->disc_state == DSC_DELETE_PEND) {
1845 +- fcport->disc_state = DSC_GNL;
1846 ++ qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1847 + vha->fcport_count--;
1848 + fcport->login_succ = 0;
1849 + }
1850 +@@ -8545,6 +8563,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
1851 + /* N2N: driver will initiate Login instead of FW */
1852 + icb->firmware_options_3 |= BIT_8;
1853 +
1854 ++ /* Determine NVMe/FCP priority for target ports */
1855 ++ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
1856 ++ ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
1857 ++ ha->fc4_type_priority & BIT_0 ? "FCP" : "NVMe");
1858 ++
1859 + if (rval) {
1860 + ql_log(ql_log_warn, vha, 0x0076,
1861 + "NVRAM configuration failed.\n");
1862 +diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
1863 +index 6dfde42d799b5..477b0b8a5f4bc 100644
1864 +--- a/drivers/scsi/qla2xxx/qla_inline.h
1865 ++++ b/drivers/scsi/qla2xxx/qla_inline.h
1866 +@@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
1867 + INIT_LIST_HEAD(&ctx->dsd_list);
1868 + }
1869 +
1870 ++static inline void
1871 ++qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
1872 ++{
1873 ++ int old_val;
1874 ++ uint8_t shiftbits, mask;
1875 ++
1876 ++ /* This will have to change when the max no. of states > 16 */
1877 ++ shiftbits = 4;
1878 ++ mask = (1 << shiftbits) - 1;
1879 ++
1880 ++ fcport->disc_state = state;
1881 ++ while (1) {
1882 ++ old_val = atomic_read(&fcport->shadow_disc_state);
1883 ++ if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
1884 ++ old_val, (old_val << shiftbits) | state)) {
1885 ++ ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
1886 ++ "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
1887 ++ fcport->port_name, port_dstate_str[old_val & mask],
1888 ++ port_dstate_str[state], fcport->d_id.b24);
1889 ++ return;
1890 ++ }
1891 ++ }
1892 ++}
1893 ++
1894 + static inline int
1895 + qla2x00_hba_err_chk_enabled(srb_t *sp)
1896 + {
1897 +@@ -312,3 +336,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
1898 +
1899 + WRT_REG_DWORD(req->req_q_in, req->ring_index);
1900 + }
1901 ++
1902 ++static inline int
1903 ++qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
1904 ++{
1905 ++ uint32_t data;
1906 ++
1907 ++ data =
1908 ++ ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];
1909 ++
1910 ++
1911 ++ return ((data >> 6) & BIT_0);
1912 ++}
1913 +diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
1914 +index aed4ce66e6cf9..936103604d02d 100644
1915 +--- a/drivers/scsi/qla2xxx/qla_iocb.c
1916 ++++ b/drivers/scsi/qla2xxx/qla_iocb.c
1917 +@@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
1918 + fc_port_t *fcport = sp->fcport;
1919 + struct scsi_qla_host *vha = sp->vha;
1920 + struct srb_iocb *lio = &sp->u.iocb_cmd;
1921 ++ unsigned long flags = 0;
1922 ++ int res, h;
1923 +
1924 + ql_dbg(ql_dbg_io, vha, 0x3069,
1925 + "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
1926 + sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
1927 + fcport->d_id.b.al_pa);
1928 +
1929 +- complete(&lio->u.els_logo.comp);
1930 ++ /* Abort the exchange */
1931 ++ res = qla24xx_async_abort_cmd(sp, false);
1932 ++ if (res) {
1933 ++ ql_dbg(ql_dbg_io, vha, 0x3070,
1934 ++ "mbx abort_command failed.\n");
1935 ++ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1936 ++ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1937 ++ if (sp->qpair->req->outstanding_cmds[h] == sp) {
1938 ++ sp->qpair->req->outstanding_cmds[h] = NULL;
1939 ++ break;
1940 ++ }
1941 ++ }
1942 ++ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
1943 ++ complete(&lio->u.els_logo.comp);
1944 ++ } else {
1945 ++ ql_dbg(ql_dbg_io, vha, 0x3071,
1946 ++ "mbx abort_command success.\n");
1947 ++ }
1948 + }
1949 +
1950 + static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
1951 +@@ -2708,23 +2727,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
1952 + srb_t *sp = data;
1953 + fc_port_t *fcport = sp->fcport;
1954 + struct scsi_qla_host *vha = sp->vha;
1955 +- struct qla_hw_data *ha = vha->hw;
1956 + unsigned long flags = 0;
1957 +- int res;
1958 ++ int res, h;
1959 +
1960 + ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
1961 + "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
1962 + sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
1963 +
1964 + /* Abort the exchange */
1965 +- spin_lock_irqsave(&ha->hardware_lock, flags);
1966 +- res = ha->isp_ops->abort_command(sp);
1967 ++ res = qla24xx_async_abort_cmd(sp, false);
1968 + ql_dbg(ql_dbg_io, vha, 0x3070,
1969 + "mbx abort_command %s\n",
1970 + (res == QLA_SUCCESS) ? "successful" : "failed");
1971 +- spin_unlock_irqrestore(&ha->hardware_lock, flags);
1972 +-
1973 +- sp->done(sp, QLA_FUNCTION_TIMEOUT);
1974 ++ if (res) {
1975 ++ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1976 ++ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1977 ++ if (sp->qpair->req->outstanding_cmds[h] == sp) {
1978 ++ sp->qpair->req->outstanding_cmds[h] = NULL;
1979 ++ break;
1980 ++ }
1981 ++ }
1982 ++ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
1983 ++ sp->done(sp, QLA_FUNCTION_TIMEOUT);
1984 ++ }
1985 + }
1986 +
1987 + void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
1988 +@@ -2769,9 +2794,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
1989 + case CS_COMPLETE:
1990 + memset(&ea, 0, sizeof(ea));
1991 + ea.fcport = fcport;
1992 +- ea.data[0] = MBS_COMMAND_COMPLETE;
1993 +- ea.sp = sp;
1994 +- qla24xx_handle_plogi_done_event(vha, &ea);
1995 ++ ea.rc = res;
1996 ++ qla_handle_els_plogi_done(vha, &ea);
1997 + break;
1998 +
1999 + case CS_IOCB_ERROR:
2000 +@@ -2844,7 +2868,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2001 + fw_status[0], fw_status[1], fw_status[2]);
2002 +
2003 + fcport->flags &= ~FCF_ASYNC_SENT;
2004 +- fcport->disc_state = DSC_LOGIN_FAILED;
2005 ++ qla2x00_set_fcport_disc_state(fcport,
2006 ++ DSC_LOGIN_FAILED);
2007 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2008 + break;
2009 + }
2010 +@@ -2857,7 +2882,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2011 + fw_status[0], fw_status[1], fw_status[2]);
2012 +
2013 + sp->fcport->flags &= ~FCF_ASYNC_SENT;
2014 +- sp->fcport->disc_state = DSC_LOGIN_FAILED;
2015 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2016 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2017 + break;
2018 + }
2019 +@@ -2894,7 +2919,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2020 + }
2021 +
2022 + fcport->flags |= FCF_ASYNC_SENT;
2023 +- fcport->disc_state = DSC_LOGIN_PEND;
2024 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2025 + elsio = &sp->u.iocb_cmd;
2026 + ql_dbg(ql_dbg_io, vha, 0x3073,
2027 + "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2028 +diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
2029 +index c1631e42d35d1..098388a12febc 100644
2030 +--- a/drivers/scsi/qla2xxx/qla_mbx.c
2031 ++++ b/drivers/scsi/qla2xxx/qla_mbx.c
2032 +@@ -1924,7 +1924,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
2033 + pd24 = (struct port_database_24xx *) pd;
2034 +
2035 + /* Check for logged in state. */
2036 +- if (fcport->fc4f_nvme) {
2037 ++ if (NVME_TARGET(ha, fcport)) {
2038 + current_login_state = pd24->current_login_state >> 4;
2039 + last_login_state = pd24->last_login_state >> 4;
2040 + } else {
2041 +@@ -3891,8 +3891,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2042 + fcport->scan_state = QLA_FCPORT_FOUND;
2043 + fcport->n2n_flag = 1;
2044 + fcport->keep_nport_handle = 1;
2045 ++ fcport->fc4_type = FS_FC4TYPE_FCP;
2046 + if (vha->flags.nvme_enabled)
2047 +- fcport->fc4f_nvme = 1;
2048 ++ fcport->fc4_type |= FS_FC4TYPE_NVME;
2049 +
2050 + switch (fcport->disc_state) {
2051 + case DSC_DELETED:
2052 +@@ -6350,7 +6351,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
2053 + uint64_t zero = 0;
2054 + u8 current_login_state, last_login_state;
2055 +
2056 +- if (fcport->fc4f_nvme) {
2057 ++ if (NVME_TARGET(vha->hw, fcport)) {
2058 + current_login_state = pd->current_login_state >> 4;
2059 + last_login_state = pd->last_login_state >> 4;
2060 + } else {
2061 +@@ -6385,8 +6386,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
2062 + fcport->d_id.b.al_pa = pd->port_id[2];
2063 + fcport->d_id.b.rsvd_1 = 0;
2064 +
2065 +- if (fcport->fc4f_nvme) {
2066 +- fcport->port_type = 0;
2067 ++ if (NVME_TARGET(vha->hw, fcport)) {
2068 ++ fcport->port_type = FCT_NVME;
2069 + if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
2070 + fcport->port_type |= FCT_NVME_INITIATOR;
2071 + if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2072 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
2073 +index 67b1e74fcd1e6..af8306a9777fc 100644
2074 +--- a/drivers/scsi/qla2xxx/qla_os.c
2075 ++++ b/drivers/scsi/qla2xxx/qla_os.c
2076 +@@ -5014,7 +5014,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
2077 + fcport->jiffies_at_registration = jiffies;
2078 + fcport->sec_since_registration = 0;
2079 + fcport->next_disc_state = DSC_DELETED;
2080 +- fcport->disc_state = DSC_UPD_FCPORT;
2081 ++ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
2082 + spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
2083 +
2084 + queue_work(system_unbound_wq, &fcport->reg_work);
2085 +@@ -5055,19 +5055,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
2086 + fcport->d_id = e->u.new_sess.id;
2087 + fcport->flags |= FCF_FABRIC_DEVICE;
2088 + fcport->fw_login_state = DSC_LS_PLOGI_PEND;
2089 +- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
2090 +- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
2091 +-
2092 +- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
2093 +- fcport->fc4_type = FC4_TYPE_OTHER;
2094 +- fcport->fc4f_nvme = FC4_TYPE_NVME;
2095 +- }
2096 +
2097 + memcpy(fcport->port_name, e->u.new_sess.port_name,
2098 + WWN_SIZE);
2099 +
2100 +- if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
2101 ++ fcport->fc4_type = e->u.new_sess.fc4_type;
2102 ++ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
2103 ++ fcport->fc4_type = FS_FC4TYPE_FCP;
2104 + fcport->n2n_flag = 1;
2105 ++ if (vha->flags.nvme_enabled)
2106 ++ fcport->fc4_type |= FS_FC4TYPE_NVME;
2107 ++ }
2108 +
2109 + } else {
2110 + ql_dbg(ql_dbg_disc, vha, 0xffff,
2111 +@@ -5171,7 +5169,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
2112 + fcport->flags &= ~FCF_FABRIC_DEVICE;
2113 + fcport->keep_nport_handle = 1;
2114 + if (vha->flags.nvme_enabled) {
2115 +- fcport->fc4f_nvme = 1;
2116 ++ fcport->fc4_type =
2117 ++ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
2118 + fcport->n2n_flag = 1;
2119 + }
2120 + fcport->fw_login_state = 0;
2121 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2122 +index 8fd0a568303b5..509539ec58e93 100644
2123 +--- a/drivers/scsi/qla2xxx/qla_target.c
2124 ++++ b/drivers/scsi/qla2xxx/qla_target.c
2125 +@@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
2126 + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2127 + } else {
2128 + sp->fcport->login_retry = 0;
2129 +- sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
2130 ++ qla2x00_set_fcport_disc_state(sp->fcport,
2131 ++ DSC_LOGIN_COMPLETE);
2132 + sp->fcport->deleted = 0;
2133 + sp->fcport->logout_on_delete = 1;
2134 + }
2135 +@@ -1056,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work)
2136 + tgt->sess_count--;
2137 + }
2138 +
2139 +- sess->disc_state = DSC_DELETED;
2140 ++ qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
2141 + sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
2142 + sess->deleted = QLA_SESS_DELETED;
2143 +
2144 +@@ -1166,7 +1167,7 @@ void qlt_unreg_sess(struct fc_port *sess)
2145 + vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
2146 +
2147 + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
2148 +- sess->disc_state = DSC_DELETE_PEND;
2149 ++ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
2150 + sess->last_rscn_gen = sess->rscn_gen;
2151 + sess->last_login_gen = sess->login_gen;
2152 +
2153 +@@ -1268,7 +1269,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
2154 + spin_unlock_irqrestore(&sess->vha->work_lock, flags);
2155 +
2156 + sess->prli_pend_timer = 0;
2157 +- sess->disc_state = DSC_DELETE_PEND;
2158 ++ qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
2159 +
2160 + qla24xx_chk_fcp_state(sess);
2161 +
2162 +@@ -6061,7 +6062,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
2163 + if (!IS_SW_RESV_ADDR(fcport->d_id))
2164 + vha->fcport_count++;
2165 + fcport->login_gen++;
2166 +- fcport->disc_state = DSC_LOGIN_COMPLETE;
2167 ++ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
2168 + fcport->login_succ = 1;
2169 + newfcport = 1;
2170 + }
2171 +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
2172 +index 8cd0a87764dfd..9fee851c23a56 100644
2173 +--- a/drivers/scsi/scsi_transport_srp.c
2174 ++++ b/drivers/scsi/scsi_transport_srp.c
2175 +@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
2176 + res = mutex_lock_interruptible(&rport->mutex);
2177 + if (res)
2178 + goto out;
2179 +- if (rport->state != SRP_RPORT_FAIL_FAST)
2180 ++ if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
2181 + /*
2182 + * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
2183 + * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
2184 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2185 +index 2372e161cd5e8..a603f363835c4 100644
2186 +--- a/drivers/vfio/pci/vfio_pci.c
2187 ++++ b/drivers/vfio/pci/vfio_pci.c
2188 +@@ -1474,6 +1474,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
2189 +
2190 + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
2191 +
2192 ++ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
2193 ++ return -EINVAL;
2194 + if (vma->vm_end < vma->vm_start)
2195 + return -EINVAL;
2196 + if ((vma->vm_flags & VM_SHARED) == 0)
2197 +@@ -1482,7 +1484,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
2198 + int regnum = index - VFIO_PCI_NUM_REGIONS;
2199 + struct vfio_pci_region *region = vdev->region + regnum;
2200 +
2201 +- if (region && region->ops && region->ops->mmap &&
2202 ++ if (region->ops && region->ops->mmap &&
2203 + (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
2204 + return region->ops->mmap(vdev, region, vma);
2205 + return -EINVAL;
2206 +diff --git a/fs/readdir.c b/fs/readdir.c
2207 +index de2eceffdee8b..07a3b5baa4047 100644
2208 +--- a/fs/readdir.c
2209 ++++ b/fs/readdir.c
2210 +@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
2211 +
2212 + if (buf->result)
2213 + return -EINVAL;
2214 ++ buf->result = verify_dirent_name(name, namlen);
2215 ++ if (buf->result < 0)
2216 ++ return buf->result;
2217 + d_ino = ino;
2218 + if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
2219 + buf->result = -EOVERFLOW;
2220 +@@ -417,6 +420,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
2221 +
2222 + if (buf->result)
2223 + return -EINVAL;
2224 ++ buf->result = verify_dirent_name(name, namlen);
2225 ++ if (buf->result < 0)
2226 ++ return buf->result;
2227 + d_ino = ino;
2228 + if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
2229 + buf->result = -EOVERFLOW;
2230 +diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
2231 +index af6b11d4d6737..1847a07842437 100644
2232 +--- a/include/linux/marvell_phy.h
2233 ++++ b/include/linux/marvell_phy.h
2234 +@@ -23,11 +23,12 @@
2235 + #define MARVELL_PHY_ID_88X3310 0x002b09a0
2236 + #define MARVELL_PHY_ID_88E2110 0x002b09b0
2237 +
2238 +-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
2239 ++/* These Ethernet switch families contain embedded PHYs, but they do
2240 + * not have a model ID. So the switch driver traps reads to the ID2
2241 + * register and returns the switch family ID
2242 + */
2243 +-#define MARVELL_PHY_ID_88E6390 0x01410f90
2244 ++#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
2245 ++#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
2246 +
2247 + #define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
2248 +
2249 +diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
2250 +index e98028f00e479..6988cf9ffe3ae 100644
2251 +--- a/include/linux/netfilter_arp/arp_tables.h
2252 ++++ b/include/linux/netfilter_arp/arp_tables.h
2253 +@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
2254 + int arpt_register_table(struct net *net, const struct xt_table *table,
2255 + const struct arpt_replace *repl,
2256 + const struct nf_hook_ops *ops, struct xt_table **res);
2257 +-void arpt_unregister_table(struct net *net, struct xt_table *table,
2258 +- const struct nf_hook_ops *ops);
2259 ++void arpt_unregister_table(struct net *net, struct xt_table *table);
2260 ++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
2261 ++ const struct nf_hook_ops *ops);
2262 + extern unsigned int arpt_do_table(struct sk_buff *skb,
2263 + const struct nf_hook_state *state,
2264 + struct xt_table *table);
2265 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
2266 +index 162f59d0d17a2..db472c9cd8e9d 100644
2267 +--- a/include/linux/netfilter_bridge/ebtables.h
2268 ++++ b/include/linux/netfilter_bridge/ebtables.h
2269 +@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
2270 + const struct ebt_table *table,
2271 + const struct nf_hook_ops *ops,
2272 + struct ebt_table **res);
2273 +-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
2274 +- const struct nf_hook_ops *);
2275 ++extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
2276 ++void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
2277 ++ const struct nf_hook_ops *ops);
2278 + extern unsigned int ebt_do_table(struct sk_buff *skb,
2279 + const struct nf_hook_state *state,
2280 + struct ebt_table *table);
2281 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
2282 +index bca0f7f71cde4..7429f15717559 100644
2283 +--- a/kernel/locking/lockdep.c
2284 ++++ b/kernel/locking/lockdep.c
2285 +@@ -875,7 +875,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
2286 + /* Debug-check: all keys must be persistent! */
2287 + debug_locks_off();
2288 + pr_err("INFO: trying to register non-static key.\n");
2289 +- pr_err("the code is fine but needs lockdep annotation.\n");
2290 ++ pr_err("The code is fine but needs lockdep annotation, or maybe\n");
2291 ++ pr_err("you didn't initialize this object before use?\n");
2292 + pr_err("turning off the locking correctness validator.\n");
2293 + dump_stack();
2294 + return false;
2295 +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
2296 +index 66e7af1654943..32bc2821027f3 100644
2297 +--- a/net/bridge/netfilter/ebtable_broute.c
2298 ++++ b/net/bridge/netfilter/ebtable_broute.c
2299 +@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
2300 + &net->xt.broute_table);
2301 + }
2302 +
2303 ++static void __net_exit broute_net_pre_exit(struct net *net)
2304 ++{
2305 ++ ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
2306 ++}
2307 ++
2308 + static void __net_exit broute_net_exit(struct net *net)
2309 + {
2310 +- ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
2311 ++ ebt_unregister_table(net, net->xt.broute_table);
2312 + }
2313 +
2314 + static struct pernet_operations broute_net_ops = {
2315 + .init = broute_net_init,
2316 + .exit = broute_net_exit,
2317 ++ .pre_exit = broute_net_pre_exit,
2318 + };
2319 +
2320 + static int __init ebtable_broute_init(void)
2321 +diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
2322 +index 78cb9b21022d0..bcf982e12f16b 100644
2323 +--- a/net/bridge/netfilter/ebtable_filter.c
2324 ++++ b/net/bridge/netfilter/ebtable_filter.c
2325 +@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
2326 + &net->xt.frame_filter);
2327 + }
2328 +
2329 ++static void __net_exit frame_filter_net_pre_exit(struct net *net)
2330 ++{
2331 ++ ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
2332 ++}
2333 ++
2334 + static void __net_exit frame_filter_net_exit(struct net *net)
2335 + {
2336 +- ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
2337 ++ ebt_unregister_table(net, net->xt.frame_filter);
2338 + }
2339 +
2340 + static struct pernet_operations frame_filter_net_ops = {
2341 + .init = frame_filter_net_init,
2342 + .exit = frame_filter_net_exit,
2343 ++ .pre_exit = frame_filter_net_pre_exit,
2344 + };
2345 +
2346 + static int __init ebtable_filter_init(void)
2347 +diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
2348 +index 0888936ef8537..0d092773f8161 100644
2349 +--- a/net/bridge/netfilter/ebtable_nat.c
2350 ++++ b/net/bridge/netfilter/ebtable_nat.c
2351 +@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
2352 + &net->xt.frame_nat);
2353 + }
2354 +
2355 ++static void __net_exit frame_nat_net_pre_exit(struct net *net)
2356 ++{
2357 ++ ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
2358 ++}
2359 ++
2360 + static void __net_exit frame_nat_net_exit(struct net *net)
2361 + {
2362 +- ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
2363 ++ ebt_unregister_table(net, net->xt.frame_nat);
2364 + }
2365 +
2366 + static struct pernet_operations frame_nat_net_ops = {
2367 + .init = frame_nat_net_init,
2368 + .exit = frame_nat_net_exit,
2369 ++ .pre_exit = frame_nat_net_pre_exit,
2370 + };
2371 +
2372 + static int __init ebtable_nat_init(void)
2373 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2374 +index e1256e03a9a86..d9375c52f50e6 100644
2375 +--- a/net/bridge/netfilter/ebtables.c
2376 ++++ b/net/bridge/netfilter/ebtables.c
2377 +@@ -1237,10 +1237,34 @@ out:
2378 + return ret;
2379 + }
2380 +
2381 +-void ebt_unregister_table(struct net *net, struct ebt_table *table,
2382 +- const struct nf_hook_ops *ops)
2383 ++static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
2384 ++{
2385 ++ struct ebt_table *t;
2386 ++
2387 ++ mutex_lock(&ebt_mutex);
2388 ++
2389 ++ list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
2390 ++ if (strcmp(t->name, name) == 0) {
2391 ++ mutex_unlock(&ebt_mutex);
2392 ++ return t;
2393 ++ }
2394 ++ }
2395 ++
2396 ++ mutex_unlock(&ebt_mutex);
2397 ++ return NULL;
2398 ++}
2399 ++
2400 ++void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
2401 ++{
2402 ++ struct ebt_table *table = __ebt_find_table(net, name);
2403 ++
2404 ++ if (table)
2405 ++ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
2406 ++}
2407 ++EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
2408 ++
2409 ++void ebt_unregister_table(struct net *net, struct ebt_table *table)
2410 + {
2411 +- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
2412 + __ebt_unregister_table(net, table);
2413 + }
2414 +
2415 +diff --git a/net/core/dev.c b/net/core/dev.c
2416 +index 2ec21380f86d9..91909e5d6807e 100644
2417 +--- a/net/core/dev.c
2418 ++++ b/net/core/dev.c
2419 +@@ -5406,7 +5406,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
2420 +
2421 + if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
2422 + pinfo->nr_frags &&
2423 +- !PageHighMem(skb_frag_page(frag0))) {
2424 ++ !PageHighMem(skb_frag_page(frag0)) &&
2425 ++ (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
2426 + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
2427 + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
2428 + skb_frag_size(frag0),
2429 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2430 +index 7080d708b7d08..6635b83113f8f 100644
2431 +--- a/net/core/neighbour.c
2432 ++++ b/net/core/neighbour.c
2433 +@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
2434 + * we can reinject the packet there.
2435 + */
2436 + n2 = NULL;
2437 +- if (dst) {
2438 ++ if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
2439 + n2 = dst_neigh_lookup_skb(dst, skb);
2440 + if (n2)
2441 + n1 = n2;
2442 +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
2443 +index f03958fcb5be1..328bb9f5342e5 100644
2444 +--- a/net/ieee802154/nl802154.c
2445 ++++ b/net/ieee802154/nl802154.c
2446 +@@ -1514,6 +1514,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
2447 + if (err)
2448 + return err;
2449 +
2450 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
2451 ++ err = skb->len;
2452 ++ goto out_err;
2453 ++ }
2454 ++
2455 + if (!wpan_dev->netdev) {
2456 + err = -EINVAL;
2457 + goto out_err;
2458 +@@ -1568,6 +1573,9 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
2459 + struct ieee802154_llsec_key_id id = { };
2460 + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
2461 +
2462 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2463 ++ return -EOPNOTSUPP;
2464 ++
2465 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
2466 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
2467 + return -EINVAL;
2468 +@@ -1617,6 +1625,9 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
2469 + struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
2470 + struct ieee802154_llsec_key_id id;
2471 +
2472 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2473 ++ return -EOPNOTSUPP;
2474 ++
2475 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
2476 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
2477 + return -EINVAL;
2478 +@@ -1682,6 +1693,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
2479 + if (err)
2480 + return err;
2481 +
2482 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
2483 ++ err = skb->len;
2484 ++ goto out_err;
2485 ++ }
2486 ++
2487 + if (!wpan_dev->netdev) {
2488 + err = -EINVAL;
2489 + goto out_err;
2490 +@@ -1768,6 +1784,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
2491 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
2492 + struct ieee802154_llsec_device dev_desc;
2493 +
2494 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2495 ++ return -EOPNOTSUPP;
2496 ++
2497 + if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
2498 + &dev_desc) < 0)
2499 + return -EINVAL;
2500 +@@ -1783,6 +1802,9 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
2501 + struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
2502 + __le64 extended_addr;
2503 +
2504 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2505 ++ return -EOPNOTSUPP;
2506 ++
2507 + if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
2508 + nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
2509 + return -EINVAL;
2510 +@@ -1852,6 +1874,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
2511 + if (err)
2512 + return err;
2513 +
2514 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
2515 ++ err = skb->len;
2516 ++ goto out_err;
2517 ++ }
2518 ++
2519 + if (!wpan_dev->netdev) {
2520 + err = -EINVAL;
2521 + goto out_err;
2522 +@@ -1909,6 +1936,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
2523 + struct ieee802154_llsec_device_key key;
2524 + __le64 extended_addr;
2525 +
2526 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2527 ++ return -EOPNOTSUPP;
2528 ++
2529 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
2530 + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
2531 + return -EINVAL;
2532 +@@ -1940,6 +1970,9 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
2533 + struct ieee802154_llsec_device_key key;
2534 + __le64 extended_addr;
2535 +
2536 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2537 ++ return -EOPNOTSUPP;
2538 ++
2539 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
2540 + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
2541 + return -EINVAL;
2542 +@@ -2014,6 +2047,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
2543 + if (err)
2544 + return err;
2545 +
2546 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
2547 ++ err = skb->len;
2548 ++ goto out_err;
2549 ++ }
2550 ++
2551 + if (!wpan_dev->netdev) {
2552 + err = -EINVAL;
2553 + goto out_err;
2554 +@@ -2098,6 +2136,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
2555 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
2556 + struct ieee802154_llsec_seclevel sl;
2557 +
2558 ++ if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
2559 ++ return -EOPNOTSUPP;
2560 ++
2561 + if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
2562 + &sl) < 0)
2563 + return -EINVAL;
2564 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2565 +index dc7dac676415e..a6f2e5bf70456 100644
2566 +--- a/net/ipv4/netfilter/arp_tables.c
2567 ++++ b/net/ipv4/netfilter/arp_tables.c
2568 +@@ -1580,10 +1580,15 @@ out_free:
2569 + return ret;
2570 + }
2571 +
2572 +-void arpt_unregister_table(struct net *net, struct xt_table *table,
2573 +- const struct nf_hook_ops *ops)
2574 ++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
2575 ++ const struct nf_hook_ops *ops)
2576 + {
2577 + nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
2578 ++}
2579 ++EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
2580 ++
2581 ++void arpt_unregister_table(struct net *net, struct xt_table *table)
2582 ++{
2583 + __arpt_unregister_table(net, table);
2584 + }
2585 +
2586 +diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
2587 +index c216b9ad3bb24..6c300ba5634e2 100644
2588 +--- a/net/ipv4/netfilter/arptable_filter.c
2589 ++++ b/net/ipv4/netfilter/arptable_filter.c
2590 +@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
2591 + return err;
2592 + }
2593 +
2594 ++static void __net_exit arptable_filter_net_pre_exit(struct net *net)
2595 ++{
2596 ++ if (net->ipv4.arptable_filter)
2597 ++ arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
2598 ++ arpfilter_ops);
2599 ++}
2600 ++
2601 + static void __net_exit arptable_filter_net_exit(struct net *net)
2602 + {
2603 + if (!net->ipv4.arptable_filter)
2604 + return;
2605 +- arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
2606 ++ arpt_unregister_table(net, net->ipv4.arptable_filter);
2607 + net->ipv4.arptable_filter = NULL;
2608 + }
2609 +
2610 + static struct pernet_operations arptable_filter_net_ops = {
2611 + .exit = arptable_filter_net_exit,
2612 ++ .pre_exit = arptable_filter_net_pre_exit,
2613 + };
2614 +
2615 + static int __init arptable_filter_init(void)
2616 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2617 +index 8dcf7bacc99a6..69799b612ee80 100644
2618 +--- a/net/ipv6/ip6_tunnel.c
2619 ++++ b/net/ipv6/ip6_tunnel.c
2620 +@@ -2217,6 +2217,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
2621 + t = rtnl_dereference(t->next);
2622 + }
2623 + }
2624 ++
2625 ++ t = rtnl_dereference(ip6n->tnls_wc[0]);
2626 ++ while (t) {
2627 ++ /* If dev is in the same netns, it has already
2628 ++ * been added to the list by the previous loop.
2629 ++ */
2630 ++ if (!net_eq(dev_net(t->dev), net))
2631 ++ unregister_netdevice_queue(t->dev, list);
2632 ++ t = rtnl_dereference(t->next);
2633 ++ }
2634 + }
2635 +
2636 + static int __net_init ip6_tnl_init_net(struct net *net)
2637 +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
2638 +index de4c871787e2f..2710f3bc856f8 100644
2639 +--- a/net/ipv6/sit.c
2640 ++++ b/net/ipv6/sit.c
2641 +@@ -1819,9 +1819,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
2642 + if (dev->rtnl_link_ops == &sit_link_ops)
2643 + unregister_netdevice_queue(dev, head);
2644 +
2645 +- for (prio = 1; prio < 4; prio++) {
2646 ++ for (prio = 0; prio < 4; prio++) {
2647 + int h;
2648 +- for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
2649 ++ for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
2650 + struct ip_tunnel *t;
2651 +
2652 + t = rtnl_dereference(sitn->tunnels[prio][h]);
2653 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2654 +index 677928bf13d13..1b50bbf030ed8 100644
2655 +--- a/net/mac80211/cfg.c
2656 ++++ b/net/mac80211/cfg.c
2657 +@@ -1670,8 +1670,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
2658 + }
2659 +
2660 + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
2661 +- sta->sdata->u.vlan.sta)
2662 ++ sta->sdata->u.vlan.sta) {
2663 ++ ieee80211_clear_fast_rx(sta);
2664 + RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
2665 ++ }
2666 +
2667 + if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
2668 + ieee80211_vif_dec_num_mcast(sta->sdata);
2669 +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
2670 +index dc57f530df9db..1a69825401263 100644
2671 +--- a/net/netfilter/nf_conntrack_standalone.c
2672 ++++ b/net/netfilter/nf_conntrack_standalone.c
2673 +@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
2674 + case IPPROTO_GRE: return "gre";
2675 + case IPPROTO_SCTP: return "sctp";
2676 + case IPPROTO_UDPLITE: return "udplite";
2677 ++ case IPPROTO_ICMPV6: return "icmpv6";
2678 + }
2679 +
2680 + return "unknown";
2681 +diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
2682 +index 35b67d7e36947..6e7b92e6f4246 100644
2683 +--- a/net/netfilter/nft_limit.c
2684 ++++ b/net/netfilter/nft_limit.c
2685 +@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
2686 + return -EOVERFLOW;
2687 +
2688 + if (pkts) {
2689 +- tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
2690 ++ tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
2691 + } else {
2692 + /* The token bucket size limits the number of tokens can be
2693 + * accumulated. tokens_max specifies the bucket size.
2694 + * tokens_max = unit * (rate + burst) / rate.
2695 + */
2696 +- tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
2697 ++ tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
2698 + limit->rate);
2699 + }
2700 +
2701 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2702 +index 41abfff6a6a3d..783fd65b1f98d 100644
2703 +--- a/net/sctp/socket.c
2704 ++++ b/net/sctp/socket.c
2705 +@@ -1539,11 +1539,9 @@ static void sctp_close(struct sock *sk, long timeout)
2706 +
2707 + /* Supposedly, no process has access to the socket, but
2708 + * the net layers still may.
2709 +- * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
2710 +- * held and that should be grabbed before socket lock.
2711 + */
2712 +- spin_lock_bh(&net->sctp.addr_wq_lock);
2713 +- bh_lock_sock_nested(sk);
2714 ++ local_bh_disable();
2715 ++ bh_lock_sock(sk);
2716 +
2717 + /* Hold the sock, since sk_common_release() will put sock_put()
2718 + * and we have just a little more cleanup.
2719 +@@ -1552,7 +1550,7 @@ static void sctp_close(struct sock *sk, long timeout)
2720 + sk_common_release(sk);
2721 +
2722 + bh_unlock_sock(sk);
2723 +- spin_unlock_bh(&net->sctp.addr_wq_lock);
2724 ++ local_bh_enable();
2725 +
2726 + sock_put(sk);
2727 +
2728 +@@ -5115,9 +5113,6 @@ static int sctp_init_sock(struct sock *sk)
2729 + sk_sockets_allocated_inc(sk);
2730 + sock_prot_inuse_add(net, sk->sk_prot, 1);
2731 +
2732 +- /* Nothing can fail after this block, otherwise
2733 +- * sctp_destroy_sock() will be called without addr_wq_lock held
2734 +- */
2735 + if (net->sctp.default_auto_asconf) {
2736 + spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
2737 + list_add_tail(&sp->auto_asconf_list,
2738 +@@ -5152,7 +5147,9 @@ static void sctp_destroy_sock(struct sock *sk)
2739 +
2740 + if (sp->do_auto_asconf) {
2741 + sp->do_auto_asconf = 0;
2742 ++ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
2743 + list_del(&sp->auto_asconf_list);
2744 ++ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
2745 + }
2746 + sctp_endpoint_free(sp->ep);
2747 + local_bh_disable();
2748 +diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
2749 +index 96718e3a1ad0e..16fbc9faed90b 100644
2750 +--- a/sound/soc/codecs/max98373.c
2751 ++++ b/sound/soc/codecs/max98373.c
2752 +@@ -410,11 +410,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
2753 + regmap_update_bits(max98373->regmap,
2754 + MAX98373_R20FF_GLOBAL_SHDN,
2755 + MAX98373_GLOBAL_EN_MASK, 1);
2756 ++ usleep_range(30000, 31000);
2757 + break;
2758 + case SND_SOC_DAPM_POST_PMD:
2759 + regmap_update_bits(max98373->regmap,
2760 + MAX98373_R20FF_GLOBAL_SHDN,
2761 + MAX98373_GLOBAL_EN_MASK, 0);
2762 ++ usleep_range(30000, 31000);
2763 + max98373->tdm_mode = false;
2764 + break;
2765 + default:
2766 +diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
2767 +index 84290be778f0e..33ade79fa032e 100644
2768 +--- a/sound/soc/fsl/fsl_esai.c
2769 ++++ b/sound/soc/fsl/fsl_esai.c
2770 +@@ -494,11 +494,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
2771 + ESAI_SAICR_SYNC, esai_priv->synchronous ?
2772 + ESAI_SAICR_SYNC : 0);
2773 +
2774 +- /* Set a default slot number -- 2 */
2775 ++ /* Set slots count */
2776 + regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
2777 +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
2778 ++ ESAI_xCCR_xDC_MASK,
2779 ++ ESAI_xCCR_xDC(esai_priv->slots));
2780 + regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
2781 +- ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
2782 ++ ESAI_xCCR_xDC_MASK,
2783 ++ ESAI_xCCR_xDC(esai_priv->slots));
2784 + }
2785 +
2786 + return 0;