Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 01 Apr 2020 12:03:30
Message-Id: 1585742591.38e773d098144e4faff7e03fea1df7541549535a.mpagano@gentoo
1 commit: 38e773d098144e4faff7e03fea1df7541549535a
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Apr 1 12:03:11 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Apr 1 12:03:11 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=38e773d0
7
8 Linux patch 5.4.29
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1028_linux-5.4.29.patch | 5915 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5919 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index ee28b56..c1165c8 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -155,6 +155,10 @@ Patch: 1027_linux-5.4.28.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.28
23
24 +Patch: 1028_linux-5.4.29.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.29
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1028_linux-5.4.29.patch b/1028_linux-5.4.29.patch
33 new file mode 100644
34 index 0000000..6c1f8e3
35 --- /dev/null
36 +++ b/1028_linux-5.4.29.patch
37 @@ -0,0 +1,5915 @@
38 +diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
39 +index 299c0dcd67db..1316f0aec0cf 100644
40 +--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
41 ++++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
42 +@@ -110,6 +110,13 @@ PROPERTIES
43 + Usage: required
44 + Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
45 +
46 ++- fsl,erratum-a050385
47 ++ Usage: optional
48 ++ Value type: boolean
49 ++ Definition: A boolean property. Indicates the presence of the
50 ++ erratum A050385 which indicates that DMA transactions that are
51 ++ split can result in a FMan lock.
52 ++
53 + =============================================================================
54 + FMan MURAM Node
55 +
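
The property above is a plain presence flag: a driver decides whether to apply the A050385 workaround simply by testing whether the node carries it. A minimal sketch of how such a boolean devicetree property is typically consumed (the helper name is hypothetical; of_property_read_bool() is the real OF API):

    #include <linux/of.h>

    /* Hypothetical probe fragment: the workaround is armed only when
     * the devicetree marks the FMan instance as affected. */
    static bool fman_has_erratum_a050385(struct device_node *np)
    {
            /* true iff the boolean property is present on the node */
            return of_property_read_bool(np, "fsl,erratum-a050385");
    }
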
56 +diff --git a/Makefile b/Makefile
57 +index b015cc894123..8cb72071a842 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,7 +1,7 @@
61 + # SPDX-License-Identifier: GPL-2.0
62 + VERSION = 5
63 + PATCHLEVEL = 4
64 +-SUBLEVEL = 28
65 ++SUBLEVEL = 29
66 + EXTRAVERSION =
67 + NAME = Kleptomaniac Octopus
68 +
69 +diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
70 +index 6481d2b7d6b6..c6be65249f42 100644
71 +--- a/arch/arm/boot/dts/dra7.dtsi
72 ++++ b/arch/arm/boot/dts/dra7.dtsi
73 +@@ -148,6 +148,7 @@
74 + #address-cells = <1>;
75 + #size-cells = <1>;
76 + ranges = <0x0 0x0 0x0 0xc0000000>;
77 ++ dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
78 + ti,hwmods = "l3_main_1", "l3_main_2";
79 + reg = <0x0 0x44000000 0x0 0x1000000>,
80 + <0x0 0x45000000 0x0 0x1000>;
81 +diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
82 +index 1fb7937638f0..041646fabb2d 100644
83 +--- a/arch/arm/boot/dts/omap5.dtsi
84 ++++ b/arch/arm/boot/dts/omap5.dtsi
85 +@@ -143,6 +143,7 @@
86 + #address-cells = <1>;
87 + #size-cells = <1>;
88 + ranges = <0 0 0 0xc0000000>;
89 ++ dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
90 + ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
91 + reg = <0 0x44000000 0 0x2000>,
92 + <0 0x44800000 0 0x3000>,
93 +diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
94 +index 3bec3e0a81b2..397140454132 100644
95 +--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
96 ++++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
97 +@@ -482,7 +482,8 @@
98 + };
99 +
100 + &usbphy {
101 +- usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
102 ++ usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */
103 ++ usb0_vbus_power-supply = <&usb_power_supply>;
104 + usb0_vbus-supply = <&reg_drivevbus>;
105 + usb1_vbus-supply = <&reg_vmain>;
106 + usb2_vbus-supply = <&reg_vmain>;
107 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
108 +index 6082ae022136..d237162a8744 100644
109 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
110 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
111 +@@ -20,6 +20,8 @@
112 + };
113 +
114 + &fman0 {
115 ++ fsl,erratum-a050385;
116 ++
117 + /* these aliases provide the FMan ports mapping */
118 + enet0: ethernet@e0000 {
119 + };
120 +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
121 +index 2db3b7c4de16..a353f88d299d 100644
122 +--- a/arch/x86/mm/ioremap.c
123 ++++ b/arch/x86/mm/ioremap.c
124 +@@ -115,6 +115,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
125 + if (!sev_active())
126 + return;
127 +
128 ++ if (!IS_ENABLED(CONFIG_EFI))
129 ++ return;
130 ++
131 + if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
132 + desc->flags |= IORES_MAP_ENCRYPTED;
133 + }
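
The hunk above gates the EFI memory-type lookup behind IS_ENABLED(CONFIG_EFI), presumably because efi_mem_type() is not meaningful on kernels built without EFI support. IS_ENABLED() folds to a compile-time 0/1, so the disabled path is discarded while still being type-checked. A standalone model of the idiom (CONFIG_EFI_DEMO and the helper are stand-ins for the real symbols):

    #include <stdio.h>

    #define CONFIG_EFI_DEMO 0   /* flip to 1 to model CONFIG_EFI=y */

    static int is_efi_runtime_data(unsigned long addr)
    {
            /* stand-in for efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA */
            return (addr % 2) == 0;
    }

    static unsigned long classify(unsigned long addr, unsigned long flags)
    {
            if (!CONFIG_EFI_DEMO)
                    return flags;   /* EFI compiled out: leave flags alone */

            if (is_efi_runtime_data(addr))
                    flags |= 0x1;   /* stand-in for IORES_MAP_ENCRYPTED */
            return flags;
    }

    int main(void)
    {
            printf("flags=%lu\n", classify(0x1000, 0));
            return 0;
    }
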
134 +diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
135 +index 393d251798c0..4d2a7a764602 100644
136 +--- a/arch/x86/net/bpf_jit_comp32.c
137 ++++ b/arch/x86/net/bpf_jit_comp32.c
138 +@@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
139 + }
140 + /* and dreg_lo,sreg_lo */
141 + EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
142 +- /* and dreg_hi,sreg_hi */
143 +- EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
144 +- /* or dreg_lo,dreg_hi */
145 +- EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
146 ++ if (is_jmp64) {
147 ++ /* and dreg_hi,sreg_hi */
148 ++ EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
149 ++ /* or dreg_lo,dreg_hi */
150 ++ EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
151 ++ }
152 + goto emit_cond_jmp;
153 + }
154 + case BPF_JMP | BPF_JSET | BPF_K:
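
Context for the hunk above: BPF_JSET jumps when dst & src is non-zero. On 32-bit x86 the JIT evaluates the 64-bit AND as two 32-bit halves ORed together; before this fix the high-word instructions were emitted even for 32-bit jumps, so high bits wrongly influenced the result. A standalone C model of the test the emitted code computes (illustrative, not the JIT itself):

    #include <stdint.h>
    #include <stdio.h>

    /* jmp64: taken iff (dst64 & src64) != 0, computed as
     *        (dst_lo & src_lo) | (dst_hi & src_hi);
     * jmp32: only the low words may participate. */
    static int jset_taken(uint32_t dst_lo, uint32_t dst_hi,
                          uint32_t src_lo, uint32_t src_hi, int is_jmp64)
    {
            uint32_t r = dst_lo & src_lo;       /* and dreg_lo,sreg_lo */

            if (is_jmp64)
                    r |= dst_hi & src_hi;       /* and dreg_hi,sreg_hi; or */

            return r != 0;
    }

    int main(void)
    {
            /* a bit set only in the high word: the 64-bit test sees it ... */
            printf("%d\n", jset_taken(0, 0x80000000u, 0, 0x80000000u, 1));
            /* ... but a 32-bit test must not */
            printf("%d\n", jset_taken(0, 0x80000000u, 0, 0x80000000u, 0));
            return 0;
    }
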
155 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
156 +index 827530dae682..ce59a3f32eac 100644
157 +--- a/drivers/acpi/sleep.c
158 ++++ b/drivers/acpi/sleep.c
159 +@@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
160 + return 0;
161 + }
162 +
163 ++static void acpi_s2idle_sync(void)
164 ++{
165 ++ /*
166 ++ * The EC driver uses the system workqueue and an additional special
167 ++ * one, so those need to be flushed too.
168 ++ */
169 ++ acpi_ec_flush_work();
170 ++ acpi_os_wait_events_complete(); /* synchronize Notify handling */
171 ++}
172 ++
173 + static bool acpi_s2idle_wake(void)
174 + {
175 + if (!acpi_sci_irq_valid())
176 +@@ -1021,13 +1031,8 @@ static bool acpi_s2idle_wake(void)
177 + * should be missed by canceling the wakeup here.
178 + */
179 + pm_system_cancel_wakeup();
180 +- /*
181 +- * The EC driver uses the system workqueue and an additional
182 +- * special one, so those need to be flushed too.
183 +- */
184 +- acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
185 +- acpi_ec_flush_work();
186 +- acpi_os_wait_events_complete(); /* synchronize Notify handling */
187 ++
188 ++ acpi_s2idle_sync();
189 +
190 + /*
191 + * The SCI is in the "suspended" state now and it cannot produce
192 +@@ -1055,6 +1060,13 @@ static void acpi_s2idle_restore_early(void)
193 +
194 + static void acpi_s2idle_restore(void)
195 + {
196 ++ /*
197 ++ * Drain pending events before restoring the working-state configuration
198 ++ * of GPEs.
199 ++ */
200 ++ acpi_os_wait_events_complete(); /* synchronize GPE processing */
201 ++ acpi_s2idle_sync();
202 ++
203 + s2idle_wakeup = false;
204 +
205 + acpi_enable_all_runtime_gpes();
206 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
207 +index 1787e3ad9c44..d33528033042 100644
208 +--- a/drivers/ata/ahci.c
209 ++++ b/drivers/ata/ahci.c
210 +@@ -393,6 +393,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
211 + { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
212 + { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
213 + { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
214 ++ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
215 + { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
216 + { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
217 + { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
218 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
219 +index 84c4e1f72cbd..5a8c430fb8ff 100644
220 +--- a/drivers/base/memory.c
221 ++++ b/drivers/base/memory.c
222 +@@ -114,30 +114,13 @@ static ssize_t phys_index_show(struct device *dev,
223 + }
224 +
225 + /*
226 +- * Show whether the memory block is likely to be offlineable (or is already
227 +- * offline). Once offline, the memory block could be removed. The return
228 +- * value does, however, not indicate that there is a way to remove the
229 +- * memory block.
230 ++ * Legacy interface that we cannot remove. Always indicate "removable"
231 ++ * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
232 + */
233 + static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
234 + char *buf)
235 + {
236 +- struct memory_block *mem = to_memory_block(dev);
237 +- unsigned long pfn;
238 +- int ret = 1, i;
239 +-
240 +- if (mem->state != MEM_ONLINE)
241 +- goto out;
242 +-
243 +- for (i = 0; i < sections_per_block; i++) {
244 +- if (!present_section_nr(mem->start_section_nr + i))
245 +- continue;
246 +- pfn = section_nr_to_pfn(mem->start_section_nr + i);
247 +- ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
248 +- }
249 +-
250 +-out:
251 +- return sprintf(buf, "%d\n", ret);
252 ++ return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
253 + }
254 +
255 + /*
256 +diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
257 +index 2317d4e3daaf..36933e2b3b0d 100644
258 +--- a/drivers/clocksource/hyperv_timer.c
259 ++++ b/drivers/clocksource/hyperv_timer.c
260 +@@ -233,7 +233,8 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
261 +
262 + static u64 read_hv_sched_clock_tsc(void)
263 + {
264 +- return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
265 ++ return (read_hv_clock_tsc(NULL) - hv_sched_clock_offset) *
266 ++ (NSEC_PER_SEC / HV_CLOCK_HZ);
267 + }
268 +
269 + static struct clocksource hyperv_cs_tsc = {
270 +@@ -258,7 +259,8 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
271 +
272 + static u64 read_hv_sched_clock_msr(void)
273 + {
274 +- return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
275 ++ return (read_hv_clock_msr(NULL) - hv_sched_clock_offset) *
276 ++ (NSEC_PER_SEC / HV_CLOCK_HZ);
277 + }
278 +
279 + static struct clocksource hyperv_cs_msr = {
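
The two hunks above fix the unit of sched_clock(): the Hyper-V reference counter ticks at HV_CLOCK_HZ (10 MHz, i.e. one tick per 100 ns), while sched_clock() must return nanoseconds, so the tick delta is scaled by NSEC_PER_SEC / HV_CLOCK_HZ = 100. A standalone check of the arithmetic (constants mirror the kernel's definitions; treat them as assumptions):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL
    #define HV_CLOCK_HZ    10000000ULL  /* Hyper-V reference counter rate */

    static uint64_t sched_clock_ns(uint64_t ticks, uint64_t offset)
    {
            /* 1e9 / 1e7 = 100 ns per tick */
            return (ticks - offset) * (NSEC_PER_SEC / HV_CLOCK_HZ);
    }

    int main(void)
    {
            /* 10,000,000 ticks past the boot offset is exactly 1 second */
            printf("%llu ns\n",
                   (unsigned long long)sched_clock_ns(10000000, 0));
            return 0;
    }
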
280 +diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
281 +index 1f98e988c0d3..a3fb450a9ca8 100644
282 +--- a/drivers/gpio/gpiolib-acpi.c
283 ++++ b/drivers/gpio/gpiolib-acpi.c
284 +@@ -21,18 +21,21 @@
285 + #include "gpiolib.h"
286 + #include "gpiolib-acpi.h"
287 +
288 +-#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
289 +-#define QUIRK_NO_WAKEUP 0x02l
290 +-
291 + static int run_edge_events_on_boot = -1;
292 + module_param(run_edge_events_on_boot, int, 0444);
293 + MODULE_PARM_DESC(run_edge_events_on_boot,
294 + "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
295 +
296 +-static int honor_wakeup = -1;
297 +-module_param(honor_wakeup, int, 0444);
298 +-MODULE_PARM_DESC(honor_wakeup,
299 +- "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
300 ++static char *ignore_wake;
301 ++module_param(ignore_wake, charp, 0444);
302 ++MODULE_PARM_DESC(ignore_wake,
303 ++ "controller@pin combos on which to ignore the ACPI wake flag "
304 ++ "ignore_wake=controller@pin[,controller@pin[,...]]");
305 ++
306 ++struct acpi_gpiolib_dmi_quirk {
307 ++ bool no_edge_events_on_boot;
308 ++ char *ignore_wake;
309 ++};
310 +
311 + /**
312 + * struct acpi_gpio_event - ACPI GPIO event handler data
313 +@@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
314 + acpi_gpiochip_request_irq(acpi_gpio, event);
315 + }
316 +
317 ++static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
318 ++{
319 ++ const char *controller, *pin_str;
320 ++ int len, pin;
321 ++ char *endp;
322 ++
323 ++ controller = ignore_wake;
324 ++ while (controller) {
325 ++ pin_str = strchr(controller, '@');
326 ++ if (!pin_str)
327 ++ goto err;
328 ++
329 ++ len = pin_str - controller;
330 ++ if (len == strlen(controller_in) &&
331 ++ strncmp(controller, controller_in, len) == 0) {
332 ++ pin = simple_strtoul(pin_str + 1, &endp, 10);
333 ++ if (*endp != 0 && *endp != ',')
334 ++ goto err;
335 ++
336 ++ if (pin == pin_in)
337 ++ return true;
338 ++ }
339 ++
340 ++ controller = strchr(controller, ',');
341 ++ if (controller)
342 ++ controller++;
343 ++ }
344 ++
345 ++ return false;
346 ++err:
347 ++ pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
348 ++ ignore_wake);
349 ++ return false;
350 ++}
351 ++
352 ++static bool acpi_gpio_irq_is_wake(struct device *parent,
353 ++ struct acpi_resource_gpio *agpio)
354 ++{
355 ++ int pin = agpio->pin_table[0];
356 ++
357 ++ if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
358 ++ return false;
359 ++
360 ++ if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
361 ++ dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
362 ++ return false;
363 ++ }
364 ++
365 ++ return true;
366 ++}
367 ++
368 + static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
369 + void *context)
370 + {
371 +@@ -282,7 +336,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
372 + event->handle = evt_handle;
373 + event->handler = handler;
374 + event->irq = irq;
375 +- event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
376 ++ event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
377 + event->pin = pin;
378 + event->desc = desc;
379 +
380 +@@ -1321,7 +1375,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
381 + DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
382 + DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
383 + },
384 +- .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
385 ++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
386 ++ .no_edge_events_on_boot = true,
387 ++ },
388 + },
389 + {
390 + /*
391 +@@ -1334,16 +1390,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
392 + DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
393 + DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
394 + },
395 +- .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
396 ++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
397 ++ .no_edge_events_on_boot = true,
398 ++ },
399 + },
400 + {
401 + /*
402 +- * Various HP X2 10 Cherry Trail models use an external
403 +- * embedded-controller connected via I2C + an ACPI GPIO
404 +- * event handler. The embedded controller generates various
405 +- * spurious wakeup events when suspended. So disable wakeup
406 +- * for its handler (it uses the only ACPI GPIO event handler).
407 +- * This breaks wakeup when opening the lid, the user needs
408 ++ * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
409 ++ * external embedded-controller connected via I2C + an ACPI GPIO
410 ++ * event handler on INT33FF:01 pin 0, causing spurious wakeups.
411 ++ * When suspending by closing the LID, the power to the USB
412 ++ * keyboard is turned off, causing INT0002 ACPI events to
413 ++ * trigger once the XHCI controller notices the keyboard is
414 ++ * gone. So INT0002 events cause spurious wakeups too. Ignoring
415 ++ * EC wakes breaks wakeup when opening the lid, the user needs
416 + * to press the power-button to wakeup the system. The
417 + * alternative is suspend simply not working, which is worse.
418 + */
419 +@@ -1351,33 +1411,46 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
420 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
421 + DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
422 + },
423 +- .driver_data = (void *)QUIRK_NO_WAKEUP,
424 ++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
425 ++ .ignore_wake = "INT33FF:01@0,INT0002:00@2",
426 ++ },
427 ++ },
428 ++ {
429 ++ /*
430 ++ * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
431 ++ * external embedded-controller connected via I2C + an ACPI GPIO
432 ++ * event handler on INT33FC:02 pin 28, causing spurious wakeups.
433 ++ */
434 ++ .matches = {
435 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
436 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
437 ++ DMI_MATCH(DMI_BOARD_NAME, "815D"),
438 ++ },
439 ++ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
440 ++ .ignore_wake = "INT33FC:02@28",
441 ++ },
442 + },
443 + {} /* Terminating entry */
444 + };
445 +
446 + static int acpi_gpio_setup_params(void)
447 + {
448 ++ const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
449 + const struct dmi_system_id *id;
450 +- long quirks = 0;
451 +
452 + id = dmi_first_match(gpiolib_acpi_quirks);
453 + if (id)
454 +- quirks = (long)id->driver_data;
455 ++ quirk = id->driver_data;
456 +
457 + if (run_edge_events_on_boot < 0) {
458 +- if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
459 ++ if (quirk && quirk->no_edge_events_on_boot)
460 + run_edge_events_on_boot = 0;
461 + else
462 + run_edge_events_on_boot = 1;
463 + }
464 +
465 +- if (honor_wakeup < 0) {
466 +- if (quirks & QUIRK_NO_WAKEUP)
467 +- honor_wakeup = 0;
468 +- else
469 +- honor_wakeup = 1;
470 +- }
471 ++ if (ignore_wake == NULL && quirk && quirk->ignore_wake)
472 ++ ignore_wake = quirk->ignore_wake;
473 +
474 + return 0;
475 + }
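
The replacement parameter takes a comma-separated controller@pin list, e.g. gpiolib_acpi.ignore_wake=INT33FF:01@0,INT0002:00@2 on the kernel command line. A standalone re-implementation of the matching loop above, runnable for experimenting with the format (strtol replaces the kernel's simple_strtoul; error reporting is dropped):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return 1 if "ctrl_in@pin_in" appears in the comma-separated list. */
    static int in_ignore_list(const char *list, const char *ctrl_in, int pin_in)
    {
            const char *ctrl = list;

            while (ctrl) {
                    const char *at = strchr(ctrl, '@');
                    char *end;
                    long pin;

                    if (!at)
                            return 0;   /* malformed entry */

                    if ((size_t)(at - ctrl) == strlen(ctrl_in) &&
                        strncmp(ctrl, ctrl_in, at - ctrl) == 0) {
                            pin = strtol(at + 1, &end, 10);
                            if (*end != '\0' && *end != ',')
                                    return 0;
                            if (pin == pin_in)
                                    return 1;
                    }
                    ctrl = strchr(ctrl, ',');
                    if (ctrl)
                            ctrl++;
            }
            return 0;
    }

    int main(void)
    {
            const char *list = "INT33FF:01@0,INT0002:00@2";

            printf("%d\n", in_ignore_list(list, "INT0002:00", 2)); /* 1 */
            printf("%d\n", in_ignore_list(list, "INT33FF:01", 5)); /* 0 */
            return 0;
    }
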
476 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
477 +index 484fa6560adc..a8cf55eb54d8 100644
478 +--- a/drivers/gpio/gpiolib.c
479 ++++ b/drivers/gpio/gpiolib.c
480 +@@ -2194,9 +2194,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
481 + {
482 + struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
483 +
484 ++ /*
485 ++ * Since we override .irq_disable() we need to mimic the
486 ++ * behaviour of __irq_disable() in irq/chip.c.
487 ++ * First call .irq_disable() if it exists, else mimic the
488 ++ * behaviour of mask_irq() which calls .irq_mask() if
489 ++ * it exists.
490 ++ */
491 + if (chip->irq.irq_disable)
492 + chip->irq.irq_disable(d);
493 +- else
494 ++ else if (chip->irq.chip->irq_mask)
495 + chip->irq.chip->irq_mask(d);
496 + gpiochip_disable_irq(chip, d->hwirq);
497 + }
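
The gpiolib hunk above copies the core IRQ code's fallback chain: use the chip's .irq_disable() if present, otherwise fall back to .irq_mask(), and (the actual fix) tolerate chips that implement neither. A compact standalone model of that optional-callback dispatch (the struct here is hypothetical, not gpiolib's):

    #include <stdio.h>

    /* Miniature of struct irq_chip's optional hooks. */
    struct chip_ops {
            void (*irq_disable)(int hwirq);
            void (*irq_mask)(int hwirq);
    };

    static void disable_line(const struct chip_ops *ops, int hwirq)
    {
            if (ops->irq_disable)
                    ops->irq_disable(hwirq);    /* preferred hook */
            else if (ops->irq_mask)
                    ops->irq_mask(hwirq);       /* fallback, may be absent */
            /* neither hook: nothing to call; the old code would have
             * jumped through a NULL .irq_mask here */
    }

    static void mask_fn(int hwirq) { printf("masked hwirq %d\n", hwirq); }

    int main(void)
    {
            struct chip_ops only_mask = { .irq_mask = mask_fn };
            struct chip_ops neither = { 0 };

            disable_line(&only_mask, 3);
            disable_line(&neither, 4);  /* safe no-op after the fix */
            return 0;
    }
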
498 +diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
499 +index 80934ca17260..c086262cc181 100644
500 +--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
501 ++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
502 +@@ -84,6 +84,13 @@
503 + #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
504 + #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
505 + #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
506 ++
507 +/* for Vega20/arcturus register offset change */
508 ++#define mmROM_INDEX_VG20 0x00e4
509 ++#define mmROM_INDEX_VG20_BASE_IDX 0
510 ++#define mmROM_DATA_VG20 0x00e5
511 ++#define mmROM_DATA_VG20_BASE_IDX 0
512 ++
513 + /*
514 + * Indirect registers accessor
515 + */
516 +@@ -304,6 +311,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
517 + {
518 + u32 *dw_ptr;
519 + u32 i, length_dw;
520 ++ uint32_t rom_index_offset;
521 ++ uint32_t rom_data_offset;
522 +
523 + if (bios == NULL)
524 + return false;
525 +@@ -316,11 +325,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
526 + dw_ptr = (u32 *)bios;
527 + length_dw = ALIGN(length_bytes, 4) / 4;
528 +
529 ++ switch (adev->asic_type) {
530 ++ case CHIP_VEGA20:
531 ++ case CHIP_ARCTURUS:
532 ++ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
533 ++ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
534 ++ break;
535 ++ default:
536 ++ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
537 ++ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
538 ++ break;
539 ++ }
540 ++
541 + /* set rom index to 0 */
542 +- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
543 ++ WREG32(rom_index_offset, 0);
544 + /* read out the rom data */
545 + for (i = 0; i < length_dw; i++)
546 +- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
547 ++ dw_ptr[i] = RREG32(rom_data_offset);
548 +
549 + return true;
550 + }
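
The soc15 hunk above only changes which register pair is used on Vega20/Arcturus; the access protocol itself is unchanged: write a starting dword index to ROM_INDEX once, then read ROM_DATA repeatedly, which (as the read loop implies) auto-advances through the image. A toy standalone model of that index/data scheme (the auto-increment behavior is inferred from the loop, not from documentation):

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t rom_image[4] = { 0xAA55AA55u, 1, 2, 3 };
    static uint32_t rom_index;

    static void write_rom_index(uint32_t v) { rom_index = v; }
    static uint32_t read_rom_data(void)     { return rom_image[rom_index++ & 3]; }

    int main(void)
    {
            uint32_t buf[4];
            unsigned int i;

            write_rom_index(0);               /* WREG32(rom_index_offset, 0) */
            for (i = 0; i < 4; i++)
                    buf[i] = read_rom_data(); /* RREG32(rom_data_offset)   */

            printf("first dword: 0x%08X\n", (unsigned int)buf[0]);
            return 0;
    }
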
551 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
552 +index 3b7769a3e67e..c13dce760098 100644
553 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
554 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
555 +@@ -269,6 +269,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
556 + .use_urgent_burst_bw = 0
557 + };
558 +
559 ++struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
560 ++ .clock_limits = {
561 ++ {
562 ++ .state = 0,
563 ++ .dcfclk_mhz = 560.0,
564 ++ .fabricclk_mhz = 560.0,
565 ++ .dispclk_mhz = 513.0,
566 ++ .dppclk_mhz = 513.0,
567 ++ .phyclk_mhz = 540.0,
568 ++ .socclk_mhz = 560.0,
569 ++ .dscclk_mhz = 171.0,
570 ++ .dram_speed_mts = 8960.0,
571 ++ },
572 ++ {
573 ++ .state = 1,
574 ++ .dcfclk_mhz = 694.0,
575 ++ .fabricclk_mhz = 694.0,
576 ++ .dispclk_mhz = 642.0,
577 ++ .dppclk_mhz = 642.0,
578 ++ .phyclk_mhz = 600.0,
579 ++ .socclk_mhz = 694.0,
580 ++ .dscclk_mhz = 214.0,
581 ++ .dram_speed_mts = 11104.0,
582 ++ },
583 ++ {
584 ++ .state = 2,
585 ++ .dcfclk_mhz = 875.0,
586 ++ .fabricclk_mhz = 875.0,
587 ++ .dispclk_mhz = 734.0,
588 ++ .dppclk_mhz = 734.0,
589 ++ .phyclk_mhz = 810.0,
590 ++ .socclk_mhz = 875.0,
591 ++ .dscclk_mhz = 245.0,
592 ++ .dram_speed_mts = 14000.0,
593 ++ },
594 ++ {
595 ++ .state = 3,
596 ++ .dcfclk_mhz = 1000.0,
597 ++ .fabricclk_mhz = 1000.0,
598 ++ .dispclk_mhz = 1100.0,
599 ++ .dppclk_mhz = 1100.0,
600 ++ .phyclk_mhz = 810.0,
601 ++ .socclk_mhz = 1000.0,
602 ++ .dscclk_mhz = 367.0,
603 ++ .dram_speed_mts = 16000.0,
604 ++ },
605 ++ {
606 ++ .state = 4,
607 ++ .dcfclk_mhz = 1200.0,
608 ++ .fabricclk_mhz = 1200.0,
609 ++ .dispclk_mhz = 1284.0,
610 ++ .dppclk_mhz = 1284.0,
611 ++ .phyclk_mhz = 810.0,
612 ++ .socclk_mhz = 1200.0,
613 ++ .dscclk_mhz = 428.0,
614 ++ .dram_speed_mts = 16000.0,
615 ++ },
616 ++ /*Extra state, no dispclk ramping*/
617 ++ {
618 ++ .state = 5,
619 ++ .dcfclk_mhz = 1200.0,
620 ++ .fabricclk_mhz = 1200.0,
621 ++ .dispclk_mhz = 1284.0,
622 ++ .dppclk_mhz = 1284.0,
623 ++ .phyclk_mhz = 810.0,
624 ++ .socclk_mhz = 1200.0,
625 ++ .dscclk_mhz = 428.0,
626 ++ .dram_speed_mts = 16000.0,
627 ++ },
628 ++ },
629 ++ .num_states = 5,
630 ++ .sr_exit_time_us = 8.6,
631 ++ .sr_enter_plus_exit_time_us = 10.9,
632 ++ .urgent_latency_us = 4.0,
633 ++ .urgent_latency_pixel_data_only_us = 4.0,
634 ++ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
635 ++ .urgent_latency_vm_data_only_us = 4.0,
636 ++ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
637 ++ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
638 ++ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
639 ++ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
640 ++ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
641 ++ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
642 ++ .max_avg_sdp_bw_use_normal_percent = 40.0,
643 ++ .max_avg_dram_bw_use_normal_percent = 40.0,
644 ++ .writeback_latency_us = 12.0,
645 ++ .ideal_dram_bw_after_urgent_percent = 40.0,
646 ++ .max_request_size_bytes = 256,
647 ++ .dram_channel_width_bytes = 2,
648 ++ .fabric_datapath_to_dcn_data_return_bytes = 64,
649 ++ .dcn_downspread_percent = 0.5,
650 ++ .downspread_percent = 0.38,
651 ++ .dram_page_open_time_ns = 50.0,
652 ++ .dram_rw_turnaround_time_ns = 17.5,
653 ++ .dram_return_buffer_per_channel_bytes = 8192,
654 ++ .round_trip_ping_latency_dcfclk_cycles = 131,
655 ++ .urgent_out_of_order_return_per_channel_bytes = 256,
656 ++ .channel_interleave_bytes = 256,
657 ++ .num_banks = 8,
658 ++ .num_chans = 8,
659 ++ .vmm_page_size_bytes = 4096,
660 ++ .dram_clock_change_latency_us = 404.0,
661 ++ .dummy_pstate_latency_us = 5.0,
662 ++ .writeback_dram_clock_change_latency_us = 23.0,
663 ++ .return_bus_width_bytes = 64,
664 ++ .dispclk_dppclk_vco_speed_mhz = 3850,
665 ++ .xfc_bus_transport_time_us = 20,
666 ++ .xfc_xbuf_latency_tolerance_us = 4,
667 ++ .use_urgent_burst_bw = 0
668 ++};
669 ++
670 + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
671 +
672 + #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
673 +@@ -3135,6 +3246,9 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
674 + static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
675 + uint32_t hw_internal_rev)
676 + {
677 ++ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
678 ++ return &dcn2_0_nv14_soc;
679 ++
680 + if (ASICREV_IS_NAVI12_P(hw_internal_rev))
681 + return &dcn2_0_nv12_soc;
682 +
683 +diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
684 +index 2d5cbfda3ca7..9c262daf5816 100644
685 +--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
686 ++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
687 +@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
688 + struct decon_context {
689 + struct device *dev;
690 + struct drm_device *drm_dev;
691 ++ void *dma_priv;
692 + struct exynos_drm_crtc *crtc;
693 + struct exynos_drm_plane planes[WINDOWS_NR];
694 + struct exynos_drm_plane_config configs[WINDOWS_NR];
695 +@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
696 +
697 + decon_clear_channels(ctx->crtc);
698 +
699 +- return exynos_drm_register_dma(drm_dev, dev);
700 ++ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
701 + }
702 +
703 + static void decon_unbind(struct device *dev, struct device *master, void *data)
704 +@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
705 + decon_disable(ctx->crtc);
706 +
707 + /* detach this sub driver from iommu mapping if supported. */
708 +- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
709 ++ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
710 + }
711 +
712 + static const struct component_ops decon_component_ops = {
713 +diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
714 +index f0640950bd46..6fd40410dfd2 100644
715 +--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
716 ++++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
717 +@@ -40,6 +40,7 @@
718 + struct decon_context {
719 + struct device *dev;
720 + struct drm_device *drm_dev;
721 ++ void *dma_priv;
722 + struct exynos_drm_crtc *crtc;
723 + struct exynos_drm_plane planes[WINDOWS_NR];
724 + struct exynos_drm_plane_config configs[WINDOWS_NR];
725 +@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
726 +
727 + decon_clear_channels(ctx->crtc);
728 +
729 +- return exynos_drm_register_dma(drm_dev, ctx->dev);
730 ++ return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
731 + }
732 +
733 + static void decon_ctx_remove(struct decon_context *ctx)
734 + {
735 + /* detach this sub driver from iommu mapping if supported. */
736 +- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
737 ++ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
738 + }
739 +
740 + static u32 decon_calc_clkdiv(struct decon_context *ctx,
741 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
742 +index 9ebc02768847..619f81435c1b 100644
743 +--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
744 ++++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
745 +@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
746 + * mapping.
747 + */
748 + static int drm_iommu_attach_device(struct drm_device *drm_dev,
749 +- struct device *subdrv_dev)
750 ++ struct device *subdrv_dev, void **dma_priv)
751 + {
752 + struct exynos_drm_private *priv = drm_dev->dev_private;
753 + int ret;
754 +@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
755 + return ret;
756 +
757 + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
758 +- if (to_dma_iommu_mapping(subdrv_dev))
759 ++ /*
760 ++ * Keep the original DMA mapping of the sub-device and
761 ++ * restore it on Exynos DRM detach, otherwise the DMA
762 ++ * framework considers it as IOMMU-less during the next
763 ++ * probe (in case of deferred probe or modular build)
764 ++ */
765 ++ *dma_priv = to_dma_iommu_mapping(subdrv_dev);
766 ++ if (*dma_priv)
767 + arm_iommu_detach_device(subdrv_dev);
768 +
769 + ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
770 +@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
771 + * mapping
772 + */
773 + static void drm_iommu_detach_device(struct drm_device *drm_dev,
774 +- struct device *subdrv_dev)
775 ++ struct device *subdrv_dev, void **dma_priv)
776 + {
777 + struct exynos_drm_private *priv = drm_dev->dev_private;
778 +
779 +- if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
780 ++ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
781 + arm_iommu_detach_device(subdrv_dev);
782 +- else if (IS_ENABLED(CONFIG_IOMMU_DMA))
783 ++ arm_iommu_attach_device(subdrv_dev, *dma_priv);
784 ++ } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
785 + iommu_detach_device(priv->mapping, subdrv_dev);
786 +
787 + clear_dma_max_seg_size(subdrv_dev);
788 + }
789 +
790 +-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
791 ++int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
792 ++ void **dma_priv)
793 + {
794 + struct exynos_drm_private *priv = drm->dev_private;
795 +
796 +@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
797 + priv->mapping = mapping;
798 + }
799 +
800 +- return drm_iommu_attach_device(drm, dev);
801 ++ return drm_iommu_attach_device(drm, dev, dma_priv);
802 + }
803 +
804 +-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
805 ++void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
806 ++ void **dma_priv)
807 + {
808 + if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
809 +- drm_iommu_detach_device(drm, dev);
810 ++ drm_iommu_detach_device(drm, dev, dma_priv);
811 + }
812 +
813 + void exynos_drm_cleanup_dma(struct drm_device *drm)
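
All of the Exynos changes in this patch thread a per-device void *dma_priv through these helpers for one purpose, spelled out in the comment above: stash the device's original ARM IOMMU mapping before detaching it, and put it back on unregister so a later re-probe still sees the device as IOMMU-backed. A standalone model of that save/restore contract (types are stand-ins, not the DRM ones):

    #include <stdio.h>

    struct mapping { const char *name; };

    static struct mapping default_map = { "default-iommu-mapping" };
    static struct mapping drm_map     = { "exynos-drm-mapping" };

    struct device { struct mapping *map; };

    /* bind: remember the pre-existing mapping in *priv, then take over */
    static void attach(struct device *dev, struct mapping **priv)
    {
            *priv = dev->map;
            dev->map = &drm_map;
    }

    /* unbind: restore, so the next probe finds the original mapping */
    static void detach(struct device *dev, struct mapping **priv)
    {
            dev->map = *priv;
    }

    int main(void)
    {
            struct device dev = { .map = &default_map };
            struct mapping *priv;

            attach(&dev, &priv);
            printf("bound:   %s\n", dev.map->name);
            detach(&dev, &priv);
            printf("unbound: %s\n", dev.map->name);
            return 0;
    }
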
814 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
815 +index d4014ba592fd..735f436c857c 100644
816 +--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
817 ++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
818 +@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
819 + return priv->mapping ? true : false;
820 + }
821 +
822 +-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
823 +-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
824 ++int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
825 ++ void **dma_priv);
826 ++void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
827 ++ void **dma_priv);
828 + void exynos_drm_cleanup_dma(struct drm_device *drm);
829 +
830 + #ifdef CONFIG_DRM_EXYNOS_DPI
831 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
832 +index 8ea2e1d77802..29ab8be8604c 100644
833 +--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
834 ++++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
835 +@@ -97,6 +97,7 @@ struct fimc_scaler {
836 + struct fimc_context {
837 + struct exynos_drm_ipp ipp;
838 + struct drm_device *drm_dev;
839 ++ void *dma_priv;
840 + struct device *dev;
841 + struct exynos_drm_ipp_task *task;
842 + struct exynos_drm_ipp_formats *formats;
843 +@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
844 +
845 + ctx->drm_dev = drm_dev;
846 + ipp->drm_dev = drm_dev;
847 +- exynos_drm_register_dma(drm_dev, dev);
848 ++ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
849 +
850 + exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
851 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
852 +@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
853 + struct exynos_drm_ipp *ipp = &ctx->ipp;
854 +
855 + exynos_drm_ipp_unregister(dev, ipp);
856 +- exynos_drm_unregister_dma(drm_dev, dev);
857 ++ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
858 + }
859 +
860 + static const struct component_ops fimc_component_ops = {
861 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
862 +index 8d0a929104e5..34e6b22173fa 100644
863 +--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
864 ++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
865 +@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
866 + struct fimd_context {
867 + struct device *dev;
868 + struct drm_device *drm_dev;
869 ++ void *dma_priv;
870 + struct exynos_drm_crtc *crtc;
871 + struct exynos_drm_plane planes[WINDOWS_NR];
872 + struct exynos_drm_plane_config configs[WINDOWS_NR];
873 +@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
874 + if (is_drm_iommu_supported(drm_dev))
875 + fimd_clear_channels(ctx->crtc);
876 +
877 +- return exynos_drm_register_dma(drm_dev, dev);
878 ++ return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
879 + }
880 +
881 + static void fimd_unbind(struct device *dev, struct device *master,
882 +@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
883 +
884 + fimd_disable(ctx->crtc);
885 +
886 +- exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
887 ++ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
888 +
889 + if (ctx->encoder)
890 + exynos_dpi_remove(ctx->encoder);
891 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
892 +index 2a3382d43bc9..fcee33a43aca 100644
893 +--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
894 ++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
895 +@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
896 +
897 + struct g2d_data {
898 + struct device *dev;
899 ++ void *dma_priv;
900 + struct clk *gate_clk;
901 + void __iomem *regs;
902 + int irq;
903 +@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
904 + return ret;
905 + }
906 +
907 +- ret = exynos_drm_register_dma(drm_dev, dev);
908 ++ ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
909 + if (ret < 0) {
910 + dev_err(dev, "failed to enable iommu.\n");
911 + g2d_fini_cmdlist(g2d);
912 +@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
913 + priv->g2d_dev = NULL;
914 +
915 + cancel_work_sync(&g2d->runqueue_work);
916 +- exynos_drm_unregister_dma(g2d->drm_dev, dev);
917 ++ exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
918 + }
919 +
920 + static const struct component_ops g2d_component_ops = {
921 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
922 +index 88b6fcaa20be..45e9aee8366a 100644
923 +--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
924 ++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
925 +@@ -97,6 +97,7 @@ struct gsc_scaler {
926 + struct gsc_context {
927 + struct exynos_drm_ipp ipp;
928 + struct drm_device *drm_dev;
929 ++ void *dma_priv;
930 + struct device *dev;
931 + struct exynos_drm_ipp_task *task;
932 + struct exynos_drm_ipp_formats *formats;
933 +@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
934 +
935 + ctx->drm_dev = drm_dev;
936 + ctx->drm_dev = drm_dev;
937 +- exynos_drm_register_dma(drm_dev, dev);
938 ++ exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
939 +
940 + exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
941 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
942 +@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
943 + struct exynos_drm_ipp *ipp = &ctx->ipp;
944 +
945 + exynos_drm_ipp_unregister(dev, ipp);
946 +- exynos_drm_unregister_dma(drm_dev, dev);
947 ++ exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
948 + }
949 +
950 + static const struct component_ops gsc_component_ops = {
951 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
952 +index b98482990d1a..dafa87b82052 100644
953 +--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
954 ++++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
955 +@@ -56,6 +56,7 @@ struct rot_variant {
956 + struct rot_context {
957 + struct exynos_drm_ipp ipp;
958 + struct drm_device *drm_dev;
959 ++ void *dma_priv;
960 + struct device *dev;
961 + void __iomem *regs;
962 + struct clk *clock;
963 +@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
964 +
965 + rot->drm_dev = drm_dev;
966 + ipp->drm_dev = drm_dev;
967 +- exynos_drm_register_dma(drm_dev, dev);
968 ++ exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
969 +
970 + exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
971 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
972 +@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
973 + struct exynos_drm_ipp *ipp = &rot->ipp;
974 +
975 + exynos_drm_ipp_unregister(dev, ipp);
976 +- exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
977 ++ exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
978 + }
979 +
980 + static const struct component_ops rotator_component_ops = {
981 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
982 +index 497973e9b2c5..93c43c8d914e 100644
983 +--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
984 ++++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
985 +@@ -39,6 +39,7 @@ struct scaler_data {
986 + struct scaler_context {
987 + struct exynos_drm_ipp ipp;
988 + struct drm_device *drm_dev;
989 ++ void *dma_priv;
990 + struct device *dev;
991 + void __iomem *regs;
992 + struct clk *clock[SCALER_MAX_CLK];
993 +@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
994 +
995 + scaler->drm_dev = drm_dev;
996 + ipp->drm_dev = drm_dev;
997 +- exynos_drm_register_dma(drm_dev, dev);
998 ++ exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
999 +
1000 + exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
1001 + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
1002 +@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
1003 + struct exynos_drm_ipp *ipp = &scaler->ipp;
1004 +
1005 + exynos_drm_ipp_unregister(dev, ipp);
1006 +- exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
1007 ++ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
1008 ++ &scaler->dma_priv);
1009 + }
1010 +
1011 + static const struct component_ops scaler_component_ops = {
1012 +diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
1013 +index 7b24338fad3c..22f494145411 100644
1014 +--- a/drivers/gpu/drm/exynos/exynos_mixer.c
1015 ++++ b/drivers/gpu/drm/exynos/exynos_mixer.c
1016 +@@ -94,6 +94,7 @@ struct mixer_context {
1017 + struct platform_device *pdev;
1018 + struct device *dev;
1019 + struct drm_device *drm_dev;
1020 ++ void *dma_priv;
1021 + struct exynos_drm_crtc *crtc;
1022 + struct exynos_drm_plane planes[MIXER_WIN_NR];
1023 + unsigned long flags;
1024 +@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
1025 + }
1026 + }
1027 +
1028 +- return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
1029 ++ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
1030 ++ &mixer_ctx->dma_priv);
1031 + }
1032 +
1033 + static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
1034 + {
1035 +- exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
1036 ++ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
1037 ++ &mixer_ctx->dma_priv);
1038 + }
1039 +
1040 + static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
1041 +diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
1042 +index 8497c7a95dd4..224f830f77f9 100644
1043 +--- a/drivers/i2c/busses/i2c-hix5hd2.c
1044 ++++ b/drivers/i2c/busses/i2c-hix5hd2.c
1045 +@@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
1046 + i2c_del_adapter(&priv->adap);
1047 + pm_runtime_disable(priv->dev);
1048 + pm_runtime_set_suspended(priv->dev);
1049 ++ clk_disable_unprepare(priv->clk);
1050 +
1051 + return 0;
1052 + }
1053 +diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
1054 +index 5a1235fd86bb..32cd62188a3d 100644
1055 +--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
1056 ++++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
1057 +@@ -8,6 +8,7 @@
1058 + #include <linux/delay.h>
1059 + #include <linux/i2c.h>
1060 + #include <linux/interrupt.h>
1061 ++#include <linux/iopoll.h>
1062 + #include <linux/module.h>
1063 + #include <linux/pci.h>
1064 + #include <linux/platform_device.h>
1065 +@@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
1066 +
1067 + static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
1068 + {
1069 +- unsigned long target = jiffies + msecs_to_jiffies(1000);
1070 + u32 val;
1071 ++ int ret;
1072 +
1073 +- do {
1074 +- val = readl(i2cd->regs + I2C_MST_CNTL);
1075 +- if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
1076 +- break;
1077 +- if ((val & I2C_MST_CNTL_STATUS) !=
1078 +- I2C_MST_CNTL_STATUS_BUS_BUSY)
1079 +- break;
1080 +- usleep_range(500, 600);
1081 +- } while (time_is_after_jiffies(target));
1082 +-
1083 +- if (time_is_before_jiffies(target)) {
1084 ++ ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val,
1085 ++ !(val & I2C_MST_CNTL_CYCLE_TRIGGER) ||
1086 ++ (val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY,
1087 ++ 500, 1000 * USEC_PER_MSEC);
1088 ++
1089 ++ if (ret) {
1090 + dev_err(i2cd->dev, "i2c timeout error %x\n", val);
1091 + return -ETIMEDOUT;
1092 + }
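
The i2c-nvidia-gpu hunk swaps a hand-rolled jiffies loop for readl_poll_timeout() from <linux/iopoll.h>: re-read a register until a condition holds or the timeout (in microseconds) expires, returning 0 or -ETIMEDOUT with the last value left in val. A standalone model of that helper's shape (GNU C statement expression, as the kernel macro uses; the timeout here is a simple iteration bound, not real microseconds):

    #include <stdio.h>

    #define poll_timeout(READ, VAL, COND, SLEEP_US, TIMEOUT_US)       \
    ({                                                                \
            long __left = (TIMEOUT_US);                               \
            int __ret = -1;             /* kernel: -ETIMEDOUT */      \
            while (__left-- > 0) {                                    \
                    (VAL) = (READ);                                   \
                    if (COND) { __ret = 0; break; }                   \
            }                                                         \
            __ret;                                                    \
    })

    static unsigned int reads;
    static unsigned int read_reg(void)
    {
            return ++reads >= 3 ? 0 : 1;    /* "busy" bit clears on read 3 */
    }

    int main(void)
    {
            unsigned int val;
            int ret = poll_timeout(read_reg(), val, !(val & 1), 500, 1000);

            printf("ret=%d val=%u reads=%u\n", ret, val, reads);
            return 0;
    }
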
1093 +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
1094 +index 2a770b8dca00..10ae6c6eab0a 100644
1095 +--- a/drivers/infiniband/core/device.c
1096 ++++ b/drivers/infiniband/core/device.c
1097 +@@ -899,7 +899,9 @@ static int add_one_compat_dev(struct ib_device *device,
1098 + cdev->dev.parent = device->dev.parent;
1099 + rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
1100 + cdev->dev.release = compatdev_release;
1101 +- dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
1102 ++ ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
1103 ++ if (ret)
1104 ++ goto add_err;
1105 +
1106 + ret = device_add(&cdev->dev);
1107 + if (ret)
1108 +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1109 +index ad4301ecfa59..ef4b0c7061e4 100644
1110 +--- a/drivers/infiniband/core/nldev.c
1111 ++++ b/drivers/infiniband/core/nldev.c
1112 +@@ -863,6 +863,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1113 +
1114 + nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1115 + IB_DEVICE_NAME_MAX);
1116 ++ if (strlen(name) == 0) {
1117 ++ err = -EINVAL;
1118 ++ goto done;
1119 ++ }
1120 + err = ib_device_rename(device, name);
1121 + goto done;
1122 + }
1123 +@@ -1468,7 +1472,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
1124 +
1125 + nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1126 + sizeof(ibdev_name));
1127 +- if (strchr(ibdev_name, '%'))
1128 ++ if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
1129 + return -EINVAL;
1130 +
1131 + nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
1132 +diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1133 +index 2d5608315dc8..75e7ec017836 100644
1134 +--- a/drivers/infiniband/core/security.c
1135 ++++ b/drivers/infiniband/core/security.c
1136 +@@ -349,16 +349,11 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
1137 + else if (qp_pps)
1138 + new_pps->main.pkey_index = qp_pps->main.pkey_index;
1139 +
1140 +- if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
1141 ++ if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
1142 ++ (qp_attr_mask & IB_QP_PORT)) ||
1143 ++ (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
1144 + new_pps->main.state = IB_PORT_PKEY_VALID;
1145 +
1146 +- if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
1147 +- new_pps->main.port_num = qp_pps->main.port_num;
1148 +- new_pps->main.pkey_index = qp_pps->main.pkey_index;
1149 +- if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
1150 +- new_pps->main.state = IB_PORT_PKEY_VALID;
1151 +- }
1152 +-
1153 + if (qp_attr_mask & IB_QP_ALT_PATH) {
1154 + new_pps->alt.port_num = qp_attr->alt_port_num;
1155 + new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
1156 +diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1157 +index 1235ffb2389b..da229eab5903 100644
1158 +--- a/drivers/infiniband/core/user_mad.c
1159 ++++ b/drivers/infiniband/core/user_mad.c
1160 +@@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = {
1161 + .llseek = no_llseek,
1162 + };
1163 +
1164 ++static struct ib_umad_port *get_port(struct ib_device *ibdev,
1165 ++ struct ib_umad_device *umad_dev,
1166 ++ unsigned int port)
1167 ++{
1168 ++ if (!umad_dev)
1169 ++ return ERR_PTR(-EOPNOTSUPP);
1170 ++ if (!rdma_is_port_valid(ibdev, port))
1171 ++ return ERR_PTR(-EINVAL);
1172 ++ if (!rdma_cap_ib_mad(ibdev, port))
1173 ++ return ERR_PTR(-EOPNOTSUPP);
1174 ++
1175 ++ return &umad_dev->ports[port - rdma_start_port(ibdev)];
1176 ++}
1177 ++
1178 + static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
1179 + struct ib_client_nl_info *res)
1180 + {
1181 +- struct ib_umad_device *umad_dev = client_data;
1182 ++ struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
1183 +
1184 +- if (!rdma_is_port_valid(ibdev, res->port))
1185 +- return -EINVAL;
1186 ++ if (IS_ERR(port))
1187 ++ return PTR_ERR(port);
1188 +
1189 + res->abi = IB_USER_MAD_ABI_VERSION;
1190 +- res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;
1191 +-
1192 ++ res->cdev = &port->dev;
1193 + return 0;
1194 + }
1195 +
1196 +@@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad");
1197 + static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
1198 + struct ib_client_nl_info *res)
1199 + {
1200 +- struct ib_umad_device *umad_dev =
1201 +- ib_get_client_data(ibdev, &umad_client);
1202 ++ struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
1203 +
1204 +- if (!rdma_is_port_valid(ibdev, res->port))
1205 +- return -EINVAL;
1206 ++ if (IS_ERR(port))
1207 ++ return PTR_ERR(port);
1208 +
1209 + res->abi = IB_USER_MAD_ABI_VERSION;
1210 +- res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;
1211 +-
1212 ++ res->cdev = &port->sm_dev;
1213 + return 0;
1214 + }
1215 +
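
The new get_port() helper above folds three validity checks into one call by encoding the errno in the returned pointer, the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom from <linux/err.h>. A standalone re-derivation of the idiom (macro bodies copied in spirit; MAX_ERRNO is the kernel's 4095):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    struct port { int num; };
    static struct port ports[4];

    static struct port *get_port(int have_dev, int port)
    {
            if (!have_dev)
                    return ERR_PTR(-EOPNOTSUPP);
            if (port < 0 || port >= 4)
                    return ERR_PTR(-EINVAL);    /* invalid port number */
            return &ports[port];
    }

    int main(void)
    {
            struct port *p = get_port(1, 9);

            if (IS_ERR(p))
                    printf("error %ld\n", PTR_ERR(p)); /* error -22 */
            else
                    printf("port ok\n");
            return 0;
    }
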
1216 +diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1217 +index 45f48cde6b9d..ff664355de55 100644
1218 +--- a/drivers/infiniband/hw/mlx5/cq.c
1219 ++++ b/drivers/infiniband/hw/mlx5/cq.c
1220 +@@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
1221 + dump_cqe(dev, cqe);
1222 + }
1223 +
1224 ++static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
1225 ++ u16 tail, u16 head)
1226 ++{
1227 ++ u16 idx;
1228 ++
1229 ++ do {
1230 ++ idx = tail & (qp->sq.wqe_cnt - 1);
1231 ++ if (idx == head)
1232 ++ break;
1233 ++
1234 ++ tail = qp->sq.w_list[idx].next;
1235 ++ } while (1);
1236 ++ tail = qp->sq.w_list[idx].next;
1237 ++ qp->sq.last_poll = tail;
1238 ++}
1239 ++
1240 + static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
1241 + {
1242 + mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
1243 +@@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
1244 + }
1245 +
1246 + static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
1247 +- int *npolled, int is_send)
1248 ++ int *npolled, bool is_send)
1249 + {
1250 + struct mlx5_ib_wq *wq;
1251 + unsigned int cur;
1252 +@@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
1253 + return;
1254 +
1255 + for (i = 0; i < cur && np < num_entries; i++) {
1256 +- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
1257 ++ unsigned int idx;
1258 ++
1259 ++ idx = (is_send) ? wq->last_poll : wq->tail;
1260 ++ idx &= (wq->wqe_cnt - 1);
1261 ++ wc->wr_id = wq->wrid[idx];
1262 + wc->status = IB_WC_WR_FLUSH_ERR;
1263 + wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
1264 + wq->tail++;
1265 ++ if (is_send)
1266 ++ wq->last_poll = wq->w_list[idx].next;
1267 + np++;
1268 + wc->qp = &qp->ibqp;
1269 + wc++;
1270 +@@ -476,6 +498,7 @@ repoll:
1271 + wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
1272 + idx = wqe_ctr & (wq->wqe_cnt - 1);
1273 + handle_good_req(wc, cqe64, wq, idx);
1274 ++ handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
1275 + wc->wr_id = wq->wrid[idx];
1276 + wq->tail = wq->wqe_head[idx] + 1;
1277 + wc->status = IB_WC_SUCCESS;
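
handle_atomics() above advances sq.last_poll by chasing w_list next-links around a power-of-two send ring until it reaches the completed WQE's slot, then steps one entry past it; & (wqe_cnt - 1) is the usual cheap ring modulo. A standalone toy of the walk (assumption, read off the diff: w_list[idx].next holds the ring index where the following WQE starts):

    #include <stdio.h>

    #define WQE_CNT 8                       /* power of two, as in mlx5 */

    struct wseg { unsigned short next; };   /* stand-in for sq.w_list[] */

    static unsigned short advance_last_poll(const struct wseg *w_list,
                                            unsigned short tail,
                                            unsigned short head)
    {
            unsigned short idx;

            do {
                    idx = tail & (WQE_CNT - 1);
                    if (idx == head)
                            break;
                    tail = w_list[idx].next;
            } while (1);

            return w_list[idx].next;        /* slot just past the head */
    }

    int main(void)
    {
            /* each posted WQE records where the next one begins */
            struct wseg w[WQE_CNT] = {
                    [0] = { .next = 2 },
                    [2] = { .next = 5 },
                    [5] = { .next = 7 },
            };

            printf("last_poll -> %u\n", advance_last_poll(w, 0, 5)); /* 7 */
            return 0;
    }
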
1278 +diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1279 +index 9a918db9e8db..0a160fd1383a 100644
1280 +--- a/drivers/infiniband/hw/mlx5/main.c
1281 ++++ b/drivers/infiniband/hw/mlx5/main.c
1282 +@@ -5638,9 +5638,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
1283 + const struct mlx5_ib_counters *cnts =
1284 + get_counters(dev, counter->port - 1);
1285 +
1286 +- /* Q counters are in the beginning of all counters */
1287 + return rdma_alloc_hw_stats_struct(cnts->names,
1288 +- cnts->num_q_counters,
1289 ++ cnts->num_q_counters +
1290 ++ cnts->num_cong_counters +
1291 ++ cnts->num_ext_ppcnt_counters,
1292 + RDMA_HW_STATS_DEFAULT_LIFESPAN);
1293 + }
1294 +
1295 +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
1296 +index 1a98ee2e01c4..a9ce46c4c1ae 100644
1297 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
1298 ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
1299 +@@ -283,6 +283,7 @@ struct mlx5_ib_wq {
1300 + unsigned head;
1301 + unsigned tail;
1302 + u16 cur_post;
1303 ++ u16 last_poll;
1304 + void *cur_edge;
1305 + };
1306 +
1307 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
1308 +index 0865373bd12d..881decb1309a 100644
1309 +--- a/drivers/infiniband/hw/mlx5/qp.c
1310 ++++ b/drivers/infiniband/hw/mlx5/qp.c
1311 +@@ -3725,6 +3725,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1312 + qp->sq.cur_post = 0;
1313 + if (qp->sq.wqe_cnt)
1314 + qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1315 ++ qp->sq.last_poll = 0;
1316 + qp->db.db[MLX5_RCV_DBR] = 0;
1317 + qp->db.db[MLX5_SND_DBR] = 0;
1318 + }
1319 +@@ -6131,6 +6132,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
1320 + if (udata->outlen && udata->outlen < min_resp_len)
1321 + return ERR_PTR(-EINVAL);
1322 +
1323 ++ if (!capable(CAP_SYS_RAWIO) &&
1324 ++ init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
1325 ++ return ERR_PTR(-EPERM);
1326 ++
1327 + dev = to_mdev(pd->device);
1328 + switch (init_attr->wq_type) {
1329 + case IB_WQT_RQ:
1330 +diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
1331 +index a85571a4cf57..0fee3c87776b 100644
1332 +--- a/drivers/infiniband/sw/rdmavt/cq.c
1333 ++++ b/drivers/infiniband/sw/rdmavt/cq.c
1334 +@@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1335 + if (cq->ip)
1336 + kref_put(&cq->ip->ref, rvt_release_mmap_info);
1337 + else
1338 +- vfree(cq->queue);
1339 ++ vfree(cq->kqueue);
1340 + }
1341 +
1342 + /**
1343 +diff --git a/drivers/input/input.c b/drivers/input/input.c
1344 +index ee6c3234df36..e2eb9b9b8363 100644
1345 +--- a/drivers/input/input.c
1346 ++++ b/drivers/input/input.c
1347 +@@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t)
1348 + input_value_sync
1349 + };
1350 +
1351 ++ input_set_timestamp(dev, ktime_get());
1352 + input_pass_values(dev, vals, ARRAY_SIZE(vals));
1353 +
1354 + if (dev->rep[REP_PERIOD])
1355 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1356 +index 2c666fb34625..4d2036209b45 100644
1357 +--- a/drivers/input/mouse/synaptics.c
1358 ++++ b/drivers/input/mouse/synaptics.c
1359 +@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
1360 + "SYN3052", /* HP EliteBook 840 G4 */
1361 + "SYN3221", /* HP 15-ay000 */
1362 + "SYN323d", /* HP Spectre X360 13-w013dx */
1363 ++ "SYN3257", /* HP Envy 13-ad105ng */
1364 + NULL
1365 + };
1366 +
1367 +diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
1368 +index 6ed9f22e6401..fe245439adee 100644
1369 +--- a/drivers/input/touchscreen/raydium_i2c_ts.c
1370 ++++ b/drivers/input/touchscreen/raydium_i2c_ts.c
1371 +@@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
1372 + return 0;
1373 + }
1374 +
1375 +-static bool raydium_i2c_boot_trigger(struct i2c_client *client)
1376 ++static int raydium_i2c_boot_trigger(struct i2c_client *client)
1377 + {
1378 + static const u8 cmd[7][6] = {
1379 + { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
1380 +@@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
1381 + }
1382 + }
1383 +
1384 +- return false;
1385 ++ return 0;
1386 + }
1387 +
1388 +-static bool raydium_i2c_fw_trigger(struct i2c_client *client)
1389 ++static int raydium_i2c_fw_trigger(struct i2c_client *client)
1390 + {
1391 + static const u8 cmd[5][11] = {
1392 + { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
1393 +@@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
1394 + }
1395 + }
1396 +
1397 +- return false;
1398 ++ return 0;
1399 + }
1400 +
1401 + static int raydium_i2c_check_path(struct i2c_client *client)
1402 +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1403 +index 6a9a1b987520..9e393b9c5091 100644
1404 +--- a/drivers/iommu/dmar.c
1405 ++++ b/drivers/iommu/dmar.c
1406 +@@ -371,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
1407 + {
1408 + struct dmar_drhd_unit *dmaru;
1409 +
1410 +- list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
1411 ++ list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
1412 ++ dmar_rcu_check())
1413 + if (dmaru->segment == drhd->segment &&
1414 + dmaru->reg_base_addr == drhd->address)
1415 + return dmaru;
1416 +diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
1417 +index 471f05d452e0..bdf095e9dbe0 100644
1418 +--- a/drivers/iommu/intel-iommu-debugfs.c
1419 ++++ b/drivers/iommu/intel-iommu-debugfs.c
1420 +@@ -32,38 +32,42 @@ struct iommu_regset {
1421 +
1422 + #define IOMMU_REGSET_ENTRY(_reg_) \
1423 + { DMAR_##_reg_##_REG, __stringify(_reg_) }
1424 +-static const struct iommu_regset iommu_regs[] = {
1425 ++
1426 ++static const struct iommu_regset iommu_regs_32[] = {
1427 + IOMMU_REGSET_ENTRY(VER),
1428 +- IOMMU_REGSET_ENTRY(CAP),
1429 +- IOMMU_REGSET_ENTRY(ECAP),
1430 + IOMMU_REGSET_ENTRY(GCMD),
1431 + IOMMU_REGSET_ENTRY(GSTS),
1432 +- IOMMU_REGSET_ENTRY(RTADDR),
1433 +- IOMMU_REGSET_ENTRY(CCMD),
1434 + IOMMU_REGSET_ENTRY(FSTS),
1435 + IOMMU_REGSET_ENTRY(FECTL),
1436 + IOMMU_REGSET_ENTRY(FEDATA),
1437 + IOMMU_REGSET_ENTRY(FEADDR),
1438 + IOMMU_REGSET_ENTRY(FEUADDR),
1439 +- IOMMU_REGSET_ENTRY(AFLOG),
1440 + IOMMU_REGSET_ENTRY(PMEN),
1441 + IOMMU_REGSET_ENTRY(PLMBASE),
1442 + IOMMU_REGSET_ENTRY(PLMLIMIT),
1443 ++ IOMMU_REGSET_ENTRY(ICS),
1444 ++ IOMMU_REGSET_ENTRY(PRS),
1445 ++ IOMMU_REGSET_ENTRY(PECTL),
1446 ++ IOMMU_REGSET_ENTRY(PEDATA),
1447 ++ IOMMU_REGSET_ENTRY(PEADDR),
1448 ++ IOMMU_REGSET_ENTRY(PEUADDR),
1449 ++};
1450 ++
1451 ++static const struct iommu_regset iommu_regs_64[] = {
1452 ++ IOMMU_REGSET_ENTRY(CAP),
1453 ++ IOMMU_REGSET_ENTRY(ECAP),
1454 ++ IOMMU_REGSET_ENTRY(RTADDR),
1455 ++ IOMMU_REGSET_ENTRY(CCMD),
1456 ++ IOMMU_REGSET_ENTRY(AFLOG),
1457 + IOMMU_REGSET_ENTRY(PHMBASE),
1458 + IOMMU_REGSET_ENTRY(PHMLIMIT),
1459 + IOMMU_REGSET_ENTRY(IQH),
1460 + IOMMU_REGSET_ENTRY(IQT),
1461 + IOMMU_REGSET_ENTRY(IQA),
1462 +- IOMMU_REGSET_ENTRY(ICS),
1463 + IOMMU_REGSET_ENTRY(IRTA),
1464 + IOMMU_REGSET_ENTRY(PQH),
1465 + IOMMU_REGSET_ENTRY(PQT),
1466 + IOMMU_REGSET_ENTRY(PQA),
1467 +- IOMMU_REGSET_ENTRY(PRS),
1468 +- IOMMU_REGSET_ENTRY(PECTL),
1469 +- IOMMU_REGSET_ENTRY(PEDATA),
1470 +- IOMMU_REGSET_ENTRY(PEADDR),
1471 +- IOMMU_REGSET_ENTRY(PEUADDR),
1472 + IOMMU_REGSET_ENTRY(MTRRCAP),
1473 + IOMMU_REGSET_ENTRY(MTRRDEF),
1474 + IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
1475 +@@ -126,10 +130,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
1476 + * by adding the offset to the pointer (virtual address).
1477 + */
1478 + raw_spin_lock_irqsave(&iommu->register_lock, flag);
1479 +- for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
1480 +- value = dmar_readq(iommu->reg + iommu_regs[i].offset);
1481 ++ for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
1482 ++ value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
1483 ++ seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
1484 ++ iommu_regs_32[i].regs, iommu_regs_32[i].offset,
1485 ++ value);
1486 ++ }
1487 ++ for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
1488 ++ value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
1489 + seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
1490 +- iommu_regs[i].regs, iommu_regs[i].offset,
1491 ++ iommu_regs_64[i].regs, iommu_regs_64[i].offset,
1492 + value);
1493 + }
1494 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1495 +@@ -271,9 +281,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
1496 + {
1497 + struct dmar_drhd_unit *drhd;
1498 + struct intel_iommu *iommu;
1499 ++ u32 sts;
1500 +
1501 + rcu_read_lock();
1502 + for_each_active_iommu(iommu, drhd) {
1503 ++ sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
1504 ++ if (!(sts & DMA_GSTS_TES)) {
1505 ++ seq_printf(m, "DMA Remapping is not enabled on %s\n",
1506 ++ iommu->name);
1507 ++ continue;
1508 ++ }
1509 + root_tbl_walk(m, iommu);
1510 + seq_putc(m, '\n');
1511 + }
1512 +@@ -343,6 +360,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
1513 + struct dmar_drhd_unit *drhd;
1514 + struct intel_iommu *iommu;
1515 + u64 irta;
1516 ++ u32 sts;
1517 +
1518 + rcu_read_lock();
1519 + for_each_active_iommu(iommu, drhd) {
1520 +@@ -352,7 +370,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
1521 + seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
1522 + iommu->name);
1523 +
1524 +- if (iommu->ir_table) {
1525 ++ sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
1526 ++ if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
1527 + irta = virt_to_phys(iommu->ir_table->base);
1528 + seq_printf(m, " IR table address:%llx\n", irta);
1529 + ir_tbl_remap_entry_show(m, iommu);
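The table split above matters because the DMAR register file mixes 32- and 64-bit registers, and a 64-bit read of a 32-bit register is not guaranteed to be safe. A small user-space analogue of dumping two width-segregated offset tables (offsets and names are illustrative; memcpy stands in for dmar_readl()/dmar_readq()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct regset { unsigned int off; const char *name; };

static const struct regset regs32[] = { { 0x00, "VER" }, { 0x18, "GCMD" } };
static const struct regset regs64[] = { { 0x08, "CAP" }, { 0x10, "ECAP" } };

int main(void)
{
	uint8_t mmio[0x40] = { 0 };	/* stands in for the mapped BAR */
	uint32_t v32;
	uint64_t v64;
	size_t i;

	for (i = 0; i < sizeof(regs32) / sizeof(regs32[0]); i++) {
		memcpy(&v32, mmio + regs32[i].off, sizeof(v32)); /* 32-bit read */
		printf("%-6s 0x%02x 0x%016llx\n", regs32[i].name,
		       regs32[i].off, (unsigned long long)v32);
	}
	for (i = 0; i < sizeof(regs64) / sizeof(regs64[0]); i++) {
		memcpy(&v64, mmio + regs64[i].off, sizeof(v64)); /* 64-bit read */
		printf("%-6s 0x%02x 0x%016llx\n", regs64[i].name,
		       regs64[i].off, (unsigned long long)v64);
	}
	return 0;
}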
1530 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1531 +index 1c2b3e78056f..9d47b227e557 100644
1532 +--- a/drivers/iommu/intel-iommu.c
1533 ++++ b/drivers/iommu/intel-iommu.c
1534 +@@ -4961,6 +4961,9 @@ int __init intel_iommu_init(void)
1535 +
1536 + down_write(&dmar_global_lock);
1537 +
1538 ++ if (!no_iommu)
1539 ++ intel_iommu_debugfs_init();
1540 ++
1541 + if (no_iommu || dmar_disabled) {
1542 + /*
1543 + * We exit the function here to ensure IOMMU's remapping and
1544 +@@ -5056,7 +5059,6 @@ int __init intel_iommu_init(void)
1545 + pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
1546 +
1547 + intel_iommu_enabled = 1;
1548 +- intel_iommu_debugfs_init();
1549 +
1550 + return 0;
1551 +
1552 +diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1553 +index 039963a7765b..198ddfb8d2b1 100644
1554 +--- a/drivers/media/usb/b2c2/flexcop-usb.c
1555 ++++ b/drivers/media/usb/b2c2/flexcop-usb.c
1556 +@@ -511,6 +511,9 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
1557 + return ret;
1558 + }
1559 +
1560 ++ if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
1561 ++ return -ENODEV;
1562 ++
1563 + switch (fc_usb->udev->speed) {
1564 + case USB_SPEED_LOW:
1565 + err("cannot handle USB speed because it is too slow.");
1566 +@@ -544,9 +547,6 @@ static int flexcop_usb_probe(struct usb_interface *intf,
1567 + struct flexcop_device *fc = NULL;
1568 + int ret;
1569 +
1570 +- if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1571 +- return -ENODEV;
1572 +-
1573 + if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
1574 + err("out of memory\n");
1575 + return -ENOMEM;
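Several of the media fixes in this release repeat one pattern: bound-check bNumEndpoints on the altsetting actually in use before touching endpoint[0], since a malformed or malicious device can expose an interface with no endpoints. A kernel-context sketch of the recurring check (not standalone):

/* Validate the *current* altsetting before dereferencing its
 * endpoint array; a device is free to report zero endpoints here.
 */
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
	return -ENODEV;

ep = &intf->cur_altsetting->endpoint[0].desc;	/* now known to exist */

The dib0700 hunk that follows makes the related fix of checking cur_altsetting instead of altsetting[0], since the altsetting in effect need not be the first one described.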
1576 +diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
1577 +index e53c58ab6488..ef62dd6c5ae4 100644
1578 +--- a/drivers/media/usb/dvb-usb/dib0700_core.c
1579 ++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
1580 +@@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
1581 +
1582 + /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
1583 +
1584 +- if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
1585 ++ if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1)
1586 + return -ENODEV;
1587 +
1588 + purb = usb_alloc_urb(0, GFP_KERNEL);
1589 +@@ -838,7 +838,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
1590 + * Some devices like the Hauppauge NovaTD model 52009 use an interrupt
1591 + * endpoint, while others use a bulk one.
1592 + */
1593 +- e = &intf->altsetting[0].endpoint[rc_ep].desc;
1594 ++ e = &intf->cur_altsetting->endpoint[rc_ep].desc;
1595 + if (usb_endpoint_dir_in(e)) {
1596 + if (usb_endpoint_xfer_bulk(e)) {
1597 + pipe = usb_rcvbulkpipe(d->udev, rc_ep);
1598 +diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
1599 +index f417dfc0b872..0afe70a3f9a2 100644
1600 +--- a/drivers/media/usb/gspca/ov519.c
1601 ++++ b/drivers/media/usb/gspca/ov519.c
1602 +@@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd)
1603 + return;
1604 + }
1605 +
1606 ++ if (alt->desc.bNumEndpoints < 1) {
1607 ++ sd->gspca_dev.usb_err = -ENODEV;
1608 ++ return;
1609 ++ }
1610 ++
1611 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1612 + reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
1613 +
1614 +@@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd)
1615 + return;
1616 + }
1617 +
1618 ++ if (alt->desc.bNumEndpoints < 1) {
1619 ++ sd->gspca_dev.usb_err = -ENODEV;
1620 ++ return;
1621 ++ }
1622 ++
1623 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1624 + ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
1625 +
1626 +diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
1627 +index 79653d409951..95673fc0a99c 100644
1628 +--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
1629 ++++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
1630 +@@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev)
1631 + return -EIO;
1632 + }
1633 +
1634 ++ if (alt->desc.bNumEndpoints < 1)
1635 ++ return -ENODEV;
1636 ++
1637 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1638 + err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size);
1639 + if (err < 0)
1640 +@@ -306,11 +309,21 @@ out:
1641 +
1642 + static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
1643 + {
1644 ++ struct usb_interface_cache *intfc;
1645 + struct usb_host_interface *alt;
1646 + struct sd *sd = (struct sd *) gspca_dev;
1647 +
1648 ++ intfc = gspca_dev->dev->actconfig->intf_cache[0];
1649 ++
1650 ++ if (intfc->num_altsetting < 2)
1651 ++ return -ENODEV;
1652 ++
1653 ++ alt = &intfc->altsetting[1];
1654 ++
1655 ++ if (alt->desc.bNumEndpoints < 1)
1656 ++ return -ENODEV;
1657 ++
1658 + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */
1659 +- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1660 + alt->endpoint[0].desc.wMaxPacketSize =
1661 + cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
1662 +
1663 +@@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
1664 + struct usb_host_interface *alt;
1665 + struct sd *sd = (struct sd *) gspca_dev;
1666 +
1667 ++ /*
1668 ++ * Existence of altsetting and endpoint was verified in
1669 ++ * stv06xx_isoc_init()
1670 ++ */
1671 + alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1672 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1673 + min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
1674 +diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1675 +index 6d1007715ff7..ae382b3b5f7f 100644
1676 +--- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1677 ++++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1678 +@@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd)
1679 + alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
1680 + if (!alt)
1681 + return -ENODEV;
1682 ++
1683 ++ if (alt->desc.bNumEndpoints < 1)
1684 ++ return -ENODEV;
1685 ++
1686 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1687 +
1688 + /* If we don't have enough bandwidth use a lower framerate */
1689 +diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
1690 +index 934a90bd78c2..c579b100f066 100644
1691 +--- a/drivers/media/usb/gspca/xirlink_cit.c
1692 ++++ b/drivers/media/usb/gspca/xirlink_cit.c
1693 +@@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
1694 + return -EIO;
1695 + }
1696 +
1697 ++ if (alt->desc.bNumEndpoints < 1)
1698 ++ return -ENODEV;
1699 ++
1700 + return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1701 + }
1702 +
1703 +@@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
1704 +
1705 + static int sd_isoc_init(struct gspca_dev *gspca_dev)
1706 + {
1707 ++ struct usb_interface_cache *intfc;
1708 + struct usb_host_interface *alt;
1709 + int max_packet_size;
1710 +
1711 +@@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
1712 + break;
1713 + }
1714 +
1715 ++ intfc = gspca_dev->dev->actconfig->intf_cache[0];
1716 ++
1717 ++ if (intfc->num_altsetting < 2)
1718 ++ return -ENODEV;
1719 ++
1720 ++ alt = &intfc->altsetting[1];
1721 ++
1722 ++ if (alt->desc.bNumEndpoints < 1)
1723 ++ return -ENODEV;
1724 ++
1725 + /* Start isoc bandwidth "negotiation" at max isoc bandwidth */
1726 +- alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1727 + alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
1728 +
1729 + return 0;
1730 +@@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
1731 + break;
1732 + }
1733 +
1734 ++ /*
1735 ++ * Existence of altsetting and endpoint was verified in sd_isoc_init()
1736 ++ */
1737 + alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1738 + packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1739 + if (packet_size <= min_packet_size)
1740 +diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
1741 +index 5095c380b2c1..ee9c656d121f 100644
1742 +--- a/drivers/media/usb/usbtv/usbtv-core.c
1743 ++++ b/drivers/media/usb/usbtv/usbtv-core.c
1744 +@@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
1745 +
1746 + ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
1747 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1748 +- value, index, NULL, 0, 0);
1749 ++ value, index, NULL, 0, USB_CTRL_GET_TIMEOUT);
1750 + if (ret < 0)
1751 + return ret;
1752 + }
1753 +diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
1754 +index 3d9284a09ee5..b249f037900c 100644
1755 +--- a/drivers/media/usb/usbtv/usbtv-video.c
1756 ++++ b/drivers/media/usb/usbtv/usbtv-video.c
1757 +@@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
1758 + ret = usb_control_msg(usbtv->udev,
1759 + usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
1760 + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1761 +- 0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
1762 ++ 0, USBTV_BASE + 0x0244, (void *)data, 3,
1763 ++ USB_CTRL_GET_TIMEOUT);
1764 + if (ret < 0)
1765 + goto error;
1766 + }
1767 +@@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
1768 + ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
1769 + USBTV_CONTROL_REG,
1770 + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1771 +- 0, index, (void *)data, size, 0);
1772 ++ 0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT);
1773 +
1774 + error:
1775 + if (ret < 0)
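A timeout of 0 makes usb_control_msg() wait indefinitely, so a non-responsive device could hang the calling process; USB_CTRL_GET_TIMEOUT and USB_CTRL_SET_TIMEOUT are the USB core's 5000 ms defaults. A kernel-context sketch (not standalone; req, value, index, buf, and len are placeholders):

ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req,
		      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		      value, index, buf, len,
		      USB_CTRL_GET_TIMEOUT);	/* 5000 ms, never 0 */
if (ret < 0)
	return ret;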
1776 +diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
1777 +index 63d6b147b21e..41da73ce2e98 100644
1778 +--- a/drivers/media/v4l2-core/v4l2-device.c
1779 ++++ b/drivers/media/v4l2-core/v4l2-device.c
1780 +@@ -179,6 +179,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd)
1781 +
1782 + if (sd->internal_ops && sd->internal_ops->release)
1783 + sd->internal_ops->release(sd);
1784 ++ sd->devnode = NULL;
1785 + module_put(owner);
1786 + }
1787 +
1788 +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1789 +index abf8f5eb0a1c..26644b7ec13e 100644
1790 +--- a/drivers/mmc/core/core.c
1791 ++++ b/drivers/mmc/core/core.c
1792 +@@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1793 + * the erase operation does not exceed the max_busy_timeout, we should
1794 + * use R1B response. Or we need to prevent the host from doing hw busy
1795 + * detection, which is done by converting to a R1 response instead.
1796 ++ * Note, some hosts require R1B, which also means they are on their own
1797 ++ * when it comes to dealing with the busy timeout.
1798 + */
1799 +- if (card->host->max_busy_timeout &&
1800 ++ if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
1801 ++ card->host->max_busy_timeout &&
1802 + busy_timeout > card->host->max_busy_timeout) {
1803 + cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1804 + } else {
1805 +diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1806 +index c8804895595f..b7159e243323 100644
1807 +--- a/drivers/mmc/core/mmc.c
1808 ++++ b/drivers/mmc/core/mmc.c
1809 +@@ -1911,9 +1911,12 @@ static int mmc_sleep(struct mmc_host *host)
1810 + * If the max_busy_timeout of the host is specified, validate it against
1811 + * the sleep cmd timeout. A failure means we need to prevent the host
1812 + * from doing hw busy detection, which is done by converting to a R1
1813 +- * response instead of a R1B.
1814 ++ * response instead of an R1B. Note, some hosts require R1B, which also
1815 ++ * means they are on their own when it comes to dealing with the busy
1816 ++ * timeout.
1817 + */
1818 +- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1819 ++ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
1820 ++ (timeout_ms > host->max_busy_timeout)) {
1821 + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1822 + } else {
1823 + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1824 +diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
1825 +index 09113b9ad679..18a7afb2a5b2 100644
1826 +--- a/drivers/mmc/core/mmc_ops.c
1827 ++++ b/drivers/mmc/core/mmc_ops.c
1828 +@@ -538,10 +538,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
1829 + * If the cmd timeout and the max_busy_timeout of the host are both
1830 + * specified, let's validate them. A failure means we need to prevent
1831 + * the host from doing hw busy detection, which is done by converting
1832 +- * to a R1 response instead of a R1B.
1833 ++ * to an R1 response instead of an R1B. Note, some hosts require R1B,
1834 ++ * which also means they are on their own when it comes to dealing with
1835 ++ * the busy timeout.
1836 + */
1837 +- if (timeout_ms && host->max_busy_timeout &&
1838 +- (timeout_ms > host->max_busy_timeout))
1839 ++ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
1840 ++ host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
1841 + use_r1b_resp = false;
1842 +
1843 + cmd.opcode = MMC_SWITCH;
1844 +diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
1845 +index 083e7e053c95..d3135249b2e4 100644
1846 +--- a/drivers/mmc/host/sdhci-omap.c
1847 ++++ b/drivers/mmc/host/sdhci-omap.c
1848 +@@ -1134,6 +1134,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
1849 + host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
1850 + host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
1851 +
1852 ++ /* An R1B response is required to properly manage HW busy detection. */
1853 ++ mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
1854 ++
1855 + ret = sdhci_setup_host(host);
1856 + if (ret)
1857 + goto err_put_sync;
1858 +diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1859 +index 403ac44a7378..a25c3a4d3f6c 100644
1860 +--- a/drivers/mmc/host/sdhci-tegra.c
1861 ++++ b/drivers/mmc/host/sdhci-tegra.c
1862 +@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
1863 + if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1864 + host->mmc->caps |= MMC_CAP_1_8V_DDR;
1865 +
1866 ++ /* An R1B response is required to properly manage HW busy detection. */
1867 ++ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
1868 ++
1869 + tegra_sdhci_parse_dt(host);
1870 +
1871 + tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
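The three mmc core hunks in this release apply the same rule, gated by the new MMC_CAP_NEED_RSP_BUSY capability that the two sdhci probes above set. A condensed user-space sketch of the decision (the capability bit value below is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define MMC_CAP_NEED_RSP_BUSY (1u << 0)	/* illustrative bit value */

/* Decide whether a command keeps its R1B response or is downgraded to
 * R1 to bypass the host's hardware busy detection.  Hosts that set
 * MMC_CAP_NEED_RSP_BUSY must always get R1B and handle long busy
 * periods themselves.
 */
static bool use_r1b(unsigned int caps, unsigned int max_busy_ms,
		    unsigned int timeout_ms)
{
	if (caps & MMC_CAP_NEED_RSP_BUSY)
		return true;
	return !(max_busy_ms && timeout_ms > max_busy_ms);
}

int main(void)
{
	printf("%d\n", use_r1b(0, 1000, 5000));			     /* 0 */
	printf("%d\n", use_r1b(MMC_CAP_NEED_RSP_BUSY, 1000, 5000)); /* 1 */
	return 0;
}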
1872 +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
1873 +index df1c7989e13d..df3cd2589bcf 100644
1874 +--- a/drivers/net/Kconfig
1875 ++++ b/drivers/net/Kconfig
1876 +@@ -106,6 +106,7 @@ config NET_FC
1877 + config IFB
1878 + tristate "Intermediate Functional Block support"
1879 + depends on NET_CLS_ACT
1880 ++ select NET_REDIRECT
1881 + ---help---
1882 + This is an intermediate driver that allows sharing of
1883 + resources.
1884 +diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1885 +index 2f5c287eac95..a3664281a33f 100644
1886 +--- a/drivers/net/can/slcan.c
1887 ++++ b/drivers/net/can/slcan.c
1888 +@@ -625,7 +625,10 @@ err_free_chan:
1889 + tty->disc_data = NULL;
1890 + clear_bit(SLF_INUSE, &sl->flags);
1891 + slc_free_netdev(sl->dev);
1892 ++ /* do not call free_netdev before rtnl_unlock */
1893 ++ rtnl_unlock();
1894 + free_netdev(sl->dev);
1895 ++ return err;
1896 +
1897 + err_exit:
1898 + rtnl_unlock();
1899 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
1900 +index 1d8d36de4d20..e0e932f0aed1 100644
1901 +--- a/drivers/net/dsa/mt7530.c
1902 ++++ b/drivers/net/dsa/mt7530.c
1903 +@@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
1904 + static void
1905 + mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
1906 + {
1907 +- u32 mask = PMCR_TX_EN | PMCR_RX_EN;
1908 ++ u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
1909 +
1910 + if (enable)
1911 + mt7530_set(priv, MT7530_PMCR_P(port), mask);
1912 +@@ -1439,7 +1439,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
1913 + mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
1914 + PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
1915 + mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
1916 +- PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
1917 ++ PMCR_BACKPR_EN | PMCR_FORCE_MODE;
1918 +
1919 + /* Are we connected to external phy */
1920 + if (port == 5 && dsa_is_user_port(ds, 5))
1921 +diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1922 +index f0cddf250cfd..26325f7b3c1f 100644
1923 +--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1924 ++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1925 +@@ -3652,13 +3652,15 @@ err_disable_device:
1926 +
1927 + /*****************************************************************************/
1928 +
1929 +-/* ena_remove - Device Removal Routine
1930 ++/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
1931 + * @pdev: PCI device information struct
1932 ++ * @shutdown: Is it a shutdown operation? If false, means it is a removal
1933 + *
1934 +- * ena_remove is called by the PCI subsystem to alert the driver
1935 +- * that it should release a PCI device.
1936 ++ * __ena_shutoff is a helper routine that does the real work on shutdown and
1937 ++ * removal paths; the difference between those paths is whether the netdevice
1938 ++ * is detached or unregistered.
1939 + */
1940 +-static void ena_remove(struct pci_dev *pdev)
1941 ++static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1942 + {
1943 + struct ena_adapter *adapter = pci_get_drvdata(pdev);
1944 + struct ena_com_dev *ena_dev;
1945 +@@ -3677,13 +3679,17 @@ static void ena_remove(struct pci_dev *pdev)
1946 +
1947 + cancel_work_sync(&adapter->reset_task);
1948 +
1949 +- rtnl_lock();
1950 ++ rtnl_lock(); /* lock released inside the if-else block below */
1951 + ena_destroy_device(adapter, true);
1952 +- rtnl_unlock();
1953 +-
1954 +- unregister_netdev(netdev);
1955 +-
1956 +- free_netdev(netdev);
1957 ++ if (shutdown) {
1958 ++ netif_device_detach(netdev);
1959 ++ dev_close(netdev);
1960 ++ rtnl_unlock();
1961 ++ } else {
1962 ++ rtnl_unlock();
1963 ++ unregister_netdev(netdev);
1964 ++ free_netdev(netdev);
1965 ++ }
1966 +
1967 + ena_com_rss_destroy(ena_dev);
1968 +
1969 +@@ -3698,6 +3704,30 @@ static void ena_remove(struct pci_dev *pdev)
1970 + vfree(ena_dev);
1971 + }
1972 +
1973 ++/* ena_remove - Device Removal Routine
1974 ++ * @pdev: PCI device information struct
1975 ++ *
1976 ++ * ena_remove is called by the PCI subsystem to alert the driver
1977 ++ * that it should release a PCI device.
1978 ++ */
1979 ++
1980 ++static void ena_remove(struct pci_dev *pdev)
1981 ++{
1982 ++ __ena_shutoff(pdev, false);
1983 ++}
1984 ++
1985 ++/* ena_shutdown - Device Shutdown Routine
1986 ++ * @pdev: PCI device information struct
1987 ++ *
1988 ++ * ena_shutdown is called by the PCI subsystem to alert the driver that
1989 ++ * a shutdown/reboot (or kexec) is happening and device must be disabled.
1990 ++ */
1991 ++
1992 ++static void ena_shutdown(struct pci_dev *pdev)
1993 ++{
1994 ++ __ena_shutoff(pdev, true);
1995 ++}
1996 ++
1997 + #ifdef CONFIG_PM
1998 + /* ena_suspend - PM suspend callback
1999 + * @pdev: PCI device information struct
2000 +@@ -3747,6 +3777,7 @@ static struct pci_driver ena_pci_driver = {
2001 + .id_table = ena_pci_tbl,
2002 + .probe = ena_probe,
2003 + .remove = ena_remove,
2004 ++ .shutdown = ena_shutdown,
2005 + #ifdef CONFIG_PM
2006 + .suspend = ena_suspend,
2007 + .resume = ena_resume,
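Factoring the teardown into __ena_shutoff() keeps .remove and .shutdown thin: the shutdown path (reboot or kexec) only quiesces and detaches the device, while the removal path fully unregisters and frees it, which may sleep waiting on netdev references. A condensed kernel-context sketch of the pattern (simplified; the real helper also tears down DMA, IRQs, and firmware state):

static void __drv_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	/* common teardown: stop queues, free IRQs, reset the device */
	if (shutdown) {
		netif_device_detach(netdev);	/* keep the netdev object */
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		unregister_netdev(netdev);	/* may sleep on refcounts */
		free_netdev(netdev);
	}
}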
2008 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2009 +index 57c88e157f86..6862594b49ab 100644
2010 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2011 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2012 +@@ -6863,12 +6863,12 @@ skip_rdma:
2013 + }
2014 + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
2015 + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
2016 +- if (rc)
2017 ++ if (rc) {
2018 + netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
2019 + rc);
2020 +- else
2021 +- ctx->flags |= BNXT_CTX_FLAG_INITED;
2022 +-
2023 ++ return rc;
2024 ++ }
2025 ++ ctx->flags |= BNXT_CTX_FLAG_INITED;
2026 + return 0;
2027 + }
2028 +
2029 +@@ -7387,14 +7387,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
2030 + pri2cos = &resp2->pri0_cos_queue_id;
2031 + for (i = 0; i < 8; i++) {
2032 + u8 queue_id = pri2cos[i];
2033 ++ u8 queue_idx;
2034 +
2035 ++ /* Per port queue IDs start from 0, 10, 20, etc */
2036 ++ queue_idx = queue_id % 10;
2037 ++ if (queue_idx > BNXT_MAX_QUEUE) {
2038 ++ bp->pri2cos_valid = false;
2039 ++ goto qstats_done;
2040 ++ }
2041 + for (j = 0; j < bp->max_q; j++) {
2042 + if (bp->q_ids[j] == queue_id)
2043 +- bp->pri2cos[i] = j;
2044 ++ bp->pri2cos_idx[i] = queue_idx;
2045 + }
2046 + }
2047 + bp->pri2cos_valid = 1;
2048 + }
2049 ++qstats_done:
2050 + mutex_unlock(&bp->hwrm_cmd_lock);
2051 + return rc;
2052 + }
2053 +@@ -11595,6 +11603,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
2054 + bp->rx_nr_rings++;
2055 + bp->cp_nr_rings++;
2056 + }
2057 ++ if (rc) {
2058 ++ bp->tx_nr_rings = 0;
2059 ++ bp->rx_nr_rings = 0;
2060 ++ }
2061 + return rc;
2062 + }
2063 +
2064 +@@ -11887,12 +11899,12 @@ init_err_cleanup:
2065 + init_err_pci_clean:
2066 + bnxt_free_hwrm_short_cmd_req(bp);
2067 + bnxt_free_hwrm_resources(bp);
2068 +- bnxt_free_ctx_mem(bp);
2069 +- kfree(bp->ctx);
2070 +- bp->ctx = NULL;
2071 + kfree(bp->fw_health);
2072 + bp->fw_health = NULL;
2073 + bnxt_cleanup_pci(bp);
2074 ++ bnxt_free_ctx_mem(bp);
2075 ++ kfree(bp->ctx);
2076 ++ bp->ctx = NULL;
2077 +
2078 + init_err_free:
2079 + free_netdev(dev);
2080 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2081 +index 2e6ad53fdc75..cda7ba31095a 100644
2082 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2083 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2084 +@@ -1688,7 +1688,7 @@ struct bnxt {
2085 + u16 fw_rx_stats_ext_size;
2086 + u16 fw_tx_stats_ext_size;
2087 + u16 hw_ring_stats_size;
2088 +- u8 pri2cos[8];
2089 ++ u8 pri2cos_idx[8];
2090 + u8 pri2cos_valid;
2091 +
2092 + u16 hwrm_max_req_len;
2093 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2094 +index fb6f30d0d1d0..b1511bcffb1b 100644
2095 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2096 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2097 +@@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
2098 + {
2099 + struct bnxt *bp = netdev_priv(dev);
2100 + struct ieee_ets *my_ets = bp->ieee_ets;
2101 ++ int rc;
2102 +
2103 + ets->ets_cap = bp->max_tc;
2104 +
2105 + if (!my_ets) {
2106 +- int rc;
2107 +-
2108 + if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
2109 + return 0;
2110 +
2111 + my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
2112 + if (!my_ets)
2113 +- return 0;
2114 ++ return -ENOMEM;
2115 + rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
2116 + if (rc)
2117 +- return 0;
2118 ++ goto error;
2119 + rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
2120 + if (rc)
2121 +- return 0;
2122 ++ goto error;
2123 ++
2124 ++ /* cache result */
2125 ++ bp->ieee_ets = my_ets;
2126 + }
2127 +
2128 + ets->cbs = my_ets->cbs;
2129 +@@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
2130 + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
2131 + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
2132 + return 0;
2133 ++error:
2134 ++ kfree(my_ets);
2135 ++ return rc;
2136 + }
2137 +
2138 + static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
2139 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2140 +index cfa647d5b44d..fb1ab58da9fa 100644
2141 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2142 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2143 +@@ -589,25 +589,25 @@ skip_ring_stats:
2144 + if (bp->pri2cos_valid) {
2145 + for (i = 0; i < 8; i++, j++) {
2146 + long n = bnxt_rx_bytes_pri_arr[i].base_off +
2147 +- bp->pri2cos[i];
2148 ++ bp->pri2cos_idx[i];
2149 +
2150 + buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
2151 + }
2152 + for (i = 0; i < 8; i++, j++) {
2153 + long n = bnxt_rx_pkts_pri_arr[i].base_off +
2154 +- bp->pri2cos[i];
2155 ++ bp->pri2cos_idx[i];
2156 +
2157 + buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
2158 + }
2159 + for (i = 0; i < 8; i++, j++) {
2160 + long n = bnxt_tx_bytes_pri_arr[i].base_off +
2161 +- bp->pri2cos[i];
2162 ++ bp->pri2cos_idx[i];
2163 +
2164 + buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
2165 + }
2166 + for (i = 0; i < 8; i++, j++) {
2167 + long n = bnxt_tx_pkts_pri_arr[i].base_off +
2168 +- bp->pri2cos[i];
2169 ++ bp->pri2cos_idx[i];
2170 +
2171 + buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
2172 + }
2173 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
2174 +index 928bfea5457b..3a45ac8f0e01 100644
2175 +--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
2176 ++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
2177 +@@ -1324,8 +1324,9 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
2178 + int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
2179 + int maxreclaim)
2180 + {
2181 ++ unsigned int reclaimed, hw_cidx;
2182 + struct sge_txq *q = &eq->q;
2183 +- unsigned int reclaimed;
2184 ++ int hw_in_use;
2185 +
2186 + if (!q->in_use || !__netif_tx_trylock(eq->txq))
2187 + return 0;
2188 +@@ -1333,12 +1334,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
2189 + /* Reclaim pending completed TX Descriptors. */
2190 + reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
2191 +
2192 ++ hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
2193 ++ hw_in_use = q->pidx - hw_cidx;
2194 ++ if (hw_in_use < 0)
2195 ++ hw_in_use += q->size;
2196 ++
2197 + /* If the TX Queue is currently stopped and there's now more than half
2198 + * the queue available, restart it. Otherwise bail out since the rest
2199 + * of what we want do here is with the possibility of shipping any
2200 + * currently buffered Coalesced TX Work Request.
2201 + */
2202 +- if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
2203 ++ if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
2204 + netif_tx_wake_queue(eq->txq);
2205 + eq->q.restarts++;
2206 + }
2207 +@@ -1469,16 +1475,7 @@ out_free: dev_kfree_skb_any(skb);
2208 + * has opened up.
2209 + */
2210 + eth_txq_stop(q);
2211 +-
2212 +- /* If we're using the SGE Doorbell Queue Timer facility, we
2213 +- * don't need to ask the Firmware to send us Egress Queue CIDX
2214 +- * Updates: the Hardware will do this automatically. And
2215 +- * since we send the Ingress Queue CIDX Updates to the
2216 +- * corresponding Ethernet Response Queue, we'll get them very
2217 +- * quickly.
2218 +- */
2219 +- if (!q->dbqt)
2220 +- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2221 ++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2222 + }
2223 +
2224 + wr = (void *)&q->q.desc[q->q.pidx];
2225 +@@ -1792,16 +1789,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
2226 + * has opened up.
2227 + */
2228 + eth_txq_stop(txq);
2229 +-
2230 +- /* If we're using the SGE Doorbell Queue Timer facility, we
2231 +- * don't need to ask the Firmware to send us Egress Queue CIDX
2232 +- * Updates: the Hardware will do this automatically. And
2233 +- * since we send the Ingress Queue CIDX Updates to the
2234 +- * corresponding Ethernet Response Queue, we'll get them very
2235 +- * quickly.
2236 +- */
2237 +- if (!txq->dbqt)
2238 +- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2239 ++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2240 + }
2241 +
2242 + /* Start filling in our Work Request. Note that we do _not_ handle
2243 +@@ -2924,26 +2912,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
2244 + }
2245 +
2246 + txq = &s->ethtxq[pi->first_qset + rspq->idx];
2247 +-
2248 +- /* We've got the Hardware Consumer Index Update in the Egress Update
2249 +- * message. If we're using the SGE Doorbell Queue Timer mechanism,
2250 +- * these Egress Update messages will be our sole CIDX Updates we get
2251 +- * since we don't want to chew up PCIe bandwidth for both Ingress
2252 +- * Messages and Status Page writes. However, The code which manages
2253 +- * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
2254 +- * stored in the Status Page at the end of the TX Queue. It's easiest
2255 +- * to simply copy the CIDX Update value from the Egress Update message
2256 +- * to the Status Page. Also note that no Endian issues need to be
2257 +- * considered here since both are Big Endian and we're just copying
2258 +- * bytes consistently ...
2259 +- */
2260 +- if (txq->dbqt) {
2261 +- struct cpl_sge_egr_update *egr;
2262 +-
2263 +- egr = (struct cpl_sge_egr_update *)rsp;
2264 +- WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
2265 +- }
2266 +-
2267 + t4_sge_eth_txq_egress_update(adapter, txq, -1);
2268 + }
2269 +
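The cxgb4 fix above computes queue occupancy from the hardware consumer index instead of trusting txq_avail(), which tracks the software index and can lag. Ring occupancy with wraparound is a subtraction plus a fixup; a self-contained sketch:

#include <stdio.h>

/* Occupancy of a circular ring given producer and (hardware) consumer
 * indices; the subtraction goes negative when cidx has wrapped past
 * pidx, so fold it back into [0, size).
 */
static int ring_in_use(int pidx, int hw_cidx, int size)
{
	int in_use = pidx - hw_cidx;

	if (in_use < 0)
		in_use += size;
	return in_use;
}

int main(void)
{
	printf("%d\n", ring_in_use(10, 4, 1024));   /* 6 */
	printf("%d\n", ring_in_use(3, 1020, 1024)); /* 7: wrapped */
	return 0;
}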
2270 +diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2271 +index e130233b5085..00c4beb760c3 100644
2272 +--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2273 ++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2274 +@@ -2770,9 +2770,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
2275 + headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
2276 + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
2277 +
2278 +- return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
2279 +- DPAA_FD_DATA_ALIGNMENT) :
2280 +- headroom;
2281 ++ return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
2282 + }
2283 +
2284 + static int dpaa_eth_probe(struct platform_device *pdev)
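The dpaa ternary removed above was only needed if DPAA_FD_DATA_ALIGNMENT could be zero; with a nonzero alignment the plain ALIGN() is equivalent. ALIGN() rounds a value up to the next multiple of a power of two; a user-space equivalent:

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro.
 */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("%u\n", ALIGN(100u, 16u)); /* 112 */
	printf("%u\n", ALIGN(112u, 16u)); /* 112: already aligned */
	return 0;
}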
2285 +diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
2286 +index 0139cb9042ec..34150182cc35 100644
2287 +--- a/drivers/net/ethernet/freescale/fman/Kconfig
2288 ++++ b/drivers/net/ethernet/freescale/fman/Kconfig
2289 +@@ -8,3 +8,31 @@ config FSL_FMAN
2290 + help
2291 + Freescale Data-Path Acceleration Architecture Frame Manager
2292 + (FMan) support
2293 ++
2294 ++config DPAA_ERRATUM_A050385
2295 ++ bool
2296 ++ depends on ARM64 && FSL_DPAA
2297 ++ default y
2298 ++ help
2299 ++ DPAA FMan erratum A050385 software workaround implementation:
2300 ++ align buffers, data start, SG fragment length to avoid FMan DMA
2301 ++ splits.
2302 ++ FMAN DMA reads or writes under heavy traffic load may cause an FMAN
2303 ++ internal resource leak, thus stopping further packet processing.
2304 ++ The FMAN internal queue can overflow when FMAN splits single
2305 ++ read or write transactions into multiple smaller transactions
2306 ++ such that more than 17 AXI transactions are in flight from FMAN
2307 ++ to interconnect. When the FMAN internal queue overflows, it can
2308 ++ stall further packet processing. The issue can occur with any
2309 ++ one of the following three conditions:
2310 ++ 1. FMAN AXI transaction crosses 4K address boundary (Errata
2311 ++ A010022)
2312 ++ 2. FMAN DMA address for an AXI transaction is not 16 byte
2313 ++ aligned, i.e. the last 4 bits of an address are non-zero
2314 ++ 3. Scatter Gather (SG) frames have more than one SG buffer in
2315 ++ the SG list, and any one of the buffers, except the last
2316 ++ buffer in the SG list, has a data size that is not a multiple
2317 ++ of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
2318 ++ With any one of the above three conditions present, there is a
2319 ++ likelihood of stalled FMAN packet processing, especially under
2320 ++ stress with multiple ports injecting line-rate traffic.
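The first two trigger conditions in the help text above are simple address arithmetic; a user-space sketch of both checks (assumes len > 0):

#include <stdint.h>
#include <stdio.h>

/* Condition 1: the transaction [addr, addr + len) crosses a 4 KiB
 * boundary, i.e. its first and last bytes live in different 4K pages.
 */
static int crosses_4k(uint64_t addr, uint64_t len)
{
	return (addr >> 12) != ((addr + len - 1) >> 12);
}

/* Condition 2: the DMA address is not 16-byte aligned. */
static int misaligned_16(uint64_t addr)
{
	return (addr & 0xf) != 0;
}

int main(void)
{
	printf("%d %d\n", crosses_4k(0xff0, 0x20), misaligned_16(0xff0)); /* 1 0 */
	printf("%d %d\n", crosses_4k(0x100, 0x20), misaligned_16(0x104)); /* 0 1 */
	return 0;
}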
2321 +diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
2322 +index 210749bf1eac..4c2fa13a7dd7 100644
2323 +--- a/drivers/net/ethernet/freescale/fman/fman.c
2324 ++++ b/drivers/net/ethernet/freescale/fman/fman.c
2325 +@@ -1,5 +1,6 @@
2326 + /*
2327 + * Copyright 2008-2015 Freescale Semiconductor Inc.
2328 ++ * Copyright 2020 NXP
2329 + *
2330 + * Redistribution and use in source and binary forms, with or without
2331 + * modification, are permitted provided that the following conditions are met:
2332 +@@ -566,6 +567,10 @@ struct fman_cfg {
2333 + u32 qmi_def_tnums_thresh;
2334 + };
2335 +
2336 ++#ifdef CONFIG_DPAA_ERRATUM_A050385
2337 ++static bool fman_has_err_a050385;
2338 ++#endif
2339 ++
2340 + static irqreturn_t fman_exceptions(struct fman *fman,
2341 + enum fman_exceptions exception)
2342 + {
2343 +@@ -2514,6 +2519,14 @@ struct fman *fman_bind(struct device *fm_dev)
2344 + }
2345 + EXPORT_SYMBOL(fman_bind);
2346 +
2347 ++#ifdef CONFIG_DPAA_ERRATUM_A050385
2348 ++bool fman_has_errata_a050385(void)
2349 ++{
2350 ++ return fman_has_err_a050385;
2351 ++}
2352 ++EXPORT_SYMBOL(fman_has_errata_a050385);
2353 ++#endif
2354 ++
2355 + static irqreturn_t fman_err_irq(int irq, void *handle)
2356 + {
2357 + struct fman *fman = (struct fman *)handle;
2358 +@@ -2841,6 +2854,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
2359 + goto fman_free;
2360 + }
2361 +
2362 ++#ifdef CONFIG_DPAA_ERRATUM_A050385
2363 ++ fman_has_err_a050385 =
2364 ++ of_property_read_bool(fm_node, "fsl,erratum-a050385");
2365 ++#endif
2366 ++
2367 + return fman;
2368 +
2369 + fman_node_put:
2370 +diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
2371 +index 935c317fa696..f2ede1360f03 100644
2372 +--- a/drivers/net/ethernet/freescale/fman/fman.h
2373 ++++ b/drivers/net/ethernet/freescale/fman/fman.h
2374 +@@ -1,5 +1,6 @@
2375 + /*
2376 + * Copyright 2008-2015 Freescale Semiconductor Inc.
2377 ++ * Copyright 2020 NXP
2378 + *
2379 + * Redistribution and use in source and binary forms, with or without
2380 + * modification, are permitted provided that the following conditions are met:
2381 +@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
2382 +
2383 + int fman_get_rx_extra_headroom(void);
2384 +
2385 ++#ifdef CONFIG_DPAA_ERRATUM_A050385
2386 ++bool fman_has_errata_a050385(void);
2387 ++#endif
2388 ++
2389 + struct fman *fman_bind(struct device *dev);
2390 +
2391 + #endif /* __FM_H */
2392 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2393 +index 0c8d2269bc46..403e0f089f2a 100644
2394 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2395 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2396 +@@ -1596,7 +1596,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
2397 + netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
2398 +
2399 + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
2400 +- kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
2401 ++ kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
2402 + }
2403 +
2404 + static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
2405 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2406 +index 6b1a81df1465..a10ae28ebc8a 100644
2407 +--- a/drivers/net/ethernet/marvell/mvneta.c
2408 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2409 +@@ -2804,11 +2804,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
2410 + /* For the case where the last mvneta_poll did not process all
2411 + * RX packets
2412 + */
2413 +- rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2414 +-
2415 + cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2416 + port->cause_rx_tx;
2417 +
2418 ++ rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2419 + if (rx_queue) {
2420 + rx_queue = rx_queue - 1;
2421 + if (pp->bm_priv)
2422 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2423 +index 9c8427698238..55ceabf077b2 100644
2424 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
2425 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2426 +@@ -371,6 +371,7 @@ enum {
2427 +
2428 + struct mlx5e_sq_wqe_info {
2429 + u8 opcode;
2430 ++ u8 num_wqebbs;
2431 +
2432 + /* Auxiliary data for different opcodes. */
2433 + union {
2434 +@@ -1058,6 +1059,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
2435 + void mlx5e_activate_rq(struct mlx5e_rq *rq);
2436 + void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
2437 + void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
2438 ++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
2439 + void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
2440 + void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
2441 +
2442 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2443 +index d3693fa547ac..e54f70d9af22 100644
2444 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2445 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2446 +@@ -10,8 +10,7 @@
2447 +
2448 + static inline bool cqe_syndrome_needs_recover(u8 syndrome)
2449 + {
2450 +- return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
2451 +- syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
2452 ++ return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
2453 + syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
2454 + syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
2455 + }
2456 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2457 +index b860569d4247..9fa4b98001d5 100644
2458 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2459 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2460 +@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
2461 + goto out;
2462 +
2463 + mlx5e_reset_icosq_cc_pc(icosq);
2464 +- mlx5e_free_rx_descs(rq);
2465 ++ mlx5e_free_rx_in_progress_descs(rq);
2466 + clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
2467 + mlx5e_activate_icosq(icosq);
2468 + mlx5e_activate_rq(rq);
2469 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2470 +index a226277b0980..f07b1399744e 100644
2471 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2472 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2473 +@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
2474 +
2475 + static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
2476 + {
2477 +- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2478 ++ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
2479 + mlx5_wq_ll_reset(&rq->mpwqe.wq);
2480 +- else
2481 ++ rq->mpwqe.actual_wq_head = 0;
2482 ++ } else {
2483 + mlx5_wq_cyc_reset(&rq->wqe.wq);
2484 ++ }
2485 + }
2486 +
2487 + /* SW parser related functions */
2488 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2489 +index e5e91cbcbc31..ee7c753e9ea0 100644
2490 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2491 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2492 +@@ -824,6 +824,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
2493 + return -ETIMEDOUT;
2494 + }
2495 +
2496 ++void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
2497 ++{
2498 ++ struct mlx5_wq_ll *wq;
2499 ++ u16 head;
2500 ++ int i;
2501 ++
2502 ++ if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2503 ++ return;
2504 ++
2505 ++ wq = &rq->mpwqe.wq;
2506 ++ head = wq->head;
2507 ++
2508 ++ /* Outstanding UMR WQEs (in progress) start at wq->head */
2509 ++ for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
2510 ++ rq->dealloc_wqe(rq, head);
2511 ++ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
2512 ++ }
2513 ++
2514 ++ rq->mpwqe.actual_wq_head = wq->head;
2515 ++ rq->mpwqe.umr_in_progress = 0;
2516 ++ rq->mpwqe.umr_completed = 0;
2517 ++}
2518 ++
2519 + void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
2520 + {
2521 + __be16 wqe_ix_be;
2522 +@@ -831,14 +854,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
2523 +
2524 + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
2525 + struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
2526 +- u16 head = wq->head;
2527 +- int i;
2528 +
2529 +- /* Outstanding UMR WQEs (in progress) start at wq->head */
2530 +- for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
2531 +- rq->dealloc_wqe(rq, head);
2532 +- head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
2533 +- }
2534 ++ mlx5e_free_rx_in_progress_descs(rq);
2535 +
2536 + while (!mlx5_wq_ll_is_empty(wq)) {
2537 + struct mlx5e_rx_wqe_ll *wqe;
2538 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2539 +index 82cffb3a9964..1d295a7afc8c 100644
2540 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2541 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2542 +@@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
2543 + /* fill sq frag edge with nops to avoid wqe wrapping two pages */
2544 + for (; wi < edge_wi; wi++) {
2545 + wi->opcode = MLX5_OPCODE_NOP;
2546 ++ wi->num_wqebbs = 1;
2547 + mlx5e_post_nop(wq, sq->sqn, &sq->pc);
2548 + }
2549 + }
2550 +@@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
2551 + umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
2552 +
2553 + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
2554 ++ sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
2555 + sq->db.ico_wqe[pi].umr.rq = rq;
2556 + sq->pc += MLX5E_UMR_WQEBBS;
2557 +
2558 +@@ -628,17 +630,14 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
2559 +
2560 + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
2561 + wi = &sq->db.ico_wqe[ci];
2562 ++ sqcc += wi->num_wqebbs;
2563 +
2564 +- if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
2565 +- sqcc += MLX5E_UMR_WQEBBS;
2566 ++ if (likely(wi->opcode == MLX5_OPCODE_UMR))
2567 + wi->umr.rq->mpwqe.umr_completed++;
2568 +- } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
2569 +- sqcc++;
2570 +- } else {
2571 ++ else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
2572 + netdev_WARN_ONCE(cq->channel->netdev,
2573 + "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
2574 + wi->opcode);
2575 +- }
2576 +
2577 + } while (!last_wqe);
2578 +
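Recording num_wqebbs in each posted WQE's info structure, as the mlx5 hunks above do, lets the completion loop advance the consumer counter uniformly instead of switching on the opcode. A self-contained sketch of the accounting (sizes and opcodes are illustrative):

#include <stdio.h>

struct wqe_info { int opcode; int num_wqebbs; };

enum { OP_NOP = 0, OP_UMR = 1 };
#define UMR_WQEBBS 4

int main(void)
{
	/* Filled in at post time: each entry records its own size in
	 * basic blocks, so the completion path just sums them up.
	 */
	struct wqe_info db[] = {
		{ OP_UMR, UMR_WQEBBS },
		{ OP_NOP, 1 },
		{ OP_UMR, UMR_WQEBBS },
	};
	int sqcc = 0;
	size_t i;

	for (i = 0; i < sizeof(db) / sizeof(db[0]); i++)
		sqcc += db[i].num_wqebbs;	/* was: per-opcode if/else */
	printf("consumer advanced by %d wqebbs\n", sqcc);	/* 9 */
	return 0;
}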
2579 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2580 +index 257a7c9f7a14..800d34ed8a96 100644
2581 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2582 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2583 +@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
2584 + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
2585 +
2586 + sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
2587 ++ sq->db.ico_wqe[pi].num_wqebbs = 1;
2588 + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
2589 + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
2590 + }
2591 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2592 +index 004c56c2fc0c..b2dfa2b5366f 100644
2593 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2594 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2595 +@@ -930,7 +930,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
2596 +
2597 + action->rewrite.data = (void *)ops;
2598 + action->rewrite.num_of_actions = i;
2599 +- action->rewrite.chunk->byte_size = i * sizeof(*ops);
2600 +
2601 + ret = mlx5dr_send_postsend_action(dmn, action);
2602 + if (ret) {
2603 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2604 +index c7f10d4f8f8d..095ec7b1399d 100644
2605 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2606 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2607 +@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
2608 + int ret;
2609 +
2610 + send_info.write.addr = (uintptr_t)action->rewrite.data;
2611 +- send_info.write.length = action->rewrite.chunk->byte_size;
2612 ++ send_info.write.length = action->rewrite.num_of_actions *
2613 ++ DR_MODIFY_ACTION_SIZE;
2614 + send_info.write.lkey = 0;
2615 + send_info.remote_addr = action->rewrite.chunk->mr_addr;
2616 + send_info.rkey = action->rewrite.chunk->rkey;
2617 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2618 +index 615455a21567..f3d1f9411d10 100644
2619 +--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
2620 ++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2621 +@@ -1318,36 +1318,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
2622 + mbox->mapaddr);
2623 + }
2624 +
2625 +-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2626 +- const struct pci_device_id *id)
2627 ++static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
2628 ++ const struct pci_device_id *id,
2629 ++ u32 *p_sys_status)
2630 + {
2631 + unsigned long end;
2632 +- char mrsr_pl[MLXSW_REG_MRSR_LEN];
2633 +- int err;
2634 ++ u32 val;
2635 +
2636 +- mlxsw_reg_mrsr_pack(mrsr_pl);
2637 +- err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
2638 +- if (err)
2639 +- return err;
2640 + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
2641 + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
2642 + return 0;
2643 + }
2644 +
2645 +- /* We must wait for the HW to become responsive once again. */
2646 ++ /* We must wait for the HW to become responsive. */
2647 + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
2648 +
2649 + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
2650 + do {
2651 +- u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2652 +-
2653 ++ val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2654 + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
2655 + return 0;
2656 + cond_resched();
2657 + } while (time_before(jiffies, end));
2658 ++
2659 ++ *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
2660 ++
2661 + return -EBUSY;
2662 + }
2663 +
2664 ++static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2665 ++ const struct pci_device_id *id)
2666 ++{
2667 ++ struct pci_dev *pdev = mlxsw_pci->pdev;
2668 ++ char mrsr_pl[MLXSW_REG_MRSR_LEN];
2669 ++ u32 sys_status;
2670 ++ int err;
2671 ++
2672 ++ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
2673 ++ if (err) {
2674 ++ dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
2675 ++ sys_status);
2676 ++ return err;
2677 ++ }
2678 ++
2679 ++ mlxsw_reg_mrsr_pack(mrsr_pl);
2680 ++ err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
2681 ++ if (err)
2682 ++ return err;
2683 ++
2684 ++ err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
2685 ++ if (err) {
2686 ++ dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
2687 ++ sys_status);
2688 ++ return err;
2689 ++ }
2690 ++
2691 ++ return 0;
2692 ++}
2693 ++
2694 + static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
2695 + {
2696 + int err;
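The mlxsw reset path now polls the FW_READY register both before and after issuing the software reset, and reports the last observed status when the wait times out. The loop is the usual poll-until-deadline idiom; a user-space analogue (the status source and magic value are made up):

#include <stdio.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Poll read_status() until it reports ready_magic or timeout_ms
 * elapses; on failure hand the last observed value back so the
 * caller can log it, as the driver does with -EBUSY.
 */
static int wait_ready(unsigned int (*read_status)(void),
		      unsigned int ready_magic, int timeout_ms,
		      unsigned int *last)
{
	long long end = now_ms() + timeout_ms;

	do {
		*last = read_status();
		if (*last == ready_magic)
			return 0;
	} while (now_ms() < end);
	return -1;
}

static unsigned int fake_status(void) { return 0x5e; }

int main(void)
{
	unsigned int last;

	if (wait_ready(fake_status, 0x5e, 100, &last) == 0)
		printf("ready\n");
	return 0;
}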
2697 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2698 +index 54275624718b..336e5ecc68f8 100644
2699 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2700 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2701 +@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
2702 + return 0;
2703 +
2704 + err_erif_unresolve:
2705 +- list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
2706 +- vif_node)
2707 ++ list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
2708 ++ vif_node)
2709 + mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
2710 + err_irif_unresolve:
2711 +- list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
2712 +- vif_node)
2713 ++ list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
2714 ++ vif_node)
2715 + mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
2716 + mr_vif->rif = NULL;
2717 + return err;
2718 +diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
2719 +index a2cef6a004e7..5ebfc3e66331 100644
2720 +--- a/drivers/net/ethernet/realtek/r8169_main.c
2721 ++++ b/drivers/net/ethernet/realtek/r8169_main.c
2722 +@@ -6812,7 +6812,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
2723 + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
2724 + rtl_lock_config_regs(tp);
2725 + /* fall through */
2726 +- case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
2727 ++ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
2728 + flags = PCI_IRQ_LEGACY;
2729 + break;
2730 + default:
2731 +@@ -6903,6 +6903,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
2732 + if (!tp->phydev) {
2733 + mdiobus_unregister(new_bus);
2734 + return -ENODEV;
2735 ++ } else if (!tp->phydev->drv) {
2736 ++ /* Most chip versions fail with the genphy driver.
2737 ++ * Therefore ensure that the dedicated PHY driver is loaded.
2738 ++ */
2739 ++ dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
2740 ++ mdiobus_unregister(new_bus);
2741 ++ return -EUNATCH;
2742 + }
2743 +
2744 + /* PHY will be woken up in rtl_open() */
2745 +@@ -7064,15 +7071,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2746 + int chipset, region;
2747 + int jumbo_max, rc;
2748 +
2749 +- /* Some tools for creating an initramfs don't consider softdeps, then
2750 +- * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
2751 +- * PHY driver is used that doesn't work with most chip versions.
2752 +- */
2753 +- if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
2754 +- dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
2755 +- return -ENOENT;
2756 +- }
2757 +-
2758 + dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
2759 + if (!dev)
2760 + return -ENOMEM;
2761 +diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2762 +index c56fcbb37066..38767d797914 100644
2763 +--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2764 ++++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2765 +@@ -2279,7 +2279,7 @@ static int __init sxgbe_cmdline_opt(char *str)
2766 + if (!str || !*str)
2767 + return -EINVAL;
2768 + while ((opt = strsep(&str, ",")) != NULL) {
2769 +- if (!strncmp(opt, "eee_timer:", 6)) {
2770 ++ if (!strncmp(opt, "eee_timer:", 10)) {
2771 + if (kstrtoint(opt + 10, 0, &eee_timer))
2772 + goto err;
2773 + }
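The sxgbe bug fixed above is a classic strncmp() prefix-length mismatch: comparing only 6 characters of the 10-character prefix "eee_timer:" accepts any option starting with "eee_ti". A self-contained demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *opt = "eee_timexyz:99";	/* not a valid option */

	/* Old check: only the first 6 chars ("eee_ti") are compared,
	 * so this bogus option is accepted.
	 */
	printf("len 6:  %s\n", strncmp(opt, "eee_timer:", 6) ? "reject" : "match");

	/* Fixed check: compare the full 10-character prefix. */
	printf("len 10: %s\n", strncmp(opt, "eee_timer:", 10) ? "reject" : "match");
	return 0;
}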
2774 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2775 +index e2e469c37a4d..9f9aaa47a8dc 100644
2776 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2777 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2778 +@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
2779 +
2780 + ret = rk_gmac_clk_init(plat_dat);
2781 + if (ret)
2782 +- return ret;
2783 ++ goto err_remove_config_dt;
2784 +
2785 + ret = rk_gmac_powerup(plat_dat->bsp_priv);
2786 + if (ret)
2787 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2788 +index 5c6b7fc04ea6..730ab57201bd 100644
2789 +--- a/drivers/net/geneve.c
2790 ++++ b/drivers/net/geneve.c
2791 +@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
2792 + if (!net_eq(dev_net(geneve->dev), net))
2793 + unregister_netdevice_queue(geneve->dev, head);
2794 + }
2795 +-
2796 +- WARN_ON_ONCE(!list_empty(&gn->sock_list));
2797 + }
2798 +
2799 + static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
2800 +@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
2801 + /* unregister the devices gathered above */
2802 + unregister_netdevice_many(&list);
2803 + rtnl_unlock();
2804 ++
2805 ++ list_for_each_entry(net, net_list, exit_list) {
2806 ++ const struct geneve_net *gn = net_generic(net, geneve_net_id);
2807 ++
2808 ++ WARN_ON_ONCE(!list_empty(&gn->sock_list));
2809 ++ }
2810 + }
2811 +
2812 + static struct pernet_operations geneve_net_ops = {
2813 +diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
2814 +index 242b9b0943f8..7fe306e76281 100644
2815 +--- a/drivers/net/ifb.c
2816 ++++ b/drivers/net/ifb.c
2817 +@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
2818 + }
2819 +
2820 + while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
2821 +- skb->tc_redirected = 0;
2822 ++ skb->redirected = 0;
2823 + skb->tc_skip_classify = 1;
2824 +
2825 + u64_stats_update_begin(&txp->tsync);
2826 +@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
2827 + rcu_read_unlock();
2828 + skb->skb_iif = txp->dev->ifindex;
2829 +
2830 +- if (!skb->tc_from_ingress) {
2831 ++ if (!skb->from_ingress) {
2832 + dev_queue_xmit(skb);
2833 + } else {
2834 + skb_pull_rcsum(skb, skb->mac_len);
2835 +@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
2836 + txp->rx_bytes += skb->len;
2837 + u64_stats_update_end(&txp->rsync);
2838 +
2839 +- if (!skb->tc_redirected || !skb->skb_iif) {
2840 ++ if (!skb->redirected || !skb->skb_iif) {
2841 + dev_kfree_skb(skb);
2842 + dev->stats.rx_dropped++;
2843 + return NETDEV_TX_OK;
2844 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
2845 +index 6497a5c45220..32c627702ac5 100644
2846 +--- a/drivers/net/macsec.c
2847 ++++ b/drivers/net/macsec.c
2848 +@@ -16,6 +16,7 @@
2849 + #include <net/genetlink.h>
2850 + #include <net/sock.h>
2851 + #include <net/gro_cells.h>
2852 ++#include <linux/if_arp.h>
2853 +
2854 + #include <uapi/linux/if_macsec.h>
2855 +
2856 +@@ -3236,6 +3237,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
2857 + real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
2858 + if (!real_dev)
2859 + return -ENODEV;
2860 ++ if (real_dev->type != ARPHRD_ETHER)
2861 ++ return -EINVAL;
2862 +
2863 + dev->priv_flags |= IFF_MACSEC;
2864 +
2865 +diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
2866 +index 52e80434e45e..31a559513362 100644
2867 +--- a/drivers/net/phy/dp83867.c
2868 ++++ b/drivers/net/phy/dp83867.c
2869 +@@ -25,7 +25,8 @@
2870 + #define DP83867_CFG3 0x1e
2871 +
2872 + /* Extended Registers */
2873 +-#define DP83867_CFG4 0x0031
2874 ++#define DP83867_FLD_THR_CFG 0x002e
2875 ++#define DP83867_CFG4 0x0031
2876 + #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
2877 + #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
2878 + #define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
2879 +@@ -74,6 +75,7 @@
2880 + #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
2881 + #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
2882 + #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
2883 ++#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
2884 +
2885 + /* PHY CTRL bits */
2886 + #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
2887 +@@ -103,6 +105,9 @@
2888 + /* CFG4 bits */
2889 + #define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
2890 +
2891 ++/* FLD_THR_CFG */
2892 ++#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7
2893 ++
2894 + enum {
2895 + DP83867_PORT_MIRROING_KEEP,
2896 + DP83867_PORT_MIRROING_EN,
2897 +@@ -318,6 +323,20 @@ static int dp83867_config_init(struct phy_device *phydev)
2898 + phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
2899 + BIT(7));
2900 +
2901 ++ bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
2902 ++ if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
2903 ++ /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
2904 ++ * be set to 0x2. This may cause the PHY link to be unstable -
2905 ++ * the default value 0x1 needs to be restored.
2906 ++ */
2907 ++ ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
2908 ++ DP83867_FLD_THR_CFG,
2909 ++ DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
2910 ++ 0x1);
2911 ++ if (ret)
2912 ++ return ret;
2913 ++ }
2914 ++
2915 + if (phy_interface_is_rgmii(phydev)) {
2916 + val = phy_read(phydev, MII_DP83867_PHYCTRL);
2917 + if (val < 0)
2918 +diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
2919 +index 4a28fb29adaa..fbd36891ee64 100644
2920 +--- a/drivers/net/phy/mdio-bcm-unimac.c
2921 ++++ b/drivers/net/phy/mdio-bcm-unimac.c
2922 +@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
2923 + return -ENOMEM;
2924 + }
2925 +
2926 +- priv->clk = devm_clk_get(&pdev->dev, NULL);
2927 +- if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
2928 ++ priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
2929 ++ if (IS_ERR(priv->clk))
2930 + return PTR_ERR(priv->clk);
2931 +- else
2932 +- priv->clk = NULL;
2933 +
2934 + ret = clk_prepare_enable(priv->clk);
2935 + if (ret)
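The unimac hunk replaces open-coded -EPROBE_DEFER handling with devm_clk_get_optional(), which returns NULL rather than an error when no clock is described, so the clk_prepare_enable() call that follows degrades to a no-op. A hedged sketch of the contract (kernel context assumed, not standalone-compilable):

	struct clk *clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real errors, including -EPROBE_DEFER */
	/* clk may be NULL here; the clk API treats NULL as a dummy clock,
	 * so clk_prepare_enable(clk) succeeds without doing anything. */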
2936 +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
2937 +index 88d409e48c1f..aad6809ebe39 100644
2938 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
2939 ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
2940 +@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
2941 + static int mdio_mux_iproc_resume(struct device *dev)
2942 + {
2943 + struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
2944 ++ int rc;
2945 +
2946 +- clk_prepare_enable(md->core_clk);
2947 ++ rc = clk_prepare_enable(md->core_clk);
2948 ++ if (rc) {
2949 ++ dev_err(md->dev, "failed to enable core clk\n");
2950 ++ return rc;
2951 ++ }
2952 + mdio_mux_iproc_config(md);
2953 +
2954 + return 0;
2955 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2956 +index 5754bb6ca0ee..6c738a271257 100644
2957 +--- a/drivers/net/usb/qmi_wwan.c
2958 ++++ b/drivers/net/usb/qmi_wwan.c
2959 +@@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
2960 + {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
2961 + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
2962 + {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
2963 ++ {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */
2964 + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
2965 + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
2966 + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
2967 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2968 +index 283dfeb406ad..93690f77ec9c 100644
2969 +--- a/drivers/net/vxlan.c
2970 ++++ b/drivers/net/vxlan.c
2971 +@@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2972 + /* Setup stats when device is created */
2973 + static int vxlan_init(struct net_device *dev)
2974 + {
2975 ++ struct vxlan_dev *vxlan = netdev_priv(dev);
2976 ++ int err;
2977 ++
2978 + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2979 + if (!dev->tstats)
2980 + return -ENOMEM;
2981 +
2982 ++ err = gro_cells_init(&vxlan->gro_cells, dev);
2983 ++ if (err) {
2984 ++ free_percpu(dev->tstats);
2985 ++ return err;
2986 ++ }
2987 ++
2988 + return 0;
2989 + }
2990 +
2991 +@@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)
2992 +
2993 + vxlan->dev = dev;
2994 +
2995 +- gro_cells_init(&vxlan->gro_cells, dev);
2996 +-
2997 + for (h = 0; h < FDB_HASH_SIZE; ++h) {
2998 + spin_lock_init(&vxlan->hash_lock[h]);
2999 + INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
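gro_cells_init() can fail, so the vxlan fix moves it out of vxlan_setup(), whose void signature cannot report errors, and into the .ndo_init hook, which runs inside register_netdevice() and may return an error; the error path then frees the tstats allocated just above it. A hedged sketch of the wiring (kernel context assumed; ex_init/ex_uninit are placeholder names):

	static const struct net_device_ops ex_netdev_ops = {
		.ndo_init   = ex_init,   /* may fail; called from register_netdevice() */
		.ndo_uninit = ex_uninit, /* commonly undoes ex_init on unregister */
	};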
3000 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3001 +index c59cbb8cbdd7..c54fe6650018 100644
3002 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3003 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
3004 +@@ -1181,7 +1181,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
3005 +
3006 + static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
3007 + {
3008 +- return -ENOENT;
3009 ++ return 0;
3010 + }
3011 + #endif /* CONFIG_ACPI */
3012 +
3013 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
3014 +index 917729807514..e17f70b4d199 100644
3015 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
3016 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
3017 +@@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
3018 + rxmcs == DESC92C_RATE11M)
3019 +
3020 + struct phy_status_rpt {
3021 ++ u8 padding[2];
3022 + u8 ch_corr[2];
3023 + u8 cck_sig_qual_ofdm_pwdb_all;
3024 + u8 cck_agc_rpt_ofdm_cfosho_a;
3025 +diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
3026 +index 0cc9ac856fe2..ed2123129e0e 100644
3027 +--- a/drivers/nfc/fdp/fdp.c
3028 ++++ b/drivers/nfc/fdp/fdp.c
3029 +@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
3030 + const struct firmware *fw;
3031 + struct sk_buff *skb;
3032 + unsigned long len;
3033 +- u8 max_size, payload_size;
3034 ++ int max_size, payload_size;
3035 + int rc = 0;
3036 +
3037 + if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
3038 +@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
3039 +
3040 + while (len) {
3041 +
3042 +- payload_size = min_t(unsigned long, (unsigned long) max_size,
3043 +- len);
3044 ++ payload_size = min_t(unsigned long, max_size, len);
3045 +
3046 + skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
3047 + GFP_KERNEL);
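The fdp_nci_send_patch() change widens max_size and payload_size from u8 to int: the payload size comes back from a helper whose return can be a negative errno, and storing that in a u8 silently turns it into a large positive value, defeating any subsequent error check. A standalone C illustration of the signedness loss (-71 is just an example value):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int err = -71;		/* e.g. a negative errno from a helper */
	uint8_t narrow = err;	/* wraps to 185, so "narrow <= 0" is false */
	int wide = err;		/* keeps the sign; error checks still work */

	printf("u8: %u, int: %d\n", narrow, wide);	/* u8: 185, int: -71 */
	return 0;
}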
3048 +diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
3049 +index bd6129db6417..c34a6df712ad 100644
3050 +--- a/drivers/of/of_mdio.c
3051 ++++ b/drivers/of/of_mdio.c
3052 +@@ -268,6 +268,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
3053 + rc = of_mdiobus_register_phy(mdio, child, addr);
3054 + if (rc && rc != -ENODEV)
3055 + goto unregister;
3056 ++ break;
3057 + }
3058 + }
3059 + }
3060 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
3061 +index b727d1e34523..fe70e9875bde 100644
3062 +--- a/drivers/s390/net/qeth_core_main.c
3063 ++++ b/drivers/s390/net/qeth_core_main.c
3064 +@@ -1244,7 +1244,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
3065 + if (count == 1)
3066 + dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
3067 +
3068 +- card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
3069 + card->qdio.no_out_queues = count;
3070 + return 0;
3071 + }
3072 +@@ -2634,12 +2633,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
3073 + buf->rx_skb = netdev_alloc_skb(card->dev,
3074 + QETH_RX_PULL_LEN + ETH_HLEN);
3075 + if (!buf->rx_skb)
3076 +- return 1;
3077 ++ return -ENOMEM;
3078 + }
3079 +
3080 + pool_entry = qeth_find_free_buffer_pool_entry(card);
3081 + if (!pool_entry)
3082 +- return 1;
3083 ++ return -ENOBUFS;
3084 +
3085 + /*
3086 + * since the buffer is accessed only from the input_tasklet
3087 +@@ -2671,10 +2670,15 @@ int qeth_init_qdio_queues(struct qeth_card *card)
3088 + /* inbound queue */
3089 + qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3090 + memset(&card->rx, 0, sizeof(struct qeth_rx));
3091 ++
3092 + qeth_initialize_working_pool_list(card);
3093 + /*give only as many buffers to hardware as we have buffer pool entries*/
3094 +- for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3095 +- qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3096 ++ for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
3097 ++ rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3098 ++ if (rc)
3099 ++ return rc;
3100 ++ }
3101 ++
3102 + card->qdio.in_q->next_buf_to_init =
3103 + card->qdio.in_buf_pool.buf_count - 1;
3104 + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3105 +diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
3106 +index 079c04bc448a..7a57b61f0340 100644
3107 +--- a/drivers/scsi/ipr.c
3108 ++++ b/drivers/scsi/ipr.c
3109 +@@ -9947,6 +9947,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3110 + ioa_cfg->max_devs_supported = ipr_max_devs;
3111 +
3112 + if (ioa_cfg->sis64) {
3113 ++ host->max_channel = IPR_MAX_SIS64_BUSES;
3114 + host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
3115 + host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
3116 + if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
3117 +@@ -9955,6 +9956,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3118 + + ((sizeof(struct ipr_config_table_entry64)
3119 + * ioa_cfg->max_devs_supported)));
3120 + } else {
3121 ++ host->max_channel = IPR_VSET_BUS;
3122 + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
3123 + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
3124 + if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
3125 +@@ -9964,7 +9966,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3126 + * ioa_cfg->max_devs_supported)));
3127 + }
3128 +
3129 +- host->max_channel = IPR_VSET_BUS;
3130 + host->unique_id = host->host_no;
3131 + host->max_cmd_len = IPR_MAX_CDB_LEN;
3132 + host->can_queue = ioa_cfg->max_cmds;
3133 +diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
3134 +index a67baeb36d1f..b97aa9ac2ffe 100644
3135 +--- a/drivers/scsi/ipr.h
3136 ++++ b/drivers/scsi/ipr.h
3137 +@@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
3138 + #define IPR_ARRAY_VIRTUAL_BUS 0x1
3139 + #define IPR_VSET_VIRTUAL_BUS 0x2
3140 + #define IPR_IOAFP_VIRTUAL_BUS 0x3
3141 ++#define IPR_MAX_SIS64_BUSES 0x4
3142 +
3143 + #define IPR_GET_RES_PHYS_LOC(res) \
3144 + (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
3145 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3146 +index ac2e88ec1190..6a2f8bacface 100644
3147 +--- a/drivers/scsi/sd.c
3148 ++++ b/drivers/scsi/sd.c
3149 +@@ -3171,9 +3171,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
3150 + if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3151 + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3152 + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3153 +- } else
3154 ++ } else {
3155 ++ q->limits.io_opt = 0;
3156 + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3157 + (sector_t)BLK_DEF_MAX_SECTORS);
3158 ++ }
3159 +
3160 + /* Do not exceed controller limit */
3161 + rw_max = min(rw_max, queue_max_hw_sectors(q));
3162 +diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
3163 +index 0a23727d0dc3..871441658f0e 100644
3164 +--- a/drivers/staging/kpc2000/kpc2000/core.c
3165 ++++ b/drivers/staging/kpc2000/kpc2000/core.c
3166 +@@ -110,10 +110,10 @@ static ssize_t cpld_reconfigure(struct device *dev,
3167 + const char *buf, size_t count)
3168 + {
3169 + struct kp2000_device *pcard = dev_get_drvdata(dev);
3170 +- long wr_val;
3171 ++ unsigned long wr_val;
3172 + int rv;
3173 +
3174 +- rv = kstrtol(buf, 0, &wr_val);
3175 ++ rv = kstrtoul(buf, 0, &wr_val);
3176 + if (rv < 0)
3177 + return rv;
3178 + if (wr_val > 7)
3179 +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3180 +index 845c8817281c..f7f09c0d273f 100644
3181 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3182 ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3183 +@@ -32,6 +32,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
3184 + /****** 8188EUS ********/
3185 + {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
3186 + {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
3187 ++ {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */
3188 + {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
3189 + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
3190 + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
3191 +diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
3192 +index 28d372a0663a..e29c14e0ed49 100644
3193 +--- a/drivers/staging/wlan-ng/hfa384x_usb.c
3194 ++++ b/drivers/staging/wlan-ng/hfa384x_usb.c
3195 +@@ -3374,6 +3374,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
3196 + WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
3197 + pr_debug("overlen frm: len=%zd\n",
3198 + skblen - sizeof(struct p80211_caphdr));
3199 ++
3200 ++ return;
3201 + }
3202 +
3203 + skb = dev_alloc_skb(skblen);
3204 +diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
3205 +index b5ba176004c1..d8d86761b790 100644
3206 +--- a/drivers/staging/wlan-ng/prism2usb.c
3207 ++++ b/drivers/staging/wlan-ng/prism2usb.c
3208 +@@ -180,6 +180,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
3209 +
3210 + cancel_work_sync(&hw->link_bh);
3211 + cancel_work_sync(&hw->commsqual_bh);
3212 ++ cancel_work_sync(&hw->usb_work);
3213 +
3214 + /* Now we complete any outstanding commands
3215 + * and tell everyone who is waiting for their
3216 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3217 +index 47f09a6ce7bd..84d6f7df09a4 100644
3218 +--- a/drivers/usb/class/cdc-acm.c
3219 ++++ b/drivers/usb/class/cdc-acm.c
3220 +@@ -923,16 +923,16 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
3221 +
3222 + mutex_lock(&acm->port.mutex);
3223 +
3224 +- if ((ss->close_delay != old_close_delay) ||
3225 +- (ss->closing_wait != old_closing_wait)) {
3226 +- if (!capable(CAP_SYS_ADMIN))
3227 ++ if (!capable(CAP_SYS_ADMIN)) {
3228 ++ if ((ss->close_delay != old_close_delay) ||
3229 ++ (ss->closing_wait != old_closing_wait))
3230 + retval = -EPERM;
3231 +- else {
3232 +- acm->port.close_delay = close_delay;
3233 +- acm->port.closing_wait = closing_wait;
3234 +- }
3235 +- } else
3236 +- retval = -EOPNOTSUPP;
3237 ++ else
3238 ++ retval = -EOPNOTSUPP;
3239 ++ } else {
3240 ++ acm->port.close_delay = close_delay;
3241 ++ acm->port.closing_wait = closing_wait;
3242 ++ }
3243 +
3244 + mutex_unlock(&acm->port.mutex);
3245 + return retval;
3246 +diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
3247 +index 5a44b70372d9..fa9922c0c910 100644
3248 +--- a/drivers/usb/musb/musb_host.c
3249 ++++ b/drivers/usb/musb/musb_host.c
3250 +@@ -1462,10 +1462,7 @@ done:
3251 + * We need to map sg if the transfer_buffer is
3252 + * NULL.
3253 + */
3254 +- if (!urb->transfer_buffer)
3255 +- qh->use_sg = true;
3256 +-
3257 +- if (qh->use_sg) {
3258 ++ if (!urb->transfer_buffer) {
3259 + /* sg_miter_start is already done in musb_ep_program */
3260 + if (!sg_miter_next(&qh->sg_miter)) {
3261 + dev_err(musb->controller, "error: sg list empty\n");
3262 +@@ -1473,9 +1470,8 @@ done:
3263 + status = -EINVAL;
3264 + goto done;
3265 + }
3266 +- urb->transfer_buffer = qh->sg_miter.addr;
3267 + length = min_t(u32, length, qh->sg_miter.length);
3268 +- musb_write_fifo(hw_ep, length, urb->transfer_buffer);
3269 ++ musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
3270 + qh->sg_miter.consumed = length;
3271 + sg_miter_stop(&qh->sg_miter);
3272 + } else {
3273 +@@ -1484,11 +1480,6 @@ done:
3274 +
3275 + qh->segsize = length;
3276 +
3277 +- if (qh->use_sg) {
3278 +- if (offset + length >= urb->transfer_buffer_length)
3279 +- qh->use_sg = false;
3280 +- }
3281 +-
3282 + musb_ep_select(mbase, epnum);
3283 + musb_writew(epio, MUSB_TXCSR,
3284 + MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
3285 +@@ -2003,8 +1994,10 @@ finish:
3286 + urb->actual_length += xfer_len;
3287 + qh->offset += xfer_len;
3288 + if (done) {
3289 +- if (qh->use_sg)
3290 ++ if (qh->use_sg) {
3291 + qh->use_sg = false;
3292 ++ urb->transfer_buffer = NULL;
3293 ++ }
3294 +
3295 + if (urb->status == -EINPROGRESS)
3296 + urb->status = status;
3297 +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
3298 +index 5737add6a2a4..4cca0b836f43 100644
3299 +--- a/drivers/usb/serial/io_edgeport.c
3300 ++++ b/drivers/usb/serial/io_edgeport.c
3301 +@@ -710,7 +710,7 @@ static void edge_interrupt_callback(struct urb *urb)
3302 + /* grab the txcredits for the ports if available */
3303 + position = 2;
3304 + portNumber = 0;
3305 +- while ((position < length) &&
3306 ++ while ((position < length - 1) &&
3307 + (portNumber < edge_serial->serial->num_ports)) {
3308 + txCredits = data[position] | (data[position+1] << 8);
3309 + if (txCredits) {
3310 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3311 +index 0b5dcf973d94..8bfffca3e4ae 100644
3312 +--- a/drivers/usb/serial/option.c
3313 ++++ b/drivers/usb/serial/option.c
3314 +@@ -1992,8 +1992,14 @@ static const struct usb_device_id option_ids[] = {
3315 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
3316 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
3317 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
3318 ++ { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */
3319 ++ .driver_info = RSVD(1) | RSVD(4) },
3320 ++ { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */
3321 ++ .driver_info = RSVD(1) | RSVD(4) },
3322 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
3323 + .driver_info = RSVD(4) },
3324 ++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */
3325 ++ .driver_info = RSVD(4) },
3326 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
3327 + .driver_info = RSVD(4) },
3328 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
3329 +diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
3330 +index b86195e4dc6c..b378cd780ed5 100644
3331 +--- a/fs/afs/cmservice.c
3332 ++++ b/fs/afs/cmservice.c
3333 +@@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call)
3334 + call->buffer = NULL;
3335 + }
3336 +
3337 ++/*
3338 ++ * Abort a service call from within an action function.
3339 ++ */
3340 ++static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
3341 ++ const char *why)
3342 ++{
3343 ++ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
3344 ++ abort_code, error, why);
3345 ++ afs_set_call_complete(call, error, 0);
3346 ++}
3347 ++
3348 + /*
3349 + * The server supplied a list of callbacks that it wanted to break.
3350 + */
3351 +@@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
3352 + if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
3353 + afs_send_empty_reply(call);
3354 + else
3355 +- rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
3356 +- 1, 1, "K-1");
3357 ++ afs_abort_service_call(call, 1, 1, "K-1");
3358 +
3359 + afs_put_call(call);
3360 + _leave("");
3361 +diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
3362 +index cfe62b154f68..e1b9ed679045 100644
3363 +--- a/fs/afs/fs_probe.c
3364 ++++ b/fs/afs/fs_probe.c
3365 +@@ -145,6 +145,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
3366 + read_lock(&server->fs_lock);
3367 + ac.alist = rcu_dereference_protected(server->addresses,
3368 + lockdep_is_held(&server->fs_lock));
3369 ++ afs_get_addrlist(ac.alist);
3370 + read_unlock(&server->fs_lock);
3371 +
3372 + atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
3373 +@@ -163,6 +164,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
3374 +
3375 + if (!in_progress)
3376 + afs_fs_probe_done(server);
3377 ++ afs_put_addrlist(ac.alist);
3378 + return in_progress;
3379 + }
3380 +
3381 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
3382 +index 759e0578012c..d5efb1debebf 100644
3383 +--- a/fs/afs/internal.h
3384 ++++ b/fs/afs/internal.h
3385 +@@ -154,7 +154,7 @@ struct afs_call {
3386 + };
3387 + unsigned char unmarshall; /* unmarshalling phase */
3388 + unsigned char addr_ix; /* Address in ->alist */
3389 +- bool incoming; /* T if incoming call */
3390 ++ bool drop_ref; /* T if need to drop ref for incoming call */
3391 + bool send_pages; /* T if data from mapping should be sent */
3392 + bool need_attention; /* T if RxRPC poked us */
3393 + bool async; /* T if asynchronous */
3394 +@@ -1203,8 +1203,16 @@ static inline void afs_set_call_complete(struct afs_call *call,
3395 + ok = true;
3396 + }
3397 + spin_unlock_bh(&call->state_lock);
3398 +- if (ok)
3399 ++ if (ok) {
3400 + trace_afs_call_done(call);
3401 ++
3402 ++ /* Asynchronous calls have two refs to release - one from the alloc and
3403 ++ * one queued with the work item - and we can't just deallocate the
3404 ++ * call because the work item may be queued again.
3405 ++ */
3406 ++ if (call->drop_ref)
3407 ++ afs_put_call(call);
3408 ++ }
3409 + }
3410 +
3411 + /*
3412 +diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3413 +index 61498d9f06ef..ef1d09f8920b 100644
3414 +--- a/fs/afs/rxrpc.c
3415 ++++ b/fs/afs/rxrpc.c
3416 +@@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls;
3417 +
3418 + static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
3419 + static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
3420 +-static void afs_delete_async_call(struct work_struct *);
3421 + static void afs_process_async_call(struct work_struct *);
3422 + static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
3423 + static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
3424 +@@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call)
3425 + int n = atomic_dec_return(&call->usage);
3426 + int o = atomic_read(&net->nr_outstanding_calls);
3427 +
3428 +- trace_afs_call(call, afs_call_trace_put, n + 1, o,
3429 ++ trace_afs_call(call, afs_call_trace_put, n, o,
3430 + __builtin_return_address(0));
3431 +
3432 + ASSERTCMP(n, >=, 0);
3433 +@@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
3434 + /* If the call is going to be asynchronous, we need an extra ref for
3435 + * the call to hold itself so the caller need not hang on to its ref.
3436 + */
3437 +- if (call->async)
3438 ++ if (call->async) {
3439 + afs_get_call(call, afs_call_trace_get);
3440 ++ call->drop_ref = true;
3441 ++ }
3442 +
3443 + /* create a call */
3444 + rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
3445 +@@ -584,8 +585,6 @@ static void afs_deliver_to_call(struct afs_call *call)
3446 + done:
3447 + if (call->type->done)
3448 + call->type->done(call);
3449 +- if (state == AFS_CALL_COMPLETE && call->incoming)
3450 +- afs_put_call(call);
3451 + out:
3452 + _leave("");
3453 + return;
3454 +@@ -604,11 +603,7 @@ call_complete:
3455 + long afs_wait_for_call_to_complete(struct afs_call *call,
3456 + struct afs_addr_cursor *ac)
3457 + {
3458 +- signed long rtt2, timeout;
3459 + long ret;
3460 +- bool stalled = false;
3461 +- u64 rtt;
3462 +- u32 life, last_life;
3463 + bool rxrpc_complete = false;
3464 +
3465 + DECLARE_WAITQUEUE(myself, current);
3466 +@@ -619,14 +614,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
3467 + if (ret < 0)
3468 + goto out;
3469 +
3470 +- rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
3471 +- rtt2 = nsecs_to_jiffies64(rtt) * 2;
3472 +- if (rtt2 < 2)
3473 +- rtt2 = 2;
3474 +-
3475 +- timeout = rtt2;
3476 +- rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
3477 +-
3478 + add_wait_queue(&call->waitq, &myself);
3479 + for (;;) {
3480 + set_current_state(TASK_UNINTERRUPTIBLE);
3481 +@@ -637,37 +624,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
3482 + call->need_attention = false;
3483 + __set_current_state(TASK_RUNNING);
3484 + afs_deliver_to_call(call);
3485 +- timeout = rtt2;
3486 + continue;
3487 + }
3488 +
3489 + if (afs_check_call_state(call, AFS_CALL_COMPLETE))
3490 + break;
3491 +
3492 +- if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
3493 ++ if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
3494 + /* rxrpc terminated the call. */
3495 + rxrpc_complete = true;
3496 + break;
3497 + }
3498 +
3499 +- if (call->intr && timeout == 0 &&
3500 +- life == last_life && signal_pending(current)) {
3501 +- if (stalled)
3502 +- break;
3503 +- __set_current_state(TASK_RUNNING);
3504 +- rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
3505 +- timeout = rtt2;
3506 +- stalled = true;
3507 +- continue;
3508 +- }
3509 +-
3510 +- if (life != last_life) {
3511 +- timeout = rtt2;
3512 +- last_life = life;
3513 +- stalled = false;
3514 +- }
3515 +-
3516 +- timeout = schedule_timeout(timeout);
3517 ++ schedule();
3518 + }
3519 +
3520 + remove_wait_queue(&call->waitq, &myself);
3521 +@@ -735,7 +704,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
3522 +
3523 + u = atomic_fetch_add_unless(&call->usage, 1, 0);
3524 + if (u != 0) {
3525 +- trace_afs_call(call, afs_call_trace_wake, u,
3526 ++ trace_afs_call(call, afs_call_trace_wake, u + 1,
3527 + atomic_read(&call->net->nr_outstanding_calls),
3528 + __builtin_return_address(0));
3529 +
3530 +@@ -744,21 +713,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
3531 + }
3532 + }
3533 +
3534 +-/*
3535 +- * Delete an asynchronous call. The work item carries a ref to the call struct
3536 +- * that we need to release.
3537 +- */
3538 +-static void afs_delete_async_call(struct work_struct *work)
3539 +-{
3540 +- struct afs_call *call = container_of(work, struct afs_call, async_work);
3541 +-
3542 +- _enter("");
3543 +-
3544 +- afs_put_call(call);
3545 +-
3546 +- _leave("");
3547 +-}
3548 +-
3549 + /*
3550 + * Perform I/O processing on an asynchronous call. The work item carries a ref
3551 + * to the call struct that we either need to release or to pass on.
3552 +@@ -774,16 +728,6 @@ static void afs_process_async_call(struct work_struct *work)
3553 + afs_deliver_to_call(call);
3554 + }
3555 +
3556 +- if (call->state == AFS_CALL_COMPLETE) {
3557 +- /* We have two refs to release - one from the alloc and one
3558 +- * queued with the work item - and we can't just deallocate the
3559 +- * call because the work item may be queued again.
3560 +- */
3561 +- call->async_work.func = afs_delete_async_call;
3562 +- if (!queue_work(afs_async_calls, &call->async_work))
3563 +- afs_put_call(call);
3564 +- }
3565 +-
3566 + afs_put_call(call);
3567 + _leave("");
3568 + }
3569 +@@ -810,6 +754,7 @@ void afs_charge_preallocation(struct work_struct *work)
3570 + if (!call)
3571 + break;
3572 +
3573 ++ call->drop_ref = true;
3574 + call->async = true;
3575 + call->state = AFS_CALL_SV_AWAIT_OP_ID;
3576 + init_waitqueue_head(&call->waitq);
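The afs rework replaces the old requeue-a-destructor dance with a drop_ref flag: async and preallocated incoming calls own one extra reference, and afs_set_call_complete() now releases it exactly once, on the first transition to the complete state. A simplified standalone sketch of the idea (not the afs API; the real code guards the transition with call->state_lock):

#include <stdbool.h>

struct call {
	int usage;		/* refcount, simplified */
	bool drop_ref;		/* owns one extra ref until completion */
	bool complete;
};

static void put_call(struct call *c) { c->usage--; }	/* stub */

static void set_complete(struct call *c)
{
	if (!c->complete) {		/* first and only transition */
		c->complete = true;
		if (c->drop_ref)
			put_call(c);	/* drop the self-owned ref once */
	}
}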
3577 +diff --git a/fs/ceph/file.c b/fs/ceph/file.c
3578 +index cd09e63d682b..ce54a1b12819 100644
3579 +--- a/fs/ceph/file.c
3580 ++++ b/fs/ceph/file.c
3581 +@@ -1415,10 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
3582 + struct inode *inode = file_inode(file);
3583 + struct ceph_inode_info *ci = ceph_inode(inode);
3584 + struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
3585 ++ struct ceph_osd_client *osdc = &fsc->client->osdc;
3586 + struct ceph_cap_flush *prealloc_cf;
3587 + ssize_t count, written = 0;
3588 + int err, want, got;
3589 + bool direct_lock = false;
3590 ++ u32 map_flags;
3591 ++ u64 pool_flags;
3592 + loff_t pos;
3593 + loff_t limit = max(i_size_read(inode), fsc->max_file_size);
3594 +
3595 +@@ -1481,8 +1484,12 @@ retry_snap:
3596 + goto out;
3597 + }
3598 +
3599 +- /* FIXME: not complete since it doesn't account for being at quota */
3600 +- if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
3601 ++ down_read(&osdc->lock);
3602 ++ map_flags = osdc->osdmap->flags;
3603 ++ pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
3604 ++ up_read(&osdc->lock);
3605 ++ if ((map_flags & CEPH_OSDMAP_FULL) ||
3606 ++ (pool_flags & CEPH_POOL_FLAG_FULL)) {
3607 + err = -ENOSPC;
3608 + goto out;
3609 + }
3610 +@@ -1575,7 +1582,8 @@ retry_snap:
3611 + }
3612 +
3613 + if (written >= 0) {
3614 +- if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
3615 ++ if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
3616 ++ (pool_flags & CEPH_POOL_FLAG_NEARFULL))
3617 + iocb->ki_flags |= IOCB_DSYNC;
3618 + written = generic_write_sync(iocb, written);
3619 + }
3620 +diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
3621 +index ccfcc66aaf44..923be9399b21 100644
3622 +--- a/fs/ceph/snap.c
3623 ++++ b/fs/ceph/snap.c
3624 +@@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
3625 + pr_err("snapid map %llx -> %x still in use\n",
3626 + sm->snap, sm->dev);
3627 + }
3628 ++ kfree(sm);
3629 + }
3630 + }
3631 +diff --git a/fs/libfs.c b/fs/libfs.c
3632 +index 1463b038ffc4..5fd9cc0e2ac9 100644
3633 +--- a/fs/libfs.c
3634 ++++ b/fs/libfs.c
3635 +@@ -821,7 +821,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
3636 + {
3637 + struct simple_attr *attr;
3638 +
3639 +- attr = kmalloc(sizeof(*attr), GFP_KERNEL);
3640 ++ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
3641 + if (!attr)
3642 + return -ENOMEM;
3643 +
3644 +@@ -861,9 +861,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
3645 + if (ret)
3646 + return ret;
3647 +
3648 +- if (*ppos) { /* continued read */
3649 ++ if (*ppos && attr->get_buf[0]) {
3650 ++ /* continued read */
3651 + size = strlen(attr->get_buf);
3652 +- } else { /* first read */
3653 ++ } else {
3654 ++ /* first read */
3655 + u64 val;
3656 + ret = attr->get(attr->data, &val);
3657 + if (ret)
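simple_attr_read() now detects a continued read by testing attr->get_buf[0], which is only reliable because simple_attr_open() switched from kmalloc() to kzalloc(): with an uninitialized buffer the first byte could be nonzero garbage before anything was formatted into it. A standalone C illustration of the sentinel pattern (calloc standing in for kzalloc):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = calloc(1, 24);	/* like kzalloc: every byte is zero */
	if (!buf)
		return 1;

	if (buf[0])			/* reliably false before first use */
		puts("continued read");
	else
		puts("first read");

	strcpy(buf, "42\n");		/* after formatting, buf[0] != 0 */
	if (buf[0])
		puts("continued read");

	free(buf);
	return 0;
}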
3658 +diff --git a/fs/nfs/client.c b/fs/nfs/client.c
3659 +index 30838304a0bf..a05f77f9c21e 100644
3660 +--- a/fs/nfs/client.c
3661 ++++ b/fs/nfs/client.c
3662 +@@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
3663 + if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
3664 + goto error_0;
3665 +
3666 ++ clp->cl_minorversion = cl_init->minorversion;
3667 + clp->cl_nfs_mod = cl_init->nfs_mod;
3668 + if (!try_module_get(clp->cl_nfs_mod->owner))
3669 + goto error_dealloc;
3670 +diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
3671 +index 3800ab6f08fa..a6dcc2151e77 100644
3672 +--- a/fs/nfs/fscache.c
3673 ++++ b/fs/nfs/fscache.c
3674 +@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
3675 + struct nfs_server_key {
3676 + struct {
3677 + uint16_t nfsversion; /* NFS protocol version */
3678 ++ uint32_t minorversion; /* NFSv4 minor version */
3679 + uint16_t family; /* address family */
3680 + __be16 port; /* IP port */
3681 + } hdr;
3682 +@@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
3683 +
3684 + memset(&key, 0, sizeof(key));
3685 + key.hdr.nfsversion = clp->rpc_ops->version;
3686 ++ key.hdr.minorversion = clp->cl_minorversion;
3687 + key.hdr.family = clp->cl_addr.ss_family;
3688 +
3689 + switch (clp->cl_addr.ss_family) {
3690 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
3691 +index da6204025a2d..914feab64702 100644
3692 +--- a/fs/nfs/nfs4client.c
3693 ++++ b/fs/nfs/nfs4client.c
3694 +@@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
3695 + INIT_LIST_HEAD(&clp->cl_ds_clients);
3696 + rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
3697 + clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
3698 +- clp->cl_minorversion = cl_init->minorversion;
3699 + clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
3700 + clp->cl_mig_gen = 1;
3701 + #if IS_ENABLED(CONFIG_NFS_V4_1)
3702 +diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
3703 +index e081b56f1c1d..5e601975745f 100644
3704 +--- a/include/linux/ceph/osdmap.h
3705 ++++ b/include/linux/ceph/osdmap.h
3706 +@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
3707 + #define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
3708 + together */
3709 + #define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
3710 ++#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
3711 ++ will set FULL too */
3712 ++#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
3713 +
3714 + struct ceph_pg_pool_info {
3715 + struct rb_node node;
3716 +@@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
3717 +
3718 + extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
3719 + extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
3720 ++u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
3721 +
3722 + #endif
3723 +diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
3724 +index 3eb0e55665b4..c004bced9b91 100644
3725 +--- a/include/linux/ceph/rados.h
3726 ++++ b/include/linux/ceph/rados.h
3727 +@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
3728 + /*
3729 + * osd map flag bits
3730 + */
3731 +-#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
3732 +-#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
3733 ++#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
3734 ++ not set since ~luminous */
3735 ++#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
3736 ++ not set since ~luminous */
3737 + #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
3738 + #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
3739 + #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
3740 +diff --git a/include/linux/dmar.h b/include/linux/dmar.h
3741 +index d3ea390336f3..f397e52c2d9d 100644
3742 +--- a/include/linux/dmar.h
3743 ++++ b/include/linux/dmar.h
3744 +@@ -74,11 +74,13 @@ extern struct list_head dmar_drhd_units;
3745 + dmar_rcu_check())
3746 +
3747 + #define for_each_active_drhd_unit(drhd) \
3748 +- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
3749 ++ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
3750 ++ dmar_rcu_check()) \
3751 + if (drhd->ignored) {} else
3752 +
3753 + #define for_each_active_iommu(i, drhd) \
3754 +- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
3755 ++ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
3756 ++ dmar_rcu_check()) \
3757 + if (i=drhd->iommu, drhd->ignored) {} else
3758 +
3759 + #define for_each_iommu(i, drhd) \
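With CONFIG_PROVE_RCU_LIST, list_for_each_entry_rcu() accepts an optional fourth argument: a lockdep expression naming the lock that also legitimizes traversal outside an RCU read-side section. The dmar macros pass dmar_rcu_check() so that iteration under dmar_global_lock stops tripping false positives. A hedged sketch of the general shape (kernel context assumed; my_list/my_lock are placeholders):

	/* Traversal is safe under either rcu_read_lock() or my_lock. */
	list_for_each_entry_rcu(pos, &my_list, node,
				lockdep_is_held(&my_lock)) {
		/* ... */
	}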
3760 +diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
3761 +index 0aa803c451a3..c620d9139c28 100644
3762 +--- a/include/linux/dsa/8021q.h
3763 ++++ b/include/linux/dsa/8021q.h
3764 +@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
3765 +
3766 + int dsa_8021q_rx_source_port(u16 vid);
3767 +
3768 +-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
3769 +-
3770 + #else
3771 +
3772 + int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
3773 +@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
3774 + return 0;
3775 + }
3776 +
3777 +-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
3778 +-{
3779 +- return NULL;
3780 +-}
3781 +-
3782 + #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
3783 +
3784 + #endif /* _NET_DSA_8021Q_H */
3785 +diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
3786 +index 7d3f2ced92d1..73c66a3a33ae 100644
3787 +--- a/include/linux/ieee80211.h
3788 ++++ b/include/linux/ieee80211.h
3789 +@@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
3790 + {
3791 + struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
3792 + u8 spr_len = sizeof(struct ieee80211_he_spr);
3793 +- u32 he_spr_params;
3794 ++ u8 he_spr_params;
3795 +
3796 + /* Make sure the input is not NULL */
3797 + if (!he_spr_ie)
3798 + return 0;
3799 +
3800 + /* Calc required length */
3801 +- he_spr_params = le32_to_cpu(he_spr->he_sr_control);
3802 ++ he_spr_params = he_spr->he_sr_control;
3803 + if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
3804 + spr_len++;
3805 + if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
3806 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3807 +index 6d8bf4bdf240..1e5dad8b8e59 100644
3808 +--- a/include/linux/intel-iommu.h
3809 ++++ b/include/linux/intel-iommu.h
3810 +@@ -120,6 +120,8 @@
3811 +
3812 + #define dmar_readq(a) readq(a)
3813 + #define dmar_writeq(a,v) writeq(v,a)
3814 ++#define dmar_readl(a) readl(a)
3815 ++#define dmar_writel(a, v) writel(v, a)
3816 +
3817 + #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
3818 + #define DMAR_VER_MINOR(v) ((v) & 0x0f)
3819 +diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
3820 +index ae703ea3ef48..8faca7b52543 100644
3821 +--- a/include/linux/memcontrol.h
3822 ++++ b/include/linux/memcontrol.h
3823 +@@ -705,6 +705,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
3824 + void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
3825 + int val);
3826 + void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
3827 ++void mod_memcg_obj_state(void *p, int idx, int val);
3828 +
3829 + static inline void mod_lruvec_state(struct lruvec *lruvec,
3830 + enum node_stat_item idx, int val)
3831 +@@ -1128,6 +1129,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
3832 + __mod_node_page_state(page_pgdat(page), idx, val);
3833 + }
3834 +
3835 ++static inline void mod_memcg_obj_state(void *p, int idx, int val)
3836 ++{
3837 ++}
3838 ++
3839 + static inline
3840 + unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3841 + gfp_t gfp_mask,
3842 +@@ -1432,6 +1437,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
3843 + return memcg ? memcg->kmemcg_id : -1;
3844 + }
3845 +
3846 ++struct mem_cgroup *mem_cgroup_from_obj(void *p);
3847 ++
3848 + #else
3849 +
3850 + static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
3851 +@@ -1473,6 +1480,11 @@ static inline void memcg_put_cache_ids(void)
3852 + {
3853 + }
3854 +
3855 ++static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
3856 ++{
3857 ++ return NULL;
3858 ++}
3859 ++
3860 + #endif /* CONFIG_MEMCG_KMEM */
3861 +
3862 + #endif /* _LINUX_MEMCONTROL_H */
3863 +diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
3864 +index ba703384bea0..4c5eb3aa8e72 100644
3865 +--- a/include/linux/mmc/host.h
3866 ++++ b/include/linux/mmc/host.h
3867 +@@ -333,6 +333,7 @@ struct mmc_host {
3868 + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
3869 + MMC_CAP_UHS_DDR50)
3870 + #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
3871 ++#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */
3872 + #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
3873 + #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
3874 + #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
3875 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3876 +index 6ae88b0c1c31..955e1370f033 100644
3877 +--- a/include/linux/skbuff.h
3878 ++++ b/include/linux/skbuff.h
3879 +@@ -634,8 +634,8 @@ typedef unsigned char *sk_buff_data_t;
3880 + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
3881 + * @tc_skip_classify: do not classify packet. set by IFB device
3882 + * @tc_at_ingress: used within tc_classify to distinguish in/egress
3883 +- * @tc_redirected: packet was redirected by a tc action
3884 +- * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
3885 ++ * @redirected: packet was redirected by packet classifier
3886 ++ * @from_ingress: packet was redirected from the ingress path
3887 + * @peeked: this packet has been seen already, so stats have been
3888 + * done for it, don't do them again
3889 + * @nf_trace: netfilter packet trace flag
3890 +@@ -816,8 +816,10 @@ struct sk_buff {
3891 + #ifdef CONFIG_NET_CLS_ACT
3892 + __u8 tc_skip_classify:1;
3893 + __u8 tc_at_ingress:1;
3894 +- __u8 tc_redirected:1;
3895 +- __u8 tc_from_ingress:1;
3896 ++#endif
3897 ++#ifdef CONFIG_NET_REDIRECT
3898 ++ __u8 redirected:1;
3899 ++ __u8 from_ingress:1;
3900 + #endif
3901 + #ifdef CONFIG_TLS_DEVICE
3902 + __u8 decrypted:1;
3903 +@@ -4514,5 +4516,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
3904 + return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3905 + }
3906 +
3907 ++static inline bool skb_is_redirected(const struct sk_buff *skb)
3908 ++{
3909 ++#ifdef CONFIG_NET_REDIRECT
3910 ++ return skb->redirected;
3911 ++#else
3912 ++ return false;
3913 ++#endif
3914 ++}
3915 ++
3916 ++static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
3917 ++{
3918 ++#ifdef CONFIG_NET_REDIRECT
3919 ++ skb->redirected = 1;
3920 ++ skb->from_ingress = from_ingress;
3921 ++ if (skb->from_ingress)
3922 ++ skb->tstamp = 0;
3923 ++#endif
3924 ++}
3925 ++
3926 ++static inline void skb_reset_redirect(struct sk_buff *skb)
3927 ++{
3928 ++#ifdef CONFIG_NET_REDIRECT
3929 ++ skb->redirected = 0;
3930 ++#endif
3931 ++}
3932 ++
3933 + #endif /* __KERNEL__ */
3934 + #endif /* _LINUX_SKBUFF_H */
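The new skb helpers replace the tc-only bits with generic ones under CONFIG_NET_REDIRECT and, as the #ifdef-else bodies show, compile down to no-ops (or a constant false) when the option is off, so callers need no conditional compilation of their own. A hedged usage sketch (kernel context assumed):

	/* on the redirect path: record how the packet was redirected */
	skb_set_redirected(skb, skb_at_tc_ingress(skb));

	/* on a loopback-style device such as ifb: */
	if (!skb_is_redirected(skb))
		goto drop;
	skb_reset_redirect(skb);	/* clear the flag before reinjecting */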
3935 +diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
3936 +index 1abae3c340a5..299240df79e4 100644
3937 +--- a/include/net/af_rxrpc.h
3938 ++++ b/include/net/af_rxrpc.h
3939 +@@ -58,9 +58,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
3940 + rxrpc_user_attach_call_t, unsigned long, gfp_t,
3941 + unsigned int);
3942 + void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
3943 +-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
3944 +- u32 *);
3945 +-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
3946 ++bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
3947 + u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
3948 + bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
3949 + ktime_t *);
3950 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
3951 +index d334e4609dd4..9fb7cf1cdf36 100644
3952 +--- a/include/net/sch_generic.h
3953 ++++ b/include/net/sch_generic.h
3954 +@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
3955 + const struct qdisc_size_table *stab);
3956 + int skb_do_redirect(struct sk_buff *);
3957 +
3958 +-static inline void skb_reset_tc(struct sk_buff *skb)
3959 +-{
3960 +-#ifdef CONFIG_NET_CLS_ACT
3961 +- skb->tc_redirected = 0;
3962 +-#endif
3963 +-}
3964 +-
3965 +-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
3966 +-{
3967 +-#ifdef CONFIG_NET_CLS_ACT
3968 +- return skb->tc_redirected;
3969 +-#else
3970 +- return false;
3971 +-#endif
3972 +-}
3973 +-
3974 + static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
3975 + {
3976 + #ifdef CONFIG_NET_CLS_ACT
3977 +diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
3978 +index 564ba1b5cf57..c612cabbc378 100644
3979 +--- a/include/trace/events/afs.h
3980 ++++ b/include/trace/events/afs.h
3981 +@@ -233,7 +233,7 @@ enum afs_cb_break_reason {
3982 + EM(afs_call_trace_get, "GET ") \
3983 + EM(afs_call_trace_put, "PUT ") \
3984 + EM(afs_call_trace_wake, "WAKE ") \
3985 +- E_(afs_call_trace_work, "WORK ")
3986 ++ E_(afs_call_trace_work, "QUEUE")
3987 +
3988 + #define afs_server_traces \
3989 + EM(afs_server_trace_alloc, "ALLOC ") \
3990 +diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
3991 +index 50e991952c97..ed2a96f43ce4 100644
3992 +--- a/include/uapi/linux/serio.h
3993 ++++ b/include/uapi/linux/serio.h
3994 +@@ -9,7 +9,7 @@
3995 + #ifndef _UAPI_SERIO_H
3996 + #define _UAPI_SERIO_H
3997 +
3998 +-
3999 ++#include <linux/const.h>
4000 + #include <linux/ioctl.h>
4001 +
4002 + #define SPIOCSTYPE _IOW('q', 0x01, unsigned long)
4003 +@@ -18,10 +18,10 @@
4004 + /*
4005 + * bit masks for use in "interrupt" flags (3rd argument)
4006 + */
4007 +-#define SERIO_TIMEOUT BIT(0)
4008 +-#define SERIO_PARITY BIT(1)
4009 +-#define SERIO_FRAME BIT(2)
4010 +-#define SERIO_OOB_DATA BIT(3)
4011 ++#define SERIO_TIMEOUT _BITUL(0)
4012 ++#define SERIO_PARITY _BITUL(1)
4013 ++#define SERIO_FRAME _BITUL(2)
4014 ++#define SERIO_OOB_DATA _BITUL(3)
4015 +
4016 + /*
4017 + * Serio types
4018 +diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
4019 +index 29c7c06c6bd6..b774e2210f7d 100644
4020 +--- a/kernel/bpf/btf.c
4021 ++++ b/kernel/bpf/btf.c
4022 +@@ -2309,7 +2309,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
4023 +
4024 + struct_size = struct_type->size;
4025 + bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
4026 +- if (struct_size - bytes_offset < sizeof(int)) {
4027 ++ if (struct_size - bytes_offset < member_type->size) {
4028 + btf_verifier_log_member(env, struct_type, member,
4029 + "Member exceeds struct_size");
4030 + return -EINVAL;
4031 +diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
4032 +index 8bd69062fbe5..869e2e1860e8 100644
4033 +--- a/kernel/bpf/cgroup.c
4034 ++++ b/kernel/bpf/cgroup.c
4035 +@@ -228,6 +228,9 @@ cleanup:
4036 + for (i = 0; i < NR; i++)
4037 + bpf_prog_array_free(arrays[i]);
4038 +
4039 ++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
4040 ++ cgroup_bpf_put(p);
4041 ++
4042 + percpu_ref_exit(&cgrp->bpf.refcnt);
4043 +
4044 + return -ENOMEM;
4045 +@@ -300,8 +303,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
4046 + {
4047 + struct list_head *progs = &cgrp->bpf.progs[type];
4048 + struct bpf_prog *old_prog = NULL;
4049 +- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
4050 +- *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
4051 ++ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
4052 ++ struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
4053 + enum bpf_cgroup_storage_type stype;
4054 + struct bpf_prog_list *pl;
4055 + bool pl_was_allocated;
4056 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4057 +index b2817d0929b3..a0b76b360d6f 100644
4058 +--- a/kernel/bpf/verifier.c
4059 ++++ b/kernel/bpf/verifier.c
4060 +@@ -979,17 +979,6 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
4061 + reg->umax_value));
4062 + }
4063 +
4064 +-static void __reg_bound_offset32(struct bpf_reg_state *reg)
4065 +-{
4066 +- u64 mask = 0xffffFFFF;
4067 +- struct tnum range = tnum_range(reg->umin_value & mask,
4068 +- reg->umax_value & mask);
4069 +- struct tnum lo32 = tnum_cast(reg->var_off, 4);
4070 +- struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
4071 +-
4072 +- reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
4073 +-}
4074 +-
4075 + /* Reset the min/max bounds of a register */
4076 + static void __mark_reg_unbounded(struct bpf_reg_state *reg)
4077 + {
4078 +@@ -5452,10 +5441,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
4079 + /* We might have learned some bits from the bounds. */
4080 + __reg_bound_offset(false_reg);
4081 + __reg_bound_offset(true_reg);
4082 +- if (is_jmp32) {
4083 +- __reg_bound_offset32(false_reg);
4084 +- __reg_bound_offset32(true_reg);
4085 +- }
4086 + /* Intersecting with the old var_off might have improved our bounds
4087 + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4088 + * then new var_off is (0; 0x7f...fc) which improves our umax.
4089 +@@ -5565,10 +5550,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
4090 + /* We might have learned some bits from the bounds. */
4091 + __reg_bound_offset(false_reg);
4092 + __reg_bound_offset(true_reg);
4093 +- if (is_jmp32) {
4094 +- __reg_bound_offset32(false_reg);
4095 +- __reg_bound_offset32(true_reg);
4096 +- }
4097 + /* Intersecting with the old var_off might have improved our bounds
4098 + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4099 + * then new var_off is (0; 0x7f...fc) which improves our umax.
4100 +diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
4101 +index 7f83f4121d8d..f684c82efc2e 100644
4102 +--- a/kernel/cgroup/cgroup-v1.c
4103 ++++ b/kernel/cgroup/cgroup-v1.c
4104 +@@ -473,6 +473,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
4105 + */
4106 + p++;
4107 + if (p >= end) {
4108 ++ (*pos)++;
4109 + return NULL;
4110 + } else {
4111 + *pos = *p;
4112 +@@ -783,7 +784,7 @@ void cgroup1_release_agent(struct work_struct *work)
4113 +
4114 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
4115 + agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
4116 +- if (!pathbuf || !agentbuf)
4117 ++ if (!pathbuf || !agentbuf || !strlen(agentbuf))
4118 + goto out;
4119 +
4120 + spin_lock_irq(&css_set_lock);
4121 +diff --git a/kernel/fork.c b/kernel/fork.c
4122 +index 755d8160e001..27c0ef30002e 100644
4123 +--- a/kernel/fork.c
4124 ++++ b/kernel/fork.c
4125 +@@ -394,8 +394,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
4126 + mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
4127 + THREAD_SIZE / 1024 * account);
4128 +
4129 +- mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
4130 +- account * (THREAD_SIZE / 1024));
4131 ++ mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB,
4132 ++ account * (THREAD_SIZE / 1024));
4133 + }
4134 + }
4135 +
4136 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4137 +index 55b080101a20..b304c17d53a3 100644
4138 +--- a/kernel/irq/manage.c
4139 ++++ b/kernel/irq/manage.c
4140 +@@ -284,7 +284,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
4141 +
4142 + if (desc->affinity_notify) {
4143 + kref_get(&desc->affinity_notify->kref);
4144 +- schedule_work(&desc->affinity_notify->work);
4145 ++ if (!schedule_work(&desc->affinity_notify->work)) {
4146 ++ /* Work was already scheduled, drop our extra ref */
4147 ++ kref_put(&desc->affinity_notify->kref,
4148 ++ desc->affinity_notify->release);
4149 ++ }
4150 + }
4151 + irqd_set(data, IRQD_AFFINITY_SET);
4152 +
4153 +@@ -384,7 +388,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
4154 + raw_spin_unlock_irqrestore(&desc->lock, flags);
4155 +
4156 + if (old_notify) {
4157 +- cancel_work_sync(&old_notify->work);
4158 ++ if (cancel_work_sync(&old_notify->work)) {
4159 ++ /* Pending work had a ref, put that one too */
4160 ++ kref_put(&old_notify->kref, old_notify->release);
4161 ++ }
4162 + kref_put(&old_notify->kref, old_notify->release);
4163 + }
4164 +
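schedule_work() returns false when the item was already pending, and cancel_work_sync() returns true when it cancelled a pending item; in both cases a reference was taken for a callback invocation that will never run, so the hunks above drop it explicitly. A compact sketch of the balancing rule (kernel context assumed; notify stands for the irq_affinity_notify object):

	kref_get(&notify->kref);			/* ref for the queued work */
	if (!schedule_work(&notify->work))
		kref_put(&notify->kref, notify->release); /* already queued: undo */
	/* ... later, on teardown ... */
	if (cancel_work_sync(&notify->work))
		kref_put(&notify->kref, notify->release); /* the pending run's ref */
	kref_put(&notify->kref, notify->release);	/* our own reference */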
4165 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4166 +index d804efb372e2..5d0575d633d2 100644
4167 +--- a/mm/memcontrol.c
4168 ++++ b/mm/memcontrol.c
4169 +@@ -786,6 +786,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
4170 + rcu_read_unlock();
4171 + }
4172 +
4173 ++void mod_memcg_obj_state(void *p, int idx, int val)
4174 ++{
4175 ++ struct mem_cgroup *memcg;
4176 ++
4177 ++ rcu_read_lock();
4178 ++ memcg = mem_cgroup_from_obj(p);
4179 ++ if (memcg)
4180 ++ mod_memcg_state(memcg, idx, val);
4181 ++ rcu_read_unlock();
4182 ++}
4183 ++
4184 + /**
4185 + * __count_memcg_events - account VM events in a cgroup
4186 + * @memcg: the memory cgroup
4187 +@@ -2778,6 +2789,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
4188 + }
4189 +
4190 + #ifdef CONFIG_MEMCG_KMEM
4191 ++/*
4192 ++ * Returns a pointer to the memory cgroup to which the kernel object is charged.
4193 ++ *
4194 ++ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
4195 ++ * cgroup_mutex, etc.
4196 ++ */
4197 ++struct mem_cgroup *mem_cgroup_from_obj(void *p)
4198 ++{
4199 ++ struct page *page;
4200 ++
4201 ++ if (mem_cgroup_disabled())
4202 ++ return NULL;
4203 ++
4204 ++ page = virt_to_head_page(p);
4205 ++
4206 ++ /*
4207 ++ * Slab pages don't have page->mem_cgroup set because corresponding
4208 ++ * kmem caches can be reparented during the lifetime. That's why
4209 ++ * memcg_from_slab_page() should be used instead.
4210 ++ */
4211 ++ if (PageSlab(page))
4212 ++ return memcg_from_slab_page(page);
4213 ++
4214 ++ /* All other pages use page->mem_cgroup */
4215 ++ return page->mem_cgroup;
4216 ++}
4217 ++
4218 + static int memcg_alloc_cache_id(void)
4219 + {
4220 + int id, size;
4221 +diff --git a/mm/sparse.c b/mm/sparse.c
4222 +index a18ad9390d9f..78bbecd904c3 100644
4223 +--- a/mm/sparse.c
4224 ++++ b/mm/sparse.c
4225 +@@ -789,6 +789,12 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
4226 + ms->usage = NULL;
4227 + }
4228 + memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
4229 ++ /*
4230 ++ * Mark the section invalid so that valid_section()
4231 ++ * returns false. This prevents code from dereferencing
4232 ++ * the ms->usage array.
4233 ++ */
4234 ++ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
4235 + }
4236 +
4237 + if (section_is_early && memmap)
4238 +diff --git a/mm/swapfile.c b/mm/swapfile.c
4239 +index dab43523afdd..891a3ef48651 100644
4240 +--- a/mm/swapfile.c
4241 ++++ b/mm/swapfile.c
4242 +@@ -2892,10 +2892,6 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
4243 + p->bdev = inode->i_sb->s_bdev;
4244 + }
4245 +
4246 +- inode_lock(inode);
4247 +- if (IS_SWAPFILE(inode))
4248 +- return -EBUSY;
4249 +-
4250 + return 0;
4251 + }
4252 +
4253 +@@ -3150,17 +3146,22 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4254 + mapping = swap_file->f_mapping;
4255 + inode = mapping->host;
4256 +
4257 +- /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */
4258 + error = claim_swapfile(p, inode);
4259 + if (unlikely(error))
4260 + goto bad_swap;
4261 +
4262 ++ inode_lock(inode);
4263 ++ if (IS_SWAPFILE(inode)) {
4264 ++ error = -EBUSY;
4265 ++ goto bad_swap_unlock_inode;
4266 ++ }
4267 ++
4268 + /*
4269 + * Read the swap header.
4270 + */
4271 + if (!mapping->a_ops->readpage) {
4272 + error = -EINVAL;
4273 +- goto bad_swap;
4274 ++ goto bad_swap_unlock_inode;
4275 + }
4276 + page = read_mapping_page(mapping, 0, swap_file);
4277 + if (IS_ERR(page)) {
4278 +@@ -3172,14 +3173,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4279 + maxpages = read_swap_header(p, swap_header, inode);
4280 + if (unlikely(!maxpages)) {
4281 + error = -EINVAL;
4282 +- goto bad_swap;
4283 ++ goto bad_swap_unlock_inode;
4284 + }
4285 +
4286 + /* OK, set up the swap map and apply the bad block list */
4287 + swap_map = vzalloc(maxpages);
4288 + if (!swap_map) {
4289 + error = -ENOMEM;
4290 +- goto bad_swap;
4291 ++ goto bad_swap_unlock_inode;
4292 + }
4293 +
4294 + if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
4295 +@@ -3204,7 +3205,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4296 + GFP_KERNEL);
4297 + if (!cluster_info) {
4298 + error = -ENOMEM;
4299 +- goto bad_swap;
4300 ++ goto bad_swap_unlock_inode;
4301 + }
4302 +
4303 + for (ci = 0; ci < nr_cluster; ci++)
4304 +@@ -3213,7 +3214,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4305 + p->percpu_cluster = alloc_percpu(struct percpu_cluster);
4306 + if (!p->percpu_cluster) {
4307 + error = -ENOMEM;
4308 +- goto bad_swap;
4309 ++ goto bad_swap_unlock_inode;
4310 + }
4311 + for_each_possible_cpu(cpu) {
4312 + struct percpu_cluster *cluster;
4313 +@@ -3227,13 +3228,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4314 +
4315 + error = swap_cgroup_swapon(p->type, maxpages);
4316 + if (error)
4317 +- goto bad_swap;
4318 ++ goto bad_swap_unlock_inode;
4319 +
4320 + nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
4321 + cluster_info, maxpages, &span);
4322 + if (unlikely(nr_extents < 0)) {
4323 + error = nr_extents;
4324 +- goto bad_swap;
4325 ++ goto bad_swap_unlock_inode;
4326 + }
4327 + /* frontswap enabled? set up bit-per-page map for frontswap */
4328 + if (IS_ENABLED(CONFIG_FRONTSWAP))
4329 +@@ -3273,7 +3274,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4330 +
4331 + error = init_swap_address_space(p->type, maxpages);
4332 + if (error)
4333 +- goto bad_swap;
4334 ++ goto bad_swap_unlock_inode;
4335 +
4336 + /*
4337 + * Flush any pending IO and dirty mappings before we start using this
4338 +@@ -3283,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4339 + error = inode_drain_writes(inode);
4340 + if (error) {
4341 + inode->i_flags &= ~S_SWAPFILE;
4342 +- goto bad_swap;
4343 ++ goto bad_swap_unlock_inode;
4344 + }
4345 +
4346 + mutex_lock(&swapon_mutex);
4347 +@@ -3308,6 +3309,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4348 +
4349 + error = 0;
4350 + goto out;
4351 ++bad_swap_unlock_inode:
4352 ++ inode_unlock(inode);
4353 + bad_swap:
4354 + free_percpu(p->percpu_cluster);
4355 + p->percpu_cluster = NULL;
4356 +@@ -3315,6 +3318,7 @@ bad_swap:
4357 + set_blocksize(p->bdev, p->old_block_size);
4358 + blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
4359 + }
4360 ++ inode = NULL;
4361 + destroy_swap_extents(p);
4362 + swap_cgroup_swapoff(p->type);
4363 + spin_lock(&swap_lock);
4364 +@@ -3326,13 +3330,8 @@ bad_swap:
4365 + kvfree(frontswap_map);
4366 + if (inced_nr_rotate_swap)
4367 + atomic_dec(&nr_rotate_swap);
4368 +- if (swap_file) {
4369 +- if (inode) {
4370 +- inode_unlock(inode);
4371 +- inode = NULL;
4372 +- }
4373 ++ if (swap_file)
4374 + filp_close(swap_file, NULL);
4375 +- }
4376 + out:
4377 + if (page && !IS_ERR(page)) {
4378 + kunmap(page);
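The swapon() changes above move inode_lock() out of claim_swapfile() and give every later failure a bad_swap_unlock_inode target, so the lock is dropped exactly once on all error paths. A compact userspace sketch of that unwind ladder, where the pthread mutex and the allocation are hypothetical stand-ins for the inode lock and the fallible setup steps:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_swapon(size_t maxpages)
{
    unsigned char *swap_map = NULL;
    int err = 0;

    pthread_mutex_lock(&inode_lock);

    swap_map = calloc(maxpages, 1);
    if (!swap_map) {
        err = -1;
        goto bad_swap_unlock_inode; /* lock held: drop it first */
    }
    /* ...further fallible steps all jump to bad_swap_unlock_inode... */

    pthread_mutex_unlock(&inode_lock);
    return 0;

bad_swap_unlock_inode:
    pthread_mutex_unlock(&inode_lock);
    /* common cleanup (the old bad_swap label) runs with the lock dropped */
    free(swap_map);
    return err;
}

int main(void)
{
    return do_swapon(4096);
}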
4379 +diff --git a/net/Kconfig b/net/Kconfig
4380 +index 3101bfcbdd7a..0b2fecc83452 100644
4381 +--- a/net/Kconfig
4382 ++++ b/net/Kconfig
4383 +@@ -52,6 +52,9 @@ config NET_INGRESS
4384 + config NET_EGRESS
4385 + bool
4386 +
4387 ++config NET_REDIRECT
4388 ++ bool
4389 ++
4390 + config SKB_EXTENSIONS
4391 + bool
4392 +
4393 +diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
4394 +index 77396a098fbe..efea4874743e 100644
4395 +--- a/net/bpfilter/main.c
4396 ++++ b/net/bpfilter/main.c
4397 +@@ -10,7 +10,7 @@
4398 + #include <asm/unistd.h>
4399 + #include "msgfmt.h"
4400 +
4401 +-int debug_fd;
4402 ++FILE *debug_f;
4403 +
4404 + static int handle_get_cmd(struct mbox_request *cmd)
4405 + {
4406 +@@ -35,9 +35,10 @@ static void loop(void)
4407 + struct mbox_reply reply;
4408 + int n;
4409 +
4410 ++ fprintf(debug_f, "testing the buffer\n");
4411 + n = read(0, &req, sizeof(req));
4412 + if (n != sizeof(req)) {
4413 +- dprintf(debug_fd, "invalid request %d\n", n);
4414 ++ fprintf(debug_f, "invalid request %d\n", n);
4415 + return;
4416 + }
4417 +
4418 +@@ -47,7 +48,7 @@ static void loop(void)
4419 +
4420 + n = write(1, &reply, sizeof(reply));
4421 + if (n != sizeof(reply)) {
4422 +- dprintf(debug_fd, "reply failed %d\n", n);
4423 ++ fprintf(debug_f, "reply failed %d\n", n);
4424 + return;
4425 + }
4426 + }
4427 +@@ -55,9 +56,10 @@ static void loop(void)
4428 +
4429 + int main(void)
4430 + {
4431 +- debug_fd = open("/dev/kmsg", 00000002);
4432 +- dprintf(debug_fd, "Started bpfilter\n");
4433 ++ debug_f = fopen("/dev/kmsg", "w");
4434 ++ setvbuf(debug_f, 0, _IOLBF, 0);
4435 ++ fprintf(debug_f, "Started bpfilter\n");
4436 + loop();
4437 +- close(debug_fd);
4438 ++ fclose(debug_f);
4439 + return 0;
4440 + }
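The bpfilter change above swaps a raw fd plus dprintf() for a stdio stream with line buffering, so each message reaches /dev/kmsg as a single write. A standalone sketch of the same setup — logging to stderr here so it runs anywhere; /dev/kmsg is the real target in the patch:

#include <stdio.h>

int main(void)
{
    FILE *debug_f = fopen("/dev/stderr", "w");

    if (!debug_f)
        return 1;
    setvbuf(debug_f, NULL, _IOLBF, 0);  /* flush once per newline */
    fprintf(debug_f, "Started bpfilter\n");
    fclose(debug_f);
    return 0;
}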
4441 +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4442 +index 4e0de14f80bb..2a6e63a8edbe 100644
4443 +--- a/net/ceph/osdmap.c
4444 ++++ b/net/ceph/osdmap.c
4445 +@@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
4446 + }
4447 + EXPORT_SYMBOL(ceph_pg_poolid_by_name);
4448 +
4449 ++u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
4450 ++{
4451 ++ struct ceph_pg_pool_info *pi;
4452 ++
4453 ++ pi = __lookup_pg_pool(&map->pg_pools, id);
4454 ++ return pi ? pi->flags : 0;
4455 ++}
4456 ++EXPORT_SYMBOL(ceph_pg_pool_flags);
4457 ++
4458 + static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
4459 + {
4460 + rb_erase(&pi->node, root);
4461 +diff --git a/net/core/dev.c b/net/core/dev.c
4462 +index db8c229e0f4a..931dfdcbabf1 100644
4463 +--- a/net/core/dev.c
4464 ++++ b/net/core/dev.c
4465 +@@ -4237,7 +4237,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4466 + /* Reinjected packets coming from act_mirred or similar should
4467 + * not get XDP generic processing.
4468 + */
4469 +- if (skb_is_tc_redirected(skb))
4470 ++ if (skb_is_redirected(skb))
4471 + return XDP_PASS;
4472 +
4473 + /* XDP packets must be linear and must have sufficient headroom
4474 +@@ -4786,7 +4786,7 @@ skip_taps:
4475 + goto out;
4476 + }
4477 + #endif
4478 +- skb_reset_tc(skb);
4479 ++ skb_reset_redirect(skb);
4480 + skip_classify:
4481 + if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4482 + goto drop;
4483 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c
4484 +index 48b1e429857c..cb3b565ff5ad 100644
4485 +--- a/net/core/pktgen.c
4486 ++++ b/net/core/pktgen.c
4487 +@@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
4488 + /* skb was 'freed' by stack, so clean a few
4489 + * bits and reuse it
4490 + */
4491 +- skb_reset_tc(skb);
4492 ++ skb_reset_redirect(skb);
4493 + } while (--burst > 0);
4494 + goto out; /* Skips xmit_mode M_START_XMIT */
4495 + } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
4496 +diff --git a/net/core/sock_map.c b/net/core/sock_map.c
4497 +index 405397801bb0..8291568b707f 100644
4498 +--- a/net/core/sock_map.c
4499 ++++ b/net/core/sock_map.c
4500 +@@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map)
4501 + struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
4502 + int i;
4503 +
4504 ++ /* After the sync no updates or deletes will be in-flight, so it
4505 ++ * is safe to walk the map and remove entries without risking a
4506 ++ * race in the EEXIST update case.
4507 ++ */
4508 + synchronize_rcu();
4509 +- raw_spin_lock_bh(&stab->lock);
4510 + for (i = 0; i < stab->map.max_entries; i++) {
4511 + struct sock **psk = &stab->sks[i];
4512 + struct sock *sk;
4513 +@@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map)
4514 + release_sock(sk);
4515 + }
4516 + }
4517 +- raw_spin_unlock_bh(&stab->lock);
4518 +
4519 + /* wait for psock readers accessing its map link */
4520 + synchronize_rcu();
4521 +@@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map)
4522 + struct hlist_node *node;
4523 + int i;
4524 +
4525 ++ /* After the sync no updates or deletes will be in-flight, so it
4526 ++ * is safe to walk the map and remove entries without risking a
4527 ++ * race in the EEXIST update case.
4528 ++ */
4529 + synchronize_rcu();
4530 + for (i = 0; i < htab->buckets_num; i++) {
4531 + bucket = sock_hash_select_bucket(htab, i);
4532 +- raw_spin_lock_bh(&bucket->lock);
4533 + hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
4534 + hlist_del_rcu(&elem->node);
4535 + lock_sock(elem->sk);
4536 +@@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map)
4537 + rcu_read_unlock();
4538 + release_sock(elem->sk);
4539 + }
4540 +- raw_spin_unlock_bh(&bucket->lock);
4541 + }
4542 +
4543 + /* wait for psock readers accessing its map link */
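Both sock_map_free() and sock_hash_free() above drop their bucket spinlocks because the preceding synchronize_rcu() guarantees no update or delete is still in flight. A rough userspace model of that teardown ordering, using liburcu as a stand-in for kernel RCU (this assumes liburcu is installed; link with -lurcu, and the map itself is a bare pointer array for brevity):

#define _LGPL_SOURCE
#include <urcu.h>
#include <stdlib.h>

#define MAX_ENTRIES 64

static void *slots[MAX_ENTRIES];

static void map_free(void)
{
    void *old[MAX_ENTRIES];

    /* After this grace period no updater started earlier can still
     * be mid-flight, so the walk below needs no per-slot lock. */
    synchronize_rcu();

    for (int i = 0; i < MAX_ENTRIES; i++) {
        old[i] = slots[i];
        slots[i] = NULL;        /* unpublish */
    }

    /* Second grace period: wait out readers that may still hold
     * the old pointers, then freeing them is safe. */
    synchronize_rcu();

    for (int i = 0; i < MAX_ENTRIES; i++)
        free(old[i]);
}

int main(void)
{
    slots[0] = malloc(16);
    map_free();
    return 0;
}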
4544 +diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
4545 +index 9e5a883a9f0c..ebe73848d1cf 100644
4546 +--- a/net/dsa/tag_8021q.c
4547 ++++ b/net/dsa/tag_8021q.c
4548 +@@ -299,49 +299,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
4549 + }
4550 + EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
4551 +
4552 +-/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
4553 +- * tag, after tpid and before tci. This is because so far, ETH_HLEN
4554 +- * (DMAC, SMAC, EtherType) bytes were pulled.
4555 +- * There are 2 bytes of VLAN tag left in skb->data, and upper
4556 +- * layers expect the 'real' EtherType to be consumed as well.
4557 +- * Coincidentally, a VLAN header is also of the same size as
4558 +- * the number of bytes that need to be pulled.
4559 +- *
4560 +- * skb_mac_header skb->data
4561 +- * | |
4562 +- * v v
4563 +- * | | | | | | | | | | | | | | | | | | |
4564 +- * +-----------------------+-----------------------+-------+-------+-------+
4565 +- * | Destination MAC | Source MAC | TPID | TCI | EType |
4566 +- * +-----------------------+-----------------------+-------+-------+-------+
4567 +- * ^ | |
4568 +- * |<--VLAN_HLEN-->to <---VLAN_HLEN--->
4569 +- * from |
4570 +- * >>>>>>> v
4571 +- * >>>>>>> | | | | | | | | | | | | | | |
4572 +- * >>>>>>> +-----------------------+-----------------------+-------+
4573 +- * >>>>>>> | Destination MAC | Source MAC | EType |
4574 +- * +-----------------------+-----------------------+-------+
4575 +- * ^ ^
4576 +- * (now part of | |
4577 +- * skb->head) skb_mac_header skb->data
4578 +- */
4579 +-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
4580 +-{
4581 +- u8 *from = skb_mac_header(skb);
4582 +- u8 *dest = from + VLAN_HLEN;
4583 +-
4584 +- memmove(dest, from, ETH_HLEN - VLAN_HLEN);
4585 +- skb_pull(skb, VLAN_HLEN);
4586 +- skb_push(skb, ETH_HLEN);
4587 +- skb_reset_mac_header(skb);
4588 +- skb_reset_mac_len(skb);
4589 +- skb_pull_rcsum(skb, ETH_HLEN);
4590 +-
4591 +- return skb;
4592 +-}
4593 +-EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
4594 +-
4595 + static const struct dsa_device_ops dsa_8021q_netdev_ops = {
4596 + .name = "8021q",
4597 + .proto = DSA_TAG_PROTO_8021Q,
4598 +diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
4599 +index 9c3114179690..9169b63a89e3 100644
4600 +--- a/net/dsa/tag_brcm.c
4601 ++++ b/net/dsa/tag_brcm.c
4602 +@@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
4603 + /* Remove Broadcom tag and update checksum */
4604 + skb_pull_rcsum(skb, BRCM_TAG_LEN);
4605 +
4606 ++ skb->offload_fwd_mark = 1;
4607 ++
4608 + return skb;
4609 + }
4610 + #endif
4611 +diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
4612 +index 63ef2a14c934..12f3ce52e62e 100644
4613 +--- a/net/dsa/tag_sja1105.c
4614 ++++ b/net/dsa/tag_sja1105.c
4615 +@@ -238,14 +238,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4616 + {
4617 + struct sja1105_meta meta = {0};
4618 + int source_port, switch_id;
4619 +- struct vlan_ethhdr *hdr;
4620 ++ struct ethhdr *hdr;
4621 + u16 tpid, vid, tci;
4622 + bool is_link_local;
4623 + bool is_tagged;
4624 + bool is_meta;
4625 +
4626 +- hdr = vlan_eth_hdr(skb);
4627 +- tpid = ntohs(hdr->h_vlan_proto);
4628 ++ hdr = eth_hdr(skb);
4629 ++ tpid = ntohs(hdr->h_proto);
4630 + is_tagged = (tpid == ETH_P_SJA1105);
4631 + is_link_local = sja1105_is_link_local(skb);
4632 + is_meta = sja1105_is_meta_frame(skb);
4633 +@@ -254,7 +254,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4634 +
4635 + if (is_tagged) {
4636 + /* Normal traffic path. */
4637 +- tci = ntohs(hdr->h_vlan_TCI);
4638 ++ skb_push_rcsum(skb, ETH_HLEN);
4639 ++ __skb_vlan_pop(skb, &tci);
4640 ++ skb_pull_rcsum(skb, ETH_HLEN);
4641 ++ skb_reset_network_header(skb);
4642 ++ skb_reset_transport_header(skb);
4643 ++
4644 + vid = tci & VLAN_VID_MASK;
4645 + source_port = dsa_8021q_rx_source_port(vid);
4646 + switch_id = dsa_8021q_rx_switch_id(vid);
4647 +@@ -283,12 +288,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4648 + return NULL;
4649 + }
4650 +
4651 +- /* Delete/overwrite fake VLAN header, DSA expects to not find
4652 +- * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
4653 +- */
4654 +- if (is_tagged)
4655 +- skb = dsa_8021q_remove_header(skb);
4656 +-
4657 + return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
4658 + is_meta);
4659 + }
4660 +diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
4661 +index 27dc65d7de67..002f341f3564 100644
4662 +--- a/net/hsr/hsr_framereg.c
4663 ++++ b/net/hsr/hsr_framereg.c
4664 +@@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
4665 + struct hsr_port *port;
4666 + unsigned long tdiff;
4667 +
4668 +- rcu_read_lock();
4669 + node = find_node_by_addr_A(&hsr->node_db, addr);
4670 +- if (!node) {
4671 +- rcu_read_unlock();
4672 +- return -ENOENT; /* No such entry */
4673 +- }
4674 ++ if (!node)
4675 ++ return -ENOENT;
4676 +
4677 + ether_addr_copy(addr_b, node->macaddress_B);
4678 +
4679 +@@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
4680 + *addr_b_ifindex = -1;
4681 + }
4682 +
4683 +- rcu_read_unlock();
4684 +-
4685 + return 0;
4686 + }
4687 +diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
4688 +index 8dc0547f01d0..fae21c863b1f 100644
4689 +--- a/net/hsr/hsr_netlink.c
4690 ++++ b/net/hsr/hsr_netlink.c
4691 +@@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4692 + if (!na)
4693 + goto invalid;
4694 +
4695 +- hsr_dev = __dev_get_by_index(genl_info_net(info),
4696 +- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4697 ++ rcu_read_lock();
4698 ++ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
4699 ++ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4700 + if (!hsr_dev)
4701 +- goto invalid;
4702 ++ goto rcu_unlock;
4703 + if (!is_hsr_master(hsr_dev))
4704 +- goto invalid;
4705 ++ goto rcu_unlock;
4706 +
4707 + /* Send reply */
4708 +- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4709 ++ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
4710 + if (!skb_out) {
4711 + res = -ENOMEM;
4712 + goto fail;
4713 +@@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4714 + res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
4715 + if (res < 0)
4716 + goto nla_put_failure;
4717 +- rcu_read_lock();
4718 + port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
4719 + if (port)
4720 + res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
4721 + port->dev->ifindex);
4722 +- rcu_read_unlock();
4723 + if (res < 0)
4724 + goto nla_put_failure;
4725 +
4726 +@@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4727 + res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
4728 + if (res < 0)
4729 + goto nla_put_failure;
4730 +- rcu_read_lock();
4731 + port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
4732 + if (port)
4733 + res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
4734 + port->dev->ifindex);
4735 +- rcu_read_unlock();
4736 + if (res < 0)
4737 + goto nla_put_failure;
4738 +
4739 ++ rcu_read_unlock();
4740 ++
4741 + genlmsg_end(skb_out, msg_head);
4742 + genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
4743 +
4744 + return 0;
4745 +
4746 ++rcu_unlock:
4747 ++ rcu_read_unlock();
4748 + invalid:
4749 + netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
4750 + return 0;
4751 +@@ -351,6 +352,7 @@ nla_put_failure:
4752 + /* Fall through */
4753 +
4754 + fail:
4755 ++ rcu_read_unlock();
4756 + return res;
4757 + }
4758 +
4759 +@@ -358,16 +360,14 @@ fail:
4760 + */
4761 + static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4762 + {
4763 +- /* For receiving */
4764 +- struct nlattr *na;
4765 ++ unsigned char addr[ETH_ALEN];
4766 + struct net_device *hsr_dev;
4767 +-
4768 +- /* For sending */
4769 + struct sk_buff *skb_out;
4770 +- void *msg_head;
4771 + struct hsr_priv *hsr;
4772 +- void *pos;
4773 +- unsigned char addr[ETH_ALEN];
4774 ++ bool restart = false;
4775 ++ struct nlattr *na;
4776 ++ void *pos = NULL;
4777 ++ void *msg_head;
4778 + int res;
4779 +
4780 + if (!info)
4781 +@@ -377,15 +377,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4782 + if (!na)
4783 + goto invalid;
4784 +
4785 +- hsr_dev = __dev_get_by_index(genl_info_net(info),
4786 +- nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4787 ++ rcu_read_lock();
4788 ++ hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
4789 ++ nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4790 + if (!hsr_dev)
4791 +- goto invalid;
4792 ++ goto rcu_unlock;
4793 + if (!is_hsr_master(hsr_dev))
4794 +- goto invalid;
4795 ++ goto rcu_unlock;
4796 +
4797 ++restart:
4798 + /* Send reply */
4799 +- skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4800 ++ skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
4801 + if (!skb_out) {
4802 + res = -ENOMEM;
4803 + goto fail;
4804 +@@ -399,18 +401,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4805 + goto nla_put_failure;
4806 + }
4807 +
4808 +- res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
4809 +- if (res < 0)
4810 +- goto nla_put_failure;
4811 ++ if (!restart) {
4812 ++ res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
4813 ++ if (res < 0)
4814 ++ goto nla_put_failure;
4815 ++ }
4816 +
4817 + hsr = netdev_priv(hsr_dev);
4818 +
4819 +- rcu_read_lock();
4820 +- pos = hsr_get_next_node(hsr, NULL, addr);
4821 ++ if (!pos)
4822 ++ pos = hsr_get_next_node(hsr, NULL, addr);
4823 + while (pos) {
4824 + res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
4825 + if (res < 0) {
4826 +- rcu_read_unlock();
4827 ++ if (res == -EMSGSIZE) {
4828 ++ genlmsg_end(skb_out, msg_head);
4829 ++ genlmsg_unicast(genl_info_net(info), skb_out,
4830 ++ info->snd_portid);
4831 ++ restart = true;
4832 ++ goto restart;
4833 ++ }
4834 + goto nla_put_failure;
4835 + }
4836 + pos = hsr_get_next_node(hsr, pos, addr);
4837 +@@ -422,15 +432,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4838 +
4839 + return 0;
4840 +
4841 ++rcu_unlock:
4842 ++ rcu_read_unlock();
4843 + invalid:
4844 + netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
4845 + return 0;
4846 +
4847 + nla_put_failure:
4848 +- kfree_skb(skb_out);
4849 ++ nlmsg_free(skb_out);
4850 + /* Fall through */
4851 +
4852 + fail:
4853 ++ rcu_read_unlock();
4854 + return res;
4855 + }
4856 +
4857 +@@ -457,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = {
4858 + .version = 1,
4859 + .maxattr = HSR_A_MAX,
4860 + .policy = hsr_genl_policy,
4861 ++ .netnsok = true,
4862 + .module = THIS_MODULE,
4863 + .ops = hsr_ops,
4864 + .n_ops = ARRAY_SIZE(hsr_ops),
4865 +diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
4866 +index fbfd0db182b7..a9104d42aafb 100644
4867 +--- a/net/hsr/hsr_slave.c
4868 ++++ b/net/hsr/hsr_slave.c
4869 +@@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
4870 + if (!port)
4871 + return -ENOMEM;
4872 +
4873 ++ port->hsr = hsr;
4874 ++ port->dev = dev;
4875 ++ port->type = type;
4876 ++
4877 + if (type != HSR_PT_MASTER) {
4878 + res = hsr_portdev_setup(dev, port);
4879 + if (res)
4880 + goto fail_dev_setup;
4881 + }
4882 +
4883 +- port->hsr = hsr;
4884 +- port->dev = dev;
4885 +- port->type = type;
4886 +-
4887 + list_add_tail_rcu(&port->port_list, &hsr->ports);
4888 + synchronize_rcu();
4889 +
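The hsr_add_port() reordering above is the classic publish rule: finish initializing the port before hsr_portdev_setup() exposes it to the receive path. A tiny C11 model of the same discipline, with a release store standing in for the RCU publication (names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct hsr_port {
    int type;
    void *dev;
};

static _Atomic(struct hsr_port *) port_list;    /* stand-in for the RCU list */

static int hsr_add_port(int type, void *dev)
{
    struct hsr_port *port = malloc(sizeof(*port));

    if (!port)
        return -1;
    port->type = type;      /* fully initialize first... */
    port->dev = dev;
    /* ...then publish; release ordering ensures readers never
     * observe a half-built port (the bug the hunk fixes). */
    atomic_store_explicit(&port_list, port, memory_order_release);
    return 0;
}

int main(void)
{
    return hsr_add_port(0, NULL);
}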
4890 +diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
4891 +index 03381f3e12ba..a926de2e42b5 100644
4892 +--- a/net/ipv4/Kconfig
4893 ++++ b/net/ipv4/Kconfig
4894 +@@ -303,6 +303,7 @@ config SYN_COOKIES
4895 +
4896 + config NET_IPVTI
4897 + tristate "Virtual (secure) IP: tunneling"
4898 ++ depends on IPV6 || IPV6=n
4899 + select INET_TUNNEL
4900 + select NET_IP_TUNNEL
4901 + select XFRM
4902 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4903 +index 71c78d223dfd..48bf3b9be475 100644
4904 +--- a/net/ipv4/fib_frontend.c
4905 ++++ b/net/ipv4/fib_frontend.c
4906 +@@ -1007,7 +1007,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
4907 + return -ENOENT;
4908 + }
4909 +
4910 ++ rcu_read_lock();
4911 + err = fib_table_dump(tb, skb, cb, &filter);
4912 ++ rcu_read_unlock();
4913 + return skb->len ? : err;
4914 + }
4915 +
4916 +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4917 +index 10636fb6093e..85ba1453ba5c 100644
4918 +--- a/net/ipv4/ip_gre.c
4919 ++++ b/net/ipv4/ip_gre.c
4920 +@@ -1149,6 +1149,24 @@ static int ipgre_netlink_parms(struct net_device *dev,
4921 + if (data[IFLA_GRE_FWMARK])
4922 + *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
4923 +
4924 ++ return 0;
4925 ++}
4926 ++
4927 ++static int erspan_netlink_parms(struct net_device *dev,
4928 ++ struct nlattr *data[],
4929 ++ struct nlattr *tb[],
4930 ++ struct ip_tunnel_parm *parms,
4931 ++ __u32 *fwmark)
4932 ++{
4933 ++ struct ip_tunnel *t = netdev_priv(dev);
4934 ++ int err;
4935 ++
4936 ++ err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
4937 ++ if (err)
4938 ++ return err;
4939 ++ if (!data)
4940 ++ return 0;
4941 ++
4942 + if (data[IFLA_GRE_ERSPAN_VER]) {
4943 + t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
4944 +
4945 +@@ -1272,45 +1290,70 @@ static void ipgre_tap_setup(struct net_device *dev)
4946 + ip_tunnel_setup(dev, gre_tap_net_id);
4947 + }
4948 +
4949 +-static int ipgre_newlink(struct net *src_net, struct net_device *dev,
4950 +- struct nlattr *tb[], struct nlattr *data[],
4951 +- struct netlink_ext_ack *extack)
4952 ++static int
4953 ++ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
4954 + {
4955 +- struct ip_tunnel_parm p;
4956 + struct ip_tunnel_encap ipencap;
4957 +- __u32 fwmark = 0;
4958 +- int err;
4959 +
4960 + if (ipgre_netlink_encap_parms(data, &ipencap)) {
4961 + struct ip_tunnel *t = netdev_priv(dev);
4962 +- err = ip_tunnel_encap_setup(t, &ipencap);
4963 ++ int err = ip_tunnel_encap_setup(t, &ipencap);
4964 +
4965 + if (err < 0)
4966 + return err;
4967 + }
4968 +
4969 ++ return 0;
4970 ++}
4971 ++
4972 ++static int ipgre_newlink(struct net *src_net, struct net_device *dev,
4973 ++ struct nlattr *tb[], struct nlattr *data[],
4974 ++ struct netlink_ext_ack *extack)
4975 ++{
4976 ++ struct ip_tunnel_parm p;
4977 ++ __u32 fwmark = 0;
4978 ++ int err;
4979 ++
4980 ++ err = ipgre_newlink_encap_setup(dev, data);
4981 ++ if (err)
4982 ++ return err;
4983 ++
4984 + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
4985 + if (err < 0)
4986 + return err;
4987 + return ip_tunnel_newlink(dev, tb, &p, fwmark);
4988 + }
4989 +
4990 ++static int erspan_newlink(struct net *src_net, struct net_device *dev,
4991 ++ struct nlattr *tb[], struct nlattr *data[],
4992 ++ struct netlink_ext_ack *extack)
4993 ++{
4994 ++ struct ip_tunnel_parm p;
4995 ++ __u32 fwmark = 0;
4996 ++ int err;
4997 ++
4998 ++ err = ipgre_newlink_encap_setup(dev, data);
4999 ++ if (err)
5000 ++ return err;
5001 ++
5002 ++ err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
5003 ++ if (err)
5004 ++ return err;
5005 ++ return ip_tunnel_newlink(dev, tb, &p, fwmark);
5006 ++}
5007 ++
5008 + static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
5009 + struct nlattr *data[],
5010 + struct netlink_ext_ack *extack)
5011 + {
5012 + struct ip_tunnel *t = netdev_priv(dev);
5013 +- struct ip_tunnel_encap ipencap;
5014 + __u32 fwmark = t->fwmark;
5015 + struct ip_tunnel_parm p;
5016 + int err;
5017 +
5018 +- if (ipgre_netlink_encap_parms(data, &ipencap)) {
5019 +- err = ip_tunnel_encap_setup(t, &ipencap);
5020 +-
5021 +- if (err < 0)
5022 +- return err;
5023 +- }
5024 ++ err = ipgre_newlink_encap_setup(dev, data);
5025 ++ if (err)
5026 ++ return err;
5027 +
5028 + err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
5029 + if (err < 0)
5030 +@@ -1323,8 +1366,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
5031 + t->parms.i_flags = p.i_flags;
5032 + t->parms.o_flags = p.o_flags;
5033 +
5034 +- if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
5035 +- ipgre_link_update(dev, !tb[IFLA_MTU]);
5036 ++ ipgre_link_update(dev, !tb[IFLA_MTU]);
5037 ++
5038 ++ return 0;
5039 ++}
5040 ++
5041 ++static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
5042 ++ struct nlattr *data[],
5043 ++ struct netlink_ext_ack *extack)
5044 ++{
5045 ++ struct ip_tunnel *t = netdev_priv(dev);
5046 ++ __u32 fwmark = t->fwmark;
5047 ++ struct ip_tunnel_parm p;
5048 ++ int err;
5049 ++
5050 ++ err = ipgre_newlink_encap_setup(dev, data);
5051 ++ if (err)
5052 ++ return err;
5053 ++
5054 ++ err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
5055 ++ if (err < 0)
5056 ++ return err;
5057 ++
5058 ++ err = ip_tunnel_changelink(dev, tb, &p, fwmark);
5059 ++ if (err < 0)
5060 ++ return err;
5061 ++
5062 ++ t->parms.i_flags = p.i_flags;
5063 ++ t->parms.o_flags = p.o_flags;
5064 +
5065 + return 0;
5066 + }
5067 +@@ -1515,8 +1584,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
5068 + .priv_size = sizeof(struct ip_tunnel),
5069 + .setup = erspan_setup,
5070 + .validate = erspan_validate,
5071 +- .newlink = ipgre_newlink,
5072 +- .changelink = ipgre_changelink,
5073 ++ .newlink = erspan_newlink,
5074 ++ .changelink = erspan_changelink,
5075 + .dellink = ip_tunnel_dellink,
5076 + .get_size = ipgre_get_size,
5077 + .fill_info = ipgre_fill_info,
5078 +diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5079 +index 79eef5db336a..8ecaf0f26973 100644
5080 +--- a/net/ipv4/ip_vti.c
5081 ++++ b/net/ipv4/ip_vti.c
5082 +@@ -187,17 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
5083 + int mtu;
5084 +
5085 + if (!dst) {
5086 +- struct rtable *rt;
5087 +-
5088 +- fl->u.ip4.flowi4_oif = dev->ifindex;
5089 +- fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5090 +- rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5091 +- if (IS_ERR(rt)) {
5092 ++ switch (skb->protocol) {
5093 ++ case htons(ETH_P_IP): {
5094 ++ struct rtable *rt;
5095 ++
5096 ++ fl->u.ip4.flowi4_oif = dev->ifindex;
5097 ++ fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5098 ++ rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5099 ++ if (IS_ERR(rt)) {
5100 ++ dev->stats.tx_carrier_errors++;
5101 ++ goto tx_error_icmp;
5102 ++ }
5103 ++ dst = &rt->dst;
5104 ++ skb_dst_set(skb, dst);
5105 ++ break;
5106 ++ }
5107 ++#if IS_ENABLED(CONFIG_IPV6)
5108 ++ case htons(ETH_P_IPV6):
5109 ++ fl->u.ip6.flowi6_oif = dev->ifindex;
5110 ++ fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5111 ++ dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5112 ++ if (dst->error) {
5113 ++ dst_release(dst);
5114 ++ dst = NULL;
5115 ++ dev->stats.tx_carrier_errors++;
5116 ++ goto tx_error_icmp;
5117 ++ }
5118 ++ skb_dst_set(skb, dst);
5119 ++ break;
5120 ++#endif
5121 ++ default:
5122 + dev->stats.tx_carrier_errors++;
5123 + goto tx_error_icmp;
5124 + }
5125 +- dst = &rt->dst;
5126 +- skb_dst_set(skb, dst);
5127 + }
5128 +
5129 + dst_hold(dst);
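The vti_xmit() rework above selects the route lookup by the packet's inner protocol instead of assuming IPv4; the new `depends on IPV6 || IPV6=n` line in the Kconfig hunk keeps the IPv6 branch linkable. A condensed userspace model of that dispatch, where the returned strings are just labels for the two lookup paths:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

static const char *pick_route(uint16_t skb_protocol)    /* network order */
{
    switch (ntohs(skb_protocol)) {
    case ETH_P_IP:
        return "__ip_route_output_key";     /* v4 lookup */
    case ETH_P_IPV6:
        return "ip6_route_output";          /* v6 lookup */
    default:
        return NULL;                        /* tx_error_icmp path */
    }
}

int main(void)
{
    printf("%s\n", pick_route(htons(ETH_P_IPV6)));
    return 0;
}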
5130 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5131 +index deb466fc3d1f..e378ff17f8c6 100644
5132 +--- a/net/ipv4/tcp.c
5133 ++++ b/net/ipv4/tcp.c
5134 +@@ -2943,8 +2943,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
5135 + err = -EPERM;
5136 + else if (tp->repair_queue == TCP_SEND_QUEUE)
5137 + WRITE_ONCE(tp->write_seq, val);
5138 +- else if (tp->repair_queue == TCP_RECV_QUEUE)
5139 ++ else if (tp->repair_queue == TCP_RECV_QUEUE) {
5140 + WRITE_ONCE(tp->rcv_nxt, val);
5141 ++ WRITE_ONCE(tp->copied_seq, val);
5142 ++ }
5143 + else
5144 + err = -EINVAL;
5145 + break;
5146 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5147 +index 660b24fe041e..c8d03c1b4c6b 100644
5148 +--- a/net/ipv4/tcp_output.c
5149 ++++ b/net/ipv4/tcp_output.c
5150 +@@ -1048,6 +1048,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
5151 +
5152 + if (unlikely(!skb))
5153 + return -ENOBUFS;
5154 ++ /* retransmit skbs might have a non-zero value in skb->dev
5155 ++ * because skb->dev is aliased with skb->rbnode.rb_left
5156 ++ */
5157 ++ skb->dev = NULL;
5158 + }
5159 +
5160 + inet = inet_sk(sk);
5161 +@@ -2976,8 +2980,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
5162 +
5163 + tcp_skb_tsorted_save(skb) {
5164 + nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
5165 +- err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
5166 +- -ENOBUFS;
5167 ++ if (nskb) {
5168 ++ nskb->dev = NULL;
5169 ++ err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
5170 ++ } else {
5171 ++ err = -ENOBUFS;
5172 ++ }
5173 + } tcp_skb_tsorted_restore(skb);
5174 +
5175 + if (!err) {
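The tcp_output.c hunks above null skb->dev on (re)transmit because, as the new comment notes, the field shares storage with the rb-tree linkage used while the skb sits in the retransmit queue. A minimal model of that union-aliasing hazard (the struct layout is a toy, not the real sk_buff):

#include <stdio.h>

struct rb_node {
    struct rb_node *rb_left;
    struct rb_node *rb_right;
};

struct fake_skb {
    union {
        struct rb_node rbnode;  /* while queued for retransmit */
        void *dev;              /* while owned by the device layer */
    };
};

int main(void)
{
    struct fake_skb skb = { .rbnode = { (struct rb_node *)0x1, NULL } };

    /* After unlinking from the tree, dev still aliases rb_left: */
    printf("stale dev = %p\n", skb.dev);
    skb.dev = NULL;     /* the fix: sanitize before handing to a driver */
    return 0;
}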
5176 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5177 +index 524006aa0d78..cc6180e08a4f 100644
5178 +--- a/net/ipv6/ip6_vti.c
5179 ++++ b/net/ipv6/ip6_vti.c
5180 +@@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb)
5181 +
5182 + if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
5183 + rcu_read_unlock();
5184 +- return 0;
5185 ++ goto discard;
5186 + }
5187 +
5188 + ipv6h = ipv6_hdr(skb);
5189 +@@ -450,15 +450,33 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5190 + int mtu;
5191 +
5192 + if (!dst) {
5193 +- fl->u.ip6.flowi6_oif = dev->ifindex;
5194 +- fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5195 +- dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5196 +- if (dst->error) {
5197 +- dst_release(dst);
5198 +- dst = NULL;
5199 ++ switch (skb->protocol) {
5200 ++ case htons(ETH_P_IP): {
5201 ++ struct rtable *rt;
5202 ++
5203 ++ fl->u.ip4.flowi4_oif = dev->ifindex;
5204 ++ fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5205 ++ rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5206 ++ if (IS_ERR(rt))
5207 ++ goto tx_err_link_failure;
5208 ++ dst = &rt->dst;
5209 ++ skb_dst_set(skb, dst);
5210 ++ break;
5211 ++ }
5212 ++ case htons(ETH_P_IPV6):
5213 ++ fl->u.ip6.flowi6_oif = dev->ifindex;
5214 ++ fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5215 ++ dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5216 ++ if (dst->error) {
5217 ++ dst_release(dst);
5218 ++ dst = NULL;
5219 ++ goto tx_err_link_failure;
5220 ++ }
5221 ++ skb_dst_set(skb, dst);
5222 ++ break;
5223 ++ default:
5224 + goto tx_err_link_failure;
5225 + }
5226 +- skb_dst_set(skb, dst);
5227 + }
5228 +
5229 + dst_hold(dst);
5230 +diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
5231 +index c8ad20c28c43..70ea4cc126d1 100644
5232 +--- a/net/mac80211/debugfs_sta.c
5233 ++++ b/net/mac80211/debugfs_sta.c
5234 +@@ -5,7 +5,7 @@
5235 + * Copyright 2007 Johannes Berg <johannes@××××××××××××.net>
5236 + * Copyright 2013-2014 Intel Mobile Communications GmbH
5237 + * Copyright(c) 2016 Intel Deutschland GmbH
5238 +- * Copyright (C) 2018 - 2019 Intel Corporation
5239 ++ * Copyright (C) 2018 - 2020 Intel Corporation
5240 + */
5241 +
5242 + #include <linux/debugfs.h>
5243 +@@ -78,6 +78,7 @@ static const char * const sta_flag_names[] = {
5244 + FLAG(MPSP_OWNER),
5245 + FLAG(MPSP_RECIPIENT),
5246 + FLAG(PS_DELIVER),
5247 ++ FLAG(USES_ENCRYPTION),
5248 + #undef FLAG
5249 + };
5250 +
5251 +diff --git a/net/mac80211/key.c b/net/mac80211/key.c
5252 +index 0f889b919b06..efc1acc6543c 100644
5253 +--- a/net/mac80211/key.c
5254 ++++ b/net/mac80211/key.c
5255 +@@ -6,7 +6,7 @@
5256 + * Copyright 2007-2008 Johannes Berg <johannes@××××××××××××.net>
5257 + * Copyright 2013-2014 Intel Mobile Communications GmbH
5258 + * Copyright 2015-2017 Intel Deutschland GmbH
5259 +- * Copyright 2018-2019 Intel Corporation
5260 ++ * Copyright 2018-2020 Intel Corporation
5261 + */
5262 +
5263 + #include <linux/if_ether.h>
5264 +@@ -262,22 +262,29 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
5265 + sta ? sta->sta.addr : bcast_addr, ret);
5266 + }
5267 +
5268 +-int ieee80211_set_tx_key(struct ieee80211_key *key)
5269 ++static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force)
5270 + {
5271 + struct sta_info *sta = key->sta;
5272 + struct ieee80211_local *local = key->local;
5273 +
5274 + assert_key_lock(local);
5275 +
5276 ++ set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
5277 ++
5278 + sta->ptk_idx = key->conf.keyidx;
5279 +
5280 +- if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
5281 ++ if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
5282 + clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
5283 + ieee80211_check_fast_xmit(sta);
5284 +
5285 + return 0;
5286 + }
5287 +
5288 ++int ieee80211_set_tx_key(struct ieee80211_key *key)
5289 ++{
5290 ++ return _ieee80211_set_tx_key(key, false);
5291 ++}
5292 ++
5293 + static void ieee80211_pairwise_rekey(struct ieee80211_key *old,
5294 + struct ieee80211_key *new)
5295 + {
5296 +@@ -441,11 +448,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
5297 + if (pairwise) {
5298 + rcu_assign_pointer(sta->ptk[idx], new);
5299 + if (new &&
5300 +- !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) {
5301 +- sta->ptk_idx = idx;
5302 +- clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
5303 +- ieee80211_check_fast_xmit(sta);
5304 +- }
5305 ++ !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
5306 ++ _ieee80211_set_tx_key(new, true);
5307 + } else {
5308 + rcu_assign_pointer(sta->gtk[idx], new);
5309 + }
5310 +diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
5311 +index d69983370381..38a0383dfbcf 100644
5312 +--- a/net/mac80211/mesh_hwmp.c
5313 ++++ b/net/mac80211/mesh_hwmp.c
5314 +@@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
5315 + }
5316 + }
5317 +
5318 +- if (!(mpath->flags & MESH_PATH_RESOLVING))
5319 ++ if (!(mpath->flags & MESH_PATH_RESOLVING) &&
5320 ++ mesh_path_sel_is_hwmp(sdata))
5321 + mesh_queue_preq(mpath, PREQ_Q_F_START);
5322 +
5323 + if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
5324 +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
5325 +index 8d3a2389b055..21b1422b1b1c 100644
5326 +--- a/net/mac80211/sta_info.c
5327 ++++ b/net/mac80211/sta_info.c
5328 +@@ -4,7 +4,7 @@
5329 + * Copyright 2006-2007 Jiri Benc <jbenc@××××.cz>
5330 + * Copyright 2013-2014 Intel Mobile Communications GmbH
5331 + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
5332 +- * Copyright (C) 2018-2019 Intel Corporation
5333 ++ * Copyright (C) 2018-2020 Intel Corporation
5334 + */
5335 +
5336 + #include <linux/module.h>
5337 +@@ -1032,6 +1032,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
5338 + might_sleep();
5339 + lockdep_assert_held(&local->sta_mtx);
5340 +
5341 ++ while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
5342 ++ ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
5343 ++ WARN_ON_ONCE(ret);
5344 ++ }
5345 ++
5346 + /* now keys can no longer be reached */
5347 + ieee80211_free_sta_keys(local, sta);
5348 +
5349 +diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
5350 +index 369c2dddce52..be1d9dfa760d 100644
5351 +--- a/net/mac80211/sta_info.h
5352 ++++ b/net/mac80211/sta_info.h
5353 +@@ -98,6 +98,7 @@ enum ieee80211_sta_info_flags {
5354 + WLAN_STA_MPSP_OWNER,
5355 + WLAN_STA_MPSP_RECIPIENT,
5356 + WLAN_STA_PS_DELIVER,
5357 ++ WLAN_STA_USES_ENCRYPTION,
5358 +
5359 + NUM_WLAN_STA_FLAGS,
5360 + };
5361 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5362 +index cbd273c0b275..c8fc29f0efcf 100644
5363 +--- a/net/mac80211/tx.c
5364 ++++ b/net/mac80211/tx.c
5365 +@@ -5,7 +5,7 @@
5366 + * Copyright 2006-2007 Jiri Benc <jbenc@××××.cz>
5367 + * Copyright 2007 Johannes Berg <johannes@××××××××××××.net>
5368 + * Copyright 2013-2014 Intel Mobile Communications GmbH
5369 +- * Copyright (C) 2018 Intel Corporation
5370 ++ * Copyright (C) 2018, 2020 Intel Corporation
5371 + *
5372 + * Transmit and frame generation functions.
5373 + */
5374 +@@ -590,10 +590,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
5375 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
5376 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
5377 +
5378 +- if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
5379 ++ if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
5380 + tx->key = NULL;
5381 +- else if (tx->sta &&
5382 +- (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
5383 ++ return TX_CONTINUE;
5384 ++ }
5385 ++
5386 ++ if (tx->sta &&
5387 ++ (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
5388 + tx->key = key;
5389 + else if (ieee80211_is_group_privacy_action(tx->skb) &&
5390 + (key = rcu_dereference(tx->sdata->default_multicast_key)))
5391 +@@ -654,6 +657,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
5392 + if (!skip_hw && tx->key &&
5393 + tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
5394 + info->control.hw_key = &tx->key->conf;
5395 ++ } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
5396 ++ test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
5397 ++ return TX_DROP;
5398 + }
5399 +
5400 + return TX_CONTINUE;
5401 +@@ -5061,6 +5067,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5402 + struct ieee80211_local *local = sdata->local;
5403 + struct sk_buff *skb;
5404 + struct ethhdr *ehdr;
5405 ++ u32 ctrl_flags = 0;
5406 + u32 flags;
5407 +
5408 + /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
5409 +@@ -5070,6 +5077,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5410 + proto != cpu_to_be16(ETH_P_PREAUTH))
5411 + return -EINVAL;
5412 +
5413 ++ if (proto == sdata->control_port_protocol)
5414 ++ ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
5415 ++
5416 + if (unencrypted)
5417 + flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
5418 + else
5419 +@@ -5095,7 +5105,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5420 + skb_reset_mac_header(skb);
5421 +
5422 + local_bh_disable();
5423 +- __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0);
5424 ++ __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags);
5425 + local_bh_enable();
5426 +
5427 + return 0;
5428 +diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
5429 +index b9e7dd6e60ce..e92aa6b7eb80 100644
5430 +--- a/net/netfilter/nf_flow_table_ip.c
5431 ++++ b/net/netfilter/nf_flow_table_ip.c
5432 +@@ -189,6 +189,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
5433 + if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
5434 + return -1;
5435 +
5436 ++ iph = ip_hdr(skb);
5437 + ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
5438 +
5439 + tuple->src_v4.s_addr = iph->saddr;
5440 +@@ -449,6 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
5441 + if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
5442 + return -1;
5443 +
5444 ++ ip6h = ipv6_hdr(skb);
5445 + ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
5446 +
5447 + tuple->src_v6 = ip6h->saddr;
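Both nf_flow_tuple hunks above re-read the header pointer after pskb_may_pull(), which can reallocate the packet data and leave the earlier pointer dangling. A generic sketch of that rule, with realloc() playing the role of the pull:

#include <stdlib.h>
#include <string.h>

struct buf {
    unsigned char *data;
    size_t len;
};

/* May move b->data, invalidating every pointer derived from it. */
static int may_pull(struct buf *b, size_t need)
{
    unsigned char *n;

    if (need <= b->len)
        return 0;
    n = realloc(b->data, need);
    if (!n)
        return -1;
    memset(n + b->len, 0, need - b->len);
    b->data = n;
    b->len = need;
    return 0;
}

int main(void)
{
    struct buf b = { calloc(1, 16), 16 };
    unsigned char *hdr = b.data;    /* like iph = ip_hdr(skb) */

    if (may_pull(&b, 4096) < 0)
        return 1;
    hdr = b.data;   /* must re-derive, exactly as the fix re-reads iph */
    (void)hdr;
    free(b.data);
    return 0;
}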
5448 +diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
5449 +index aba11c2333f3..3087e23297db 100644
5450 +--- a/net/netfilter/nft_fwd_netdev.c
5451 ++++ b/net/netfilter/nft_fwd_netdev.c
5452 +@@ -28,6 +28,9 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
5453 + struct nft_fwd_netdev *priv = nft_expr_priv(expr);
5454 + int oif = regs->data[priv->sreg_dev];
5455 +
5456 ++ /* This is used by ifb only. */
5457 ++ skb_set_redirected(pkt->skb, true);
5458 ++
5459 + nf_fwd_netdev_egress(pkt, oif);
5460 + regs->verdict.code = NF_STOLEN;
5461 + }
5462 +@@ -190,6 +193,13 @@ nla_put_failure:
5463 + return -1;
5464 + }
5465 +
5466 ++static int nft_fwd_validate(const struct nft_ctx *ctx,
5467 ++ const struct nft_expr *expr,
5468 ++ const struct nft_data **data)
5469 ++{
5470 ++ return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
5471 ++}
5472 ++
5473 + static struct nft_expr_type nft_fwd_netdev_type;
5474 + static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
5475 + .type = &nft_fwd_netdev_type,
5476 +@@ -197,6 +207,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
5477 + .eval = nft_fwd_neigh_eval,
5478 + .init = nft_fwd_neigh_init,
5479 + .dump = nft_fwd_neigh_dump,
5480 ++ .validate = nft_fwd_validate,
5481 + };
5482 +
5483 + static const struct nft_expr_ops nft_fwd_netdev_ops = {
5484 +@@ -205,6 +216,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
5485 + .eval = nft_fwd_netdev_eval,
5486 + .init = nft_fwd_netdev_init,
5487 + .dump = nft_fwd_netdev_dump,
5488 ++ .validate = nft_fwd_validate,
5489 + .offload = nft_fwd_netdev_offload,
5490 + };
5491 +
5492 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5493 +index 20edb7c25e22..1d63ab3a878a 100644
5494 +--- a/net/packet/af_packet.c
5495 ++++ b/net/packet/af_packet.c
5496 +@@ -2172,6 +2172,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5497 + struct timespec ts;
5498 + __u32 ts_status;
5499 + bool is_drop_n_account = false;
5500 ++ unsigned int slot_id = 0;
5501 + bool do_vnet = false;
5502 +
5503 + /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
5504 +@@ -2274,6 +2275,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5505 + if (!h.raw)
5506 + goto drop_n_account;
5507 +
5508 ++ if (po->tp_version <= TPACKET_V2) {
5509 ++ slot_id = po->rx_ring.head;
5510 ++ if (test_bit(slot_id, po->rx_ring.rx_owner_map))
5511 ++ goto drop_n_account;
5512 ++ __set_bit(slot_id, po->rx_ring.rx_owner_map);
5513 ++ }
5514 ++
5515 + if (do_vnet &&
5516 + virtio_net_hdr_from_skb(skb, h.raw + macoff -
5517 + sizeof(struct virtio_net_hdr),
5518 +@@ -2379,7 +2387,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5519 + #endif
5520 +
5521 + if (po->tp_version <= TPACKET_V2) {
5522 ++ spin_lock(&sk->sk_receive_queue.lock);
5523 + __packet_set_status(po, h.raw, status);
5524 ++ __clear_bit(slot_id, po->rx_ring.rx_owner_map);
5525 ++ spin_unlock(&sk->sk_receive_queue.lock);
5526 + sk->sk_data_ready(sk);
5527 + } else {
5528 + prb_clear_blk_fill_status(&po->rx_ring);
5529 +@@ -4276,6 +4287,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5530 + {
5531 + struct pgv *pg_vec = NULL;
5532 + struct packet_sock *po = pkt_sk(sk);
5533 ++ unsigned long *rx_owner_map = NULL;
5534 + int was_running, order = 0;
5535 + struct packet_ring_buffer *rb;
5536 + struct sk_buff_head *rb_queue;
5537 +@@ -4361,6 +4373,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5538 + }
5539 + break;
5540 + default:
5541 ++ if (!tx_ring) {
5542 ++ rx_owner_map = bitmap_alloc(req->tp_frame_nr,
5543 ++ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
5544 ++ if (!rx_owner_map)
5545 ++ goto out_free_pg_vec;
5546 ++ }
5547 + break;
5548 + }
5549 + }
5550 +@@ -4390,6 +4408,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5551 + err = 0;
5552 + spin_lock_bh(&rb_queue->lock);
5553 + swap(rb->pg_vec, pg_vec);
5554 ++ if (po->tp_version <= TPACKET_V2)
5555 ++ swap(rb->rx_owner_map, rx_owner_map);
5556 + rb->frame_max = (req->tp_frame_nr - 1);
5557 + rb->head = 0;
5558 + rb->frame_size = req->tp_frame_size;
5559 +@@ -4421,6 +4441,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5560 + }
5561 +
5562 + out_free_pg_vec:
5563 ++ bitmap_free(rx_owner_map);
5564 + if (pg_vec)
5565 + free_pg_vec(pg_vec, order, req->tp_block_nr);
5566 + out:
5567 +diff --git a/net/packet/internal.h b/net/packet/internal.h
5568 +index 82fb2b10f790..907f4cd2a718 100644
5569 +--- a/net/packet/internal.h
5570 ++++ b/net/packet/internal.h
5571 +@@ -70,7 +70,10 @@ struct packet_ring_buffer {
5572 +
5573 + unsigned int __percpu *pending_refcnt;
5574 +
5575 +- struct tpacket_kbdq_core prb_bdqc;
5576 ++ union {
5577 ++ unsigned long *rx_owner_map;
5578 ++ struct tpacket_kbdq_core prb_bdqc;
5579 ++ };
5580 + };
5581 +
5582 + extern struct mutex fanout_mutex;
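The af_packet changes above add a per-slot ownership bitmap (unioned with the TPACKET_V3-only prb_bdqc, so V1/V2 pay no extra space) so a ring slot the kernel has claimed but not yet published cannot be handed out twice. A simplified single-threaded model of the claim/publish protocol — the real code does this under the receive-queue lock:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SLOTS 64

static uint64_t rx_owner_map;   /* bit i set => slot i claimed by kernel */

static bool claim_slot(unsigned int slot)
{
    if (rx_owner_map & (UINT64_C(1) << slot))
        return false;   /* still owned: drop rather than overwrite */
    rx_owner_map |= UINT64_C(1) << slot;
    return true;
}

static void publish_slot(unsigned int slot)
{
    /* set the slot's status word for userspace here, then... */
    rx_owner_map &= ~(UINT64_C(1) << slot);
}

int main(void)
{
    unsigned int head = 0;

    if (claim_slot(head)) {
        /* copy the packet into the slot */
        publish_slot(head);
    }
    printf("map=%llx\n", (unsigned long long)rx_owner_map);
    return 0;
}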
5583 +diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
5584 +index 4a6ca9723a12..a293238fe1e7 100644
5585 +--- a/net/rxrpc/af_rxrpc.c
5586 ++++ b/net/rxrpc/af_rxrpc.c
5587 +@@ -371,44 +371,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
5588 + * rxrpc_kernel_check_life - Check to see whether a call is still alive
5589 + * @sock: The socket the call is on
5590 + * @call: The call to check
5591 +- * @_life: Where to store the life value
5592 + *
5593 +- * Allow a kernel service to find out whether a call is still alive - ie. we're
5594 +- * getting ACKs from the server. Passes back in *_life a number representing
5595 +- * the life state which can be compared to that returned by a previous call and
5596 +- * return true if the call is still alive.
5597 +- *
5598 +- * If the life state stalls, rxrpc_kernel_probe_life() should be called and
5599 +- * then 2RTT waited.
5600 ++ * Allow a kernel service to find out whether a call is still alive -
5601 ++ * ie. whether it has not yet completed.
5602 + */
5603 + bool rxrpc_kernel_check_life(const struct socket *sock,
5604 +- const struct rxrpc_call *call,
5605 +- u32 *_life)
5606 ++ const struct rxrpc_call *call)
5607 + {
5608 +- *_life = call->acks_latest;
5609 + return call->state != RXRPC_CALL_COMPLETE;
5610 + }
5611 + EXPORT_SYMBOL(rxrpc_kernel_check_life);
5612 +
5613 +-/**
5614 +- * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
5615 +- * @sock: The socket the call is on
5616 +- * @call: The call to check
5617 +- *
5618 +- * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
5619 +- * find out whether a call is still alive by pinging it. This should cause the
5620 +- * life state to be bumped in about 2*RTT.
5621 +- *
5622 +- * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
5623 +- */
5624 +-void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
5625 +-{
5626 +- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
5627 +- rxrpc_propose_ack_ping_for_check_life);
5628 +- rxrpc_send_ack_packet(call, true, NULL);
5629 +-}
5630 +-EXPORT_SYMBOL(rxrpc_kernel_probe_life);
5631 +-
5632 + /**
5633 + * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
5634 + * @sock: The socket the call is on
5635 +diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
5636 +index 7d730c438404..394d18857979 100644
5637 +--- a/net/rxrpc/ar-internal.h
5638 ++++ b/net/rxrpc/ar-internal.h
5639 +@@ -675,7 +675,6 @@ struct rxrpc_call {
5640 +
5641 + /* transmission-phase ACK management */
5642 + ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
5643 +- rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
5644 + rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
5645 + rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
5646 + rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
5647 +diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
5648 +index ef10fbf71b15..69e09d69c896 100644
5649 +--- a/net/rxrpc/input.c
5650 ++++ b/net/rxrpc/input.c
5651 +@@ -882,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
5652 + before(prev_pkt, call->ackr_prev_seq))
5653 + goto out;
5654 + call->acks_latest_ts = skb->tstamp;
5655 +- call->acks_latest = sp->hdr.serial;
5656 +
5657 + call->ackr_first_seq = first_soft_ack;
5658 + call->ackr_prev_seq = prev_pkt;
5659 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5660 +index f3232a00970f..0586546c20d7 100644
5661 +--- a/net/sched/act_ct.c
5662 ++++ b/net/sched/act_ct.c
5663 +@@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
5664 + if (goto_ch)
5665 + tcf_chain_put_by_act(goto_ch);
5666 + if (params)
5667 +- kfree_rcu(params, rcu);
5668 ++ call_rcu(&params->rcu, tcf_ct_params_free);
5669 + if (res == ACT_P_CREATED)
5670 + tcf_idr_insert(tn, *a);
5671 +
5672 +diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
5673 +index f0df0d90b8bd..27f624971121 100644
5674 +--- a/net/sched/act_mirred.c
5675 ++++ b/net/sched/act_mirred.c
5676 +@@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
5677 +
5678 + /* mirror is always swallowed */
5679 + if (is_redirect) {
5680 +- skb2->tc_redirected = 1;
5681 +- skb2->tc_from_ingress = skb2->tc_at_ingress;
5682 +- if (skb2->tc_from_ingress)
5683 +- skb2->tstamp = 0;
5684 ++ skb_set_redirected(skb2, skb2->tc_at_ingress);
5685 ++
5686 + /* let the caller reinsert the packet, if possible */
5687 + if (use_reinsert) {
5688 + res->ingress = want_ingress;
5689 +diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
5690 +index 6f8786b06bde..5efa3e7ace15 100644
5691 +--- a/net/sched/cls_route.c
5692 ++++ b/net/sched/cls_route.c
5693 +@@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
5694 + fp = &b->ht[h];
5695 + for (pfp = rtnl_dereference(*fp); pfp;
5696 + fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
5697 +- if (pfp == f) {
5698 +- *fp = f->next;
5699 ++ if (pfp == fold) {
5700 ++ rcu_assign_pointer(*fp, fold->next);
5701 + break;
5702 + }
5703 + }
5704 +diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5705 +index 09b7dc5fe7e0..9904299424a1 100644
5706 +--- a/net/sched/cls_tcindex.c
5707 ++++ b/net/sched/cls_tcindex.c
5708 +@@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
5709 + struct tcindex_data,
5710 + rwork);
5711 +
5712 ++ rtnl_lock();
5713 + kfree(p->perfect);
5714 + kfree(p);
5715 ++ rtnl_unlock();
5716 + }
5717 +
5718 + static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5719 +@@ -357,6 +359,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5720 +
5721 + if (tcindex_alloc_perfect_hash(net, cp) < 0)
5722 + goto errout;
5723 ++ cp->alloc_hash = cp->hash;
5724 + for (i = 0; i < min(cp->hash, p->hash); i++)
5725 + cp->perfect[i].res = p->perfect[i].res;
5726 + balloc = 1;
5727 +diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
5728 +index b2905b03a432..2eaac2ff380f 100644
5729 +--- a/net/sched/sch_cbs.c
5730 ++++ b/net/sched/sch_cbs.c
5731 +@@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
5732 + s64 credits;
5733 + int len;
5734 +
5735 ++ /* The previous packet is still being sent */
5736 ++ if (now < q->last) {
5737 ++ qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
5738 ++ return NULL;
5739 ++ }
5740 + if (q->credits < 0) {
5741 + credits = timediff_to_credits(now - q->last, q->idleslope);
5742 +
5743 +@@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
5744 + credits += q->credits;
5745 +
5746 + q->credits = max_t(s64, credits, q->locredit);
5747 +- q->last = now;
5748 ++ /* Estimate, in ns, when the last byte of the packet leaves the wire */
5749 ++ if (unlikely(atomic64_read(&q->port_rate) == 0))
5750 ++ q->last = now;
5751 ++ else
5752 ++ q->last = now + div64_s64(len * NSEC_PER_SEC,
5753 ++ atomic64_read(&q->port_rate));
5754 +
5755 + return skb;
5756 + }
5757 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5758 +index 321c132747ce..dbb6a14968ef 100644
5759 +--- a/net/wireless/nl80211.c
5760 ++++ b/net/wireless/nl80211.c
5761 +@@ -16407,7 +16407,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
5762 + goto nla_put_failure;
5763 +
5764 + if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) &&
5765 +- nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
5766 ++ nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
5767 + goto nla_put_failure;
5768 +
5769 + if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
5770 +diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
5771 +index 189ef15acbbc..64486ad81341 100644
5772 +--- a/net/xfrm/xfrm_device.c
5773 ++++ b/net/xfrm/xfrm_device.c
5774 +@@ -390,6 +390,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
5775 + return xfrm_dev_feat_change(dev);
5776 +
5777 + case NETDEV_DOWN:
5778 ++ case NETDEV_UNREGISTER:
5779 + return xfrm_dev_down(dev);
5780 + }
5781 + return NOTIFY_DONE;
5782 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5783 +index f2d1e573ea55..264cf05a4eaa 100644
5784 +--- a/net/xfrm/xfrm_policy.c
5785 ++++ b/net/xfrm/xfrm_policy.c
5786 +@@ -431,7 +431,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
5787 +
5788 + static void xfrm_policy_kill(struct xfrm_policy *policy)
5789 + {
5790 ++ write_lock_bh(&policy->lock);
5791 + policy->walk.dead = 1;
5792 ++ write_unlock_bh(&policy->lock);
5793 +
5794 + atomic_inc(&policy->genid);
5795 +
5796 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
5797 +index b88ba45ff1ac..e6cfaa680ef3 100644
5798 +--- a/net/xfrm/xfrm_user.c
5799 ++++ b/net/xfrm/xfrm_user.c
5800 +@@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
5801 + return 0;
5802 +
5803 + uctx = nla_data(rt);
5804 +- if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
5805 ++ if (uctx->len > nla_len(rt) ||
5806 ++ uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
5807 + return -EINVAL;
5808 +
5809 + return 0;
5810 +@@ -2273,6 +2274,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
5811 + xfrm_mark_get(attrs, &mark);
5812 +
5813 + err = verify_newpolicy_info(&ua->policy);
5814 ++ if (err)
5815 ++ goto free_state;
5816 ++ err = verify_sec_ctx_len(attrs);
5817 + if (err)
5818 + goto free_state;
5819 +
5820 +diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
5821 +index 5c6c3fd557d7..b3b7270300de 100644
5822 +--- a/scripts/dtc/dtc-lexer.l
5823 ++++ b/scripts/dtc/dtc-lexer.l
5824 +@@ -23,7 +23,6 @@ LINECOMMENT "//".*\n
5825 + #include "srcpos.h"
5826 + #include "dtc-parser.tab.h"
5827 +
5828 +-YYLTYPE yylloc;
5829 + extern bool treesource_error;
5830 +
5831 + /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
5832 +diff --git a/tools/perf/Makefile b/tools/perf/Makefile
5833 +index 7902a5681fc8..b8fc7d972be9 100644
5834 +--- a/tools/perf/Makefile
5835 ++++ b/tools/perf/Makefile
5836 +@@ -35,7 +35,7 @@ endif
5837 + # Only pass canonical directory names as the output directory:
5838 + #
5839 + ifneq ($(O),)
5840 +- FULL_O := $(shell readlink -f $(O) || echo $(O))
5841 ++ FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O))
5842 + endif
5843 +
5844 + #
5845 +diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
5846 +index b659466ea498..bf50f464234f 100644
5847 +--- a/tools/perf/util/probe-file.c
5848 ++++ b/tools/perf/util/probe-file.c
5849 +@@ -206,6 +206,9 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
5850 + } else
5851 + ret = strlist__add(sl, tev.event);
5852 + clear_probe_trace_event(&tev);
5853 ++ /* Skip if there is a multi-probe event with the same name in the list */
5854 ++ if (ret == -EEXIST)
5855 ++ ret = 0;
5856 + if (ret < 0)
5857 + break;
5858 + }
5859 +diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
5860 +index 9ecea45da4ca..aaf3b24fffa4 100644
5861 +--- a/tools/perf/util/probe-finder.c
5862 ++++ b/tools/perf/util/probe-finder.c
5863 +@@ -615,14 +615,19 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
5864 + return -EINVAL;
5865 + }
5866 +
5867 +- /* Try to get actual symbol name from symtab */
5868 +- symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
5869 ++ if (dwarf_entrypc(sp_die, &eaddr) == 0) {
5870 ++ /* If the DIE has entrypc, use it. */
5871 ++ symbol = dwarf_diename(sp_die);
5872 ++ } else {
5873 ++ /* Try to get actual symbol name and address from symtab */
5874 ++ symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
5875 ++ eaddr = sym.st_value;
5876 ++ }
5877 + if (!symbol) {
5878 + pr_warning("Failed to find symbol at 0x%lx\n",
5879 + (unsigned long)paddr);
5880 + return -ENOENT;
5881 + }
5882 +- eaddr = sym.st_value;
5883 +
5884 + tp->offset = (unsigned long)(paddr - eaddr);
5885 + tp->address = (unsigned long)paddr;
5886 +diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5887 +index 3f893b99b337..555cb338a71a 100644
5888 +--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5889 ++++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5890 +@@ -82,7 +82,7 @@ static struct pci_access *pci_acc;
5891 + static struct pci_dev *amd_fam14h_pci_dev;
5892 + static int nbp1_entered;
5893 +
5894 +-struct timespec start_time;
5895 ++static struct timespec start_time;
5896 + static unsigned long long timediff;
5897 +
5898 + #ifdef DEBUG
5899 +diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5900 +index f634aeb65c5f..7fb4f7a291ad 100644
5901 +--- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5902 ++++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5903 +@@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
5904 +
5905 + static unsigned long long **previous_count;
5906 + static unsigned long long **current_count;
5907 +-struct timespec start_time;
5908 ++static struct timespec start_time;
5909 + static unsigned long long timediff;
5910 +
5911 + static int cpuidle_get_count_percent(unsigned int id, double *percent,
5912 +diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5913 +index d3c3e6e7aa26..3d54fd433626 100644
5914 +--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5915 ++++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5916 +@@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = {
5917 + 0
5918 + };
5919 +
5920 ++int cpu_count;
5921 ++
5922 + static struct cpuidle_monitor *monitors[MONITORS_MAX];
5923 + static unsigned int avail_monitors;
5924 +
5925 +diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5926 +index a2d901d3bfaf..eafef38f1982 100644
5927 +--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5928 ++++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5929 +@@ -25,7 +25,7 @@
5930 + #endif
5931 + #define CSTATE_DESC_LEN 60
5932 +
5933 +-int cpu_count;
5934 ++extern int cpu_count;
5935 +
5936 + /* Hard to define the right names ...: */
5937 + enum power_range_e {
5938 +diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
5939 +index ded7a950dc40..6d2f3a1b2249 100644
5940 +--- a/tools/scripts/Makefile.include
5941 ++++ b/tools/scripts/Makefile.include
5942 +@@ -1,8 +1,8 @@
5943 + # SPDX-License-Identifier: GPL-2.0
5944 + ifneq ($(O),)
5945 + ifeq ($(origin O), command line)
5946 +- dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
5947 +- ABSOLUTE_O := $(shell cd $(O) ; pwd)
5948 ++ dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
5949 ++ ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd)
5950 + OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
5951 + COMMAND_O := O=$(ABSOLUTE_O)
5952 + ifeq ($(objtree),)