
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 30 Jun 2021 14:28:20
Message-Id: 1625063236.934c84f507e96852be315f499fd1910871780bbb.mpagano@gentoo
commit:     934c84f507e96852be315f499fd1910871780bbb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 30 14:27:16 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 30 14:27:16 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=934c84f5

Linux patch 4.9.274

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1273_linux-4.9.274.patch | 2065 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2069 insertions(+)

diff --git a/0000_README b/0000_README
index 84582f1..5d3fb76 100644
--- a/0000_README
+++ b/0000_README
@@ -1135,6 +1135,10 @@ Patch: 1272_linux-4.9.273.patch
 From: http://www.kernel.org
 Desc: Linux 4.9.273
 
+Patch: 1273_linux-4.9.274.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.274
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
32 diff --git a/1273_linux-4.9.274.patch b/1273_linux-4.9.274.patch
33 new file mode 100644
34 index 0000000..e76267d
35 --- /dev/null
36 +++ b/1273_linux-4.9.274.patch
37 @@ -0,0 +1,2065 @@
38 +diff --git a/Makefile b/Makefile
39 +index e43823c3337f3..3002dfee32314 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 273
46 ++SUBLEVEL = 274
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +@@ -718,12 +718,11 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
51 + # See modpost pattern 2
52 + KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
53 + KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
54 +-else
55 ++endif
56 +
57 + # These warnings generated too much noise in a regular build.
58 + # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
59 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
60 +-endif
61 +
62 + KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
63 + ifdef CONFIG_FRAME_POINTER
64 +diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
65 +index 4764742db7b05..627889ea89eff 100644
66 +--- a/arch/arm/kernel/setup.c
67 ++++ b/arch/arm/kernel/setup.c
68 +@@ -547,9 +547,11 @@ void notrace cpu_init(void)
69 + * In Thumb-2, msr with an immediate value is not allowed.
70 + */
71 + #ifdef CONFIG_THUMB2_KERNEL
72 +-#define PLC "r"
73 ++#define PLC_l "l"
74 ++#define PLC_r "r"
75 + #else
76 +-#define PLC "I"
77 ++#define PLC_l "I"
78 ++#define PLC_r "I"
79 + #endif
80 +
81 + /*
82 +@@ -571,15 +573,15 @@ void notrace cpu_init(void)
83 + "msr cpsr_c, %9"
84 + :
85 + : "r" (stk),
86 +- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
87 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
88 + "I" (offsetof(struct stack, irq[0])),
89 +- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
90 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
91 + "I" (offsetof(struct stack, abt[0])),
92 +- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
93 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
94 + "I" (offsetof(struct stack, und[0])),
95 +- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
96 ++ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
97 + "I" (offsetof(struct stack, fiq[0])),
98 +- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
99 ++ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
100 + : "r14");
101 + #endif
102 + }
103 +diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
104 +index 6b6fda65fb3b5..5eeecf83c9e6b 100644
105 +--- a/arch/arm/mach-omap2/board-n8x0.c
106 ++++ b/arch/arm/mach-omap2/board-n8x0.c
107 +@@ -327,6 +327,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
108 +
109 + static void n8x0_mmc_callback(void *data, u8 card_mask)
110 + {
111 ++#ifdef CONFIG_MMC_OMAP
112 + int bit, *openp, index;
113 +
114 + if (board_is_n800()) {
115 +@@ -344,7 +345,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
116 + else
117 + *openp = 0;
118 +
119 +-#ifdef CONFIG_MMC_OMAP
120 + omap_mmc_notify_cover_event(mmc_device, index, *openp);
121 + #else
122 + pr_warn("MMC: notify cover event not available\n");
123 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
124 +index 0770d6d1c37ff..7f95d6ac20110 100644
125 +--- a/arch/arm64/kernel/perf_event.c
126 ++++ b/arch/arm64/kernel/perf_event.c
127 +@@ -748,6 +748,28 @@ static void armv8pmu_disable_event(struct perf_event *event)
128 + raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
129 + }
130 +
131 ++static void armv8pmu_start(struct arm_pmu *cpu_pmu)
132 ++{
133 ++ unsigned long flags;
134 ++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
135 ++
136 ++ raw_spin_lock_irqsave(&events->pmu_lock, flags);
137 ++ /* Enable all counters */
138 ++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
139 ++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
140 ++}
141 ++
142 ++static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
143 ++{
144 ++ unsigned long flags;
145 ++ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
146 ++
147 ++ raw_spin_lock_irqsave(&events->pmu_lock, flags);
148 ++ /* Disable all counters */
149 ++ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
150 ++ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
151 ++}
152 ++
153 + static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
154 + {
155 + u32 pmovsr;
156 +@@ -773,6 +795,11 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
157 + */
158 + regs = get_irq_regs();
159 +
160 ++ /*
161 ++ * Stop the PMU while processing the counter overflows
162 ++ * to prevent skews in group events.
163 ++ */
164 ++ armv8pmu_stop(cpu_pmu);
165 + for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
166 + struct perf_event *event = cpuc->events[idx];
167 + struct hw_perf_event *hwc;
168 +@@ -797,6 +824,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
169 + if (perf_event_overflow(event, &data, regs))
170 + cpu_pmu->disable(event);
171 + }
172 ++ armv8pmu_start(cpu_pmu);
173 +
174 + /*
175 + * Handle the pending perf events.
176 +@@ -810,28 +838,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
177 + return IRQ_HANDLED;
178 + }
179 +
180 +-static void armv8pmu_start(struct arm_pmu *cpu_pmu)
181 +-{
182 +- unsigned long flags;
183 +- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
184 +-
185 +- raw_spin_lock_irqsave(&events->pmu_lock, flags);
186 +- /* Enable all counters */
187 +- armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
188 +- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
189 +-}
190 +-
191 +-static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
192 +-{
193 +- unsigned long flags;
194 +- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
195 +-
196 +- raw_spin_lock_irqsave(&events->pmu_lock, flags);
197 +- /* Disable all counters */
198 +- armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
199 +- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
200 +-}
201 +-
202 + static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
203 + struct perf_event *event)
204 + {
205 +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
206 +index 769831d9fd114..07b0ebd495769 100644
207 +--- a/arch/x86/kernel/fpu/signal.c
208 ++++ b/arch/x86/kernel/fpu/signal.c
209 +@@ -276,15 +276,23 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
210 + return 0;
211 + }
212 +
213 +- if (!access_ok(VERIFY_READ, buf, size))
214 ++ if (!access_ok(VERIFY_READ, buf, size)) {
215 ++ fpu__clear(fpu);
216 + return -EACCES;
217 ++ }
218 +
219 + fpu__activate_curr(fpu);
220 +
221 +- if (!static_cpu_has(X86_FEATURE_FPU))
222 +- return fpregs_soft_set(current, NULL,
223 +- 0, sizeof(struct user_i387_ia32_struct),
224 +- NULL, buf) != 0;
225 ++ if (!static_cpu_has(X86_FEATURE_FPU)) {
226 ++ int ret = fpregs_soft_set(current, NULL, 0,
227 ++ sizeof(struct user_i387_ia32_struct),
228 ++ NULL, buf);
229 ++
230 ++ if (ret)
231 ++ fpu__clear(fpu);
232 ++
233 ++ return ret != 0;
234 ++ }
235 +
236 + if (use_xsave()) {
237 + struct _fpx_sw_bytes fx_sw_user;
238 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
239 +index f5a9bb1231882..d8997dafb876c 100644
240 +--- a/drivers/dma/pl330.c
241 ++++ b/drivers/dma/pl330.c
242 +@@ -2579,13 +2579,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
243 + for (i = 0; i < len / period_len; i++) {
244 + desc = pl330_get_desc(pch);
245 + if (!desc) {
246 ++ unsigned long iflags;
247 ++
248 + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
249 + __func__, __LINE__);
250 +
251 + if (!first)
252 + return NULL;
253 +
254 +- spin_lock_irqsave(&pl330->pool_lock, flags);
255 ++ spin_lock_irqsave(&pl330->pool_lock, iflags);
256 +
257 + while (!list_empty(&first->node)) {
258 + desc = list_entry(first->node.next,
259 +@@ -2595,7 +2597,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
260 +
261 + list_move_tail(&first->node, &pl330->desc_pool);
262 +
263 +- spin_unlock_irqrestore(&pl330->pool_lock, flags);
264 ++ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
265 +
266 + return NULL;
267 + }
268 +diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
269 +index a7761c4025f41..a97c7123d913c 100644
270 +--- a/drivers/dma/qcom/Kconfig
271 ++++ b/drivers/dma/qcom/Kconfig
272 +@@ -9,6 +9,7 @@ config QCOM_BAM_DMA
273 +
274 + config QCOM_HIDMA_MGMT
275 + tristate "Qualcomm Technologies HIDMA Management support"
276 ++ depends on HAS_IOMEM
277 + select DMA_ENGINE
278 + help
279 + Enable support for the Qualcomm Technologies HIDMA Management.
280 +diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
281 +index 68b41daab3a8f..bf7105814ee72 100644
282 +--- a/drivers/dma/ste_dma40.c
283 ++++ b/drivers/dma/ste_dma40.c
284 +@@ -3674,6 +3674,9 @@ static int __init d40_probe(struct platform_device *pdev)
285 +
286 + kfree(base->lcla_pool.base_unaligned);
287 +
288 ++ if (base->lcpa_base)
289 ++ iounmap(base->lcpa_base);
290 ++
291 + if (base->phy_lcpa)
292 + release_mem_region(base->phy_lcpa,
293 + base->lcpa_size);
294 +diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
295 +index 16239b07ce45d..2610919eb709d 100644
296 +--- a/drivers/gpu/drm/radeon/radeon_uvd.c
297 ++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
298 +@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
299 + if (rdev->uvd.vcpu_bo == NULL)
300 + return -EINVAL;
301 +
302 +- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
303 ++ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
304 +
305 + size = radeon_bo_size(rdev->uvd.vcpu_bo);
306 + size -= rdev->uvd_fw->size;
307 +@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
308 + ptr = rdev->uvd.cpu_addr;
309 + ptr += rdev->uvd_fw->size;
310 +
311 +- memset(ptr, 0, size);
312 ++ memset_io((void __iomem *)ptr, 0, size);
313 +
314 + return 0;
315 + }
316 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
317 +index 40b36e59a8676..a056850328ef4 100644
318 +--- a/drivers/hid/hid-core.c
319 ++++ b/drivers/hid/hid-core.c
320 +@@ -1804,6 +1804,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
321 + case BUS_I2C:
322 + bus = "I2C";
323 + break;
324 ++ case BUS_VIRTUAL:
325 ++ bus = "VIRTUAL";
326 ++ break;
327 + default:
328 + bus = "<UNKNOWN>";
329 + }
330 +diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
331 +index a298fbd8db6b9..8ca4c1baeda89 100644
332 +--- a/drivers/hid/hid-gt683r.c
333 ++++ b/drivers/hid/hid-gt683r.c
334 +@@ -64,6 +64,7 @@ static const struct hid_device_id gt683r_led_id[] = {
335 + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
336 + { }
337 + };
338 ++MODULE_DEVICE_TABLE(hid, gt683r_led_id);
339 +
340 + static void gt683r_brightness_set(struct led_classdev *led_cdev,
341 + enum led_brightness brightness)
342 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
343 +index 7001f07ca3996..4ea18f07c65b8 100644
344 +--- a/drivers/hid/hid-sensor-hub.c
345 ++++ b/drivers/hid/hid-sensor-hub.c
346 +@@ -223,16 +223,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
347 + buffer_size = buffer_size / sizeof(__s32);
348 + if (buffer_size) {
349 + for (i = 0; i < buffer_size; ++i) {
350 +- hid_set_field(report->field[field_index], i,
351 +- (__force __s32)cpu_to_le32(*buf32));
352 ++ ret = hid_set_field(report->field[field_index], i,
353 ++ (__force __s32)cpu_to_le32(*buf32));
354 ++ if (ret)
355 ++ goto done_proc;
356 ++
357 + ++buf32;
358 + }
359 + }
360 + if (remaining_bytes) {
361 + value = 0;
362 + memcpy(&value, (u8 *)buf32, remaining_bytes);
363 +- hid_set_field(report->field[field_index], i,
364 +- (__force __s32)cpu_to_le32(value));
365 ++ ret = hid_set_field(report->field[field_index], i,
366 ++ (__force __s32)cpu_to_le32(value));
367 ++ if (ret)
368 ++ goto done_proc;
369 + }
370 + hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
371 + hid_hw_wait(hsdev->hdev);
372 +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
373 +index 7838343eb37c5..b6600329a272d 100644
374 +--- a/drivers/hid/usbhid/hid-core.c
375 ++++ b/drivers/hid/usbhid/hid-core.c
376 +@@ -372,7 +372,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
377 + raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
378 + dir = usbhid->ctrl[usbhid->ctrltail].dir;
379 +
380 +- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
381 ++ len = hid_report_len(report);
382 + if (dir == USB_DIR_OUT) {
383 + usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
384 + usbhid->urbctrl->transfer_buffer_length = len;
385 +diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
386 +index 89d8b41b66680..032e8535e8604 100644
387 +--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
388 ++++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
389 +@@ -89,7 +89,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
390 + }
391 + }
392 +
393 +- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
394 ++ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
395 + if (ret) {
396 + dev_err(&adapter->dev, "failure sending STOP\n");
397 + return -EREMOTEIO;
398 +@@ -159,7 +159,7 @@ static int osif_probe(struct usb_interface *interface,
399 + * Set bus frequency. The frequency is:
400 + * 120,000,000 / ( 16 + 2 * div * 4^prescale).
401 + * Using dev = 52, prescale = 0 give 100KHz */
402 +- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
403 ++ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
404 + NULL, 0);
405 + if (ret) {
406 + dev_err(&interface->dev, "failure sending bit rate");
407 +diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
408 +index 32834dad0b836..1243c2e5a86a2 100644
409 +--- a/drivers/net/caif/caif_serial.c
410 ++++ b/drivers/net/caif/caif_serial.c
411 +@@ -362,6 +362,7 @@ static int ldisc_open(struct tty_struct *tty)
412 + rtnl_lock();
413 + result = register_netdevice(dev);
414 + if (result) {
415 ++ tty_kref_put(tty);
416 + rtnl_unlock();
417 + free_netdev(dev);
418 + return -ENODEV;
419 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
420 +index 9de0f9f5b11ca..59af298f99e06 100644
421 +--- a/drivers/net/ethernet/atheros/alx/main.c
422 ++++ b/drivers/net/ethernet/atheros/alx/main.c
423 +@@ -1653,6 +1653,7 @@ out_free_netdev:
424 + free_netdev(netdev);
425 + out_pci_release:
426 + pci_release_mem_regions(pdev);
427 ++ pci_disable_pcie_error_reporting(pdev);
428 + out_pci_disable:
429 + pci_disable_device(pdev);
430 + return err;
431 +diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
432 +index f7b42483921c5..0ade0c6d81ee3 100644
433 +--- a/drivers/net/ethernet/ec_bhf.c
434 ++++ b/drivers/net/ethernet/ec_bhf.c
435 +@@ -589,10 +589,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
436 + struct ec_bhf_priv *priv = netdev_priv(net_dev);
437 +
438 + unregister_netdev(net_dev);
439 +- free_netdev(net_dev);
440 +
441 + pci_iounmap(dev, priv->dma_io);
442 + pci_iounmap(dev, priv->io);
443 ++
444 ++ free_netdev(net_dev);
445 ++
446 + pci_release_regions(dev);
447 + pci_clear_master(dev);
448 + pci_disable_device(dev);
449 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
450 +index 289560b0f6433..b0b9f77c37406 100644
451 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
452 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
453 +@@ -5998,6 +5998,7 @@ drv_cleanup:
454 + unmap_bars:
455 + be_unmap_pci_bars(adapter);
456 + free_netdev:
457 ++ pci_disable_pcie_error_reporting(pdev);
458 + free_netdev(netdev);
459 + rel_reg:
460 + pci_release_regions(pdev);
461 +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
462 +index 1231816125955..031d4b3a544c0 100644
463 +--- a/drivers/net/ethernet/freescale/fec_ptp.c
464 ++++ b/drivers/net/ethernet/freescale/fec_ptp.c
465 +@@ -586,6 +586,10 @@ void fec_ptp_init(struct platform_device *pdev)
466 + fep->ptp_caps.enable = fec_ptp_enable;
467 +
468 + fep->cycle_speed = clk_get_rate(fep->clk_ptp);
469 ++ if (!fep->cycle_speed) {
470 ++ fep->cycle_speed = NSEC_PER_SEC;
471 ++ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
472 ++ }
473 + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
474 +
475 + spin_lock_init(&fep->tmreg_lock);
476 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
477 +index 40644657b1b74..0b1ee353f4150 100644
478 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
479 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
480 +@@ -9059,10 +9059,6 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
481 + 0, 0, nlflags, filter_mask, NULL);
482 + }
483 +
484 +-/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
485 +- * inner mac plus all inner ethertypes.
486 +- */
487 +-#define I40E_MAX_TUNNEL_HDR_LEN 128
488 + /**
489 + * i40e_features_check - Validate encapsulated packet conforms to limits
490 + * @skb: skb buff
491 +@@ -9073,12 +9069,52 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
492 + struct net_device *dev,
493 + netdev_features_t features)
494 + {
495 +- if (skb->encapsulation &&
496 +- ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
497 +- I40E_MAX_TUNNEL_HDR_LEN))
498 +- return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
499 ++ size_t len;
500 ++
501 ++ /* No point in doing any of this if neither checksum nor GSO are
502 ++ * being requested for this frame. We can rule out both by just
503 ++ * checking for CHECKSUM_PARTIAL
504 ++ */
505 ++ if (skb->ip_summed != CHECKSUM_PARTIAL)
506 ++ return features;
507 ++
508 ++ /* We cannot support GSO if the MSS is going to be less than
509 ++ * 64 bytes. If it is then we need to drop support for GSO.
510 ++ */
511 ++ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
512 ++ features &= ~NETIF_F_GSO_MASK;
513 ++
514 ++ /* MACLEN can support at most 63 words */
515 ++ len = skb_network_header(skb) - skb->data;
516 ++ if (len & ~(63 * 2))
517 ++ goto out_err;
518 ++
519 ++ /* IPLEN and EIPLEN can support at most 127 dwords */
520 ++ len = skb_transport_header(skb) - skb_network_header(skb);
521 ++ if (len & ~(127 * 4))
522 ++ goto out_err;
523 ++
524 ++ if (skb->encapsulation) {
525 ++ /* L4TUNLEN can support 127 words */
526 ++ len = skb_inner_network_header(skb) - skb_transport_header(skb);
527 ++ if (len & ~(127 * 2))
528 ++ goto out_err;
529 ++
530 ++ /* IPLEN can support at most 127 dwords */
531 ++ len = skb_inner_transport_header(skb) -
532 ++ skb_inner_network_header(skb);
533 ++ if (len & ~(127 * 4))
534 ++ goto out_err;
535 ++ }
536 ++
537 ++ /* No need to validate L4LEN as TCP is the only protocol with a
538 ++ * a flexible value and we support all possible values supported
539 ++ * by TCP, which is at most 15 dwords
540 ++ */
541 +
542 + return features;
543 ++out_err:
544 ++ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
545 + }
546 +
547 + static const struct net_device_ops i40e_netdev_ops = {
548 +diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
549 +index 02ec326cb1293..5eeba263b5f8a 100644
550 +--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
551 ++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
552 +@@ -4050,6 +4050,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
553 + dev_err(&pdev->dev,
554 + "invalid sram_size %dB or board span %ldB\n",
555 + mgp->sram_size, mgp->board_span);
556 ++ status = -EINVAL;
557 + goto abort_with_ioremap;
558 + }
559 + memcpy_fromio(mgp->eeprom_strings,
560 +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
561 +index a5ee3d328f3d6..75e25a3fe4a72 100644
562 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
563 ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
564 +@@ -1617,6 +1617,8 @@ err_out_free_netdev:
565 + free_netdev(netdev);
566 +
567 + err_out_free_res:
568 ++ if (NX_IS_REVISION_P3(pdev->revision))
569 ++ pci_disable_pcie_error_reporting(pdev);
570 + pci_release_regions(pdev);
571 +
572 + err_out_disable_pdev:
573 +diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
574 +index 7b6824e560d2c..59e59878a3a71 100644
575 +--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
576 ++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
577 +@@ -1205,9 +1205,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
578 + p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
579 +
580 + p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
581 ++ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
582 ++ sizeof(p_hwfn->p_dcbx_info->set.config.params));
583 + memcpy(&p_hwfn->p_dcbx_info->set.config.params,
584 + &dcbx_info->operational.params,
585 +- sizeof(struct qed_dcbx_admin_params));
586 ++ sizeof(p_hwfn->p_dcbx_info->set.config.params));
587 + p_hwfn->p_dcbx_info->set.config.valid = true;
588 +
589 + memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
590 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
591 +index 0928da21efd04..19dca845042e0 100644
592 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
593 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
594 +@@ -2707,6 +2707,7 @@ err_out_free_hw_res:
595 + kfree(ahw);
596 +
597 + err_out_free_res:
598 ++ pci_disable_pcie_error_reporting(pdev);
599 + pci_release_regions(pdev);
600 +
601 + err_out_disable_pdev:
602 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
603 +index 3521e3a77556d..f321b115719a5 100644
604 +--- a/drivers/net/ethernet/realtek/r8169.c
605 ++++ b/drivers/net/ethernet/realtek/r8169.c
606 +@@ -2338,7 +2338,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
607 + {
608 + switch(stringset) {
609 + case ETH_SS_STATS:
610 +- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
611 ++ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
612 + break;
613 + }
614 + }
615 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
616 +index 0e5b1935af50e..468f02beccee4 100644
617 +--- a/drivers/net/ethernet/renesas/sh_eth.c
618 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
619 +@@ -2117,7 +2117,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
620 + {
621 + switch (stringset) {
622 + case ETH_SS_STATS:
623 +- memcpy(data, *sh_eth_gstrings_stats,
624 ++ memcpy(data, sh_eth_gstrings_stats,
625 + sizeof(sh_eth_gstrings_stats));
626 + break;
627 + }
628 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
629 +index ff3e5ab39bd0e..24fb7a2bba625 100644
630 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
631 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
632 +@@ -91,10 +91,10 @@ enum power_event {
633 + #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
634 +
635 + /* GMAC HW ADDR regs */
636 +-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
637 +- (reg * 8))
638 +-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
639 +- (reg * 8))
640 ++#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
641 ++ 0x00000040 + (reg * 8))
642 ++#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
643 ++ 0x00000044 + (reg * 8))
644 + #define GMAC_MAX_PERFECT_ADDRESSES 1
645 +
646 + #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
647 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
648 +index 545f60877bb7d..9ba36c930ce3b 100644
649 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
650 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
651 +@@ -735,6 +735,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
652 + /* Kick off the transfer */
653 + lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
654 +
655 ++ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
656 ++ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
657 ++ netif_stop_queue(ndev);
658 ++ }
659 ++
660 + return NETDEV_TX_OK;
661 + }
662 +
663 +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
664 +index 088fe5d34f500..76340bc3cf445 100644
665 +--- a/drivers/net/hamradio/mkiss.c
666 ++++ b/drivers/net/hamradio/mkiss.c
667 +@@ -810,6 +810,7 @@ static void mkiss_close(struct tty_struct *tty)
668 + ax->tty = NULL;
669 +
670 + unregister_netdev(ax->dev);
671 ++ free_netdev(ax->dev);
672 + }
673 +
674 + /* Perform I/O control on an active ax25 channel. */
675 +diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
676 +index f7180f8db39e1..9c15e1a1261be 100644
677 +--- a/drivers/net/usb/cdc_eem.c
678 ++++ b/drivers/net/usb/cdc_eem.c
679 +@@ -138,10 +138,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
680 + }
681 +
682 + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
683 ++ dev_kfree_skb_any(skb);
684 + if (!skb2)
685 + return NULL;
686 +
687 +- dev_kfree_skb_any(skb);
688 + skb = skb2;
689 +
690 + done:
691 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
692 +index 297d3f599efda..5a5db2f09f788 100644
693 +--- a/drivers/net/usb/cdc_ncm.c
694 ++++ b/drivers/net/usb/cdc_ncm.c
695 +@@ -1639,7 +1639,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
696 + static const struct driver_info cdc_ncm_info = {
697 + .description = "CDC NCM",
698 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
699 +- | FLAG_LINK_INTR,
700 ++ | FLAG_LINK_INTR | FLAG_ETHER,
701 + .bind = cdc_ncm_bind,
702 + .unbind = cdc_ncm_unbind,
703 + .manage_power = usbnet_manage_power,
704 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
705 +index 6e74965d26a0a..64fdea3328861 100644
706 +--- a/drivers/net/usb/r8152.c
707 ++++ b/drivers/net/usb/r8152.c
708 +@@ -3938,7 +3938,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
709 + {
710 + switch (stringset) {
711 + case ETH_SS_STATS:
712 +- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
713 ++ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
714 + break;
715 + }
716 + }
717 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
718 +index 3a391ae5c4e0d..841d974915929 100644
719 +--- a/drivers/net/usb/smsc75xx.c
720 ++++ b/drivers/net/usb/smsc75xx.c
721 +@@ -1497,7 +1497,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
722 + ret = smsc75xx_wait_ready(dev, 0);
723 + if (ret < 0) {
724 + netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
725 +- goto err;
726 ++ goto free_pdata;
727 + }
728 +
729 + smsc75xx_init_mac_address(dev);
730 +@@ -1506,7 +1506,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
731 + ret = smsc75xx_reset(dev);
732 + if (ret < 0) {
733 + netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
734 +- goto err;
735 ++ goto cancel_work;
736 + }
737 +
738 + dev->net->netdev_ops = &smsc75xx_netdev_ops;
739 +@@ -1516,8 +1516,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
740 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
741 + return 0;
742 +
743 +-err:
744 ++cancel_work:
745 ++ cancel_work_sync(&pdata->set_multicast);
746 ++free_pdata:
747 + kfree(pdata);
748 ++ dev->data[0] = 0;
749 + return ret;
750 + }
751 +
752 +@@ -1528,7 +1531,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
753 + cancel_work_sync(&pdata->set_multicast);
754 + netif_dbg(dev, ifdown, dev->net, "free pdata\n");
755 + kfree(pdata);
756 +- pdata = NULL;
757 + dev->data[0] = 0;
758 + }
759 + }
760 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
761 +index acd89fa9820c4..e09653c73ab4b 100644
762 +--- a/drivers/pci/pci.c
763 ++++ b/drivers/pci/pci.c
764 +@@ -1378,11 +1378,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
765 + int err;
766 + int i, bars = 0;
767 +
768 +- if (atomic_inc_return(&dev->enable_cnt) > 1) {
769 +- pci_update_current_state(dev, dev->current_state);
770 +- return 0; /* already enabled */
771 ++ /*
772 ++ * Power state could be unknown at this point, either due to a fresh
773 ++ * boot or a device removal call. So get the current power state
774 ++ * so that things like MSI message writing will behave as expected
775 ++ * (e.g. if the device really is in D0 at enable time).
776 ++ */
777 ++ if (dev->pm_cap) {
778 ++ u16 pmcsr;
779 ++ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
780 ++ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
781 + }
782 +
783 ++ if (atomic_inc_return(&dev->enable_cnt) > 1)
784 ++ return 0; /* already enabled */
785 ++
786 + bridge = pci_upstream_bridge(dev);
787 + if (bridge)
788 + pci_enable_bridge(bridge);
789 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
790 +index 0ebf7500e171e..096ba11ac1058 100644
791 +--- a/drivers/pci/quirks.c
792 ++++ b/drivers/pci/quirks.c
793 +@@ -3345,6 +3345,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
794 + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
795 + }
796 +
797 ++/*
798 ++ * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
799 ++ * prevented for those affected devices.
800 ++ */
801 ++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
802 ++{
803 ++ if ((dev->device & 0xffc0) == 0x2340)
804 ++ quirk_no_bus_reset(dev);
805 ++}
806 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
807 ++ quirk_nvidia_no_bus_reset);
808 ++
809 + /*
810 + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
811 + * The device will throw a Link Down error on AER-capable systems and
812 +@@ -3358,6 +3370,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
813 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
814 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
815 +
816 ++/*
817 ++ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
818 ++ * automatically disables LTSSM when Secondary Bus Reset is received and
819 ++ * the device stops working. Prevent bus reset for these devices. With
820 ++ * this change, the device can be assigned to VMs with VFIO, but it will
821 ++ * leak state between VMs. Reference
822 ++ * https://e2e.ti.com/support/processors/f/791/t/954382
823 ++ */
824 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
825 ++
826 + static void quirk_no_pm_reset(struct pci_dev *dev)
827 + {
828 + /*
829 +diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
830 +index dc09f10d5d4b8..604cf3385aae2 100644
831 +--- a/drivers/scsi/hosts.c
832 ++++ b/drivers/scsi/hosts.c
833 +@@ -265,12 +265,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
834 +
835 + device_enable_async_suspend(&shost->shost_dev);
836 +
837 ++ get_device(&shost->shost_gendev);
838 + error = device_add(&shost->shost_dev);
839 + if (error)
840 + goto out_del_gendev;
841 +
842 +- get_device(&shost->shost_gendev);
843 +-
844 + if (shost->transportt->host_size) {
845 + shost->shost_data = kzalloc(shost->transportt->host_size,
846 + GFP_KERNEL);
847 +@@ -307,6 +306,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
848 + out_del_dev:
849 + device_del(&shost->shost_dev);
850 + out_del_gendev:
851 ++ /*
852 ++ * Host state is SHOST_RUNNING so we have to explicitly release
853 ++ * ->shost_dev.
854 ++ */
855 ++ put_device(&shost->shost_dev);
856 + device_del(&shost->shost_gendev);
857 + out_destroy_freelist:
858 + device_disable_async_suspend(&shost->shost_gendev);
859 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
860 +index ecd707f74ddcb..6afb65387be6c 100644
861 +--- a/drivers/target/target_core_transport.c
862 ++++ b/drivers/target/target_core_transport.c
863 +@@ -2779,9 +2779,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
864 + __releases(&cmd->t_state_lock)
865 + __acquires(&cmd->t_state_lock)
866 + {
867 +-
868 +- assert_spin_locked(&cmd->t_state_lock);
869 +- WARN_ON_ONCE(!irqs_disabled());
870 ++ lockdep_assert_held(&cmd->t_state_lock);
871 +
872 + if (fabric_stop)
873 + cmd->transport_state |= CMD_T_FABRIC_STOP;
874 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
875 +index 66254500e7a94..b6d6fe4565fdf 100644
876 +--- a/drivers/usb/dwc3/core.c
877 ++++ b/drivers/usb/dwc3/core.c
878 +@@ -1199,8 +1199,8 @@ static int dwc3_remove(struct platform_device *pdev)
879 + */
880 + res->start -= DWC3_GLOBALS_REGS_START;
881 +
882 +- dwc3_debugfs_exit(dwc);
883 + dwc3_core_exit_mode(dwc);
884 ++ dwc3_debugfs_exit(dwc);
885 +
886 + dwc3_core_exit(dwc);
887 + dwc3_ulpi_exit(dwc);
888 +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
889 +index f19e49a5d032b..3d4d350834384 100644
890 +--- a/fs/gfs2/glock.c
891 ++++ b/fs/gfs2/glock.c
892 +@@ -1350,6 +1350,7 @@ __acquires(&lru_lock)
893 + while(!list_empty(list)) {
894 + gl = list_entry(list->next, struct gfs2_glock, gl_lru);
895 + list_del_init(&gl->gl_lru);
896 ++ clear_bit(GLF_LRU, &gl->gl_flags);
897 + if (!spin_trylock(&gl->gl_lockref.lock)) {
898 + add_back_to_lru:
899 + list_add(&gl->gl_lru, &lru_list);
900 +@@ -1396,7 +1397,6 @@ static long gfs2_scan_glock_lru(int nr)
901 + if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
902 + list_move(&gl->gl_lru, &dispose);
903 + atomic_dec(&lru_count);
904 +- clear_bit(GLF_LRU, &gl->gl_flags);
905 + freed++;
906 + continue;
907 + }
908 +diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
909 +index 490303e3d5179..e9903bceb2bf1 100644
910 +--- a/fs/nilfs2/sysfs.c
911 ++++ b/fs/nilfs2/sysfs.c
912 +@@ -1064,6 +1064,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
913 + nilfs_sysfs_delete_superblock_group(nilfs);
914 + nilfs_sysfs_delete_segctor_group(nilfs);
915 + kobject_del(&nilfs->ns_dev_kobj);
916 ++ kobject_put(&nilfs->ns_dev_kobj);
917 + kfree(nilfs->ns_dev_subgroups);
918 + }
919 +
920 +diff --git a/include/linux/hid.h b/include/linux/hid.h
921 +index 41c372573a289..2ed6850356ead 100644
922 +--- a/include/linux/hid.h
923 ++++ b/include/linux/hid.h
924 +@@ -1127,8 +1127,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
925 + */
926 + static inline u32 hid_report_len(struct hid_report *report)
927 + {
928 +- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
929 +- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
930 ++ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
931 + }
932 +
933 + int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
934 +diff --git a/include/linux/swapops.h b/include/linux/swapops.h
935 +index 5c3a5f3e7eec6..c5ff7b217ee6e 100644
936 +--- a/include/linux/swapops.h
937 ++++ b/include/linux/swapops.h
938 +@@ -196,15 +196,6 @@ static inline void num_poisoned_pages_dec(void)
939 + atomic_long_dec(&num_poisoned_pages);
940 + }
941 +
942 +-static inline void num_poisoned_pages_add(long num)
943 +-{
944 +- atomic_long_add(num, &num_poisoned_pages);
945 +-}
946 +-
947 +-static inline void num_poisoned_pages_sub(long num)
948 +-{
949 +- atomic_long_sub(num, &num_poisoned_pages);
950 +-}
951 + #else
952 +
953 + static inline swp_entry_t make_hwpoison_entry(struct page *page)
954 +diff --git a/include/net/sock.h b/include/net/sock.h
955 +index d0e18917d8be8..cf27f3688c39c 100644
956 +--- a/include/net/sock.h
957 ++++ b/include/net/sock.h
958 +@@ -1681,7 +1681,8 @@ static inline u32 net_tx_rndhash(void)
959 +
960 + static inline void sk_set_txhash(struct sock *sk)
961 + {
962 +- sk->sk_txhash = net_tx_rndhash();
963 ++ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
964 ++ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
965 + }
966 +
967 + static inline void sk_rethink_txhash(struct sock *sk)
968 +@@ -1936,9 +1937,12 @@ static inline void sock_poll_wait(struct file *filp,
969 +
970 + static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
971 + {
972 +- if (sk->sk_txhash) {
973 ++ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
974 ++ u32 txhash = READ_ONCE(sk->sk_txhash);
975 ++
976 ++ if (txhash) {
977 + skb->l4_hash = 1;
978 +- skb->hash = sk->sk_txhash;
979 ++ skb->hash = txhash;
980 + }
981 + }
982 +
983 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
984 +index cdf614943aa3d..e8bd8de856de9 100644
985 +--- a/kernel/trace/trace.c
986 ++++ b/kernel/trace/trace.c
987 +@@ -1616,9 +1616,6 @@ struct saved_cmdlines_buffer {
988 + };
989 + static struct saved_cmdlines_buffer *savedcmd;
990 +
991 +-/* temporary disable recording */
992 +-static atomic_t trace_record_cmdline_disabled __read_mostly;
993 +-
994 + static inline char *get_saved_cmdlines(int idx)
995 + {
996 + return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
997 +@@ -1882,9 +1879,6 @@ void trace_find_cmdline(int pid, char comm[])
998 +
999 + void tracing_record_cmdline(struct task_struct *tsk)
1000 + {
1001 +- if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1002 +- return;
1003 +-
1004 + if (!__this_cpu_read(trace_cmdline_save))
1005 + return;
1006 +
1007 +@@ -2828,9 +2822,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1008 + return ERR_PTR(-EBUSY);
1009 + #endif
1010 +
1011 +- if (!iter->snapshot)
1012 +- atomic_inc(&trace_record_cmdline_disabled);
1013 +-
1014 + if (*pos != iter->pos) {
1015 + iter->ent = NULL;
1016 + iter->cpu = 0;
1017 +@@ -2873,9 +2864,6 @@ static void s_stop(struct seq_file *m, void *p)
1018 + return;
1019 + #endif
1020 +
1021 +- if (!iter->snapshot)
1022 +- atomic_dec(&trace_record_cmdline_disabled);
1023 +-
1024 + trace_access_unlock(iter->cpu_file);
1025 + trace_event_read_unlock();
1026 + }
1027 +diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
1028 +index b67ea5eed2a89..b70233a9563f8 100644
1029 +--- a/kernel/trace/trace_clock.c
1030 ++++ b/kernel/trace/trace_clock.c
1031 +@@ -113,9 +113,9 @@ u64 notrace trace_clock_global(void)
1032 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
1033 + now = sched_clock_cpu(this_cpu);
1034 +
1035 +- /* Make sure that now is always greater than prev_time */
1036 ++ /* Make sure that now is always greater than or equal to prev_time */
1037 + if ((s64)(now - prev_time) < 0)
1038 +- now = prev_time + 1;
1039 ++ now = prev_time;
1040 +
1041 + /*
1042 + * If in an NMI context then dont risk lockups and simply return
1043 +@@ -129,7 +129,7 @@ u64 notrace trace_clock_global(void)
1044 + /* Reread prev_time in case it was already updated */
1045 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
1046 + if ((s64)(now - prev_time) < 0)
1047 +- now = prev_time + 1;
1048 ++ now = prev_time;
1049 +
1050 + trace_clock_struct.prev_time = now;
1051 +
1052 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
1053 +index d6524dce43b26..ad156b42d2adf 100644
1054 +--- a/mm/memory-failure.c
1055 ++++ b/mm/memory-failure.c
1056 +@@ -1010,22 +1010,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
1057 + return ret;
1058 + }
1059 +
1060 +-static void set_page_hwpoison_huge_page(struct page *hpage)
1061 +-{
1062 +- int i;
1063 +- int nr_pages = 1 << compound_order(hpage);
1064 +- for (i = 0; i < nr_pages; i++)
1065 +- SetPageHWPoison(hpage + i);
1066 +-}
1067 +-
1068 +-static void clear_page_hwpoison_huge_page(struct page *hpage)
1069 +-{
1070 +- int i;
1071 +- int nr_pages = 1 << compound_order(hpage);
1072 +- for (i = 0; i < nr_pages; i++)
1073 +- ClearPageHWPoison(hpage + i);
1074 +-}
1075 +-
1076 + /**
1077 + * memory_failure - Handle memory failure of a page.
1078 + * @pfn: Page Number of the corrupted page
1079 +@@ -1051,7 +1035,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1080 + struct page *hpage;
1081 + struct page *orig_head;
1082 + int res;
1083 +- unsigned int nr_pages;
1084 + unsigned long page_flags;
1085 +
1086 + if (!sysctl_memory_failure_recovery)
1087 +@@ -1065,24 +1048,23 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1088 +
1089 + p = pfn_to_page(pfn);
1090 + orig_head = hpage = compound_head(p);
1091 ++
1092 ++ /* tmporary check code, to be updated in later patches */
1093 ++ if (PageHuge(p)) {
1094 ++ if (TestSetPageHWPoison(hpage)) {
1095 ++ pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
1096 ++ return 0;
1097 ++ }
1098 ++ goto tmp;
1099 ++ }
1100 + if (TestSetPageHWPoison(p)) {
1101 + pr_err("Memory failure: %#lx: already hardware poisoned\n",
1102 + pfn);
1103 + return 0;
1104 + }
1105 +
1106 +- /*
1107 +- * Currently errors on hugetlbfs pages are measured in hugepage units,
1108 +- * so nr_pages should be 1 << compound_order. OTOH when errors are on
1109 +- * transparent hugepages, they are supposed to be split and error
1110 +- * measurement is done in normal page units. So nr_pages should be one
1111 +- * in this case.
1112 +- */
1113 +- if (PageHuge(p))
1114 +- nr_pages = 1 << compound_order(hpage);
1115 +- else /* normal page or thp */
1116 +- nr_pages = 1;
1117 +- num_poisoned_pages_add(nr_pages);
1118 ++tmp:
1119 ++ num_poisoned_pages_inc();
1120 +
1121 + /*
1122 + * We need/can do nothing about count=0 pages.
1123 +@@ -1110,12 +1092,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1124 + if (PageHWPoison(hpage)) {
1125 + if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1126 + || (p != hpage && TestSetPageHWPoison(hpage))) {
1127 +- num_poisoned_pages_sub(nr_pages);
1128 ++ num_poisoned_pages_dec();
1129 + unlock_page(hpage);
1130 + return 0;
1131 + }
1132 + }
1133 +- set_page_hwpoison_huge_page(hpage);
1134 + res = dequeue_hwpoisoned_huge_page(hpage);
1135 + action_result(pfn, MF_MSG_FREE_HUGE,
1136 + res ? MF_IGNORED : MF_DELAYED);
1137 +@@ -1138,7 +1119,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1138 + pr_err("Memory failure: %#lx: thp split failed\n",
1139 + pfn);
1140 + if (TestClearPageHWPoison(p))
1141 +- num_poisoned_pages_sub(nr_pages);
1142 ++ num_poisoned_pages_dec();
1143 + put_hwpoison_page(p);
1144 + return -EBUSY;
1145 + }
1146 +@@ -1202,14 +1183,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1147 + */
1148 + if (!PageHWPoison(p)) {
1149 + pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1150 +- num_poisoned_pages_sub(nr_pages);
1151 ++ num_poisoned_pages_dec();
1152 + unlock_page(hpage);
1153 + put_hwpoison_page(hpage);
1154 + return 0;
1155 + }
1156 + if (hwpoison_filter(p)) {
1157 + if (TestClearPageHWPoison(p))
1158 +- num_poisoned_pages_sub(nr_pages);
1159 ++ num_poisoned_pages_dec();
1160 + unlock_page(hpage);
1161 + put_hwpoison_page(hpage);
1162 + return 0;
1163 +@@ -1228,14 +1209,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1164 + put_hwpoison_page(hpage);
1165 + return 0;
1166 + }
1167 +- /*
1168 +- * Set PG_hwpoison on all pages in an error hugepage,
1169 +- * because containment is done in hugepage unit for now.
1170 +- * Since we have done TestSetPageHWPoison() for the head page with
1171 +- * page lock held, we can safely set PG_hwpoison bits on tail pages.
1172 +- */
1173 +- if (PageHuge(p))
1174 +- set_page_hwpoison_huge_page(hpage);
1175 +
1176 + /*
1177 + * It's very difficult to mess with pages currently under IO
1178 +@@ -1407,7 +1380,6 @@ int unpoison_memory(unsigned long pfn)
1179 + struct page *page;
1180 + struct page *p;
1181 + int freeit = 0;
1182 +- unsigned int nr_pages;
1183 + static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1184 + DEFAULT_RATELIMIT_BURST);
1185 +
1186 +@@ -1452,8 +1424,6 @@ int unpoison_memory(unsigned long pfn)
1187 + return 0;
1188 + }
1189 +
1190 +- nr_pages = 1 << compound_order(page);
1191 +-
1192 + if (!get_hwpoison_page(p)) {
1193 + /*
1194 + * Since HWPoisoned hugepage should have non-zero refcount,
1195 +@@ -1483,10 +1453,8 @@ int unpoison_memory(unsigned long pfn)
1196 + if (TestClearPageHWPoison(page)) {
1197 + unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1198 + pfn, &unpoison_rs);
1199 +- num_poisoned_pages_sub(nr_pages);
1200 ++ num_poisoned_pages_dec();
1201 + freeit = 1;
1202 +- if (PageHuge(page))
1203 +- clear_page_hwpoison_huge_page(page);
1204 + }
1205 + unlock_page(page);
1206 +
1207 +@@ -1612,14 +1580,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
1208 + ret = -EIO;
1209 + } else {
1210 + /* overcommit hugetlb page will be freed to buddy */
1211 +- if (PageHuge(page)) {
1212 +- set_page_hwpoison_huge_page(hpage);
1213 ++ SetPageHWPoison(page);
1214 ++ if (PageHuge(page))
1215 + dequeue_hwpoisoned_huge_page(hpage);
1216 +- num_poisoned_pages_add(1 << compound_order(hpage));
1217 +- } else {
1218 +- SetPageHWPoison(page);
1219 +- num_poisoned_pages_inc();
1220 +- }
1221 ++ num_poisoned_pages_inc();
1222 + }
1223 + return ret;
1224 + }
1225 +@@ -1728,15 +1692,12 @@ static int soft_offline_in_use_page(struct page *page, int flags)
1226 +
1227 + static void soft_offline_free_page(struct page *page)
1228 + {
1229 +- if (PageHuge(page)) {
1230 +- struct page *hpage = compound_head(page);
1231 ++ struct page *head = compound_head(page);
1232 +
1233 +- set_page_hwpoison_huge_page(hpage);
1234 +- if (!dequeue_hwpoisoned_huge_page(hpage))
1235 +- num_poisoned_pages_add(1 << compound_order(hpage));
1236 +- } else {
1237 +- if (!TestSetPageHWPoison(page))
1238 +- num_poisoned_pages_inc();
1239 ++ if (!TestSetPageHWPoison(head)) {
1240 ++ num_poisoned_pages_inc();
1241 ++ if (PageHuge(head))
1242 ++ dequeue_hwpoisoned_huge_page(head);
1243 + }
1244 + }
1245 +
1246 +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
1247 +index 2b663622bdb45..f85e6a9ee5eac 100644
1248 +--- a/net/batman-adv/bat_iv_ogm.c
1249 ++++ b/net/batman-adv/bat_iv_ogm.c
1250 +@@ -585,8 +585,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
1251 + if (WARN_ON(!forw_packet->if_outgoing))
1252 + return;
1253 +
1254 +- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
1255 ++ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
1256 ++ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
1257 + return;
1258 ++ }
1259 +
1260 + if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
1261 + return;
1262 +diff --git a/net/can/bcm.c b/net/can/bcm.c
1263 +index c99e7c75eeee1..65fa0ac2fb47d 100644
1264 +--- a/net/can/bcm.c
1265 ++++ b/net/can/bcm.c
1266 +@@ -127,7 +127,7 @@ struct bcm_sock {
1267 + struct sock sk;
1268 + int bound;
1269 + int ifindex;
1270 +- struct notifier_block notifier;
1271 ++ struct list_head notifier;
1272 + struct list_head rx_ops;
1273 + struct list_head tx_ops;
1274 + unsigned long dropped_usr_msgs;
1275 +@@ -135,6 +135,10 @@ struct bcm_sock {
1276 + char procname [32]; /* inode number in decimal with \0 */
1277 + };
1278 +
1279 ++static LIST_HEAD(bcm_notifier_list);
1280 ++static DEFINE_SPINLOCK(bcm_notifier_lock);
1281 ++static struct bcm_sock *bcm_busy_notifier;
1282 ++
1283 + static inline struct bcm_sock *bcm_sk(const struct sock *sk)
1284 + {
1285 + return (struct bcm_sock *)sk;
1286 +@@ -405,6 +409,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
1287 + if (!op->count && (op->flags & TX_COUNTEVT)) {
1288 +
1289 + /* create notification to user */
1290 ++ memset(&msg_head, 0, sizeof(msg_head));
1291 + msg_head.opcode = TX_EXPIRED;
1292 + msg_head.flags = op->flags;
1293 + msg_head.count = op->count;
1294 +@@ -452,6 +457,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
1295 + /* this element is not throttled anymore */
1296 + data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
1297 +
1298 ++ memset(&head, 0, sizeof(head));
1299 + head.opcode = RX_CHANGED;
1300 + head.flags = op->flags;
1301 + head.count = op->count;
1302 +@@ -566,6 +572,7 @@ static void bcm_rx_timeout_tsklet(unsigned long data)
1303 + struct bcm_msg_head msg_head;
1304 +
1305 + /* create notification to user */
1306 ++ memset(&msg_head, 0, sizeof(msg_head));
1307 + msg_head.opcode = RX_TIMEOUT;
1308 + msg_head.flags = op->flags;
1309 + msg_head.count = op->count;
1310 +@@ -1436,20 +1443,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1311 + /*
1312 + * notification handler for netdevice status changes
1313 + */
1314 +-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1315 +- void *ptr)
1316 ++static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1317 ++ struct net_device *dev)
1318 + {
1319 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1320 +- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1321 + struct sock *sk = &bo->sk;
1322 + struct bcm_op *op;
1323 + int notify_enodev = 0;
1324 +
1325 + if (!net_eq(dev_net(dev), &init_net))
1326 +- return NOTIFY_DONE;
1327 +-
1328 +- if (dev->type != ARPHRD_CAN)
1329 +- return NOTIFY_DONE;
1330 ++ return;
1331 +
1332 + switch (msg) {
1333 +
1334 +@@ -1484,7 +1486,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1335 + sk->sk_error_report(sk);
1336 + }
1337 + }
1338 ++}
1339 +
1340 ++static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1341 ++ void *ptr)
1342 ++{
1343 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1344 ++
1345 ++ if (dev->type != ARPHRD_CAN)
1346 ++ return NOTIFY_DONE;
1347 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1348 ++ return NOTIFY_DONE;
1349 ++ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1350 ++ return NOTIFY_DONE;
1351 ++
1352 ++ spin_lock(&bcm_notifier_lock);
1353 ++ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1354 ++ spin_unlock(&bcm_notifier_lock);
1355 ++ bcm_notify(bcm_busy_notifier, msg, dev);
1356 ++ spin_lock(&bcm_notifier_lock);
1357 ++ }
1358 ++ bcm_busy_notifier = NULL;
1359 ++ spin_unlock(&bcm_notifier_lock);
1360 + return NOTIFY_DONE;
1361 + }
1362 +
1363 +@@ -1504,9 +1527,9 @@ static int bcm_init(struct sock *sk)
1364 + INIT_LIST_HEAD(&bo->rx_ops);
1365 +
1366 + /* set notifier */
1367 +- bo->notifier.notifier_call = bcm_notifier;
1368 +-
1369 +- register_netdevice_notifier(&bo->notifier);
1370 ++ spin_lock(&bcm_notifier_lock);
1371 ++ list_add_tail(&bo->notifier, &bcm_notifier_list);
1372 ++ spin_unlock(&bcm_notifier_lock);
1373 +
1374 + return 0;
1375 + }
1376 +@@ -1527,7 +1550,14 @@ static int bcm_release(struct socket *sock)
1377 +
1378 + /* remove bcm_ops, timer, rx_unregister(), etc. */
1379 +
1380 +- unregister_netdevice_notifier(&bo->notifier);
1381 ++ spin_lock(&bcm_notifier_lock);
1382 ++ while (bcm_busy_notifier == bo) {
1383 ++ spin_unlock(&bcm_notifier_lock);
1384 ++ schedule_timeout_uninterruptible(1);
1385 ++ spin_lock(&bcm_notifier_lock);
1386 ++ }
1387 ++ list_del(&bo->notifier);
1388 ++ spin_unlock(&bcm_notifier_lock);
1389 +
1390 + lock_sock(sk);
1391 +
1392 +@@ -1713,6 +1743,10 @@ static const struct can_proto bcm_can_proto = {
1393 + .prot = &bcm_proto,
1394 + };
1395 +
1396 ++static struct notifier_block canbcm_notifier = {
1397 ++ .notifier_call = bcm_notifier
1398 ++};
1399 ++
1400 + static int __init bcm_module_init(void)
1401 + {
1402 + int err;
1403 +@@ -1727,6 +1761,8 @@ static int __init bcm_module_init(void)
1404 +
1405 + /* create /proc/net/can-bcm directory */
1406 + proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1407 ++ register_netdevice_notifier(&canbcm_notifier);
1408 ++
1409 + return 0;
1410 + }
1411 +
1412 +@@ -1736,6 +1772,8 @@ static void __exit bcm_module_exit(void)
1413 +
1414 + if (proc_dir)
1415 + remove_proc_entry("can-bcm", init_net.proc_net);
1416 ++
1417 ++ unregister_netdevice_notifier(&canbcm_notifier);
1418 + }
1419 +
1420 + module_init(bcm_module_init);
1421 +diff --git a/net/can/raw.c b/net/can/raw.c
1422 +index 6dc546a06673f..2bb50b1535c2f 100644
1423 +--- a/net/can/raw.c
1424 ++++ b/net/can/raw.c
1425 +@@ -84,7 +84,7 @@ struct raw_sock {
1426 + struct sock sk;
1427 + int bound;
1428 + int ifindex;
1429 +- struct notifier_block notifier;
1430 ++ struct list_head notifier;
1431 + int loopback;
1432 + int recv_own_msgs;
1433 + int fd_frames;
1434 +@@ -96,6 +96,10 @@ struct raw_sock {
1435 + struct uniqframe __percpu *uniq;
1436 + };
1437 +
1438 ++static LIST_HEAD(raw_notifier_list);
1439 ++static DEFINE_SPINLOCK(raw_notifier_lock);
1440 ++static struct raw_sock *raw_busy_notifier;
1441 ++
1442 + /*
1443 + * Return pointer to store the extra msg flags for raw_recvmsg().
1444 + * We use the space of one unsigned int beyond the 'struct sockaddr_can'
1445 +@@ -260,21 +264,16 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
1446 + return err;
1447 + }
1448 +
1449 +-static int raw_notifier(struct notifier_block *nb,
1450 +- unsigned long msg, void *ptr)
1451 ++static void raw_notify(struct raw_sock *ro, unsigned long msg,
1452 ++ struct net_device *dev)
1453 + {
1454 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1455 +- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
1456 + struct sock *sk = &ro->sk;
1457 +
1458 + if (!net_eq(dev_net(dev), &init_net))
1459 +- return NOTIFY_DONE;
1460 +-
1461 +- if (dev->type != ARPHRD_CAN)
1462 +- return NOTIFY_DONE;
1463 ++ return;
1464 +
1465 + if (ro->ifindex != dev->ifindex)
1466 +- return NOTIFY_DONE;
1467 ++ return;
1468 +
1469 + switch (msg) {
1470 +
1471 +@@ -303,7 +302,28 @@ static int raw_notifier(struct notifier_block *nb,
1472 + sk->sk_error_report(sk);
1473 + break;
1474 + }
1475 ++}
1476 ++
1477 ++static int raw_notifier(struct notifier_block *nb, unsigned long msg,
1478 ++ void *ptr)
1479 ++{
1480 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1481 ++
1482 ++ if (dev->type != ARPHRD_CAN)
1483 ++ return NOTIFY_DONE;
1484 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1485 ++ return NOTIFY_DONE;
1486 ++ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
1487 ++ return NOTIFY_DONE;
1488 +
1489 ++ spin_lock(&raw_notifier_lock);
1490 ++ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
1491 ++ spin_unlock(&raw_notifier_lock);
1492 ++ raw_notify(raw_busy_notifier, msg, dev);
1493 ++ spin_lock(&raw_notifier_lock);
1494 ++ }
1495 ++ raw_busy_notifier = NULL;
1496 ++ spin_unlock(&raw_notifier_lock);
1497 + return NOTIFY_DONE;
1498 + }
1499 +
1500 +@@ -332,9 +352,9 @@ static int raw_init(struct sock *sk)
1501 + return -ENOMEM;
1502 +
1503 + /* set notifier */
1504 +- ro->notifier.notifier_call = raw_notifier;
1505 +-
1506 +- register_netdevice_notifier(&ro->notifier);
1507 ++ spin_lock(&raw_notifier_lock);
1508 ++ list_add_tail(&ro->notifier, &raw_notifier_list);
1509 ++ spin_unlock(&raw_notifier_lock);
1510 +
1511 + return 0;
1512 + }
1513 +@@ -349,7 +369,14 @@ static int raw_release(struct socket *sock)
1514 +
1515 + ro = raw_sk(sk);
1516 +
1517 +- unregister_netdevice_notifier(&ro->notifier);
1518 ++ spin_lock(&raw_notifier_lock);
1519 ++ while (raw_busy_notifier == ro) {
1520 ++ spin_unlock(&raw_notifier_lock);
1521 ++ schedule_timeout_uninterruptible(1);
1522 ++ spin_lock(&raw_notifier_lock);
1523 ++ }
1524 ++ list_del(&ro->notifier);
1525 ++ spin_unlock(&raw_notifier_lock);
1526 +
1527 + lock_sock(sk);
1528 +
1529 +@@ -857,6 +884,10 @@ static const struct can_proto raw_can_proto = {
1530 + .prot = &raw_proto,
1531 + };
1532 +
1533 ++static struct notifier_block canraw_notifier = {
1534 ++ .notifier_call = raw_notifier
1535 ++};
1536 ++
1537 + static __init int raw_module_init(void)
1538 + {
1539 + int err;
1540 +@@ -866,6 +897,8 @@ static __init int raw_module_init(void)
1541 + err = can_proto_register(&raw_can_proto);
1542 + if (err < 0)
1543 + printk(KERN_ERR "can: registration of raw protocol failed\n");
1544 ++ else
1545 ++ register_netdevice_notifier(&canraw_notifier);
1546 +
1547 + return err;
1548 + }
1549 +@@ -873,6 +906,7 @@ static __init int raw_module_init(void)
1550 + static __exit void raw_module_exit(void)
1551 + {
1552 + can_proto_unregister(&raw_can_proto);
1553 ++ unregister_netdevice_notifier(&canraw_notifier);
1554 + }
1555 +
1556 + module_init(raw_module_init);
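[Note on the raw.c hunks: they mirror the bcm.c change earlier in this patch. Instead of registering one netdevice notifier per socket, the module registers a single notifier that walks a spinlock-protected list of sockets, and a busy marker pins the entry currently being notified so that raw_release() waits instead of freeing it mid-walk. Below is a minimal userspace model of that walk-with-busy-marker pattern; the names (subscriber, notify_all, subscriber_remove) are invented for the sketch and this is not the kernel implementation.]

/* One global notifier walks a spinlock-protected list, dropping the
 * lock around each callback and pinning the current entry in 'busy'
 * so a concurrent remover waits instead of freeing it.
 * Build: gcc -pthread sketch.c */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

struct subscriber {
        struct subscriber *next;
        int ifindex;
};

static struct subscriber *head;
static struct subscriber *busy;         /* entry currently being notified */
static pthread_spinlock_t list_lock;

static void notify_all(int ifindex)
{
        pthread_spin_lock(&list_lock);
        for (busy = head; busy; busy = busy->next) {
                pthread_spin_unlock(&list_lock);  /* callback may sleep */
                if (busy->ifindex == ifindex)
                        printf("notified %p\n", (void *)busy);
                pthread_spin_lock(&list_lock);    /* relock before ->next */
        }
        busy = NULL;
        pthread_spin_unlock(&list_lock);
}

static void subscriber_remove(struct subscriber *s)
{
        pthread_spin_lock(&list_lock);
        while (busy == s) {             /* analogue of the timeout loop */
                pthread_spin_unlock(&list_lock);
                sched_yield();
                pthread_spin_lock(&list_lock);
        }
        for (struct subscriber **pp = &head; *pp; pp = &(*pp)->next) {
                if (*pp == s) {
                        *pp = s->next;
                        break;
                }
        }
        pthread_spin_unlock(&list_lock);
        free(s);
}

int main(void)
{
        pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
        struct subscriber *a = calloc(1, sizeof(*a));
        a->ifindex = 1;
        head = a;
        notify_all(1);
        subscriber_remove(a);
        return 0;
}

[Dropping the lock around the callback is what lets the callback sleep; the busy marker is what keeps the unlocked entry alive until the walk moves past it.]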
1557 +diff --git a/net/compat.c b/net/compat.c
1558 +index ce851cf4d0f9d..1f08f0e49e071 100644
1559 +--- a/net/compat.c
1560 ++++ b/net/compat.c
1561 +@@ -159,7 +159,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
1562 + if (kcmlen > stackbuf_size)
1563 + kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
1564 + if (kcmsg == NULL)
1565 +- return -ENOBUFS;
1566 ++ return -ENOMEM;
1567 +
1568 + /* Now copy them over neatly. */
1569 + memset(kcmsg, 0, kcmlen);
1570 +diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
1571 +index 9f172906cc889..cc6e7ca0aff5a 100644
1572 +--- a/net/core/fib_rules.c
1573 ++++ b/net/core/fib_rules.c
1574 +@@ -767,7 +767,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
1575 + {
1576 + struct net *net;
1577 + struct sk_buff *skb;
1578 +- int err = -ENOBUFS;
1579 ++ int err = -ENOMEM;
1580 +
1581 + net = ops->fro_net;
1582 + skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
1583 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1584 +index e652e376fb30f..911752e8a3e64 100644
1585 +--- a/net/core/rtnetlink.c
1586 ++++ b/net/core/rtnetlink.c
1587 +@@ -3530,6 +3530,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
1588 + if (err < 0)
1589 + goto errout;
1590 +
1591 ++ /* Notification info is only filled for bridge ports, not the bridge
1592 ++ * device itself. Therefore, a zero notification length is valid and
1593 ++ * should not result in an error.
1594 ++ */
1595 + if (!skb->len)
1596 + goto errout;
1597 +
1598 +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
1599 +index cfc01314958f7..936371340dc37 100644
1600 +--- a/net/ieee802154/nl802154.c
1601 ++++ b/net/ieee802154/nl802154.c
1602 +@@ -1330,19 +1330,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
1603 + nl802154_dev_addr_policy))
1604 + return -EINVAL;
1605 +
1606 +- if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
1607 +- !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
1608 +- !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
1609 +- attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
1610 ++ if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
1611 + return -EINVAL;
1612 +
1613 + addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
1614 + addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
1615 + switch (addr->mode) {
1616 + case NL802154_DEV_ADDR_SHORT:
1617 ++ if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
1618 ++ return -EINVAL;
1619 + addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
1620 + break;
1621 + case NL802154_DEV_ADDR_EXTENDED:
1622 ++ if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
1623 ++ return -EINVAL;
1624 + addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
1625 + break;
1626 + default:
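[Note on the nl802154 hunk: it moves the presence checks for the short and extended address attributes into the switch, so the attribute matching the declared mode is now required; previously a message could declare short mode while carrying only the extended attribute, and the absent short attribute was then read. A small standalone sketch of that validate-per-variant shape, with invented types (dev_addr_msg, parse_dev_addr) and plain pointers standing in for netlink attributes:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum addr_mode { ADDR_SHORT, ADDR_EXTENDED };

struct dev_addr_msg {
        enum addr_mode mode;
        const uint16_t *short_addr;     /* NULL when the attribute is absent */
        const uint64_t *extended_addr;  /* NULL when the attribute is absent */
};

static bool parse_dev_addr(const struct dev_addr_msg *m)
{
        switch (m->mode) {
        case ADDR_SHORT:
                if (!m->short_addr)     /* mode says short: short required */
                        return false;
                printf("short 0x%04x\n", *m->short_addr);
                return true;
        case ADDR_EXTENDED:
                if (!m->extended_addr)
                        return false;
                printf("extended 0x%016llx\n",
                       (unsigned long long)*m->extended_addr);
                return true;
        default:
                return false;
        }
}

int main(void)
{
        uint64_t ext = 0x00124b0001020304ULL;
        /* declares short mode but carries only an extended address:
         * rejected now, where the old up-front check would pass it */
        struct dev_addr_msg bad = { ADDR_SHORT, NULL, &ext };
        printf("accepted: %d\n", parse_dev_addr(&bad));
        return 0;
}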
1627 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
1628 +index 71409928763b0..553cda6f887ad 100644
1629 +--- a/net/ipv4/cipso_ipv4.c
1630 ++++ b/net/ipv4/cipso_ipv4.c
1631 +@@ -486,6 +486,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
1632 + kfree(doi_def->map.std->lvl.local);
1633 + kfree(doi_def->map.std->cat.cipso);
1634 + kfree(doi_def->map.std->cat.local);
1635 ++ kfree(doi_def->map.std);
1636 + break;
1637 + }
1638 + kfree(doi_def);
1639 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1640 +index 02c1736c0b897..f4a827964b685 100644
1641 +--- a/net/ipv4/igmp.c
1642 ++++ b/net/ipv4/igmp.c
1643 +@@ -1782,6 +1782,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1644 + while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1645 + in_dev->mc_list = i->next_rcu;
1646 + in_dev->mc_count--;
1647 ++ ip_mc_clear_src(i);
1648 + ip_ma_put(i);
1649 + }
1650 + }
1651 +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
1652 +index d278b06459ac9..79d8ea98a5b1f 100644
1653 +--- a/net/ipv4/ipconfig.c
1654 ++++ b/net/ipv4/ipconfig.c
1655 +@@ -880,7 +880,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
1656 +
1657 +
1658 + /*
1659 +- * Copy BOOTP-supplied string if not already set.
1660 ++ * Copy BOOTP-supplied string
1661 + */
1662 + static int __init ic_bootp_string(char *dest, char *src, int len, int max)
1663 + {
1664 +@@ -929,12 +929,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
1665 + }
1666 + break;
1667 + case 12: /* Host name */
1668 +- ic_bootp_string(utsname()->nodename, ext+1, *ext,
1669 +- __NEW_UTS_LEN);
1670 +- ic_host_name_set = 1;
1671 ++ if (!ic_host_name_set) {
1672 ++ ic_bootp_string(utsname()->nodename, ext+1, *ext,
1673 ++ __NEW_UTS_LEN);
1674 ++ ic_host_name_set = 1;
1675 ++ }
1676 + break;
1677 + case 15: /* Domain name (DNS) */
1678 +- ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
1679 ++ if (!ic_domain[0])
1680 ++ ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
1681 + break;
1682 + case 17: /* Root path */
1683 + if (!root_server_path[0])
1684 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
1685 +index 4fda9abf38ee1..dd4e46af1e531 100644
1686 +--- a/net/ipv4/ping.c
1687 ++++ b/net/ipv4/ping.c
1688 +@@ -976,6 +976,7 @@ bool ping_rcv(struct sk_buff *skb)
1689 + struct sock *sk;
1690 + struct net *net = dev_net(skb->dev);
1691 + struct icmphdr *icmph = icmp_hdr(skb);
1692 ++ bool rc = false;
1693 +
1694 + /* We assume the packet has already been checked by icmp_rcv */
1695 +
1696 +@@ -990,14 +991,15 @@ bool ping_rcv(struct sk_buff *skb)
1697 + struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1698 +
1699 + pr_debug("rcv on socket %p\n", sk);
1700 +- if (skb2)
1701 +- ping_queue_rcv_skb(sk, skb2);
1702 ++ if (skb2 && !ping_queue_rcv_skb(sk, skb2))
1703 ++ rc = true;
1704 + sock_put(sk);
1705 +- return true;
1706 + }
1707 +- pr_debug("no socket, dropping\n");
1708 +
1709 +- return false;
1710 ++ if (!rc)
1711 ++ pr_debug("no socket, dropping\n");
1712 ++
1713 ++ return rc;
1714 + }
1715 + EXPORT_SYMBOL_GPL(ping_rcv);
1716 +
1717 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1718 +index e9aae4686536a..5350e1b61c06b 100644
1719 +--- a/net/ipv4/route.c
1720 ++++ b/net/ipv4/route.c
1721 +@@ -70,6 +70,7 @@
1722 + #include <linux/types.h>
1723 + #include <linux/kernel.h>
1724 + #include <linux/mm.h>
1725 ++#include <linux/bootmem.h>
1726 + #include <linux/string.h>
1727 + #include <linux/socket.h>
1728 + #include <linux/sockios.h>
1729 +@@ -463,8 +464,10 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
1730 + return neigh_create(&arp_tbl, pkey, dev);
1731 + }
1732 +
1733 +-#define IP_IDENTS_SZ 2048u
1734 +-
1735 ++/* Hash tables of size 2048..262144 depending on RAM size.
1736 ++ * Each bucket uses 8 bytes.
1737 ++ */
1738 ++static u32 ip_idents_mask __read_mostly;
1739 + static atomic_t *ip_idents __read_mostly;
1740 + static u32 *ip_tstamps __read_mostly;
1741 +
1742 +@@ -474,12 +477,16 @@ static u32 *ip_tstamps __read_mostly;
1743 + */
1744 + u32 ip_idents_reserve(u32 hash, int segs)
1745 + {
1746 +- u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
1747 +- atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
1748 +- u32 old = ACCESS_ONCE(*p_tstamp);
1749 +- u32 now = (u32)jiffies;
1750 ++ u32 bucket, old, now = (u32)jiffies;
1751 ++ atomic_t *p_id;
1752 ++ u32 *p_tstamp;
1753 + u32 delta = 0;
1754 +
1755 ++ bucket = hash & ip_idents_mask;
1756 ++ p_tstamp = ip_tstamps + bucket;
1757 ++ p_id = ip_idents + bucket;
1758 ++ old = ACCESS_ONCE(*p_tstamp);
1759 ++
1760 + if (old != now && cmpxchg(p_tstamp, old, now) == old)
1761 + delta = prandom_u32_max(now - old);
1762 +
1763 +@@ -2936,18 +2943,27 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
1764 +
1765 + int __init ip_rt_init(void)
1766 + {
1767 ++ void *idents_hash;
1768 + int rc = 0;
1769 + int cpu;
1770 +
1771 +- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
1772 +- if (!ip_idents)
1773 +- panic("IP: failed to allocate ip_idents\n");
1774 ++ /* For modern hosts, this will use 2 MB of memory */
1775 ++ idents_hash = alloc_large_system_hash("IP idents",
1776 ++ sizeof(*ip_idents) + sizeof(*ip_tstamps),
1777 ++ 0,
1778 ++ 16, /* one bucket per 64 KB */
1779 ++ 0,
1780 ++ NULL,
1781 ++ &ip_idents_mask,
1782 ++ 2048,
1783 ++ 256*1024);
1784 ++
1785 ++ ip_idents = idents_hash;
1786 +
1787 +- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
1788 ++ prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
1789 +
1790 +- ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
1791 +- if (!ip_tstamps)
1792 +- panic("IP: failed to allocate ip_tstamps\n");
1793 ++ ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
1794 ++ memset(ip_tstamps, 0, (ip_idents_mask + 1) * sizeof(*ip_tstamps));
1795 +
1796 + for_each_possible_cpu(cpu) {
1797 + struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
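[Note on the route.c hunks: the fixed 2048-entry ip_idents array becomes a table of 2048 to 262144 buckets sized from available RAM at boot via alloc_large_system_hash(), which also returns the mask, and a bucket is then picked with hash & mask instead of hash % size; the substitution is valid only because the size is a power of two. A userspace sketch of the mask-based selection, where idents_init() is an invented stand-in for the kernel allocator:]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t idents_mask;            /* table size minus one */
static uint32_t *idents;

/* Round the requested bucket count up to a power of two within
 * [min, max]; the kernel derives the request from RAM size instead. */
static void idents_init(uint32_t want, uint32_t min, uint32_t max)
{
        uint32_t n = min;

        while (n < want && n < max)
                n <<= 1;
        idents = calloc(n, sizeof(*idents));
        idents_mask = n - 1;
}

static uint32_t *ident_bucket(uint32_t hash)
{
        /* n is a power of two, so hash & (n - 1) == hash % n,
         * without the division */
        return idents + (hash & idents_mask);
}

int main(void)
{
        idents_init(5000, 2048, 256 * 1024);
        printf("buckets=%u bucket(0xdeadbeef)=%u\n", idents_mask + 1,
               (uint32_t)(ident_bucket(0xdeadbeefu) - idents));
        return 0;
}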
1798 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1799 +index 18a1a4890c5f5..79249a44e4a3b 100644
1800 +--- a/net/ipv4/udp.c
1801 ++++ b/net/ipv4/udp.c
1802 +@@ -1998,6 +1998,9 @@ void udp_destroy_sock(struct sock *sk)
1803 + {
1804 + struct udp_sock *up = udp_sk(sk);
1805 + bool slow = lock_sock_fast(sk);
1806 ++
1807 ++ /* protects from races with udp_abort() */
1808 ++ sock_set_flag(sk, SOCK_DEAD);
1809 + udp_flush_pending_frames(sk);
1810 + unlock_sock_fast(sk, slow);
1811 + if (static_key_false(&udp_encap_needed) && up->encap_type) {
1812 +@@ -2228,10 +2231,17 @@ int udp_abort(struct sock *sk, int err)
1813 + {
1814 + lock_sock(sk);
1815 +
1816 ++ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
1817 ++ * with close()
1818 ++ */
1819 ++ if (sock_flag(sk, SOCK_DEAD))
1820 ++ goto out;
1821 ++
1822 + sk->sk_err = err;
1823 + sk->sk_error_report(sk);
1824 + __udp_disconnect(sk, 0);
1825 +
1826 ++out:
1827 + release_sock(sk);
1828 +
1829 + return 0;
1830 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1831 +index 1ad84e18c03b7..3a876a2fdd82d 100644
1832 +--- a/net/ipv6/udp.c
1833 ++++ b/net/ipv6/udp.c
1834 +@@ -1325,6 +1325,9 @@ void udpv6_destroy_sock(struct sock *sk)
1835 + {
1836 + struct udp_sock *up = udp_sk(sk);
1837 + lock_sock(sk);
1838 ++
1839 ++ /* protects from races with udp_abort() */
1840 ++ sock_set_flag(sk, SOCK_DEAD);
1841 + udp_v6_flush_pending_frames(sk);
1842 + release_sock(sk);
1843 +
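[Note on the two udp hunks above: udp_destroy_sock() and udpv6_destroy_sock() set SOCK_DEAD while holding the socket lock, and udp_abort() tests the flag under the same lock before touching the socket, so a diag-initiated abort can no longer race with close(). A minimal userspace model of that flag handshake, with invented names (fake_sock, sock_destroy, sock_abort) and a mutex standing in for the socket lock:]

/* Build: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
        pthread_mutex_t lock;
        bool dead;
        int err;
};

static void sock_destroy(struct fake_sock *sk)
{
        pthread_mutex_lock(&sk->lock);
        sk->dead = true;                /* published under the lock */
        /* ...flush pending frames... */
        pthread_mutex_unlock(&sk->lock);
}

static int sock_abort(struct fake_sock *sk, int err)
{
        pthread_mutex_lock(&sk->lock);
        if (sk->dead)                   /* close() already in progress */
                goto out;
        sk->err = err;
        /* ...report the error and disconnect... */
out:
        pthread_mutex_unlock(&sk->lock);
        return 0;
}

int main(void)
{
        struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

        sock_destroy(&sk);
        sock_abort(&sk, 111);
        printf("err=%d\n", sk.err);     /* stays 0: abort backed off */
        return 0;
}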
1844 +diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
1845 +index 21b35255ecc24..f5532a3ce72e1 100644
1846 +--- a/net/mac80211/ieee80211_i.h
1847 ++++ b/net/mac80211/ieee80211_i.h
1848 +@@ -1391,7 +1391,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
1849 + rcu_read_lock();
1850 + chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1851 +
1852 +- if (WARN_ON_ONCE(!chanctx_conf)) {
1853 ++ if (!chanctx_conf) {
1854 + rcu_read_unlock();
1855 + return NULL;
1856 + }
1857 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1858 +index 721caa5a5430f..3a069cb188b72 100644
1859 +--- a/net/mac80211/rx.c
1860 ++++ b/net/mac80211/rx.c
1861 +@@ -1988,17 +1988,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1862 + sc = le16_to_cpu(hdr->seq_ctrl);
1863 + frag = sc & IEEE80211_SCTL_FRAG;
1864 +
1865 +- if (is_multicast_ether_addr(hdr->addr1)) {
1866 +- I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
1867 +- goto out_no_led;
1868 +- }
1869 +-
1870 + if (rx->sta)
1871 + cache = &rx->sta->frags;
1872 +
1873 + if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1874 + goto out;
1875 +
1876 ++ if (is_multicast_ether_addr(hdr->addr1))
1877 ++ return RX_DROP_MONITOR;
1878 ++
1879 + I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1880 +
1881 + if (skb_linearize(rx->skb))
1882 +@@ -2127,7 +2125,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1883 +
1884 + out:
1885 + ieee80211_led_rx(rx->local);
1886 +- out_no_led:
1887 + if (rx->sta)
1888 + rx->sta->rx_stats.packets++;
1889 + return RX_CONTINUE;
1890 +diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
1891 +index c8a4a48bced98..8be604eb69616 100644
1892 +--- a/net/netfilter/nf_synproxy_core.c
1893 ++++ b/net/netfilter/nf_synproxy_core.c
1894 +@@ -34,6 +34,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
1895 + int length = (th->doff * 4) - sizeof(*th);
1896 + u8 buf[40], *ptr;
1897 +
1898 ++ if (unlikely(length < 0))
1899 ++ return false;
1900 ++
1901 + ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
1902 + if (ptr == NULL)
1903 + return false;
1904 +@@ -50,6 +53,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
1905 + length--;
1906 + continue;
1907 + default:
1908 ++ if (length < 2)
1909 ++ return true;
1910 + opsize = *ptr++;
1911 + if (opsize < 2)
1912 + return true;
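[Note on the synproxy hunks: they add two bounds checks to the TCP option walk. A data offset smaller than the fixed header now fails instead of yielding a negative length, and an option kind/length pair is only read when at least two bytes remain. The standalone walk below follows the same structure; parse_tcp_options() is an invented name, and 0 and 1 are the standard EOL and NOP option kinds:]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool parse_tcp_options(const uint8_t *ptr, int length)
{
        if (length < 0)                 /* doff smaller than the header */
                return false;

        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                switch (opcode) {
                case 0:                 /* EOL: end of options */
                        return true;
                case 1:                 /* NOP: single byte */
                        length--;
                        continue;
                default:
                        if (length < 2) /* no room for the length byte */
                                return true;
                        opsize = *ptr++;
                        if (opsize < 2) /* malformed */
                                return true;
                        if (opsize > length)    /* would overrun buffer */
                                return true;
                        /* ...record the option here... */
                        ptr += opsize - 2;
                        length -= opsize;
                }
        }
        return true;
}

int main(void)
{
        uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };    /* NOP NOP MSS(1460) */

        printf("full=%d\n", parse_tcp_options(opts, (int)sizeof(opts)));
        printf("cut=%d\n", parse_tcp_options(opts, 3)); /* stops, no overread */
        return 0;
}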
1913 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1914 +index b5b79f5015415..370d0a4af1f97 100644
1915 +--- a/net/packet/af_packet.c
1916 ++++ b/net/packet/af_packet.c
1917 +@@ -2674,7 +2674,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1918 + }
1919 + if (likely(saddr == NULL)) {
1920 + dev = packet_cached_dev_get(po);
1921 +- proto = po->num;
1922 ++ proto = READ_ONCE(po->num);
1923 + } else {
1924 + err = -EINVAL;
1925 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1926 +@@ -2886,7 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1927 +
1928 + if (likely(saddr == NULL)) {
1929 + dev = packet_cached_dev_get(po);
1930 +- proto = po->num;
1931 ++ proto = READ_ONCE(po->num);
1932 + } else {
1933 + err = -EINVAL;
1934 + if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1935 +@@ -3157,7 +3157,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
1936 + /* prevents packet_notifier() from calling
1937 + * register_prot_hook()
1938 + */
1939 +- po->num = 0;
1940 ++ WRITE_ONCE(po->num, 0);
1941 + __unregister_prot_hook(sk, true);
1942 + rcu_read_lock();
1943 + dev_curr = po->prot_hook.dev;
1944 +@@ -3167,17 +3167,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
1945 + }
1946 +
1947 + BUG_ON(po->running);
1948 +- po->num = proto;
1949 ++ WRITE_ONCE(po->num, proto);
1950 + po->prot_hook.type = proto;
1951 +
1952 + if (unlikely(unlisted)) {
1953 + dev_put(dev);
1954 + po->prot_hook.dev = NULL;
1955 +- po->ifindex = -1;
1956 ++ WRITE_ONCE(po->ifindex, -1);
1957 + packet_cached_dev_reset(po);
1958 + } else {
1959 + po->prot_hook.dev = dev;
1960 +- po->ifindex = dev ? dev->ifindex : 0;
1961 ++ WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
1962 + packet_cached_dev_assign(po, dev);
1963 + }
1964 + }
1965 +@@ -3492,7 +3492,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1966 + uaddr->sa_family = AF_PACKET;
1967 + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
1968 + rcu_read_lock();
1969 +- dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1970 ++ dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
1971 + if (dev)
1972 + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
1973 + rcu_read_unlock();
1974 +@@ -3508,16 +3508,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1975 + struct sock *sk = sock->sk;
1976 + struct packet_sock *po = pkt_sk(sk);
1977 + DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1978 ++ int ifindex;
1979 +
1980 + if (peer)
1981 + return -EOPNOTSUPP;
1982 +
1983 ++ ifindex = READ_ONCE(po->ifindex);
1984 + sll->sll_family = AF_PACKET;
1985 +- sll->sll_ifindex = po->ifindex;
1986 +- sll->sll_protocol = po->num;
1987 ++ sll->sll_ifindex = ifindex;
1988 ++ sll->sll_protocol = READ_ONCE(po->num);
1989 + sll->sll_pkttype = 0;
1990 + rcu_read_lock();
1991 +- dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1992 ++ dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
1993 + if (dev) {
1994 + sll->sll_hatype = dev->type;
1995 + sll->sll_halen = dev->addr_len;
1996 +@@ -4097,7 +4099,7 @@ static int packet_notifier(struct notifier_block *this,
1997 + }
1998 + if (msg == NETDEV_UNREGISTER) {
1999 + packet_cached_dev_reset(po);
2000 +- po->ifindex = -1;
2001 ++ WRITE_ONCE(po->ifindex, -1);
2002 + if (po->prot_hook.dev)
2003 + dev_put(po->prot_hook.dev);
2004 + po->prot_hook.dev = NULL;
2005 +@@ -4400,7 +4402,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2006 + was_running = po->running;
2007 + num = po->num;
2008 + if (was_running) {
2009 +- po->num = 0;
2010 ++ WRITE_ONCE(po->num, 0);
2011 + __unregister_prot_hook(sk, false);
2012 + }
2013 + spin_unlock(&po->bind_lock);
2014 +@@ -4433,7 +4435,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2015 +
2016 + spin_lock(&po->bind_lock);
2017 + if (was_running) {
2018 +- po->num = num;
2019 ++ WRITE_ONCE(po->num, num);
2020 + register_prot_hook(sk);
2021 + }
2022 + spin_unlock(&po->bind_lock);
2023 +@@ -4602,8 +4604,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2024 + s,
2025 + atomic_read(&s->sk_refcnt),
2026 + s->sk_type,
2027 +- ntohs(po->num),
2028 +- po->ifindex,
2029 ++ ntohs(READ_ONCE(po->num)),
2030 ++ READ_ONCE(po->ifindex),
2031 + po->running,
2032 + atomic_read(&s->sk_rmem_alloc),
2033 + from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
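[Note on the af_packet hunks: po->num and po->ifindex are written under the bind or socket lock but read locklessly from the transmit paths, getname and the /proc dump, so those accesses are marked with READ_ONCE()/WRITE_ONCE() to rule out torn, refetched or invented loads. The nearest portable userspace analogue is a relaxed C11 atomic, sketched here with invented names (pkt_sock, do_bind, show):]

/* Build: gcc -pthread -std=c11 sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct pkt_sock {
        pthread_mutex_t bind_lock;
        _Atomic unsigned short num;     /* read without the lock */
        _Atomic int ifindex;            /* read without the lock */
};

static void do_bind(struct pkt_sock *po, unsigned short proto, int ifindex)
{
        pthread_mutex_lock(&po->bind_lock);
        atomic_store_explicit(&po->num, 0, memory_order_relaxed);
        /* ...unregister the old hook, switch device... */
        atomic_store_explicit(&po->ifindex, ifindex, memory_order_relaxed);
        atomic_store_explicit(&po->num, proto, memory_order_relaxed);
        pthread_mutex_unlock(&po->bind_lock);
}

/* Lockless reader, like the send fast path or the /proc dump. */
static void show(const struct pkt_sock *po)
{
        unsigned short num = atomic_load_explicit(&po->num,
                                                  memory_order_relaxed);
        int ifindex = atomic_load_explicit(&po->ifindex,
                                           memory_order_relaxed);

        printf("proto=0x%04x ifindex=%d\n", num, ifindex);
}

int main(void)
{
        struct pkt_sock po = { PTHREAD_MUTEX_INITIALIZER };

        do_bind(&po, 0x0800, 2);
        show(&po);
        return 0;
}

[Relaxed atomics, like READ_ONCE()/WRITE_ONCE(), promise only a single untorn access; ordering between fields still comes from the lock on the writer side.]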
2034 +diff --git a/net/rds/recv.c b/net/rds/recv.c
2035 +index 488a198be3e1f..4bd307e31b404 100644
2036 +--- a/net/rds/recv.c
2037 ++++ b/net/rds/recv.c
2038 +@@ -596,7 +596,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2039 +
2040 + if (rds_cmsg_recv(inc, msg, rs)) {
2041 + ret = -EFAULT;
2042 +- goto out;
2043 ++ break;
2044 + }
2045 +
2046 + rds_stats_inc(s_recv_delivered);
2047 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2048 +index bcd6ed6e7e25c..8bbaa35937dd9 100644
2049 +--- a/net/unix/af_unix.c
2050 ++++ b/net/unix/af_unix.c
2051 +@@ -534,12 +534,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
2052 + u->path.mnt = NULL;
2053 + state = sk->sk_state;
2054 + sk->sk_state = TCP_CLOSE;
2055 ++
2056 ++ skpair = unix_peer(sk);
2057 ++ unix_peer(sk) = NULL;
2058 ++
2059 + unix_state_unlock(sk);
2060 +
2061 + wake_up_interruptible_all(&u->peer_wait);
2062 +
2063 +- skpair = unix_peer(sk);
2064 +-
2065 + if (skpair != NULL) {
2066 + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
2067 + unix_state_lock(skpair);
2068 +@@ -554,7 +556,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
2069 +
2070 + unix_dgram_peer_wake_disconnect(sk, skpair);
2071 + sock_put(skpair); /* It may now die */
2072 +- unix_peer(sk) = NULL;
2073 + }
2074 +
2075 + /* Try to flush out this socket. Throw out buffers at least */
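[Note on the af_unix hunk: the read-and-clear of unix_peer(sk) moves inside the section covered by unix_state_lock(), so unix_release_sock() claims the peer pointer atomically and concurrent lookups see either the peer or NULL, never a pointer that is about to be dropped. A minimal sketch of that claim-under-lock handover with invented names; the mutex stands in for the socket state lock, and the refcount is plain because only the owner touches it here:]

/* Build: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
        int refs;
};

static struct peer *shared_peer;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void release(void)
{
        pthread_mutex_lock(&state_lock);
        struct peer *p = shared_peer;   /* read... */
        shared_peer = NULL;             /* ...and clear, one critical section */
        pthread_mutex_unlock(&state_lock);

        if (p) {
                /* sole owner now; safe to drop the reference */
                if (--p->refs == 0)
                        free(p);
        }
}

int main(void)
{
        shared_peer = calloc(1, sizeof(*shared_peer));
        shared_peer->refs = 1;
        release();
        release();      /* second caller sees NULL and does nothing */
        printf("done\n");
        return 0;
}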
2076 +diff --git a/net/wireless/util.c b/net/wireless/util.c
2077 +index 939320571d71f..a16e805c4857f 100644
2078 +--- a/net/wireless/util.c
2079 ++++ b/net/wireless/util.c
2080 +@@ -1050,6 +1050,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2081 + case NL80211_IFTYPE_MESH_POINT:
2082 + /* mesh should be handled? */
2083 + break;
2084 ++ case NL80211_IFTYPE_OCB:
2085 ++ cfg80211_leave_ocb(rdev, dev);
2086 ++ break;
2087 + default:
2088 + break;
2089 + }
2090 +diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
2091 +index 9c3fbf4553cc7..c23c04d38a82e 100644
2092 +--- a/net/x25/af_x25.c
2093 ++++ b/net/x25/af_x25.c
2094 +@@ -550,7 +550,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
2095 + if (protocol)
2096 + goto out;
2097 +
2098 +- rc = -ENOBUFS;
2099 ++ rc = -ENOMEM;
2100 + if ((sk = x25_alloc_socket(net, kern)) == NULL)
2101 + goto out;
2102 +