
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 08 Feb 2022 17:56:34
Message-Id: 1644342979.79b553e704b22f42a793a7452f84bdf53453b509.mpagano@gentoo
1 commit: 79b553e704b22f42a793a7452f84bdf53453b509
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Feb 8 17:56:19 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Feb 8 17:56:19 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=79b553e7
7
8 Linux patch 4.19.228
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1227_linux-4.19.228.patch | 2831 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2835 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index cc68b74d..e1d00b7a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -947,6 +947,10 @@ Patch: 1226_linux-4.19.227.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.227
23
24 +Patch: 1227_linux-4.19.228.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.228
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1227_linux-4.19.228.patch b/1227_linux-4.19.228.patch
33 new file mode 100644
34 index 00000000..8a18f864
35 --- /dev/null
36 +++ b/1227_linux-4.19.228.patch
37 @@ -0,0 +1,2831 @@
38 +diff --git a/Makefile b/Makefile
39 +index 1e9652cb9c1fc..1779149108cff 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 227
47 ++SUBLEVEL = 228
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
52 +index 1e64cfe22a83e..bf19c5514d6c2 100644
53 +--- a/arch/powerpc/kernel/Makefile
54 ++++ b/arch/powerpc/kernel/Makefile
55 +@@ -15,6 +15,7 @@ CFLAGS_prom_init.o += -fPIC
56 + CFLAGS_btext.o += -fPIC
57 + endif
58 +
59 ++CFLAGS_setup_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
60 + CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
61 + CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
62 + CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
63 +diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
64 +index 6702868089283..36f9130844294 100644
65 +--- a/arch/powerpc/lib/Makefile
66 ++++ b/arch/powerpc/lib/Makefile
67 +@@ -10,6 +10,9 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
68 + CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
69 + CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
70 +
71 ++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
72 ++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
73 ++
74 + obj-y += string.o alloc.o code-patching.o feature-fixups.o
75 +
76 + obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o
77 +diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
78 +index c4b7b681e0554..90740be25cf86 100644
79 +--- a/arch/s390/hypfs/hypfs_vm.c
80 ++++ b/arch/s390/hypfs/hypfs_vm.c
81 +@@ -20,6 +20,7 @@
82 +
83 + static char local_guest[] = " ";
84 + static char all_guests[] = "* ";
85 ++static char *all_groups = all_guests;
86 + static char *guest_query;
87 +
88 + struct diag2fc_data {
89 +@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
90 +
91 + memcpy(parm_list.userid, query, NAME_LEN);
92 + ASCEBC(parm_list.userid, NAME_LEN);
93 +- parm_list.addr = (unsigned long) addr ;
94 ++ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
95 ++ ASCEBC(parm_list.aci_grp, NAME_LEN);
96 ++ parm_list.addr = (unsigned long)addr;
97 + parm_list.size = size;
98 + parm_list.fmt = 0x02;
99 +- memset(parm_list.aci_grp, 0x40, NAME_LEN);
100 + rc = -1;
101 +
102 + diag_stat_inc(DIAG_STAT_X2FC);
103 +diff --git a/block/bio-integrity.c b/block/bio-integrity.c
104 +index 0b96220d0efd4..2e22a3f7466a8 100644
105 +--- a/block/bio-integrity.c
106 ++++ b/block/bio-integrity.c
107 +@@ -399,7 +399,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
108 + struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
109 + unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
110 +
111 +- bip->bip_iter.bi_sector += bytes_done >> 9;
112 ++ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
113 + bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
114 + }
115 + EXPORT_SYMBOL(bio_integrity_advance);
116 +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
117 +index 56de378ad13dc..3145d009d541b 100644
118 +--- a/drivers/edac/altera_edac.c
119 ++++ b/drivers/edac/altera_edac.c
120 +@@ -366,7 +366,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
121 + if (irq < 0) {
122 + edac_printk(KERN_ERR, EDAC_MC,
123 + "No irq %d in DT\n", irq);
124 +- return -ENODEV;
125 ++ return irq;
126 + }
127 +
128 + /* Arria10 has a 2nd IRQ */
129 +diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
130 +index e8b81d7ef61fa..028ddc7903254 100644
131 +--- a/drivers/edac/xgene_edac.c
132 ++++ b/drivers/edac/xgene_edac.c
133 +@@ -1934,7 +1934,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
134 + irq = platform_get_irq(pdev, i);
135 + if (irq < 0) {
136 + dev_err(&pdev->dev, "No IRQ resource\n");
137 +- rc = -EINVAL;
138 ++ rc = irq;
139 + goto out_err;
140 + }
141 + rc = devm_request_irq(&pdev->dev, irq,
142 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
143 +index 2b57a581b29e2..b60623d1db0ea 100644
144 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
145 ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
146 +@@ -444,8 +444,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
147 + return -EINVAL;
148 + }
149 +
150 +- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
151 +- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
152 ++ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
153 ++ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
154 + DRM_ERROR("submit arguments out of size limits\n");
155 + return -EINVAL;
156 + }
157 +diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
158 +index 443dfaefd7a6b..bcfbba14f2171 100644
159 +--- a/drivers/gpu/drm/i915/intel_overlay.c
160 ++++ b/drivers/gpu/drm/i915/intel_overlay.c
161 +@@ -929,6 +929,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
162 + const struct intel_crtc_state *pipe_config =
163 + overlay->crtc->config;
164 +
165 ++ if (rec->dst_height == 0 || rec->dst_width == 0)
166 ++ return -EINVAL;
167 ++
168 + if (rec->dst_x < pipe_config->pipe_src_w &&
169 + rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
170 + rec->dst_y < pipe_config->pipe_src_h &&
171 +diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
172 +index c630871de7c5b..f01e245cd0eee 100644
173 +--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
174 ++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
175 +@@ -667,12 +667,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
176 + int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
177 + struct msm_dsi_phy_clk_request *clk_req)
178 + {
179 +- struct device *dev = &phy->pdev->dev;
180 ++ struct device *dev;
181 + int ret;
182 +
183 + if (!phy || !phy->cfg->ops.enable)
184 + return -EINVAL;
185 +
186 ++ dev = &phy->pdev->dev;
187 ++
188 + ret = dsi_phy_enable_resource(phy);
189 + if (ret) {
190 + dev_err(dev, "%s: resource enable failed, %d\n",
191 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
192 +index 08ff9d7645d74..11bad11101b9e 100644
193 +--- a/drivers/gpu/drm/msm/msm_drv.c
194 ++++ b/drivers/gpu/drm/msm/msm_drv.c
195 +@@ -388,7 +388,7 @@ static int msm_init_vram(struct drm_device *dev)
196 + of_node_put(node);
197 + if (ret)
198 + return ret;
199 +- size = r.end - r.start;
200 ++ size = r.end - r.start + 1;
201 + DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
202 +
203 + /* if we have no IOMMU, then we need to use carveout allocator.
204 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
205 +index f3c30b2a788e8..8bff14ae16b0e 100644
206 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
207 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
208 +@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
209 + *addr += bios->imaged_addr;
210 + }
211 +
212 +- if (unlikely(*addr + size >= bios->size)) {
213 ++ if (unlikely(*addr + size > bios->size)) {
214 + nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
215 + return false;
216 + }
217 +diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
218 +index 30a7f7fde6511..033c89f8359d1 100644
219 +--- a/drivers/hwmon/lm90.c
220 ++++ b/drivers/hwmon/lm90.c
221 +@@ -359,7 +359,7 @@ static const struct lm90_params lm90_params[] = {
222 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
223 + | LM90_HAVE_BROKEN_ALERT,
224 + .alert_alarms = 0x7c,
225 +- .max_convrate = 8,
226 ++ .max_convrate = 7,
227 + },
228 + [lm86] = {
229 + .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
230 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
231 +index e64d934f7bac7..10d7aa87beaed 100644
232 +--- a/drivers/infiniband/hw/mlx4/main.c
233 ++++ b/drivers/infiniband/hw/mlx4/main.c
234 +@@ -3351,7 +3351,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
235 + case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
236 + ew = kmalloc(sizeof *ew, GFP_ATOMIC);
237 + if (!ew)
238 +- break;
239 ++ return;
240 +
241 + INIT_WORK(&ew->work, handle_port_mgmt_change_event);
242 + memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
243 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
244 +index c7d0bb3b4a307..76ae6968801e4 100644
245 +--- a/drivers/iommu/amd_iommu_init.c
246 ++++ b/drivers/iommu/amd_iommu_init.c
247 +@@ -30,6 +30,7 @@
248 + #include <linux/iommu.h>
249 + #include <linux/kmemleak.h>
250 + #include <linux/mem_encrypt.h>
251 ++#include <linux/iopoll.h>
252 + #include <asm/pci-direct.h>
253 + #include <asm/iommu.h>
254 + #include <asm/gart.h>
255 +@@ -772,6 +773,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
256 + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
257 + if (status & (MMIO_STATUS_GALOG_RUN_MASK))
258 + break;
259 ++ udelay(10);
260 + }
261 +
262 + if (i >= LOOP_TIMEOUT)
263 +diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
264 +index cd2e5b44119ad..17fc262f2ee88 100644
265 +--- a/drivers/iommu/intel_irq_remapping.c
266 ++++ b/drivers/iommu/intel_irq_remapping.c
267 +@@ -543,9 +543,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
268 + fn, &intel_ir_domain_ops,
269 + iommu);
270 + if (!iommu->ir_domain) {
271 +- irq_domain_free_fwnode(fn);
272 + pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
273 +- goto out_free_bitmap;
274 ++ goto out_free_fwnode;
275 + }
276 + iommu->ir_msi_domain =
277 + arch_create_remap_msi_irq_domain(iommu->ir_domain,
278 +@@ -569,7 +568,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
279 +
280 + if (dmar_enable_qi(iommu)) {
281 + pr_err("Failed to enable queued invalidation\n");
282 +- goto out_free_bitmap;
283 ++ goto out_free_ir_domain;
284 + }
285 + }
286 +
287 +@@ -593,6 +592,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
288 +
289 + return 0;
290 +
291 ++out_free_ir_domain:
292 ++ if (iommu->ir_msi_domain)
293 ++ irq_domain_remove(iommu->ir_msi_domain);
294 ++ iommu->ir_msi_domain = NULL;
295 ++ irq_domain_remove(iommu->ir_domain);
296 ++ iommu->ir_domain = NULL;
297 ++out_free_fwnode:
298 ++ irq_domain_free_fwnode(fn);
299 + out_free_bitmap:
300 + kfree(bitmap);
301 + out_free_pages:
302 +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
303 +index 80cf6af822f72..35659f0dbe74e 100644
304 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
305 ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
306 +@@ -722,7 +722,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
307 + if (!channel->tx_ring)
308 + break;
309 +
310 ++ /* Deactivate the Tx timer */
311 + del_timer_sync(&channel->tx_timer);
312 ++ channel->tx_timer_active = 0;
313 + }
314 + }
315 +
316 +@@ -2766,6 +2768,14 @@ read_again:
317 + buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
318 + len += buf2_len;
319 +
320 ++ if (buf2_len > rdata->rx.buf.dma_len) {
321 ++ /* Hardware inconsistency within the descriptors
322 ++ * that has resulted in a length underflow.
323 ++ */
324 ++ error = 1;
325 ++ goto skip_data;
326 ++ }
327 ++
328 + if (!skb) {
329 + skb = xgbe_create_skb(pdata, napi, rdata,
330 + buf1_len);
331 +@@ -2795,8 +2805,10 @@ skip_data:
332 + if (!last || context_next)
333 + goto read_again;
334 +
335 +- if (!skb)
336 ++ if (!skb || error) {
337 ++ dev_kfree_skb(skb);
338 + goto next_packet;
339 ++ }
340 +
341 + /* Be sure we don't exceed the configured MTU */
342 + max_len = netdev->mtu + ETH_HLEN;
343 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
344 +index c52c26fc44e59..ffea634e03e8a 100644
345 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
346 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
347 +@@ -3044,11 +3044,25 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
348 + struct device *dev = &adapter->vdev->dev;
349 + union ibmvnic_crq crq;
350 + int max_entries;
351 ++ int cap_reqs;
352 ++
353 ++ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
354 ++ * the PROMISC flag). Initialize this count upfront. When the tasklet
355 ++ * receives a response to all of these, it will send the next protocol
356 ++ * message (QUERY_IP_OFFLOAD).
357 ++ */
358 ++ if (!(adapter->netdev->flags & IFF_PROMISC) ||
359 ++ adapter->promisc_supported)
360 ++ cap_reqs = 7;
361 ++ else
362 ++ cap_reqs = 6;
363 +
364 + if (!retry) {
365 + /* Sub-CRQ entries are 32 byte long */
366 + int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
367 +
368 ++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
369 ++
370 + if (adapter->min_tx_entries_per_subcrq > entries_page ||
371 + adapter->min_rx_add_entries_per_subcrq > entries_page) {
372 + dev_err(dev, "Fatal, invalid entries per sub-crq\n");
373 +@@ -3109,44 +3123,45 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
374 + adapter->opt_rx_comp_queues;
375 +
376 + adapter->req_rx_add_queues = adapter->max_rx_add_queues;
377 ++ } else {
378 ++ atomic_add(cap_reqs, &adapter->running_cap_crqs);
379 + }
380 +-
381 + memset(&crq, 0, sizeof(crq));
382 + crq.request_capability.first = IBMVNIC_CRQ_CMD;
383 + crq.request_capability.cmd = REQUEST_CAPABILITY;
384 +
385 + crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
386 + crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
387 +- atomic_inc(&adapter->running_cap_crqs);
388 ++ cap_reqs--;
389 + ibmvnic_send_crq(adapter, &crq);
390 +
391 + crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
392 + crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
393 +- atomic_inc(&adapter->running_cap_crqs);
394 ++ cap_reqs--;
395 + ibmvnic_send_crq(adapter, &crq);
396 +
397 + crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
398 + crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
399 +- atomic_inc(&adapter->running_cap_crqs);
400 ++ cap_reqs--;
401 + ibmvnic_send_crq(adapter, &crq);
402 +
403 + crq.request_capability.capability =
404 + cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
405 + crq.request_capability.number =
406 + cpu_to_be64(adapter->req_tx_entries_per_subcrq);
407 +- atomic_inc(&adapter->running_cap_crqs);
408 ++ cap_reqs--;
409 + ibmvnic_send_crq(adapter, &crq);
410 +
411 + crq.request_capability.capability =
412 + cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
413 + crq.request_capability.number =
414 + cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
415 +- atomic_inc(&adapter->running_cap_crqs);
416 ++ cap_reqs--;
417 + ibmvnic_send_crq(adapter, &crq);
418 +
419 + crq.request_capability.capability = cpu_to_be16(REQ_MTU);
420 + crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
421 +- atomic_inc(&adapter->running_cap_crqs);
422 ++ cap_reqs--;
423 + ibmvnic_send_crq(adapter, &crq);
424 +
425 + if (adapter->netdev->flags & IFF_PROMISC) {
426 +@@ -3154,16 +3169,21 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
427 + crq.request_capability.capability =
428 + cpu_to_be16(PROMISC_REQUESTED);
429 + crq.request_capability.number = cpu_to_be64(1);
430 +- atomic_inc(&adapter->running_cap_crqs);
431 ++ cap_reqs--;
432 + ibmvnic_send_crq(adapter, &crq);
433 + }
434 + } else {
435 + crq.request_capability.capability =
436 + cpu_to_be16(PROMISC_REQUESTED);
437 + crq.request_capability.number = cpu_to_be64(0);
438 +- atomic_inc(&adapter->running_cap_crqs);
439 ++ cap_reqs--;
440 + ibmvnic_send_crq(adapter, &crq);
441 + }
442 ++
443 ++ /* Keep at end to catch any discrepancy between expected and actual
444 ++ * CRQs sent.
445 ++ */
446 ++ WARN_ON(cap_reqs != 0);
447 + }
448 +
449 + static int pending_scrq(struct ibmvnic_adapter *adapter,
450 +@@ -3568,118 +3588,132 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
451 + static void send_cap_queries(struct ibmvnic_adapter *adapter)
452 + {
453 + union ibmvnic_crq crq;
454 ++ int cap_reqs;
455 ++
456 ++ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
457 ++ * upfront. When the tasklet receives a response to all of these, it
458 ++ * can send out the next protocol messaage (REQUEST_CAPABILITY).
459 ++ */
460 ++ cap_reqs = 25;
461 ++
462 ++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
463 +
464 +- atomic_set(&adapter->running_cap_crqs, 0);
465 + memset(&crq, 0, sizeof(crq));
466 + crq.query_capability.first = IBMVNIC_CRQ_CMD;
467 + crq.query_capability.cmd = QUERY_CAPABILITY;
468 +
469 + crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
470 +- atomic_inc(&adapter->running_cap_crqs);
471 + ibmvnic_send_crq(adapter, &crq);
472 ++ cap_reqs--;
473 +
474 + crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
475 +- atomic_inc(&adapter->running_cap_crqs);
476 + ibmvnic_send_crq(adapter, &crq);
477 ++ cap_reqs--;
478 +
479 + crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
480 +- atomic_inc(&adapter->running_cap_crqs);
481 + ibmvnic_send_crq(adapter, &crq);
482 ++ cap_reqs--;
483 +
484 + crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
485 +- atomic_inc(&adapter->running_cap_crqs);
486 + ibmvnic_send_crq(adapter, &crq);
487 ++ cap_reqs--;
488 +
489 + crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
490 +- atomic_inc(&adapter->running_cap_crqs);
491 + ibmvnic_send_crq(adapter, &crq);
492 ++ cap_reqs--;
493 +
494 + crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
495 +- atomic_inc(&adapter->running_cap_crqs);
496 + ibmvnic_send_crq(adapter, &crq);
497 ++ cap_reqs--;
498 +
499 + crq.query_capability.capability =
500 + cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
501 +- atomic_inc(&adapter->running_cap_crqs);
502 + ibmvnic_send_crq(adapter, &crq);
503 ++ cap_reqs--;
504 +
505 + crq.query_capability.capability =
506 + cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
507 +- atomic_inc(&adapter->running_cap_crqs);
508 + ibmvnic_send_crq(adapter, &crq);
509 ++ cap_reqs--;
510 +
511 + crq.query_capability.capability =
512 + cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
513 +- atomic_inc(&adapter->running_cap_crqs);
514 + ibmvnic_send_crq(adapter, &crq);
515 ++ cap_reqs--;
516 +
517 + crq.query_capability.capability =
518 + cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
519 +- atomic_inc(&adapter->running_cap_crqs);
520 + ibmvnic_send_crq(adapter, &crq);
521 ++ cap_reqs--;
522 +
523 + crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
524 +- atomic_inc(&adapter->running_cap_crqs);
525 + ibmvnic_send_crq(adapter, &crq);
526 ++ cap_reqs--;
527 +
528 + crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
529 +- atomic_inc(&adapter->running_cap_crqs);
530 + ibmvnic_send_crq(adapter, &crq);
531 ++ cap_reqs--;
532 +
533 + crq.query_capability.capability = cpu_to_be16(MIN_MTU);
534 +- atomic_inc(&adapter->running_cap_crqs);
535 + ibmvnic_send_crq(adapter, &crq);
536 ++ cap_reqs--;
537 +
538 + crq.query_capability.capability = cpu_to_be16(MAX_MTU);
539 +- atomic_inc(&adapter->running_cap_crqs);
540 + ibmvnic_send_crq(adapter, &crq);
541 ++ cap_reqs--;
542 +
543 + crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
544 +- atomic_inc(&adapter->running_cap_crqs);
545 + ibmvnic_send_crq(adapter, &crq);
546 ++ cap_reqs--;
547 +
548 + crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
549 +- atomic_inc(&adapter->running_cap_crqs);
550 + ibmvnic_send_crq(adapter, &crq);
551 ++ cap_reqs--;
552 +
553 + crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
554 +- atomic_inc(&adapter->running_cap_crqs);
555 + ibmvnic_send_crq(adapter, &crq);
556 ++ cap_reqs--;
557 +
558 + crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
559 +- atomic_inc(&adapter->running_cap_crqs);
560 + ibmvnic_send_crq(adapter, &crq);
561 ++ cap_reqs--;
562 +
563 + crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
564 +- atomic_inc(&adapter->running_cap_crqs);
565 + ibmvnic_send_crq(adapter, &crq);
566 ++ cap_reqs--;
567 +
568 + crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
569 +- atomic_inc(&adapter->running_cap_crqs);
570 + ibmvnic_send_crq(adapter, &crq);
571 ++ cap_reqs--;
572 +
573 + crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
574 +- atomic_inc(&adapter->running_cap_crqs);
575 + ibmvnic_send_crq(adapter, &crq);
576 ++ cap_reqs--;
577 +
578 + crq.query_capability.capability =
579 + cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
580 +- atomic_inc(&adapter->running_cap_crqs);
581 + ibmvnic_send_crq(adapter, &crq);
582 ++ cap_reqs--;
583 +
584 + crq.query_capability.capability =
585 + cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
586 +- atomic_inc(&adapter->running_cap_crqs);
587 + ibmvnic_send_crq(adapter, &crq);
588 ++ cap_reqs--;
589 +
590 + crq.query_capability.capability =
591 + cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
592 +- atomic_inc(&adapter->running_cap_crqs);
593 + ibmvnic_send_crq(adapter, &crq);
594 ++ cap_reqs--;
595 +
596 + crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
597 +- atomic_inc(&adapter->running_cap_crqs);
598 ++
599 + ibmvnic_send_crq(adapter, &crq);
600 ++ cap_reqs--;
601 ++
602 ++ /* Keep at end to catch any discrepancy between expected and actual
603 ++ * CRQs sent.
604 ++ */
605 ++ WARN_ON(cap_reqs != 0);
606 + }
607 +
608 + static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
609 +@@ -3923,6 +3957,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
610 + char *name;
611 +
612 + atomic_dec(&adapter->running_cap_crqs);
613 ++ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
614 ++ atomic_read(&adapter->running_cap_crqs));
615 + switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
616 + case REQ_TX_QUEUES:
617 + req_value = &adapter->req_tx_queues;
618 +@@ -4457,12 +4493,6 @@ static void ibmvnic_tasklet(void *data)
619 + ibmvnic_handle_crq(crq, adapter);
620 + crq->generic.first = 0;
621 + }
622 +-
623 +- /* remain in tasklet until all
624 +- * capabilities responses are received
625 +- */
626 +- if (!adapter->wait_capability)
627 +- done = true;
628 + }
629 + /* if capabilities CRQ's were sent in this tasklet, the following
630 + * tasklet must wait until all responses are received
631 +diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
632 +index 519b595944235..dc99e296f349e 100644
633 +--- a/drivers/net/ethernet/intel/i40e/i40e.h
634 ++++ b/drivers/net/ethernet/intel/i40e/i40e.h
635 +@@ -179,7 +179,6 @@ enum i40e_interrupt_policy {
636 +
637 + struct i40e_lump_tracking {
638 + u16 num_entries;
639 +- u16 search_hint;
640 + u16 list[0];
641 + #define I40E_PILE_VALID_BIT 0x8000
642 + #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
643 +@@ -709,12 +708,12 @@ struct i40e_vsi {
644 + struct rtnl_link_stats64 net_stats_offsets;
645 + struct i40e_eth_stats eth_stats;
646 + struct i40e_eth_stats eth_stats_offsets;
647 +- u32 tx_restart;
648 +- u32 tx_busy;
649 ++ u64 tx_restart;
650 ++ u64 tx_busy;
651 + u64 tx_linearize;
652 + u64 tx_force_wb;
653 +- u32 rx_buf_failed;
654 +- u32 rx_page_failed;
655 ++ u64 rx_buf_failed;
656 ++ u64 rx_page_failed;
657 +
658 + /* These are containers of ring pointers, allocated at run-time */
659 + struct i40e_ring **rx_rings;
660 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
661 +index 56b911a5dd8be..a66492b9403c4 100644
662 +--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
663 ++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
664 +@@ -236,7 +236,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
665 + (unsigned long int)vsi->net_stats_offsets.rx_compressed,
666 + (unsigned long int)vsi->net_stats_offsets.tx_compressed);
667 + dev_info(&pf->pdev->dev,
668 +- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
669 ++ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
670 + vsi->tx_restart, vsi->tx_busy,
671 + vsi->rx_buf_failed, vsi->rx_page_failed);
672 + rcu_read_lock();
673 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
674 +index 1fadc4991c48e..21ea0cdea6668 100644
675 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
676 ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
677 +@@ -193,10 +193,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
678 + * @id: an owner id to stick on the items assigned
679 + *
680 + * Returns the base item index of the lump, or negative for error
681 +- *
682 +- * The search_hint trick and lack of advanced fit-finding only work
683 +- * because we're highly likely to have all the same size lump requests.
684 +- * Linear search time and any fragmentation should be minimal.
685 + **/
686 + static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
687 + u16 needed, u16 id)
688 +@@ -211,8 +207,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
689 + return -EINVAL;
690 + }
691 +
692 +- /* start the linear search with an imperfect hint */
693 +- i = pile->search_hint;
694 ++ /* Allocate last queue in the pile for FDIR VSI queue
695 ++ * so it doesn't fragment the qp_pile
696 ++ */
697 ++ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
698 ++ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
699 ++ dev_err(&pf->pdev->dev,
700 ++ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
701 ++ pile->num_entries - 1);
702 ++ return -ENOMEM;
703 ++ }
704 ++ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
705 ++ return pile->num_entries - 1;
706 ++ }
707 ++
708 ++ i = 0;
709 + while (i < pile->num_entries) {
710 + /* skip already allocated entries */
711 + if (pile->list[i] & I40E_PILE_VALID_BIT) {
712 +@@ -231,7 +240,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
713 + for (j = 0; j < needed; j++)
714 + pile->list[i+j] = id | I40E_PILE_VALID_BIT;
715 + ret = i;
716 +- pile->search_hint = i + j;
717 + break;
718 + }
719 +
720 +@@ -254,7 +262,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
721 + {
722 + int valid_id = (id | I40E_PILE_VALID_BIT);
723 + int count = 0;
724 +- int i;
725 ++ u16 i;
726 +
727 + if (!pile || index >= pile->num_entries)
728 + return -EINVAL;
729 +@@ -266,8 +274,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
730 + count++;
731 + }
732 +
733 +- if (count && index < pile->search_hint)
734 +- pile->search_hint = index;
735 +
736 + return count;
737 + }
738 +@@ -785,9 +791,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
739 + struct rtnl_link_stats64 *ns; /* netdev stats */
740 + struct i40e_eth_stats *oes;
741 + struct i40e_eth_stats *es; /* device's eth stats */
742 +- u32 tx_restart, tx_busy;
743 ++ u64 tx_restart, tx_busy;
744 + struct i40e_ring *p;
745 +- u32 rx_page, rx_buf;
746 ++ u64 rx_page, rx_buf;
747 + u64 bytes, packets;
748 + unsigned int start;
749 + u64 tx_linearize;
750 +@@ -9486,15 +9492,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
751 + }
752 + i40e_get_oem_version(&pf->hw);
753 +
754 +- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
755 +- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
756 +- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
757 +- /* The following delay is necessary for 4.33 firmware and older
758 +- * to recover after EMP reset. 200 ms should suffice but we
759 +- * put here 300 ms to be sure that FW is ready to operate
760 +- * after reset.
761 +- */
762 +- mdelay(300);
763 ++ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
764 ++ /* The following delay is necessary for firmware update. */
765 ++ mdelay(1000);
766 + }
767 +
768 + /* re-verify the eeprom if we just had an EMP reset */
769 +@@ -10733,7 +10733,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
770 + return -ENOMEM;
771 +
772 + pf->irq_pile->num_entries = vectors;
773 +- pf->irq_pile->search_hint = 0;
774 +
775 + /* track first vector for misc interrupts, ignore return */
776 + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
777 +@@ -11442,7 +11441,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
778 + goto sw_init_done;
779 + }
780 + pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
781 +- pf->qp_pile->search_hint = 0;
782 +
783 + pf->tx_timeout_recovery_level = 1;
784 +
785 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
786 +index 55710028c99f3..a39a8fe073ca8 100644
787 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
788 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
789 +@@ -2338,6 +2338,59 @@ error_param:
790 + aq_ret);
791 + }
792 +
793 ++/**
794 ++ * i40e_check_enough_queue - find big enough queue number
795 ++ * @vf: pointer to the VF info
796 ++ * @needed: the number of items needed
797 ++ *
798 ++ * Returns the base item index of the queue, or negative for error
799 ++ **/
800 ++static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
801 ++{
802 ++ unsigned int i, cur_queues, more, pool_size;
803 ++ struct i40e_lump_tracking *pile;
804 ++ struct i40e_pf *pf = vf->pf;
805 ++ struct i40e_vsi *vsi;
806 ++
807 ++ vsi = pf->vsi[vf->lan_vsi_idx];
808 ++ cur_queues = vsi->alloc_queue_pairs;
809 ++
810 ++ /* if current allocated queues are enough for need */
811 ++ if (cur_queues >= needed)
812 ++ return vsi->base_queue;
813 ++
814 ++ pile = pf->qp_pile;
815 ++ if (cur_queues > 0) {
816 ++ /* if the allocated queues are not zero
817 ++ * just check if there are enough queues for more
818 ++ * behind the allocated queues.
819 ++ */
820 ++ more = needed - cur_queues;
821 ++ for (i = vsi->base_queue + cur_queues;
822 ++ i < pile->num_entries; i++) {
823 ++ if (pile->list[i] & I40E_PILE_VALID_BIT)
824 ++ break;
825 ++
826 ++ if (more-- == 1)
827 ++ /* there is enough */
828 ++ return vsi->base_queue;
829 ++ }
830 ++ }
831 ++
832 ++ pool_size = 0;
833 ++ for (i = 0; i < pile->num_entries; i++) {
834 ++ if (pile->list[i] & I40E_PILE_VALID_BIT) {
835 ++ pool_size = 0;
836 ++ continue;
837 ++ }
838 ++ if (needed <= ++pool_size)
839 ++ /* there is enough */
840 ++ return i;
841 ++ }
842 ++
843 ++ return -ENOMEM;
844 ++}
845 ++
846 + /**
847 + * i40e_vc_request_queues_msg
848 + * @vf: pointer to the VF info
849 +@@ -2377,6 +2430,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
850 + req_pairs - cur_pairs,
851 + pf->queues_left);
852 + vfres->num_queue_pairs = pf->queues_left + cur_pairs;
853 ++ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
854 ++ dev_warn(&pf->pdev->dev,
855 ++ "VF %d requested %d more queues, but there is not enough for it.\n",
856 ++ vf->vf_id,
857 ++ req_pairs - cur_pairs);
858 ++ vfres->num_queue_pairs = cur_pairs;
859 + } else {
860 + /* successful request */
861 + vf->num_req_queues = req_pairs;
862 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
863 +index e1fbd7c81bfa9..08a058e1bc75c 100644
864 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
865 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
866 +@@ -159,15 +159,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
867 +
868 + static void get_systime(void __iomem *ioaddr, u64 *systime)
869 + {
870 +- u64 ns;
871 +-
872 +- /* Get the TSSS value */
873 +- ns = readl(ioaddr + PTP_STNSR);
874 +- /* Get the TSS and convert sec time value to nanosecond */
875 +- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
876 ++ u64 ns, sec0, sec1;
877 ++
878 ++ /* Get the TSS value */
879 ++ sec1 = readl_relaxed(ioaddr + PTP_STSR);
880 ++ do {
881 ++ sec0 = sec1;
882 ++ /* Get the TSSS value */
883 ++ ns = readl_relaxed(ioaddr + PTP_STNSR);
884 ++ /* Get the TSS value */
885 ++ sec1 = readl_relaxed(ioaddr + PTP_STSR);
886 ++ } while (sec0 != sec1);
887 +
888 + if (systime)
889 +- *systime = ns;
890 ++ *systime = ns + (sec1 * 1000000000ULL);
891 + }
892 +
893 + const struct stmmac_hwtimestamp stmmac_ptp = {
894 +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
895 +index fdab498725878..3db86f247bf45 100644
896 +--- a/drivers/net/hamradio/yam.c
897 ++++ b/drivers/net/hamradio/yam.c
898 +@@ -966,9 +966,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
899 + sizeof(struct yamdrv_ioctl_mcs));
900 + if (IS_ERR(ym))
901 + return PTR_ERR(ym);
902 +- if (ym->cmd != SIOCYAMSMCS)
903 +- return -EINVAL;
904 +- if (ym->bitrate > YAM_MAXBITRATE) {
905 ++ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
906 + kfree(ym);
907 + return -EINVAL;
908 + }
909 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
910 +index deace0aadad24..3169c174006ab 100644
911 +--- a/drivers/net/ieee802154/ca8210.c
912 ++++ b/drivers/net/ieee802154/ca8210.c
913 +@@ -1769,6 +1769,7 @@ static int ca8210_async_xmit_complete(
914 + status
915 + );
916 + if (status != MAC_TRANSACTION_OVERFLOW) {
917 ++ dev_kfree_skb_any(priv->tx_skb);
918 + ieee802154_wake_queue(priv->hw);
919 + return 0;
920 + }
921 +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
922 +index ed60e691cc2b4..d07e5571e07ae 100644
923 +--- a/drivers/net/ieee802154/mac802154_hwsim.c
924 ++++ b/drivers/net/ieee802154/mac802154_hwsim.c
925 +@@ -805,6 +805,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
926 + goto err_pib;
927 + }
928 +
929 ++ pib->channel = 13;
930 + rcu_assign_pointer(phy->pib, pib);
931 + phy->idx = idx;
932 + INIT_LIST_HEAD(&phy->edges);
933 +diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
934 +index fe4057fca83d8..7c7ef32f99655 100644
935 +--- a/drivers/net/ieee802154/mcr20a.c
936 ++++ b/drivers/net/ieee802154/mcr20a.c
937 +@@ -1005,8 +1005,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
938 + dev_dbg(printdev(lp), "%s\n", __func__);
939 +
940 + phy->symbol_duration = 16;
941 +- phy->lifs_period = 40;
942 +- phy->sifs_period = 12;
943 ++ phy->lifs_period = 40 * phy->symbol_duration;
944 ++ phy->sifs_period = 12 * phy->symbol_duration;
945 +
946 + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
947 + IEEE802154_HW_AFILT |
948 +diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
949 +index 4c5b67a2d63a0..6c0f80bea8161 100644
950 +--- a/drivers/net/macsec.c
951 ++++ b/drivers/net/macsec.c
952 +@@ -3259,6 +3259,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
953 +
954 + macsec->real_dev = real_dev;
955 +
956 ++ /* send_sci must be set to true when transmit sci explicitly is set */
957 ++ if ((data && data[IFLA_MACSEC_SCI]) &&
958 ++ (data && data[IFLA_MACSEC_INC_SCI])) {
959 ++ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
960 ++
961 ++ if (!send_sci)
962 ++ return -EINVAL;
963 ++ }
964 ++
965 + if (data && data[IFLA_MACSEC_ICV_LEN])
966 + icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
967 + mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
968 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
969 +index b884b681d5c52..a03d0627efb06 100644
970 +--- a/drivers/net/phy/phy_device.c
971 ++++ b/drivers/net/phy/phy_device.c
972 +@@ -1166,6 +1166,9 @@ void phy_detach(struct phy_device *phydev)
973 + phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver)
974 + device_release_driver(&phydev->mdio.dev);
975 +
976 ++ /* Assert the reset signal */
977 ++ phy_device_reset(phydev, 1);
978 ++
979 + /*
980 + * The phydev might go away on the put_device() below, so avoid
981 + * a use-after-free bug by reading the underlying bus first.
982 +@@ -1175,9 +1178,6 @@ void phy_detach(struct phy_device *phydev)
983 + put_device(&phydev->mdio.dev);
984 + if (ndev_owner != bus->owner)
985 + module_put(bus->owner);
986 +-
987 +- /* Assert the reset signal */
988 +- phy_device_reset(phydev, 1);
989 + }
990 + EXPORT_SYMBOL(phy_detach);
991 +
992 +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
993 +index e808efd762122..e0e175c691d45 100644
994 +--- a/drivers/net/phy/phylink.c
995 ++++ b/drivers/net/phy/phylink.c
996 +@@ -554,6 +554,11 @@ static int phylink_register_sfp(struct phylink *pl,
997 + return ret;
998 + }
999 +
1000 ++ if (!fwnode_device_is_available(ref.fwnode)) {
1001 ++ fwnode_handle_put(ref.fwnode);
1002 ++ return 0;
1003 ++ }
1004 ++
1005 + pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
1006 + &sfp_phylink_ops);
1007 + if (!pl->sfp_bus)
1008 +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
1009 +index 8e2eb20613548..cea005cc7b2ab 100644
1010 +--- a/drivers/net/usb/ipheth.c
1011 ++++ b/drivers/net/usb/ipheth.c
1012 +@@ -173,7 +173,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
1013 + if (tx_buf == NULL)
1014 + goto free_rx_urb;
1015 +
1016 +- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
1017 ++ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
1018 + GFP_KERNEL, &rx_urb->transfer_dma);
1019 + if (rx_buf == NULL)
1020 + goto free_tx_buf;
1021 +@@ -198,7 +198,7 @@ error_nomem:
1022 +
1023 + static void ipheth_free_urbs(struct ipheth_device *iphone)
1024 + {
1025 +- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
1026 ++ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
1027 + iphone->rx_urb->transfer_dma);
1028 + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
1029 + iphone->tx_urb->transfer_dma);
1030 +@@ -371,7 +371,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
1031 +
1032 + usb_fill_bulk_urb(dev->rx_urb, udev,
1033 + usb_rcvbulkpipe(udev, dev->bulk_in),
1034 +- dev->rx_buf, IPHETH_BUF_SIZE,
1035 ++ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
1036 + ipheth_rcvbulk_callback,
1037 + dev);
1038 + dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1039 +diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
1040 +index a76b963a7e50f..d153fb1bf65f1 100644
1041 +--- a/drivers/rpmsg/rpmsg_char.c
1042 ++++ b/drivers/rpmsg/rpmsg_char.c
1043 +@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
1044 + /* wake up any blocked readers */
1045 + wake_up_interruptible(&eptdev->readq);
1046 +
1047 +- device_del(&eptdev->dev);
1048 ++ cdev_device_del(&eptdev->cdev, &eptdev->dev);
1049 + put_device(&eptdev->dev);
1050 +
1051 + return 0;
1052 +@@ -329,7 +329,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
1053 +
1054 + ida_simple_remove(&rpmsg_ept_ida, dev->id);
1055 + ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
1056 +- cdev_del(&eptdev->cdev);
1057 + kfree(eptdev);
1058 + }
1059 +
1060 +@@ -374,19 +373,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
1061 + dev->id = ret;
1062 + dev_set_name(dev, "rpmsg%d", ret);
1063 +
1064 +- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
1065 ++ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
1066 + if (ret)
1067 + goto free_ept_ida;
1068 +
1069 + /* We can now rely on the release function for cleanup */
1070 + dev->release = rpmsg_eptdev_release_device;
1071 +
1072 +- ret = device_add(dev);
1073 +- if (ret) {
1074 +- dev_err(dev, "device_add failed: %d\n", ret);
1075 +- put_device(dev);
1076 +- }
1077 +-
1078 + return ret;
1079 +
1080 + free_ept_ida:
1081 +@@ -455,7 +448,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
1082 +
1083 + ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
1084 + ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
1085 +- cdev_del(&ctrldev->cdev);
1086 + kfree(ctrldev);
1087 + }
1088 +
1089 +@@ -490,19 +482,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
1090 + dev->id = ret;
1091 + dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
1092 +
1093 +- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
1094 ++ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
1095 + if (ret)
1096 + goto free_ctrl_ida;
1097 +
1098 + /* We can now rely on the release function for cleanup */
1099 + dev->release = rpmsg_ctrldev_release_device;
1100 +
1101 +- ret = device_add(dev);
1102 +- if (ret) {
1103 +- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
1104 +- put_device(dev);
1105 +- }
1106 +-
1107 + dev_set_drvdata(&rpdev->dev, ctrldev);
1108 +
1109 + return ret;
1110 +@@ -528,7 +514,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
1111 + if (ret)
1112 + dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
1113 +
1114 +- device_del(&ctrldev->dev);
1115 ++ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
1116 + put_device(&ctrldev->dev);
1117 + }
1118 +
1119 +diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
1120 +index 18a6f15e313d8..86b8858917b62 100644
1121 +--- a/drivers/rtc/rtc-mc146818-lib.c
1122 ++++ b/drivers/rtc/rtc-mc146818-lib.c
1123 +@@ -82,7 +82,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
1124 + time->tm_year += real_year - 72;
1125 + #endif
1126 +
1127 +- if (century > 20)
1128 ++ if (century > 19)
1129 + time->tm_year += (century - 19) * 100;
1130 +
1131 + /*
1132 +diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
1133 +index 5eb7aabe2d8b2..09ce175bbfcf6 100644
1134 +--- a/drivers/s390/scsi/zfcp_fc.c
1135 ++++ b/drivers/s390/scsi/zfcp_fc.c
1136 +@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
1137 + goto out;
1138 + }
1139 +
1140 ++ /* re-init to undo drop from zfcp_fc_adisc() */
1141 ++ port->d_id = ntoh24(adisc_resp->adisc_port_id);
1142 + /* port is good, unblock rport without going through erp */
1143 + zfcp_scsi_schedule_rport_register(port);
1144 + out:
1145 +@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
1146 + struct zfcp_fc_req *fc_req;
1147 + struct zfcp_adapter *adapter = port->adapter;
1148 + struct Scsi_Host *shost = adapter->scsi_host;
1149 ++ u32 d_id;
1150 + int ret;
1151 +
1152 + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
1153 +@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
1154 + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
1155 + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
1156 +
1157 +- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
1158 ++ d_id = port->d_id; /* remember as destination for send els below */
1159 ++ /*
1160 ++ * Force fresh GID_PN lookup on next port recovery.
1161 ++ * Must happen after request setup and before sending request,
1162 ++ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
1163 ++ */
1164 ++ port->d_id = 0;
1165 ++
1166 ++ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
1167 + ZFCP_FC_CTELS_TMO);
1168 + if (ret)
1169 + kmem_cache_free(zfcp_fc_req_cache, fc_req);
1170 +diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1171 +index 780651c4fc0c4..ea2c601da8e15 100644
1172 +--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1173 ++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1174 +@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
1175 + static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
1176 + static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1177 + struct device *parent, int npiv);
1178 +-static void bnx2fc_destroy_work(struct work_struct *work);
1179 ++static void bnx2fc_port_destroy(struct fcoe_port *port);
1180 +
1181 + static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
1182 + static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
1183 +@@ -515,7 +515,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
1184 +
1185 + static void bnx2fc_recv_frame(struct sk_buff *skb)
1186 + {
1187 +- u32 fr_len;
1188 ++ u64 crc_err;
1189 ++ u32 fr_len, fr_crc;
1190 + struct fc_lport *lport;
1191 + struct fcoe_rcv_info *fr;
1192 + struct fc_stats *stats;
1193 +@@ -549,6 +550,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
1194 + skb_pull(skb, sizeof(struct fcoe_hdr));
1195 + fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1196 +
1197 ++ stats = per_cpu_ptr(lport->stats, get_cpu());
1198 ++ stats->RxFrames++;
1199 ++ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1200 ++ put_cpu();
1201 ++
1202 + fp = (struct fc_frame *)skb;
1203 + fc_frame_init(fp);
1204 + fr_dev(fp) = lport;
1205 +@@ -631,16 +637,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
1206 + return;
1207 + }
1208 +
1209 +- stats = per_cpu_ptr(lport->stats, smp_processor_id());
1210 +- stats->RxFrames++;
1211 +- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1212 ++ fr_crc = le32_to_cpu(fr_crc(fp));
1213 +
1214 +- if (le32_to_cpu(fr_crc(fp)) !=
1215 +- ~crc32(~0, skb->data, fr_len)) {
1216 +- if (stats->InvalidCRCCount < 5)
1217 ++ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
1218 ++ stats = per_cpu_ptr(lport->stats, get_cpu());
1219 ++ crc_err = (stats->InvalidCRCCount++);
1220 ++ put_cpu();
1221 ++ if (crc_err < 5)
1222 + printk(KERN_WARNING PFX "dropping frame with "
1223 + "CRC error\n");
1224 +- stats->InvalidCRCCount++;
1225 + kfree_skb(skb);
1226 + return;
1227 + }
1228 +@@ -911,9 +916,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
1229 + __bnx2fc_destroy(interface);
1230 + }
1231 + mutex_unlock(&bnx2fc_dev_lock);
1232 +-
1233 +- /* Ensure ALL destroy work has been completed before return */
1234 +- flush_workqueue(bnx2fc_wq);
1235 + return;
1236 +
1237 + default:
1238 +@@ -1220,8 +1222,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
1239 + mutex_unlock(&n_port->lp_mutex);
1240 + bnx2fc_free_vport(interface->hba, port->lport);
1241 + bnx2fc_port_shutdown(port->lport);
1242 ++ bnx2fc_port_destroy(port);
1243 + bnx2fc_interface_put(interface);
1244 +- queue_work(bnx2fc_wq, &port->destroy_work);
1245 + return 0;
1246 + }
1247 +
1248 +@@ -1530,7 +1532,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1249 + port->lport = lport;
1250 + port->priv = interface;
1251 + port->get_netdev = bnx2fc_netdev;
1252 +- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1253 +
1254 + /* Configure fcoe_port */
1255 + rc = bnx2fc_lport_config(lport);
1256 +@@ -1658,8 +1659,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1257 + bnx2fc_interface_cleanup(interface);
1258 + bnx2fc_stop(interface);
1259 + list_del(&interface->list);
1260 ++ bnx2fc_port_destroy(port);
1261 + bnx2fc_interface_put(interface);
1262 +- queue_work(bnx2fc_wq, &port->destroy_work);
1263 + }
1264 +
1265 + /**
1266 +@@ -1700,15 +1701,12 @@ netdev_err:
1267 + return rc;
1268 + }
1269 +
1270 +-static void bnx2fc_destroy_work(struct work_struct *work)
1271 ++static void bnx2fc_port_destroy(struct fcoe_port *port)
1272 + {
1273 +- struct fcoe_port *port;
1274 + struct fc_lport *lport;
1275 +
1276 +- port = container_of(work, struct fcoe_port, destroy_work);
1277 + lport = port->lport;
1278 +-
1279 +- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1280 ++ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
1281 +
1282 + bnx2fc_if_destroy(lport);
1283 + }
1284 +@@ -2562,9 +2560,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
1285 + __bnx2fc_destroy(interface);
1286 + mutex_unlock(&bnx2fc_dev_lock);
1287 +
1288 +- /* Ensure ALL destroy work has been completed before return */
1289 +- flush_workqueue(bnx2fc_wq);
1290 +-
1291 + bnx2fc_ulp_stop(hba);
1292 + /* unregister cnic device */
1293 + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
1294 +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
1295 +index 01fcad7c8fae8..ef54f1638d207 100644
1296 +--- a/drivers/soc/mediatek/mtk-scpsys.c
1297 ++++ b/drivers/soc/mediatek/mtk-scpsys.c
1298 +@@ -341,17 +341,12 @@ out:
1299 + return ret;
1300 + }
1301 +
1302 +-static int init_clks(struct platform_device *pdev, struct clk **clk)
1303 ++static void init_clks(struct platform_device *pdev, struct clk **clk)
1304 + {
1305 + int i;
1306 +
1307 +- for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
1308 ++ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
1309 + clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
1310 +- if (IS_ERR(clk[i]))
1311 +- return PTR_ERR(clk[i]);
1312 +- }
1313 +-
1314 +- return 0;
1315 + }
1316 +
1317 + static struct scp *init_scp(struct platform_device *pdev,
1318 +@@ -361,7 +356,7 @@ static struct scp *init_scp(struct platform_device *pdev,
1319 + {
1320 + struct genpd_onecell_data *pd_data;
1321 + struct resource *res;
1322 +- int i, j, ret;
1323 ++ int i, j;
1324 + struct scp *scp;
1325 + struct clk *clk[CLK_MAX];
1326 +
1327 +@@ -416,9 +411,7 @@ static struct scp *init_scp(struct platform_device *pdev,
1328 +
1329 + pd_data->num_domains = num;
1330 +
1331 +- ret = init_clks(pdev, clk);
1332 +- if (ret)
1333 +- return ERR_PTR(ret);
1334 ++ init_clks(pdev, clk);
1335 +
1336 + for (i = 0; i < num; i++) {
1337 + struct scp_domain *scpd = &scp->domains[i];
1338 +diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
1339 +index b2fd7a3691964..82c24c85f45bc 100644
1340 +--- a/drivers/spi/spi-bcm-qspi.c
1341 ++++ b/drivers/spi/spi-bcm-qspi.c
1342 +@@ -520,7 +520,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
1343 + u32 rd = 0;
1344 + u32 wr = 0;
1345 +
1346 +- if (qspi->base[CHIP_SELECT]) {
1347 ++ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
1348 + rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
1349 + wr = (rd & ~0xff) | (1 << cs);
1350 + if (rd == wr)
1351 +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
1352 +index 419756ebf2c05..24196fb0d78a9 100644
1353 +--- a/drivers/spi/spi-meson-spicc.c
1354 ++++ b/drivers/spi/spi-meson-spicc.c
1355 +@@ -529,6 +529,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
1356 + writel_relaxed(0, spicc->base + SPICC_INTREG);
1357 +
1358 + irq = platform_get_irq(pdev, 0);
1359 ++ if (irq < 0) {
1360 ++ ret = irq;
1361 ++ goto out_master;
1362 ++ }
1363 ++
1364 + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
1365 + 0, NULL, spicc);
1366 + if (ret) {
1367 +diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
1368 +index 690e8ddf5f6b8..faca2ab758992 100644
1369 +--- a/drivers/spi/spi-mt65xx.c
1370 ++++ b/drivers/spi/spi-mt65xx.c
1371 +@@ -498,7 +498,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
1372 + else
1373 + mdata->state = MTK_SPI_IDLE;
1374 +
1375 +- if (!master->can_dma(master, master->cur_msg->spi, trans)) {
1376 ++ if (!master->can_dma(master, NULL, trans)) {
1377 + if (trans->rx_buf) {
1378 + cnt = mdata->xfer_len / 4;
1379 + ioread32_rep(mdata->base + SPI_RX_DATA_REG,
1380 +diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
1381 +index 5e9457d199279..294182cb71caf 100644
1382 +--- a/drivers/tty/n_gsm.c
1383 ++++ b/drivers/tty/n_gsm.c
1384 +@@ -313,6 +313,7 @@ static struct tty_driver *gsm_tty_driver;
1385 + #define GSM1_ESCAPE_BITS 0x20
1386 + #define XON 0x11
1387 + #define XOFF 0x13
1388 ++#define ISO_IEC_646_MASK 0x7F
1389 +
1390 + static const struct tty_port_operations gsm_port_ops;
1391 +
1392 +@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
1393 + int olen = 0;
1394 + while (len--) {
1395 + if (*input == GSM1_SOF || *input == GSM1_ESCAPE
1396 +- || *input == XON || *input == XOFF) {
1397 ++ || (*input & ISO_IEC_646_MASK) == XON
1398 ++ || (*input & ISO_IEC_646_MASK) == XOFF) {
1399 + *output++ = GSM1_ESCAPE;
1400 + *output++ = *input++ ^ GSM1_ESCAPE_BITS;
1401 + olen++;
1402 +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
1403 +index 8fedc075fb1eb..a76533a482459 100644
1404 +--- a/drivers/tty/serial/8250/8250_of.c
1405 ++++ b/drivers/tty/serial/8250/8250_of.c
1406 +@@ -104,8 +104,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
1407 + port->mapsize = resource_size(&resource);
1408 +
1409 + /* Check for shifted address mapping */
1410 +- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
1411 ++ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
1412 ++ if (prop >= port->mapsize) {
1413 ++ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
1414 ++ prop, &port->mapsize);
1415 ++ ret = -EINVAL;
1416 ++ goto err_unprepare;
1417 ++ }
1418 ++
1419 + port->mapbase += prop;
1420 ++ port->mapsize -= prop;
1421 ++ }
1422 +
1423 + port->iotype = UPIO_MEM;
1424 + if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
1425 +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1426 +index f54c18e4ae909..173885837e77f 100644
1427 +--- a/drivers/tty/serial/8250/8250_pci.c
1428 ++++ b/drivers/tty/serial/8250/8250_pci.c
1429 +@@ -4797,8 +4797,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
1430 + { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
1431 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
1432 + pbn_b2_4_115200 },
1433 ++ /* Brainboxes Devices */
1434 + /*
1435 +- * BrainBoxes UC-260
1436 ++ * Brainboxes UC-101
1437 ++ */
1438 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
1439 ++ PCI_ANY_ID, PCI_ANY_ID,
1440 ++ 0, 0,
1441 ++ pbn_b2_2_115200 },
1442 ++ /*
1443 ++ * Brainboxes UC-235/246
1444 ++ */
1445 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
1446 ++ PCI_ANY_ID, PCI_ANY_ID,
1447 ++ 0, 0,
1448 ++ pbn_b2_1_115200 },
1449 ++ /*
1450 ++ * Brainboxes UC-257
1451 ++ */
1452 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
1453 ++ PCI_ANY_ID, PCI_ANY_ID,
1454 ++ 0, 0,
1455 ++ pbn_b2_2_115200 },
1456 ++ /*
1457 ++ * Brainboxes UC-260/271/701/756
1458 + */
1459 + { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
1460 + PCI_ANY_ID, PCI_ANY_ID,
1461 +@@ -4806,7 +4828,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
1462 + pbn_b2_4_115200 },
1463 + { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
1464 + PCI_ANY_ID, PCI_ANY_ID,
1465 +- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1466 ++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1467 ++ pbn_b2_4_115200 },
1468 ++ /*
1469 ++ * Brainboxes UC-268
1470 ++ */
1471 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
1472 ++ PCI_ANY_ID, PCI_ANY_ID,
1473 ++ 0, 0,
1474 ++ pbn_b2_4_115200 },
1475 ++ /*
1476 ++ * Brainboxes UC-275/279
1477 ++ */
1478 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
1479 ++ PCI_ANY_ID, PCI_ANY_ID,
1480 ++ 0, 0,
1481 ++ pbn_b2_8_115200 },
1482 ++ /*
1483 ++ * Brainboxes UC-302
1484 ++ */
1485 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
1486 ++ PCI_ANY_ID, PCI_ANY_ID,
1487 ++ 0, 0,
1488 ++ pbn_b2_2_115200 },
1489 ++ /*
1490 ++ * Brainboxes UC-310
1491 ++ */
1492 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
1493 ++ PCI_ANY_ID, PCI_ANY_ID,
1494 ++ 0, 0,
1495 ++ pbn_b2_2_115200 },
1496 ++ /*
1497 ++ * Brainboxes UC-313
1498 ++ */
1499 ++ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
1500 ++ PCI_ANY_ID, PCI_ANY_ID,
1501 ++ 0, 0,
1502 ++ pbn_b2_2_115200 },
1503 ++ /*
1504 ++ * Brainboxes UC-320/324
1505 ++ */
1506 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
1507 ++ PCI_ANY_ID, PCI_ANY_ID,
1508 ++ 0, 0,
1509 ++ pbn_b2_1_115200 },
1510 ++ /*
1511 ++ * Brainboxes UC-346
1512 ++ */
1513 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
1514 ++ PCI_ANY_ID, PCI_ANY_ID,
1515 ++ 0, 0,
1516 ++ pbn_b2_4_115200 },
1517 ++ /*
1518 ++ * Brainboxes UC-357
1519 ++ */
1520 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
1521 ++ PCI_ANY_ID, PCI_ANY_ID,
1522 ++ 0, 0,
1523 ++ pbn_b2_2_115200 },
1524 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
1525 ++ PCI_ANY_ID, PCI_ANY_ID,
1526 ++ 0, 0,
1527 ++ pbn_b2_2_115200 },
1528 ++ /*
1529 ++ * Brainboxes UC-368
1530 ++ */
1531 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
1532 ++ PCI_ANY_ID, PCI_ANY_ID,
1533 ++ 0, 0,
1534 ++ pbn_b2_4_115200 },
1535 ++ /*
1536 ++ * Brainboxes UC-420/431
1537 ++ */
1538 ++ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
1539 ++ PCI_ANY_ID, PCI_ANY_ID,
1540 ++ 0, 0,
1541 + pbn_b2_4_115200 },
1542 + /*
1543 + * Perle PCI-RAS cards
1544 +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
1545 +index 50073ead5881b..ccaaf804df06d 100644
1546 +--- a/drivers/tty/serial/stm32-usart.c
1547 ++++ b/drivers/tty/serial/stm32-usart.c
1548 +@@ -509,7 +509,7 @@ static void stm32_start_tx(struct uart_port *port)
1549 + {
1550 + struct circ_buf *xmit = &port->state->xmit;
1551 +
1552 +- if (uart_circ_empty(xmit))
1553 ++ if (uart_circ_empty(xmit) && !port->x_char)
1554 + return;
1555 +
1556 + stm32_transmit_chars(port);
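
The stm32 fix matters because port->x_char carries a high-priority flow-control byte (XON/XOFF) that must be transmitted even when the circular TX buffer is empty; bailing out on an empty ring silently drops it. A small sketch of the corrected gate (the struct is a stand-in for the uart_port fields involved):

    #include <stdbool.h>

    struct tx_state {
            bool circ_empty;       /* uart_circ_empty(&port->state->xmit) */
            unsigned char x_char;  /* pending XON/XOFF, 0 if none */
    };

    /* Kick the transmitter if either data or an x_char is pending. */
    static bool should_start_tx(const struct tx_state *t)
    {
            return !t->circ_empty || t->x_char;
    }
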
1557 +diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
1558 +index 9a2ab6751a23c..5a4d08de546fe 100644
1559 +--- a/drivers/usb/common/ulpi.c
1560 ++++ b/drivers/usb/common/ulpi.c
1561 +@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
1562 + struct ulpi *ulpi = to_ulpi_dev(dev);
1563 + const struct ulpi_device_id *id;
1564 +
1565 +- /* Some ULPI devices don't have a vendor id so rely on OF match */
1566 +- if (ulpi->id.vendor == 0)
1567 ++ /*
1568 ++ * Some ULPI devices don't have a vendor id
1569 ++ * or provide an id_table so rely on OF match.
1570 ++ */
1571 ++ if (ulpi->id.vendor == 0 || !drv->id_table)
1572 + return of_driver_match_device(dev, driver);
1573 +
1574 + for (id = drv->id_table; id->vendor; id++)
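
With the ULPI change, the bus falls back to OF matching both when the device reports no vendor id and when the driver registers no id_table at all; previously the table walk below would have dereferenced a NULL pointer. A standalone sketch of the same matching order (all types here are illustrative stand-ins):

    #include <stdbool.h>
    #include <stddef.h>

    struct id_entry { unsigned vendor, product; };
    struct dev_ids  { unsigned vendor, product; bool of_matched; };
    struct drv_desc { const struct id_entry *id_table; };

    static bool bus_match(const struct dev_ids *dev, const struct drv_desc *drv)
    {
            const struct id_entry *id;

            /* No vendor id, or no table to walk: only firmware data
             * (OF) can pair device and driver. */
            if (dev->vendor == 0 || !drv->id_table)
                    return dev->of_matched;

            for (id = drv->id_table; id->vendor; id++)
                    if (id->vendor == dev->vendor && id->product == dev->product)
                            return true;
            return false;
    }
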
1575 +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1576 +index 11cc189bf1055..df661460e9f96 100644
1577 +--- a/drivers/usb/core/hcd.c
1578 ++++ b/drivers/usb/core/hcd.c
1579 +@@ -1670,6 +1670,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1580 + urb->hcpriv = NULL;
1581 + INIT_LIST_HEAD(&urb->urb_list);
1582 + atomic_dec(&urb->use_count);
1583 ++ /*
1584 ++ * Order the write of urb->use_count above before the read
1585 ++ * of urb->reject below. Pairs with the memory barriers in
1586 ++ * usb_kill_urb() and usb_poison_urb().
1587 ++ */
1588 ++ smp_mb__after_atomic();
1589 ++
1590 + atomic_dec(&urb->dev->urbnum);
1591 + if (atomic_read(&urb->reject))
1592 + wake_up(&usb_kill_urb_queue);
1593 +@@ -1779,6 +1786,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
1594 +
1595 + usb_anchor_resume_wakeups(anchor);
1596 + atomic_dec(&urb->use_count);
1597 ++ /*
1598 ++ * Order the write of urb->use_count above before the read
1599 ++ * of urb->reject below. Pairs with the memory barriers in
1600 ++ * usb_kill_urb() and usb_poison_urb().
1601 ++ */
1602 ++ smp_mb__after_atomic();
1603 ++
1604 + if (unlikely(atomic_read(&urb->reject)))
1605 + wake_up(&usb_kill_urb_queue);
1606 + usb_put_urb(urb);
1607 +diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
1608 +index 3cd7732c086e0..e88e04a24103f 100644
1609 +--- a/drivers/usb/core/urb.c
1610 ++++ b/drivers/usb/core/urb.c
1611 +@@ -692,6 +692,12 @@ void usb_kill_urb(struct urb *urb)
1612 + if (!(urb && urb->dev && urb->ep))
1613 + return;
1614 + atomic_inc(&urb->reject);
1615 ++ /*
1616 ++ * Order the write of urb->reject above before the read
1617 ++ * of urb->use_count below. Pairs with the barriers in
1618 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
1619 ++ */
1620 ++ smp_mb__after_atomic();
1621 +
1622 + usb_hcd_unlink_urb(urb, -ENOENT);
1623 + wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
1624 +@@ -733,6 +739,12 @@ void usb_poison_urb(struct urb *urb)
1625 + if (!urb)
1626 + return;
1627 + atomic_inc(&urb->reject);
1628 ++ /*
1629 ++ * Order the write of urb->reject above before the read
1630 ++ * of urb->use_count below. Pairs with the barriers in
1631 ++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
1632 ++ */
1633 ++ smp_mb__after_atomic();
1634 +
1635 + if (!urb->dev || !urb->ep)
1636 + return;
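
The barrier comments in hcd.c and urb.c describe a symmetric protocol: one side writes use_count then reads reject, the other writes reject then reads use_count, and a full barrier after each atomic write keeps the two sides from both reading stale values and missing a wakeup. A userspace analogue with C11 atomics, with seq_cst fences standing in for smp_mb__after_atomic():

    #include <stdatomic.h>

    static atomic_int use_count, reject;

    /* Giveback side: drop the use count, then look for a killer. */
    static int giveback_side(void)
    {
            atomic_fetch_sub_explicit(&use_count, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
            return atomic_load_explicit(&reject, memory_order_relaxed);
            /* nonzero => wake the waiter in kill/poison */
    }

    /* Kill side: raise reject, then inspect the use count. */
    static int kill_side(void)
    {
            atomic_fetch_add_explicit(&reject, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst); /* pairs with the fence above */
            return atomic_load_explicit(&use_count, memory_order_relaxed);
            /* zero => nothing left to wait for */
    }
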
1637 +diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
1638 +index 282737e4609ce..2c65a9bb3c81b 100644
1639 +--- a/drivers/usb/gadget/function/f_sourcesink.c
1640 ++++ b/drivers/usb/gadget/function/f_sourcesink.c
1641 +@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
1642 +
1643 + if (is_iso) {
1644 + switch (speed) {
1645 ++ case USB_SPEED_SUPER_PLUS:
1646 + case USB_SPEED_SUPER:
1647 + size = ss->isoc_maxpacket *
1648 + (ss->isoc_mult + 1) *
1649 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1650 +index 3ba4e060fd051..66e7f5d123c46 100644
1651 +--- a/drivers/usb/storage/unusual_devs.h
1652 ++++ b/drivers/usb/storage/unusual_devs.h
1653 +@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
1654 + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
1655 + US_FL_SCM_MULT_TARG ),
1656 +
1657 ++/*
1658 ++ * Reported by DocMAX <mail@××××××××××.de>
1659 ++ * and Thomas Weißschuh <linux@××××××××××.net>
1660 ++ */
1661 ++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
1662 ++ "VIA Labs, Inc.",
1663 ++ "VL817 SATA Bridge",
1664 ++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1665 ++ US_FL_IGNORE_UAS),
1666 ++
1667 + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1668 + "ST",
1669 + "2A",
1670 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
1671 +index 228d88c7bdb26..e4308f97d9739 100644
1672 +--- a/drivers/usb/typec/tcpm.c
1673 ++++ b/drivers/usb/typec/tcpm.c
1674 +@@ -3865,7 +3865,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
1675 + case SNK_TRYWAIT_DEBOUNCE:
1676 + break;
1677 + case SNK_ATTACH_WAIT:
1678 +- tcpm_set_state(port, SNK_UNATTACHED, 0);
1679 ++ case SNK_DEBOUNCED:
1680 ++ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
1681 + break;
1682 +
1683 + case SNK_NEGOTIATE_CAPABILITIES:
1684 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1685 +index c64e4a1be2f29..e90b60470344d 100644
1686 +--- a/fs/ext4/inline.c
1687 ++++ b/fs/ext4/inline.c
1688 +@@ -1125,7 +1125,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
1689 + struct ext4_iloc *iloc,
1690 + void *buf, int inline_size)
1691 + {
1692 +- ext4_create_inline_data(handle, inode, inline_size);
1693 ++ int ret;
1694 ++
1695 ++ ret = ext4_create_inline_data(handle, inode, inline_size);
1696 ++ if (ret) {
1697 ++ ext4_msg(inode->i_sb, KERN_EMERG,
1698 ++ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
1699 ++ inode->i_ino, ret);
1700 ++ return;
1701 ++ }
1702 + ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
1703 + ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1704 + }
1705 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1706 +index 733fd9e4f0a15..a968b8b4b982f 100644
1707 +--- a/fs/nfs/dir.c
1708 ++++ b/fs/nfs/dir.c
1709 +@@ -1626,6 +1626,24 @@ out:
1710 +
1711 + no_open:
1712 + res = nfs_lookup(dir, dentry, lookup_flags);
1713 ++ if (!res) {
1714 ++ inode = d_inode(dentry);
1715 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1716 ++ !S_ISDIR(inode->i_mode))
1717 ++ res = ERR_PTR(-ENOTDIR);
1718 ++ else if (inode && S_ISREG(inode->i_mode))
1719 ++ res = ERR_PTR(-EOPENSTALE);
1720 ++ } else if (!IS_ERR(res)) {
1721 ++ inode = d_inode(res);
1722 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1723 ++ !S_ISDIR(inode->i_mode)) {
1724 ++ dput(res);
1725 ++ res = ERR_PTR(-ENOTDIR);
1726 ++ } else if (inode && S_ISREG(inode->i_mode)) {
1727 ++ dput(res);
1728 ++ res = ERR_PTR(-EOPENSTALE);
1729 ++ }
1730 ++ }
1731 + if (switched) {
1732 + d_lookup_done(dentry);
1733 + if (!res)
1734 +@@ -2015,6 +2033,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1735 +
1736 + trace_nfs_link_enter(inode, dir, dentry);
1737 + d_drop(dentry);
1738 ++ if (S_ISREG(inode->i_mode))
1739 ++ nfs_sync_inode(inode);
1740 + error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
1741 + if (error == 0) {
1742 + ihold(inode);
1743 +@@ -2103,6 +2123,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1744 + }
1745 + }
1746 +
1747 ++ if (S_ISREG(old_inode->i_mode))
1748 ++ nfs_sync_inode(old_inode);
1749 + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
1750 + if (IS_ERR(task)) {
1751 + error = PTR_ERR(task);
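
The nfs_lookup fallback now validates what it found: if the caller demanded a directory (LOOKUP_DIRECTORY) and the dentry resolved to something else, the open fails with ENOTDIR, and a regular file is turned into EOPENSTALE so the VFS retries through the normal open path. A compact sketch of that decision (EOPENSTALE is a kernel-internal errno, reproduced here by value; the types are stand-ins):

    #include <errno.h>
    #include <stdbool.h>

    #define LOOKUP_DIRECTORY 0x0002  /* as in include/linux/namei.h */
    #define EOPENSTALE 518           /* kernel-internal errno value */

    struct found { bool is_dir, is_reg; };

    /* 0 if the lookup result may be used, else a negative errno. */
    static int validate_lookup(unsigned flags, const struct found *f)
    {
            if ((flags & LOOKUP_DIRECTORY) && !f->is_dir)
                    return -ENOTDIR;
            if (f->is_reg)
                    return -EOPENSTALE;  /* let the VFS retry the open */
            return 0;
    }
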
1752 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1753 +index dfb2a790efc13..ed5429d18595c 100644
1754 +--- a/fs/nfsd/nfs4state.c
1755 ++++ b/fs/nfsd/nfs4state.c
1756 +@@ -3446,8 +3446,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
1757 + status = nfserr_clid_inuse;
1758 + if (client_has_state(old)
1759 + && !same_creds(&unconf->cl_cred,
1760 +- &old->cl_cred))
1761 ++ &old->cl_cred)) {
1762 ++ old = NULL;
1763 + goto out;
1764 ++ }
1765 + status = mark_client_expired_locked(old);
1766 + if (status) {
1767 + old = NULL;
1768 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
1769 +index f5500d2a38797..ec8089a313906 100644
1770 +--- a/fs/udf/inode.c
1771 ++++ b/fs/udf/inode.c
1772 +@@ -251,10 +251,6 @@ int udf_expand_file_adinicb(struct inode *inode)
1773 + char *kaddr;
1774 + struct udf_inode_info *iinfo = UDF_I(inode);
1775 + int err;
1776 +- struct writeback_control udf_wbc = {
1777 +- .sync_mode = WB_SYNC_NONE,
1778 +- .nr_to_write = 1,
1779 +- };
1780 +
1781 + WARN_ON_ONCE(!inode_is_locked(inode));
1782 + if (!iinfo->i_lenAlloc) {
1783 +@@ -298,8 +294,10 @@ int udf_expand_file_adinicb(struct inode *inode)
1784 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
1785 + /* from now on we have normal address_space methods */
1786 + inode->i_data.a_ops = &udf_aops;
1787 ++ set_page_dirty(page);
1788 ++ unlock_page(page);
1789 + up_write(&iinfo->i_data_sem);
1790 +- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
1791 ++ err = filemap_fdatawrite(inode->i_mapping);
1792 + if (err) {
1793 + /* Restore everything back so that we don't lose data... */
1794 + lock_page(page);
1795 +@@ -311,6 +309,7 @@ int udf_expand_file_adinicb(struct inode *inode)
1796 + unlock_page(page);
1797 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
1798 + inode->i_data.a_ops = &udf_adinicb_aops;
1799 ++ iinfo->i_lenAlloc = inode->i_size;
1800 + up_write(&iinfo->i_data_sem);
1801 + }
1802 + put_page(page);
1803 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1804 +index 50ab7c8fd3090..58ee9d2d6a3ca 100644
1805 +--- a/include/linux/netdevice.h
1806 ++++ b/include/linux/netdevice.h
1807 +@@ -2346,6 +2346,7 @@ struct packet_type {
1808 + struct net_device *);
1809 + bool (*id_match)(struct packet_type *ptype,
1810 + struct sock *sk);
1811 ++ struct net *af_packet_net;
1812 + void *af_packet_priv;
1813 + struct list_head list;
1814 + };
1815 +diff --git a/include/net/ip.h b/include/net/ip.h
1816 +index e8fa25280cbfd..d1a4efedbc039 100644
1817 +--- a/include/net/ip.h
1818 ++++ b/include/net/ip.h
1819 +@@ -441,19 +441,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
1820 + {
1821 + struct iphdr *iph = ip_hdr(skb);
1822 +
1823 ++ /* We had many attacks based on IPID, use the private
1824 ++ * generator as much as we can.
1825 ++ */
1826 ++ if (sk && inet_sk(sk)->inet_daddr) {
1827 ++ iph->id = htons(inet_sk(sk)->inet_id);
1828 ++ inet_sk(sk)->inet_id += segs;
1829 ++ return;
1830 ++ }
1831 + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
1832 +- /* This is only to work around buggy Windows95/2000
1833 +- * VJ compression implementations. If the ID field
1834 +- * does not change, they drop every other packet in
1835 +- * a TCP stream using header compression.
1836 +- */
1837 +- if (sk && inet_sk(sk)->inet_daddr) {
1838 +- iph->id = htons(inet_sk(sk)->inet_id);
1839 +- inet_sk(sk)->inet_id += segs;
1840 +- } else {
1841 +- iph->id = 0;
1842 +- }
1843 ++ iph->id = 0;
1844 + } else {
1845 ++ /* Unfortunately we need the big hammer to get a suitable IPID */
1846 + __ip_select_ident(net, iph, segs);
1847 + }
1848 + }
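
The reordered ip.h logic makes the preference explicit: a connected socket always uses its private per-socket counter, DF packets that honour the bit get a constant zero id, and only the remainder pays for the hashed generator. The decision reduces to a small pure function:

    #include <stdbool.h>

    enum ipid_src { IPID_SOCKET, IPID_ZERO, IPID_HASHED };

    /* Mirrors the order of the fixed ip_select_ident_segs(). */
    static enum ipid_src pick_ipid(bool connected_sock, bool df, bool ignore_df)
    {
            if (connected_sock)
                    return IPID_SOCKET;  /* private per-socket counter */
            if (df && !ignore_df)
                    return IPID_ZERO;    /* DF packets need no real id */
            return IPID_HASHED;          /* __ip_select_ident() fallback */
    }
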
1849 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
1850 +index 62c936230cc8b..b4fea9dd589d4 100644
1851 +--- a/include/net/ip6_fib.h
1852 ++++ b/include/net/ip6_fib.h
1853 +@@ -243,7 +243,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
1854 + fn = rcu_dereference(f6i->fib6_node);
1855 +
1856 + if (fn) {
1857 +- *cookie = fn->fn_sernum;
1858 ++ *cookie = READ_ONCE(fn->fn_sernum);
1859 + /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
1860 + smp_rmb();
1861 + status = true;
1862 +diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
1863 +index b4d6b29bca62a..7ecac2cd10206 100644
1864 +--- a/include/net/netfilter/nf_nat_l4proto.h
1865 ++++ b/include/net/netfilter/nf_nat_l4proto.h
1866 +@@ -74,7 +74,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
1867 + struct nf_conntrack_tuple *tuple,
1868 + const struct nf_nat_range2 *range,
1869 + enum nf_nat_manip_type maniptype,
1870 +- const struct nf_conn *ct, u16 *rover);
1871 ++ const struct nf_conn *ct);
1872 +
1873 + int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
1874 + struct nf_nat_range2 *range);
1875 +diff --git a/kernel/audit.c b/kernel/audit.c
1876 +index c5e034fe14bbb..7dc14a4d9e3cf 100644
1877 +--- a/kernel/audit.c
1878 ++++ b/kernel/audit.c
1879 +@@ -549,20 +549,22 @@ static void kauditd_printk_skb(struct sk_buff *skb)
1880 + /**
1881 + * kauditd_rehold_skb - Handle an audit record send failure in the hold queue
1882 + * @skb: audit record
1883 ++ * @error: error code (unused)
1884 + *
1885 + * Description:
1886 + * This should only be used by the kauditd_thread when it fails to flush the
1887 + * hold queue.
1888 + */
1889 +-static void kauditd_rehold_skb(struct sk_buff *skb)
1890 ++static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
1891 + {
1892 +- /* put the record back in the queue at the same place */
1893 +- skb_queue_head(&audit_hold_queue, skb);
1894 ++ /* put the record back in the queue */
1895 ++ skb_queue_tail(&audit_hold_queue, skb);
1896 + }
1897 +
1898 + /**
1899 + * kauditd_hold_skb - Queue an audit record, waiting for auditd
1900 + * @skb: audit record
1901 ++ * @error: error code
1902 + *
1903 + * Description:
1904 + * Queue the audit record, waiting for an instance of auditd. When this
1905 +@@ -572,19 +574,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
1906 + * and queue it, if we have room. If we want to hold on to the record, but we
1907 + * don't have room, record a record lost message.
1908 + */
1909 +-static void kauditd_hold_skb(struct sk_buff *skb)
1910 ++static void kauditd_hold_skb(struct sk_buff *skb, int error)
1911 + {
1912 + /* at this point it is uncertain if we will ever send this to auditd so
1913 + * try to send the message via printk before we go any further */
1914 + kauditd_printk_skb(skb);
1915 +
1916 + /* can we just silently drop the message? */
1917 +- if (!audit_default) {
1918 +- kfree_skb(skb);
1919 +- return;
1920 ++ if (!audit_default)
1921 ++ goto drop;
1922 ++
1923 ++ /* the hold queue is only for when the daemon goes away completely,
1924 ++ * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
1925 ++ * record on the retry queue unless it's full, in which case drop it
1926 ++ */
1927 ++ if (error == -EAGAIN) {
1928 ++ if (!audit_backlog_limit ||
1929 ++ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
1930 ++ skb_queue_tail(&audit_retry_queue, skb);
1931 ++ return;
1932 ++ }
1933 ++ audit_log_lost("kauditd retry queue overflow");
1934 ++ goto drop;
1935 + }
1936 +
1937 +- /* if we have room, queue the message */
1938 ++ /* if we have room in the hold queue, queue the message */
1939 + if (!audit_backlog_limit ||
1940 + skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
1941 + skb_queue_tail(&audit_hold_queue, skb);
1942 +@@ -593,24 +607,32 @@ static void kauditd_hold_skb(struct sk_buff *skb)
1943 +
1944 + /* we have no other options - drop the message */
1945 + audit_log_lost("kauditd hold queue overflow");
1946 ++drop:
1947 + kfree_skb(skb);
1948 + }
1949 +
1950 + /**
1951 + * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
1952 + * @skb: audit record
1953 ++ * @error: error code (unused)
1954 + *
1955 + * Description:
1956 + * Not as serious as kauditd_hold_skb() as we still have a connected auditd,
1957 + * but for some reason we are having problems sending it audit records so
1958 + * queue the given record and attempt to resend.
1959 + */
1960 +-static void kauditd_retry_skb(struct sk_buff *skb)
1961 ++static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
1962 + {
1963 +- /* NOTE: because records should only live in the retry queue for a
1964 +- * short period of time, before either being sent or moved to the hold
1965 +- * queue, we don't currently enforce a limit on this queue */
1966 +- skb_queue_tail(&audit_retry_queue, skb);
1967 ++ if (!audit_backlog_limit ||
1968 ++ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
1969 ++ skb_queue_tail(&audit_retry_queue, skb);
1970 ++ return;
1971 ++ }
1972 ++
1973 ++ /* we have to drop the record, send it via printk as a last effort */
1974 ++ kauditd_printk_skb(skb);
1975 ++ audit_log_lost("kauditd retry queue overflow");
1976 ++ kfree_skb(skb);
1977 + }
1978 +
1979 + /**
1980 +@@ -648,7 +670,7 @@ static void auditd_reset(const struct auditd_connection *ac)
1981 + /* flush the retry queue to the hold queue, but don't touch the main
1982 + * queue since we need to process that normally for multicast */
1983 + while ((skb = skb_dequeue(&audit_retry_queue)))
1984 +- kauditd_hold_skb(skb);
1985 ++ kauditd_hold_skb(skb, -ECONNREFUSED);
1986 + }
1987 +
1988 + /**
1989 +@@ -722,16 +744,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
1990 + struct sk_buff_head *queue,
1991 + unsigned int retry_limit,
1992 + void (*skb_hook)(struct sk_buff *skb),
1993 +- void (*err_hook)(struct sk_buff *skb))
1994 ++ void (*err_hook)(struct sk_buff *skb, int error))
1995 + {
1996 + int rc = 0;
1997 +- struct sk_buff *skb;
1998 ++ struct sk_buff *skb = NULL;
1999 ++ struct sk_buff *skb_tail;
2000 + unsigned int failed = 0;
2001 +
2002 + /* NOTE: kauditd_thread takes care of all our locking, we just use
2003 + * the netlink info passed to us (e.g. sk and portid) */
2004 +
2005 +- while ((skb = skb_dequeue(queue))) {
2006 ++ skb_tail = skb_peek_tail(queue);
2007 ++ while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
2008 + /* call the skb_hook for each skb we touch */
2009 + if (skb_hook)
2010 + (*skb_hook)(skb);
2011 +@@ -739,7 +763,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
2012 + /* can we send to anyone via unicast? */
2013 + if (!sk) {
2014 + if (err_hook)
2015 +- (*err_hook)(skb);
2016 ++ (*err_hook)(skb, -ECONNREFUSED);
2017 + continue;
2018 + }
2019 +
2020 +@@ -753,7 +777,7 @@ retry:
2021 + rc == -ECONNREFUSED || rc == -EPERM) {
2022 + sk = NULL;
2023 + if (err_hook)
2024 +- (*err_hook)(skb);
2025 ++ (*err_hook)(skb, rc);
2026 + if (rc == -EAGAIN)
2027 + rc = 0;
2028 + /* continue to drain the queue */
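
The kauditd_send_queue() change is subtle: because the error hook may now requeue a record onto the very queue being drained (the -EAGAIN path above), the loop snapshots the tail first and never walks past it, so one pass touches each record at most once. A runnable analogue with a plain singly linked queue:

    #include <stddef.h>

    struct node  { struct node *next; };
    struct queue { struct node *head, *tail; };

    static struct node *dequeue(struct queue *q)
    {
            struct node *n = q->head;
            if (n && !(q->head = n->next))
                    q->tail = NULL;
            return n;
    }

    /* Drain at most the elements present at entry; anything the handler
     * re-appends waits for the next pass (cf. skb_peek_tail() above). */
    static void drain_once(struct queue *q, void (*handle)(struct node *))
    {
            struct node *stop = q->tail;  /* snapshot before draining */
            struct node *n = NULL;

            while (n != stop && (n = dequeue(q)))
                    handle(n);
    }
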
2029 +diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
2030 +index 4210152e56f0f..aad7c8fcb22fe 100644
2031 +--- a/kernel/power/wakelock.c
2032 ++++ b/kernel/power/wakelock.c
2033 +@@ -39,23 +39,19 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
2034 + {
2035 + struct rb_node *node;
2036 + struct wakelock *wl;
2037 +- char *str = buf;
2038 +- char *end = buf + PAGE_SIZE;
2039 ++ int len = 0;
2040 +
2041 + mutex_lock(&wakelocks_lock);
2042 +
2043 + for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
2044 + wl = rb_entry(node, struct wakelock, node);
2045 + if (wl->ws.active == show_active)
2046 +- str += scnprintf(str, end - str, "%s ", wl->name);
2047 ++ len += sysfs_emit_at(buf, len, "%s ", wl->name);
2048 + }
2049 +- if (str > buf)
2050 +- str--;
2051 +-
2052 +- str += scnprintf(str, end - str, "\n");
2053 ++ len += sysfs_emit_at(buf, len, "\n");
2054 +
2055 + mutex_unlock(&wakelocks_lock);
2056 +- return (str - buf);
2057 ++ return len;
2058 + }
2059 +
2060 + #if CONFIG_PM_WAKELOCKS_LIMIT > 0
2061 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
2062 +index c0dbb8ad00376..196d0d8320070 100644
2063 +--- a/net/bluetooth/hci_event.c
2064 ++++ b/net/bluetooth/hci_event.c
2065 +@@ -5391,6 +5391,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
2066 + struct hci_ev_le_advertising_info *ev = ptr;
2067 + s8 rssi;
2068 +
2069 ++ if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
2070 ++ bt_dev_err(hdev, "Malicious advertising data.");
2071 ++ break;
2072 ++ }
2073 ++
2074 + if (ev->length <= HCI_MAX_AD_LENGTH &&
2075 + ev->data + ev->length <= skb_tail_pointer(skb)) {
2076 + rssi = ev->data[ev->length];
2077 +@@ -5402,11 +5407,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
2078 + }
2079 +
2080 + ptr += sizeof(*ev) + ev->length + 1;
2081 +-
2082 +- if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
2083 +- bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
2084 +- break;
2085 +- }
2086 + }
2087 +
2088 + hci_dev_unlock(hdev);
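
The hci_event.c fix moves the size check ahead of the dereference: the loop must prove a whole event header fits in the remaining buffer before reading ev->length, otherwise a crafted report reads past the skb. The same pattern as a standalone parser (the struct layout is a simplified stand-in for hci_ev_le_advertising_info):

    #include <stdint.h>
    #include <stddef.h>

    struct adv_info {
            uint8_t length;
            uint8_t data[];  /* followed by one RSSI byte */
    };

    /* Walk packed reports in buf[0..len); reject anything that overruns. */
    static int walk_reports(const uint8_t *buf, size_t len,
                            void (*emit)(const struct adv_info *))
    {
            const uint8_t *ptr = buf, *end = buf + len;

            while (ptr < end) {
                    const struct adv_info *ev = (const void *)ptr;

                    if ((size_t)(end - ptr) < sizeof(*ev))
                            return -1;  /* header would overrun */
                    if ((size_t)(end - ptr) < sizeof(*ev) + ev->length + 1)
                            return -1;  /* payload + RSSI would overrun */
                    emit(ev);
                    ptr += sizeof(*ev) + ev->length + 1;
            }
            return 0;
    }
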
2089 +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
2090 +index 63881f72ef71c..2808c5f9c1f05 100644
2091 +--- a/net/core/net-procfs.c
2092 ++++ b/net/core/net-procfs.c
2093 +@@ -182,12 +182,23 @@ static const struct seq_operations softnet_seq_ops = {
2094 + .show = softnet_seq_show,
2095 + };
2096 +
2097 +-static void *ptype_get_idx(loff_t pos)
2098 ++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
2099 + {
2100 ++ struct list_head *ptype_list = NULL;
2101 + struct packet_type *pt = NULL;
2102 ++ struct net_device *dev;
2103 + loff_t i = 0;
2104 + int t;
2105 +
2106 ++ for_each_netdev_rcu(seq_file_net(seq), dev) {
2107 ++ ptype_list = &dev->ptype_all;
2108 ++ list_for_each_entry_rcu(pt, ptype_list, list) {
2109 ++ if (i == pos)
2110 ++ return pt;
2111 ++ ++i;
2112 ++ }
2113 ++ }
2114 ++
2115 + list_for_each_entry_rcu(pt, &ptype_all, list) {
2116 + if (i == pos)
2117 + return pt;
2118 +@@ -208,22 +219,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2119 + __acquires(RCU)
2120 + {
2121 + rcu_read_lock();
2122 +- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2123 ++ return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2124 + }
2125 +
2126 + static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2127 + {
2128 ++ struct net_device *dev;
2129 + struct packet_type *pt;
2130 + struct list_head *nxt;
2131 + int hash;
2132 +
2133 + ++*pos;
2134 + if (v == SEQ_START_TOKEN)
2135 +- return ptype_get_idx(0);
2136 ++ return ptype_get_idx(seq, 0);
2137 +
2138 + pt = v;
2139 + nxt = pt->list.next;
2140 ++ if (pt->dev) {
2141 ++ if (nxt != &pt->dev->ptype_all)
2142 ++ goto found;
2143 ++
2144 ++ dev = pt->dev;
2145 ++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
2146 ++ if (!list_empty(&dev->ptype_all)) {
2147 ++ nxt = dev->ptype_all.next;
2148 ++ goto found;
2149 ++ }
2150 ++ }
2151 ++
2152 ++ nxt = ptype_all.next;
2153 ++ goto ptype_all;
2154 ++ }
2155 ++
2156 + if (pt->type == htons(ETH_P_ALL)) {
2157 ++ptype_all:
2158 + if (nxt != &ptype_all)
2159 + goto found;
2160 + hash = 0;
2161 +@@ -252,7 +281,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
2162 +
2163 + if (v == SEQ_START_TOKEN)
2164 + seq_puts(seq, "Type Device Function\n");
2165 +- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
2166 ++ else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
2167 ++ (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
2168 + if (pt->type == htons(ETH_P_ALL))
2169 + seq_puts(seq, "ALL ");
2170 + else
2171 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2172 +index 907dd0c7e8a66..2837cc03f69e2 100644
2173 +--- a/net/core/rtnetlink.c
2174 ++++ b/net/core/rtnetlink.c
2175 +@@ -2942,9 +2942,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
2176 + {
2177 + struct net *net = sock_net(skb->sk);
2178 + const struct rtnl_link_ops *ops;
2179 +- const struct rtnl_link_ops *m_ops = NULL;
2180 ++ const struct rtnl_link_ops *m_ops;
2181 + struct net_device *dev;
2182 +- struct net_device *master_dev = NULL;
2183 ++ struct net_device *master_dev;
2184 + struct ifinfomsg *ifm;
2185 + char kind[MODULE_NAME_LEN];
2186 + char ifname[IFNAMSIZ];
2187 +@@ -2979,6 +2979,8 @@ replay:
2188 + dev = NULL;
2189 + }
2190 +
2191 ++ master_dev = NULL;
2192 ++ m_ops = NULL;
2193 + if (dev) {
2194 + master_dev = netdev_master_upper_dev_get(dev);
2195 + if (master_dev)
2196 +diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
2197 +index 6d4c71a52b6b2..3407ee1159f7b 100644
2198 +--- a/net/ieee802154/nl802154.c
2199 ++++ b/net/ieee802154/nl802154.c
2200 +@@ -1459,7 +1459,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
2201 +
2202 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2203 + if (!hdr)
2204 +- return -1;
2205 ++ return -ENOBUFS;
2206 +
2207 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2208 + goto nla_put_failure;
2209 +@@ -1650,7 +1650,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
2210 +
2211 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2212 + if (!hdr)
2213 +- return -1;
2214 ++ return -ENOBUFS;
2215 +
2216 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2217 + goto nla_put_failure;
2218 +@@ -1828,7 +1828,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
2219 +
2220 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2221 + if (!hdr)
2222 +- return -1;
2223 ++ return -ENOBUFS;
2224 +
2225 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2226 + goto nla_put_failure;
2227 +@@ -2005,7 +2005,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
2228 +
2229 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
2230 + if (!hdr)
2231 +- return -1;
2232 ++ return -ENOBUFS;
2233 +
2234 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
2235 + goto nla_put_failure;
2236 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2237 +index 25beecee89494..06a981676356c 100644
2238 +--- a/net/ipv4/ip_output.c
2239 ++++ b/net/ipv4/ip_output.c
2240 +@@ -160,12 +160,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
2241 + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
2242 + iph->saddr = saddr;
2243 + iph->protocol = sk->sk_protocol;
2244 +- if (ip_dont_fragment(sk, &rt->dst)) {
2245 ++ /* Do not bother generating IPID for small packets (eg SYNACK) */
2246 ++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
2247 + iph->frag_off = htons(IP_DF);
2248 + iph->id = 0;
2249 + } else {
2250 + iph->frag_off = 0;
2251 +- __ip_select_ident(net, iph, 1);
2252 ++ /* TCP packets here are SYNACK with fat IPv4/TCP options.
2253 ++ * Avoid using the hashed IP ident generator.
2254 ++ */
2255 ++ if (sk->sk_protocol == IPPROTO_TCP)
2256 ++ iph->id = (__force __be16)prandom_u32();
2257 ++ else
2258 ++ __ip_select_ident(net, iph, 1);
2259 + }
2260 +
2261 + if (opt && opt->opt.optlen) {
2262 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2263 +index 862744c285482..276442443d322 100644
2264 +--- a/net/ipv4/ping.c
2265 ++++ b/net/ipv4/ping.c
2266 +@@ -225,7 +225,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
2267 + continue;
2268 + }
2269 +
2270 +- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
2271 ++ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
2272 ++ sk->sk_bound_dev_if != inet_sdif(skb))
2273 + continue;
2274 +
2275 + sock_hold(sk);
2276 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2277 +index 21800979ed621..8cae691c3c9f4 100644
2278 +--- a/net/ipv4/raw.c
2279 ++++ b/net/ipv4/raw.c
2280 +@@ -725,6 +725,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2281 + int ret = -EINVAL;
2282 + int chk_addr_ret;
2283 +
2284 ++ lock_sock(sk);
2285 + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
2286 + goto out;
2287 +
2288 +@@ -744,7 +745,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2289 + inet->inet_saddr = 0; /* Use device */
2290 + sk_dst_reset(sk);
2291 + ret = 0;
2292 +-out: return ret;
2293 ++out:
2294 ++ release_sock(sk);
2295 ++ return ret;
2296 + }
2297 +
2298 + /*
2299 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2300 +index 941c655cad917..c97c027a8d773 100644
2301 +--- a/net/ipv4/tcp_output.c
2302 ++++ b/net/ipv4/tcp_output.c
2303 +@@ -968,6 +968,8 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
2304 +
2305 + static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
2306 + {
2307 ++ struct tcp_sock *tp = tcp_sk(sk);
2308 ++ ktime_t expire, now;
2309 + u64 len_ns;
2310 + u32 rate;
2311 +
2312 +@@ -979,12 +981,28 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
2313 +
2314 + len_ns = (u64)skb->len * NSEC_PER_SEC;
2315 + do_div(len_ns, rate);
2316 +- hrtimer_start(&tcp_sk(sk)->pacing_timer,
2317 +- ktime_add_ns(ktime_get(), len_ns),
2318 ++ now = ktime_get();
2319 ++ /* If hrtimer is already armed, then our caller has not
2320 ++ * used tcp_pacing_check().
2321 ++ */
2322 ++ if (unlikely(hrtimer_is_queued(&tp->pacing_timer))) {
2323 ++ expire = hrtimer_get_softexpires(&tp->pacing_timer);
2324 ++ if (ktime_after(expire, now))
2325 ++ now = expire;
2326 ++ if (hrtimer_try_to_cancel(&tp->pacing_timer) == 1)
2327 ++ __sock_put(sk);
2328 ++ }
2329 ++ hrtimer_start(&tp->pacing_timer, ktime_add_ns(now, len_ns),
2330 + HRTIMER_MODE_ABS_PINNED_SOFT);
2331 + sock_hold(sk);
2332 + }
2333 +
2334 ++static bool tcp_pacing_check(const struct sock *sk)
2335 ++{
2336 ++ return tcp_needs_internal_pacing(sk) &&
2337 ++ hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
2338 ++}
2339 ++
2340 + static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
2341 + {
2342 + skb->skb_mstamp = tp->tcp_mstamp;
2343 +@@ -2121,6 +2139,9 @@ static int tcp_mtu_probe(struct sock *sk)
2344 + if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2345 + return -1;
2346 +
2347 ++ if (tcp_pacing_check(sk))
2348 ++ return -1;
2349 ++
2350 + /* We're allowed to probe. Build it now. */
2351 + nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2352 + if (!nskb)
2353 +@@ -2194,12 +2215,6 @@ static int tcp_mtu_probe(struct sock *sk)
2354 + return -1;
2355 + }
2356 +
2357 +-static bool tcp_pacing_check(const struct sock *sk)
2358 +-{
2359 +- return tcp_needs_internal_pacing(sk) &&
2360 +- hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
2361 +-}
2362 +-
2363 + /* TCP Small Queues :
2364 + * Control number of packets in qdisc/devices to two packets / or ~1 ms.
2365 + * (These limits are doubled for retransmits)
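
Moving tcp_pacing_check() up lets the MTU-probe path skip building a probe while the pacing hrtimer is still queued; and when tcp_internal_pacing() is nevertheless entered with the timer armed, the new expiry is pushed no earlier than the pending one before re-arming, keeping the sock_hold()/__sock_put() reference accounting balanced. The expiry rule itself is tiny:

    #include <stdint.h>
    #include <stdbool.h>

    /* Never arm the pacing timer earlier than an already-pending expiry
     * (cf. the fixed tcp_internal_pacing() above). */
    static uint64_t pacing_expiry_ns(bool queued, uint64_t pending_ns,
                                     uint64_t now_ns, uint64_t len_ns)
    {
            uint64_t base = now_ns;

            if (queued && pending_ns > base)
                    base = pending_ns;
            return base + len_ns;
    }
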
2366 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2367 +index e0e464b72c1fa..5ff67cb8b6ace 100644
2368 +--- a/net/ipv6/ip6_fib.c
2369 ++++ b/net/ipv6/ip6_fib.c
2370 +@@ -112,7 +112,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
2371 + fn = rcu_dereference_protected(f6i->fib6_node,
2372 + lockdep_is_held(&f6i->fib6_table->tb6_lock));
2373 + if (fn)
2374 +- fn->fn_sernum = fib6_new_sernum(net);
2375 ++ WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
2376 + }
2377 +
2378 + /*
2379 +@@ -544,12 +544,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
2380 + spin_unlock_bh(&table->tb6_lock);
2381 + if (res > 0) {
2382 + cb->args[4] = 1;
2383 +- cb->args[5] = w->root->fn_sernum;
2384 ++ cb->args[5] = READ_ONCE(w->root->fn_sernum);
2385 + }
2386 + } else {
2387 +- if (cb->args[5] != w->root->fn_sernum) {
2388 ++ int sernum = READ_ONCE(w->root->fn_sernum);
2389 ++ if (cb->args[5] != sernum) {
2390 + /* Begin at the root if the tree changed */
2391 +- cb->args[5] = w->root->fn_sernum;
2392 ++ cb->args[5] = sernum;
2393 + w->state = FWS_INIT;
2394 + w->node = w->root;
2395 + w->skip = w->count;
2396 +@@ -1203,7 +1204,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
2397 + /* paired with smp_rmb() in rt6_get_cookie_safe() */
2398 + smp_wmb();
2399 + while (fn) {
2400 +- fn->fn_sernum = sernum;
2401 ++ WRITE_ONCE(fn->fn_sernum, sernum);
2402 + fn = rcu_dereference_protected(fn->parent,
2403 + lockdep_is_held(&rt->fib6_table->tb6_lock));
2404 + }
2405 +@@ -1983,8 +1984,8 @@ static int fib6_clean_node(struct fib6_walker *w)
2406 + };
2407 +
2408 + if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
2409 +- w->node->fn_sernum != c->sernum)
2410 +- w->node->fn_sernum = c->sernum;
2411 ++ READ_ONCE(w->node->fn_sernum) != c->sernum)
2412 ++ WRITE_ONCE(w->node->fn_sernum, c->sernum);
2413 +
2414 + if (!c->func) {
2415 + WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
2416 +@@ -2332,7 +2333,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
2417 + iter->w.state = FWS_INIT;
2418 + iter->w.node = iter->w.root;
2419 + iter->w.args = iter;
2420 +- iter->sernum = iter->w.root->fn_sernum;
2421 ++ iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
2422 + INIT_LIST_HEAD(&iter->w.lh);
2423 + fib6_walker_link(net, &iter->w);
2424 + }
2425 +@@ -2360,8 +2361,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
2426 +
2427 + static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
2428 + {
2429 +- if (iter->sernum != iter->w.root->fn_sernum) {
2430 +- iter->sernum = iter->w.root->fn_sernum;
2431 ++ int sernum = READ_ONCE(iter->w.root->fn_sernum);
2432 ++
2433 ++ if (iter->sernum != sernum) {
2434 ++ iter->sernum = sernum;
2435 + iter->w.state = FWS_INIT;
2436 + iter->w.node = iter->w.root;
2437 + WARN_ON(iter->w.skip);
2438 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2439 +index 35c127c3eee78..b647a40376795 100644
2440 +--- a/net/ipv6/ip6_tunnel.c
2441 ++++ b/net/ipv6/ip6_tunnel.c
2442 +@@ -1005,14 +1005,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
2443 +
2444 + if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
2445 + 0, IFA_F_TENTATIVE)))
2446 +- pr_warn("%s xmit: Local address not yet configured!\n",
2447 +- p->name);
2448 ++ pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
2449 ++ p->name);
2450 + else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
2451 + !ipv6_addr_is_multicast(raddr) &&
2452 + unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
2453 + true, 0, IFA_F_TENTATIVE)))
2454 +- pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
2455 +- p->name);
2456 ++ pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
2457 ++ p->name);
2458 + else
2459 + ret = 1;
2460 + rcu_read_unlock();
2461 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2462 +index 7e3ab23fc995c..b940285a111eb 100644
2463 +--- a/net/ipv6/route.c
2464 ++++ b/net/ipv6/route.c
2465 +@@ -2320,7 +2320,7 @@ static void ip6_link_failure(struct sk_buff *skb)
2466 + if (from) {
2467 + fn = rcu_dereference(from->fib6_node);
2468 + if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2469 +- fn->fn_sernum = -1;
2470 ++ WRITE_ONCE(fn->fn_sernum, -1);
2471 + }
2472 + }
2473 + rcu_read_unlock();
2474 +diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
2475 +index 5d849d8355617..234f535d350e9 100644
2476 +--- a/net/netfilter/nf_nat_proto_common.c
2477 ++++ b/net/netfilter/nf_nat_proto_common.c
2478 +@@ -38,12 +38,12 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
2479 + struct nf_conntrack_tuple *tuple,
2480 + const struct nf_nat_range2 *range,
2481 + enum nf_nat_manip_type maniptype,
2482 +- const struct nf_conn *ct,
2483 +- u16 *rover)
2484 ++ const struct nf_conn *ct)
2485 + {
2486 +- unsigned int range_size, min, max, i;
2487 ++ unsigned int range_size, min, max, i, attempts;
2488 + __be16 *portptr;
2489 +- u_int16_t off;
2490 ++ u16 off;
2491 ++ static const unsigned int max_attempts = 128;
2492 +
2493 + if (maniptype == NF_NAT_MANIP_SRC)
2494 + portptr = &tuple->src.u.all;
2495 +@@ -86,18 +86,31 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
2496 + } else if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) {
2497 + off = (ntohs(*portptr) - ntohs(range->base_proto.all));
2498 + } else {
2499 +- off = *rover;
2500 ++ off = prandom_u32();
2501 + }
2502 +
2503 +- for (i = 0; ; ++off) {
2504 ++ attempts = range_size;
2505 ++ if (attempts > max_attempts)
2506 ++ attempts = max_attempts;
2507 ++
2508 ++ /* We are in softirq; doing a search of the entire range risks
2509 ++ * soft lockup when all tuples are already used.
2510 ++ *
2511 ++ * If we can't find any free port from first offset, pick a new
2512 ++ * one and try again, with ever smaller search window.
2513 ++ */
2514 ++another_round:
2515 ++ for (i = 0; i < attempts; i++, off++) {
2516 + *portptr = htons(min + off % range_size);
2517 +- if (++i != range_size && nf_nat_used_tuple(tuple, ct))
2518 +- continue;
2519 +- if (!(range->flags & (NF_NAT_RANGE_PROTO_RANDOM_ALL|
2520 +- NF_NAT_RANGE_PROTO_OFFSET)))
2521 +- *rover = off;
2522 +- return;
2523 ++ if (!nf_nat_used_tuple(tuple, ct))
2524 ++ return;
2525 + }
2526 ++
2527 ++ if (attempts >= range_size || attempts < 16)
2528 ++ return;
2529 ++ attempts /= 2;
2530 ++ off = prandom_u32();
2531 ++ goto another_round;
2532 + }
2533 + EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
2534 +
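
The rewritten search drops the shared rover: each flow starts at a random offset, probes at most 128 candidate ports, and on failure halves the window and re-seeds, so softirq time stays bounded even when the range is nearly exhausted. A runnable userspace analogue (rand() stands in for prandom_u32(), is_used() for nf_nat_used_tuple()):

    #include <stdlib.h>
    #include <stdbool.h>

    /* Return a free port in [min, min + range_size) or -1. */
    static int pick_port(unsigned min, unsigned range_size,
                         bool (*is_used)(unsigned port))
    {
            unsigned attempts = range_size < 128 ? range_size : 128;
            unsigned off = (unsigned)rand();

            for (;;) {
                    unsigned i;

                    for (i = 0; i < attempts; i++, off++) {
                            unsigned port = min + off % range_size;
                            if (!is_used(port))
                                    return (int)port;
                    }
                    if (attempts >= range_size || attempts < 16)
                            return -1;         /* give up, as the kernel does */
                    attempts /= 2;             /* ever smaller window */
                    off = (unsigned)rand();    /* fresh random start */
            }
    }
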
2535 +diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
2536 +index 67ea0d83aa5a8..7d4d2c124990b 100644
2537 +--- a/net/netfilter/nf_nat_proto_dccp.c
2538 ++++ b/net/netfilter/nf_nat_proto_dccp.c
2539 +@@ -18,8 +18,6 @@
2540 + #include <net/netfilter/nf_nat_l3proto.h>
2541 + #include <net/netfilter/nf_nat_l4proto.h>
2542 +
2543 +-static u_int16_t dccp_port_rover;
2544 +-
2545 + static void
2546 + dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2547 + struct nf_conntrack_tuple *tuple,
2548 +@@ -27,8 +25,7 @@ dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2549 + enum nf_nat_manip_type maniptype,
2550 + const struct nf_conn *ct)
2551 + {
2552 +- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
2553 +- &dccp_port_rover);
2554 ++ nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
2555 + }
2556 +
2557 + static bool
2558 +diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
2559 +index 1c5d9b65fbbab..f05ad8fa7b208 100644
2560 +--- a/net/netfilter/nf_nat_proto_sctp.c
2561 ++++ b/net/netfilter/nf_nat_proto_sctp.c
2562 +@@ -12,8 +12,6 @@
2563 +
2564 + #include <net/netfilter/nf_nat_l4proto.h>
2565 +
2566 +-static u_int16_t nf_sctp_port_rover;
2567 +-
2568 + static void
2569 + sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2570 + struct nf_conntrack_tuple *tuple,
2571 +@@ -21,8 +19,7 @@ sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2572 + enum nf_nat_manip_type maniptype,
2573 + const struct nf_conn *ct)
2574 + {
2575 +- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
2576 +- &nf_sctp_port_rover);
2577 ++ nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
2578 + }
2579 +
2580 + static bool
2581 +diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
2582 +index f15fcd475f987..c312e6b3e2ea6 100644
2583 +--- a/net/netfilter/nf_nat_proto_tcp.c
2584 ++++ b/net/netfilter/nf_nat_proto_tcp.c
2585 +@@ -18,8 +18,6 @@
2586 + #include <net/netfilter/nf_nat_l4proto.h>
2587 + #include <net/netfilter/nf_nat_core.h>
2588 +
2589 +-static u16 tcp_port_rover;
2590 +-
2591 + static void
2592 + tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2593 + struct nf_conntrack_tuple *tuple,
2594 +@@ -27,8 +25,7 @@ tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2595 + enum nf_nat_manip_type maniptype,
2596 + const struct nf_conn *ct)
2597 + {
2598 +- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
2599 +- &tcp_port_rover);
2600 ++ nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
2601 + }
2602 +
2603 + static bool
2604 +diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
2605 +index d85c31c2433cf..357539d158497 100644
2606 +--- a/net/netfilter/nf_nat_proto_udp.c
2607 ++++ b/net/netfilter/nf_nat_proto_udp.c
2608 +@@ -17,8 +17,6 @@
2609 + #include <net/netfilter/nf_nat_l3proto.h>
2610 + #include <net/netfilter/nf_nat_l4proto.h>
2611 +
2612 +-static u16 udp_port_rover;
2613 +-
2614 + static void
2615 + udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2616 + struct nf_conntrack_tuple *tuple,
2617 +@@ -26,8 +24,7 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
2618 + enum nf_nat_manip_type maniptype,
2619 + const struct nf_conn *ct)
2620 + {
2621 +- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
2622 +- &udp_port_rover);
2623 ++ nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
2624 + }
2625 +
2626 + static void
2627 +@@ -78,8 +75,6 @@ static bool udp_manip_pkt(struct sk_buff *skb,
2628 + }
2629 +
2630 + #ifdef CONFIG_NF_NAT_PROTO_UDPLITE
2631 +-static u16 udplite_port_rover;
2632 +-
2633 + static bool udplite_manip_pkt(struct sk_buff *skb,
2634 + const struct nf_nat_l3proto *l3proto,
2635 + unsigned int iphdroff, unsigned int hdroff,
2636 +@@ -103,8 +98,7 @@ udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
2637 + enum nf_nat_manip_type maniptype,
2638 + const struct nf_conn *ct)
2639 + {
2640 +- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
2641 +- &udplite_port_rover);
2642 ++ nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct);
2643 + }
2644 +
2645 + const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
2646 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2647 +index b1a9f330a51fe..fd87216bc0a99 100644
2648 +--- a/net/netfilter/nft_payload.c
2649 ++++ b/net/netfilter/nft_payload.c
2650 +@@ -194,6 +194,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
2651 + struct sk_buff *skb,
2652 + unsigned int *l4csum_offset)
2653 + {
2654 ++ if (pkt->xt.fragoff)
2655 ++ return -1;
2656 ++
2657 + switch (pkt->tprot) {
2658 + case IPPROTO_TCP:
2659 + *l4csum_offset = offsetof(struct tcphdr, check);
2660 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2661 +index 1309161032d50..bd7e8d406c71e 100644
2662 +--- a/net/packet/af_packet.c
2663 ++++ b/net/packet/af_packet.c
2664 +@@ -1716,6 +1716,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
2665 + match->prot_hook.dev = po->prot_hook.dev;
2666 + match->prot_hook.func = packet_rcv_fanout;
2667 + match->prot_hook.af_packet_priv = match;
2668 ++ match->prot_hook.af_packet_net = read_pnet(&match->net);
2669 + match->prot_hook.id_match = match_fanout_group;
2670 + list_add(&match->list, &fanout_list);
2671 + }
2672 +@@ -1729,7 +1730,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
2673 + err = -ENOSPC;
2674 + if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
2675 + __dev_remove_pack(&po->prot_hook);
2676 +- po->fanout = match;
2677 ++
2678 ++ /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
2679 ++ WRITE_ONCE(po->fanout, match);
2680 ++
2681 + po->rollover = rollover;
2682 + rollover = NULL;
2683 + refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
2684 +@@ -3294,6 +3298,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2685 + po->prot_hook.func = packet_rcv_spkt;
2686 +
2687 + po->prot_hook.af_packet_priv = sk;
2688 ++ po->prot_hook.af_packet_net = sock_net(sk);
2689 +
2690 + if (proto) {
2691 + po->prot_hook.type = proto;
2692 +@@ -3875,7 +3880,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2693 + }
2694 + case PACKET_FANOUT_DATA:
2695 + {
2696 +- if (!po->fanout)
2697 ++ /* Paired with the WRITE_ONCE() in fanout_add() */
2698 ++ if (!READ_ONCE(po->fanout))
2699 + return -EINVAL;
2700 +
2701 + return fanout_set_data(po, optval, optlen);
2702 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2703 +index c23993b9c6238..459f629fa0a86 100644
2704 +--- a/sound/pci/hda/patch_realtek.c
2705 ++++ b/sound/pci/hda/patch_realtek.c
2706 +@@ -2106,6 +2106,7 @@ static void alc1220_fixup_gb_x570(struct hda_codec *codec,
2707 + {
2708 + static const hda_nid_t conn1[] = { 0x0c };
2709 + static const struct coef_fw gb_x570_coefs[] = {
2710 ++ WRITE_COEF(0x07, 0x03c0),
2711 + WRITE_COEF(0x1a, 0x01c1),
2712 + WRITE_COEF(0x1b, 0x0202),
2713 + WRITE_COEF(0x43, 0x3005),
2714 +@@ -2532,7 +2533,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2715 + SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2716 + SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
2717 + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
2718 +- SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
2719 ++ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570),
2720 ++ SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570),
2721 + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
2722 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
2723 + SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
2724 +@@ -2607,6 +2609,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
2725 + {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
2726 + {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
2727 + {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
2728 ++ {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"},
2729 + {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
2730 + {}
2731 + };
2732 +diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
2733 +index 1902689c5ea2c..acd88fe38cd4c 100644
2734 +--- a/sound/soc/codecs/cpcap.c
2735 ++++ b/sound/soc/codecs/cpcap.c
2736 +@@ -1541,6 +1541,8 @@ static int cpcap_codec_probe(struct platform_device *pdev)
2737 + {
2738 + struct device_node *codec_node =
2739 + of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec");
2740 ++ if (!codec_node)
2741 ++ return -ENODEV;
2742 +
2743 + pdev->dev.of_node = codec_node;
2744 +
2745 +diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c
2746 +index ecfb4a80424bc..ec0a482e9000b 100644
2747 +--- a/sound/soc/codecs/max9759.c
2748 ++++ b/sound/soc/codecs/max9759.c
2749 +@@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol,
2750 + struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
2751 + struct max9759 *priv = snd_soc_component_get_drvdata(c);
2752 +
2753 +- if (ucontrol->value.integer.value[0] > 3)
2754 ++ if (ucontrol->value.integer.value[0] < 0 ||
2755 ++ ucontrol->value.integer.value[0] > 3)
2756 + return -EINVAL;
2757 +
2758 + priv->gain = ucontrol->value.integer.value[0];
2759 +diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
2760 +index ec731223cab3d..72d4548994842 100644
2761 +--- a/sound/soc/fsl/pcm030-audio-fabric.c
2762 ++++ b/sound/soc/fsl/pcm030-audio-fabric.c
2763 +@@ -90,16 +90,21 @@ static int pcm030_fabric_probe(struct platform_device *op)
2764 + dev_err(&op->dev, "platform_device_alloc() failed\n");
2765 +
2766 + ret = platform_device_add(pdata->codec_device);
2767 +- if (ret)
2768 ++ if (ret) {
2769 + dev_err(&op->dev, "platform_device_add() failed: %d\n", ret);
2770 ++ platform_device_put(pdata->codec_device);
2771 ++ }
2772 +
2773 + ret = snd_soc_register_card(card);
2774 +- if (ret)
2775 ++ if (ret) {
2776 + dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
2777 ++ platform_device_del(pdata->codec_device);
2778 ++ platform_device_put(pdata->codec_device);
2779 ++ }
2780 +
2781 + platform_set_drvdata(op, pdata);
2782 +-
2783 + return ret;
2784 ++
2785 + }
2786 +
2787 + static int pcm030_fabric_remove(struct platform_device *op)
2788 +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
2789 +index 95fc24580f85f..ef8fd331526b0 100644
2790 +--- a/sound/soc/soc-ops.c
2791 ++++ b/sound/soc/soc-ops.c
2792 +@@ -322,13 +322,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
2793 + if (sign_bit)
2794 + mask = BIT(sign_bit + 1) - 1;
2795 +
2796 +- val = ((ucontrol->value.integer.value[0] + min) & mask);
2797 ++ val = ucontrol->value.integer.value[0];
2798 ++ if (mc->platform_max && val > mc->platform_max)
2799 ++ return -EINVAL;
2800 ++ if (val > max - min)
2801 ++ return -EINVAL;
2802 ++ if (val < 0)
2803 ++ return -EINVAL;
2804 ++ val = (val + min) & mask;
2805 + if (invert)
2806 + val = max - val;
2807 + val_mask = mask << shift;
2808 + val = val << shift;
2809 + if (snd_soc_volsw_is_stereo(mc)) {
2810 +- val2 = ((ucontrol->value.integer.value[1] + min) & mask);
2811 ++ val2 = ucontrol->value.integer.value[1];
2812 ++ if (mc->platform_max && val2 > mc->platform_max)
2813 ++ return -EINVAL;
2814 ++ if (val2 > max - min)
2815 ++ return -EINVAL;
2816 ++ if (val2 < 0)
2817 ++ return -EINVAL;
2818 ++ val2 = (val2 + min) & mask;
2819 + if (invert)
2820 + val2 = max - val2;
2821 + if (reg == reg2) {
2822 +@@ -422,8 +436,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
2823 + int err = 0;
2824 + unsigned int val, val_mask, val2 = 0;
2825 +
2826 ++ val = ucontrol->value.integer.value[0];
2827 ++ if (mc->platform_max && val > mc->platform_max)
2828 ++ return -EINVAL;
2829 ++ if (val > max - min)
2830 ++ return -EINVAL;
2831 ++ if (val < 0)
2832 ++ return -EINVAL;
2833 + val_mask = mask << shift;
2834 +- val = (ucontrol->value.integer.value[0] + min) & mask;
2835 ++ val = (val + min) & mask;
2836 + val = val << shift;
2837 +
2838 + err = snd_soc_component_update_bits(component, reg, val_mask, val);
2839 +@@ -889,6 +910,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
2840 + unsigned int i, regval, regmask;
2841 + int err;
2842 +
2843 ++ if (val < mc->min || val > mc->max)
2844 ++ return -EINVAL;
2845 + if (invert)
2846 + val = max - val;
2847 + val &= mask;
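
All three soc-ops put handlers now validate the user-supplied value before it is offset by min and masked, returning -EINVAL instead of letting an out-of-range value wrap into a different register field. The shared check boils down to:

    #include <errno.h>

    /* Validate a mixer value against [0, max - min] plus an optional
     * platform_max cap, before offsetting and masking (cf. soc-ops.c). */
    static int check_ctl_val(long val, long min, long max, long platform_max)
    {
            if (platform_max && val > platform_max)
                    return -EINVAL;
            if (val < 0 || val > max - min)
                    return -EINVAL;
            return 0;
    }
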
2848 +diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
2849 +index 12631f0076a10..11e157d7533b8 100644
2850 +--- a/tools/testing/selftests/futex/Makefile
2851 ++++ b/tools/testing/selftests/futex/Makefile
2852 +@@ -11,7 +11,7 @@ all:
2853 + @for DIR in $(SUBDIRS); do \
2854 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
2855 + mkdir $$BUILD_TARGET -p; \
2856 +- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2857 ++ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2858 + if [ -e $$DIR/$(TEST_PROGS) ]; then \
2859 + rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
2860 + fi \
2861 +@@ -32,6 +32,6 @@ override define CLEAN
2862 + @for DIR in $(SUBDIRS); do \
2863 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
2864 + mkdir $$BUILD_TARGET -p; \
2865 +- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2866 ++ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
2867 + done
2868 + endef