Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Tue, 01 Feb 2022 17:24:26
Message-Id: 1643736252.998225b203b8f9ea94fc543f621982c1fb5c268b.mpagano@gentoo
commit: 998225b203b8f9ea94fc543f621982c1fb5c268b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 1 17:24:12 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 1 17:24:12 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=998225b2

Linux patch 5.4.176

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1175_linux-5.4.176.patch | 2235 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2239 insertions(+)

diff --git a/0000_README b/0000_README
index 2c90c35b..347c7e70 100644
--- a/0000_README
+++ b/0000_README
@@ -743,6 +743,10 @@ Patch: 1174_linux-5.4.175.patch
From: http://www.kernel.org
Desc: Linux 5.4.175

+Patch: 1175_linux-5.4.176.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.176
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1175_linux-5.4.176.patch b/1175_linux-5.4.176.patch
new file mode 100644
index 00000000..1128710c
--- /dev/null
+++ b/1175_linux-5.4.176.patch
@@ -0,0 +1,2235 @@
+diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+index 9cb3560756d00..53c26ffd020a3 100644
+--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
++++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <10000000>;
+- bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
++ bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
+diff --git a/Makefile b/Makefile
+index 2f6c51097d003..b23aa51ada93e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 175
++SUBLEVEL = 176
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index f61ef46ebff74..d07fbc21f14ce 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -500,34 +500,26 @@ static void entry_task_switch(struct task_struct *next)
+
+ /*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+- * Assuming the virtual counter is enabled at the beginning of times:
+- *
+- * - disable access when switching from a 64bit task to a 32bit task
+- * - enable access when switching from a 32bit task to a 64bit task
++ * Ensure access is disabled when switching to a 32bit task, ensure
++ * access is enabled when switching to a 64bit task.
+ */
+-static void erratum_1418040_thread_switch(struct task_struct *prev,
+- struct task_struct *next)
++static void erratum_1418040_thread_switch(struct task_struct *next)
+ {
+- bool prev32, next32;
+- u64 val;
+-
+- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
+- return;
+-
+- prev32 = is_compat_thread(task_thread_info(prev));
+- next32 = is_compat_thread(task_thread_info(next));
+-
+- if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
++ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
++ !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
+ return;
+
+- val = read_sysreg(cntkctl_el1);
+-
+- if (!next32)
+- val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
++ if (is_compat_thread(task_thread_info(next)))
++ sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
+ else
+- val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
++ sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
++}
+
+- write_sysreg(val, cntkctl_el1);
++static void erratum_1418040_new_exec(void)
++{
++ preempt_disable();
++ erratum_1418040_thread_switch(current);
++ preempt_enable();
+ }
+
+ /*
+@@ -546,7 +538,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ uao_thread_switch(next);
+ ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
+- erratum_1418040_thread_switch(prev, next);
++ erratum_1418040_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+@@ -605,6 +597,7 @@ void arch_setup_new_exec(void)
+ current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
+ ptrauth_thread_init_user(current);
++ erratum_1418040_new_exec();
+ }
+
+ #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
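
A note on the arch/arm64/kernel/process.c change above: the rewrite keys only off the incoming task and leans on sysreg_clear_set() instead of an open-coded read/modify/write of cntkctl_el1, which also lets arch_setup_new_exec() reuse the same helper (with preemption disabled) after an exec changes the task's compat status. As a rough sketch of what that helper does (not a line-for-line copy of the arm64 macro, which takes the register name as a macro argument):

/*
 * Sketch of sysreg_clear_set(cntkctl_el1, clear, set): a read-modify-write
 * that only writes back when the value actually changes.
 */
u64 old = read_sysreg(cntkctl_el1);
u64 new = (old & ~clear) | set;

if (new != old)
        write_sysreg(new, cntkctl_el1);
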
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index afbd47b0a75cc..5819a577d267a 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -13,6 +13,7 @@ CFLAGS_prom_init.o += -fPIC
+ CFLAGS_btext.o += -fPIC
+ endif
+
++CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
+index b8de3be10eb47..8656b8d2ce555 100644
+--- a/arch/powerpc/lib/Makefile
++++ b/arch/powerpc/lib/Makefile
+@@ -16,6 +16,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
+ endif
+
++CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++
+ obj-y += alloc.o code-patching.o feature-fixups.o pmem.o
+
+ ifndef CONFIG_KASAN
+diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
+index e1fcc03159ef2..a927adccb4ba7 100644
+--- a/arch/s390/hypfs/hypfs_vm.c
++++ b/arch/s390/hypfs/hypfs_vm.c
+@@ -20,6 +20,7 @@
+
+ static char local_guest[] = " ";
+ static char all_guests[] = "* ";
++static char *all_groups = all_guests;
+ static char *guest_query;
+
+ struct diag2fc_data {
+@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
+
+ memcpy(parm_list.userid, query, NAME_LEN);
+ ASCEBC(parm_list.userid, NAME_LEN);
+- parm_list.addr = (unsigned long) addr ;
++ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
++ ASCEBC(parm_list.aci_grp, NAME_LEN);
++ parm_list.addr = (unsigned long)addr;
+ parm_list.size = size;
+ parm_list.fmt = 0x02;
+- memset(parm_list.aci_grp, 0x40, NAME_LEN);
+ rc = -1;
+
+ diag_stat_inc(DIAG_STAT_X2FC);
+diff --git a/block/bio.c b/block/bio.c
+index cb38d6f3acceb..1c52d0196e15c 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -569,7 +569,8 @@ void bio_truncate(struct bio *bio, unsigned new_size)
+ offset = new_size - done;
+ else
+ offset = 0;
+- zero_user(bv.bv_page, offset, bv.bv_len - offset);
++ zero_user(bv.bv_page, bv.bv_offset + offset,
++ bv.bv_len - offset);
+ truncated = true;
+ }
+ done += bv.bv_len;
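
On the block/bio.c hunk: a bio_vec describes the bytes [bv_offset, bv_offset + bv_len) within bv_page, not the whole page, so the zeroed tail has to be rebased onto bv_offset; zeroing from `offset` alone clobbers the wrong bytes whenever bv_offset != 0. A minimal sketch of the intent (hypothetical helper; zero_user() is the kernel primitive that zeroes a byte range of a page through a temporary mapping):

/* Zero everything past the first `keep` valid bytes of one segment. */
static void zero_bvec_tail(struct bio_vec *bv, unsigned int keep)
{
        /* keep <= bv->bv_len is assumed by the caller */
        zero_user(bv->bv_page, bv->bv_offset + keep, bv->bv_len - keep);
}
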
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+index 7d155938e2916..9baf5af919e1e 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+@@ -471,8 +471,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
+- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
+- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
++ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
++ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
+ DRM_ERROR("submit arguments out of size limits\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
+index a11b98e990019..16194971a99f9 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi.c
++++ b/drivers/gpu/drm/msm/dsi/dsi.c
+@@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
+
+ of_node_put(phy_node);
+
+- if (!phy_pdev || !msm_dsi->phy) {
++ if (!phy_pdev) {
++ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
++ return -EPROBE_DEFER;
++ }
++ if (!msm_dsi->phy) {
++ put_device(&phy_pdev->dev);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+index 21519229fe73a..60d50643d0b5c 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+@@ -665,12 +665,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
+ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+ {
+- struct device *dev = &phy->pdev->dev;
++ struct device *dev;
+ int ret;
+
+ if (!phy || !phy->cfg->ops.enable)
+ return -EINVAL;
+
++ dev = &phy->pdev->dev;
++
+ ret = dsi_phy_enable_resource(phy);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 355afb936401a..1a7e77373407f 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
+
+ of_node_put(phy_node);
+
+- if (!phy_pdev || !hdmi->phy) {
++ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ return -EPROBE_DEFER;
+ }
++ if (!hdmi->phy) {
++ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
++ put_device(&phy_pdev->dev);
++ return -EPROBE_DEFER;
++ }
+
+ hdmi->phy_dev = get_device(&phy_pdev->dev);
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 7443df77cadb5..407b51cf67909 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -337,7 +337,7 @@ static int msm_init_vram(struct drm_device *dev)
+ of_node_put(node);
+ if (ret)
+ return ret;
+- size = r.end - r.start;
++ size = r.end - r.start + 1;
+ DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
+
+ /* if we have no IOMMU, then we need to use carveout allocator.
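
On the msm_drv.c hunk: struct resource ranges are inclusive of both endpoints, so a region's size is end - start + 1. The standard helper from <linux/ioport.h> encodes the same rule, which is an equivalent way to write the fix:

/*
 * resource_size(&r) == r.end - r.start + 1; e.g. start = 0x10000000 and
 * end = 0x10ffffff describe 0x01000000 bytes (16 MiB), whereas
 * end - start alone would be one byte short.
 */
size = resource_size(&r);
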
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index 9abb4507f572b..b62763a85d6e4 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
+ .alert_alarms = 0x7c,
+- .max_convrate = 8,
++ .max_convrate = 7,
+ },
+ [lm86] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
+ .max_convrate = 9,
+ },
+ [max6646] = {
+- .flags = LM90_HAVE_CRIT,
++ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
+ [max6654] = {
++ .flags = LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
+ },
+ [max6680] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+- | LM90_HAVE_CRIT_ALRM_SWP,
++ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
+ .alert_alarms = 0x7c,
+ .max_convrate = 7,
+ },
+diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
+index 8b90def6686fb..a5eb0a1f559c7 100644
+--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
++++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
+@@ -290,7 +290,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
+ /* Control chips select signal on ADS5121 board */
+ static void ads5121_select_chip(struct nand_chip *nand, int chip)
+ {
+- struct mtd_info *mtd = nand_to_mtd(nand);
+ struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+ u8 v;
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index ce6a4e1965e1d..403c1b9cf6ab8 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1970,8 +1970,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+ break;
+ }
+
+- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+- hclgevf_enable_vector(&hdev->misc_vector, true);
++ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 9adfc0a7ab823..26d49dcdbeb3e 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3258,11 +3258,25 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+ int max_entries;
++ int cap_reqs;
++
++ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
++ * the PROMISC flag). Initialize this count upfront. When the tasklet
++ * receives a response to all of these, it will send the next protocol
++ * message (QUERY_IP_OFFLOAD).
++ */
++ if (!(adapter->netdev->flags & IFF_PROMISC) ||
++ adapter->promisc_supported)
++ cap_reqs = 7;
++ else
++ cap_reqs = 6;
+
+ if (!retry) {
+ /* Sub-CRQ entries are 32 byte long */
+ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+
++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
++
+ if (adapter->min_tx_entries_per_subcrq > entries_page ||
+ adapter->min_rx_add_entries_per_subcrq > entries_page) {
+ dev_err(dev, "Fatal, invalid entries per sub-crq\n");
+@@ -3323,44 +3337,45 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+ adapter->opt_rx_comp_queues;
+
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
++ } else {
++ atomic_add(cap_reqs, &adapter->running_cap_crqs);
+ }
+-
+ memset(&crq, 0, sizeof(crq));
+ crq.request_capability.first = IBMVNIC_CRQ_CMD;
+ crq.request_capability.cmd = REQUEST_CAPABILITY;
+
+ crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
+ crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be64(adapter->req_tx_entries_per_subcrq);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability =
+ cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
+ crq.request_capability.number =
+ cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ crq.request_capability.capability = cpu_to_be16(REQ_MTU);
+ crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+
+ if (adapter->netdev->flags & IFF_PROMISC) {
+@@ -3368,16 +3383,21 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be64(1);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+ }
+ } else {
+ crq.request_capability.capability =
+ cpu_to_be16(PROMISC_REQUESTED);
+ crq.request_capability.number = cpu_to_be64(0);
+- atomic_inc(&adapter->running_cap_crqs);
++ cap_reqs--;
+ ibmvnic_send_crq(adapter, &crq);
+ }
++
++ /* Keep at end to catch any discrepancy between expected and actual
++ * CRQs sent.
++ */
++ WARN_ON(cap_reqs != 0);
+ }
+
+ static int pending_scrq(struct ibmvnic_adapter *adapter,
+@@ -3782,118 +3802,132 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
+ static void send_cap_queries(struct ibmvnic_adapter *adapter)
+ {
+ union ibmvnic_crq crq;
++ int cap_reqs;
++
++ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
++ * upfront. When the tasklet receives a response to all of these, it
++ * can send out the next protocol message (REQUEST_CAPABILITY).
++ */
++ cap_reqs = 25;
++
++ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
+- atomic_set(&adapter->running_cap_crqs, 0);
+ memset(&crq, 0, sizeof(crq));
+ crq.query_capability.first = IBMVNIC_CRQ_CMD;
+ crq.query_capability.cmd = QUERY_CAPABILITY;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MIN_MTU);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MTU);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability =
+ cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
+- atomic_inc(&adapter->running_cap_crqs);
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
+
+ crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
+- atomic_inc(&adapter->running_cap_crqs);
++
+ ibmvnic_send_crq(adapter, &crq);
++ cap_reqs--;
++
++ /* Keep at end to catch any discrepancy between expected and actual
++ * CRQs sent.
++ */
++ WARN_ON(cap_reqs != 0);
+ }
+
+ static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
+@@ -4160,6 +4194,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
+ char *name;
+
+ atomic_dec(&adapter->running_cap_crqs);
++ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
++ atomic_read(&adapter->running_cap_crqs));
+ switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
+ case REQ_TX_QUEUES:
+ req_value = &adapter->req_tx_queues;
+@@ -4787,12 +4823,6 @@ static void ibmvnic_tasklet(void *data)
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+-
+- /* remain in tasklet until all
+- * capabilities responses are received
+- */
+- if (!adapter->wait_capability)
+- done = true;
+ }
+ /* if capabilities CRQ's were sent in this tasklet, the following
+ * tasklet must wait until all responses are received
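
The ibmvnic hunks above all follow one idea: instead of atomic_inc() on running_cap_crqs per send (which a fast response could race with, observing a half-built count), the total is published with atomic_set() before the first CRQ goes out, and a local counter double-checks the send list. Reduced to a sketch with hypothetical names:

/* Publish the expected reply count before anything is sent. */
atomic_set(&adapter->outstanding, NUM_REQUESTS);
cap_reqs = NUM_REQUESTS;

send_query(adapter, FIRST_CAP);  cap_reqs--;
send_query(adapter, SECOND_CAP); cap_reqs--;
/* ... one decrement per send ... */

/*
 * The reply handler does atomic_dec(&adapter->outstanding) and moves to
 * the next protocol step when it reaches zero; this WARN catches a send
 * list that drifted out of sync with the published total.
 */
WARN_ON(cap_reqs != 0);
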
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index f8422dbfd54e6..4c8c31692e9e0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -182,7 +182,6 @@ enum i40e_interrupt_policy {
+
+ struct i40e_lump_tracking {
+ u16 num_entries;
+- u16 search_hint;
+ u16 list[0];
+ #define I40E_PILE_VALID_BIT 0x8000
+ #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
+@@ -757,12 +756,12 @@ struct i40e_vsi {
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+- u32 tx_restart;
+- u32 tx_busy;
++ u64 tx_restart;
++ u64 tx_busy;
+ u64 tx_linearize;
+ u64 tx_force_wb;
+- u32 rx_buf_failed;
+- u32 rx_page_failed;
++ u64 rx_buf_failed;
++ u64 rx_page_failed;
+
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index 99ea543dd2453..276f04c0e51d6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -234,7 +234,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+ (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+ (unsigned long int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
++ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
+ rcu_read_lock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index a2326683be170..a6ae4b7b11afd 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -204,10 +204,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+- *
+- * The search_hint trick and lack of advanced fit-finding only work
+- * because we're highly likely to have all the same size lump requests.
+- * Linear search time and any fragmentation should be minimal.
+ **/
+ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+@@ -222,8 +218,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ return -EINVAL;
+ }
+
+- /* start the linear search with an imperfect hint */
+- i = pile->search_hint;
++ /* Allocate last queue in the pile for FDIR VSI queue
++ * so it doesn't fragment the qp_pile
++ */
++ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
++ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
++ dev_err(&pf->pdev->dev,
++ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
++ pile->num_entries - 1);
++ return -ENOMEM;
++ }
++ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
++ return pile->num_entries - 1;
++ }
++
++ i = 0;
+ while (i < pile->num_entries) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+@@ -242,7 +251,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+- pile->search_hint = i + j;
+ break;
+ }
+
+@@ -265,7 +273,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+ {
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+- int i;
++ u16 i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+@@ -277,8 +285,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+ count++;
+ }
+
+- if (count && index < pile->search_hint)
+- pile->search_hint = index;
+
+ return count;
+ }
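
With search_hint gone, i40e_get_lump() becomes a plain first-fit scan over pile->list[], with one special case: the FDIR VSI's single queue is pinned to the last slot so it can never fragment the middle of the pile. A condensed sketch of the resulting scan (not line-for-line driver code; entries are tagged with the owner id plus a valid bit):

static int first_fit(struct i40e_lump_tracking *pile, u16 needed, u16 id)
{
        u16 i = 0, j;

        while (i < pile->num_entries) {
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;            /* slot taken, keep scanning */
                        continue;
                }
                /* measure the free run starting at i */
                for (j = 0; j < needed && i + j < pile->num_entries; j++)
                        if (pile->list[i + j] & I40E_PILE_VALID_BIT)
                                break;
                if (j == needed) {      /* big enough: claim it */
                        while (j--)
                                pile->list[i + j] = id | I40E_PILE_VALID_BIT;
                        return i;
                }
                i += j + 1;             /* jump past the blocker */
        }
        return -ENOMEM;
}
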
+@@ -798,9 +804,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+- u32 tx_restart, tx_busy;
++ u64 tx_restart, tx_busy;
+ struct i40e_ring *p;
+- u32 rx_page, rx_buf;
++ u64 rx_page, rx_buf;
+ u64 bytes, packets;
+ unsigned int start;
+ u64 tx_linearize;
+@@ -10084,15 +10090,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ }
+ i40e_get_oem_version(&pf->hw);
+
+- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
+- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
+- /* The following delay is necessary for 4.33 firmware and older
+- * to recover after EMP reset. 200 ms should suffice but we
+- * put here 300 ms to be sure that FW is ready to operate
+- * after reset.
+- */
+- mdelay(300);
++ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
++ /* The following delay is necessary for firmware update. */
++ mdelay(1000);
+ }
+
+ /* re-verify the eeprom if we just had an EMP reset */
+@@ -11388,7 +11388,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
+ return -ENOMEM;
+
+ pf->irq_pile->num_entries = vectors;
+- pf->irq_pile->search_hint = 0;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+@@ -12139,7 +12138,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+- pf->qp_pile->search_hint = 0;
+
+ pf->tx_timeout_recovery_level = 1;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index cd6f5bd982559..4962e6193eeca 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2485,6 +2485,59 @@ error_param:
+ aq_ret);
+ }
+
++/**
++ * i40e_check_enough_queue - find big enough queue number
++ * @vf: pointer to the VF info
++ * @needed: the number of items needed
++ *
++ * Returns the base item index of the queue, or negative for error
++ **/
++static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
++{
++ unsigned int i, cur_queues, more, pool_size;
++ struct i40e_lump_tracking *pile;
++ struct i40e_pf *pf = vf->pf;
++ struct i40e_vsi *vsi;
++
++ vsi = pf->vsi[vf->lan_vsi_idx];
++ cur_queues = vsi->alloc_queue_pairs;
++
++ /* if current allocated queues are enough for need */
++ if (cur_queues >= needed)
++ return vsi->base_queue;
++
++ pile = pf->qp_pile;
++ if (cur_queues > 0) {
++ /* if the allocated queues are not zero
++ * just check if there are enough queues for more
++ * behind the allocated queues.
++ */
++ more = needed - cur_queues;
++ for (i = vsi->base_queue + cur_queues;
++ i < pile->num_entries; i++) {
++ if (pile->list[i] & I40E_PILE_VALID_BIT)
++ break;
++
++ if (more-- == 1)
++ /* there is enough */
++ return vsi->base_queue;
++ }
++ }
++
++ pool_size = 0;
++ for (i = 0; i < pile->num_entries; i++) {
++ if (pile->list[i] & I40E_PILE_VALID_BIT) {
++ pool_size = 0;
++ continue;
++ }
++ if (needed <= ++pool_size)
++ /* there is enough */
++ return i;
++ }
++
++ return -ENOMEM;
++}
++
+ /**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+@@ -2519,6 +2572,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
+ req_pairs - cur_pairs,
+ pf->queues_left);
+ vfres->num_queue_pairs = pf->queues_left + cur_pairs;
++ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
++ dev_warn(&pf->pdev->dev,
++ "VF %d requested %d more queues, but there is not enough for it.\n",
++ vf->vf_id,
++ req_pairs - cur_pairs);
++ vfres->num_queue_pairs = cur_pairs;
+ } else {
+ /* successful request */
+ vf->num_req_queues = req_pairs;
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 5ab53e9942f30..5d30b3e1806ab 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ sizeof(struct yamdrv_ioctl_mcs));
+ if (IS_ERR(ym))
+ return PTR_ERR(ym);
+- if (ym->cmd != SIOCYAMSMCS)
+- return -EINVAL;
+- if (ym->bitrate > YAM_MAXBITRATE) {
++ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
+ kfree(ym);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 5e956089bf525..c23fec34b50e9 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -646,6 +646,7 @@ static struct phy_driver broadcom_drivers[] = {
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54616S",
+ /* PHY_GBIT_FEATURES */
++ .soft_reset = genphy_soft_reset,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm54616s_config_aneg,
+ .ack_interrupt = bcm_phy_ack_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 35ade5d21de51..78b918dcd5472 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1433,6 +1433,9 @@ void phy_detach(struct phy_device *phydev)
+ phy_driver_is_genphy_10g(phydev))
+ device_release_driver(&phydev->mdio.dev);
+
++ /* Assert the reset signal */
++ phy_device_reset(phydev, 1);
++
+ /*
+ * The phydev might go away on the put_device() below, so avoid
+ * a use-after-free bug by reading the underlying bus first.
+@@ -1444,9 +1447,6 @@ void phy_detach(struct phy_device *phydev)
+ ndev_owner = dev->dev.parent->driver->owner;
+ if (ndev_owner != bus->owner)
+ module_put(bus->owner);
+-
+- /* Assert the reset signal */
+- phy_device_reset(phydev, 1);
+ }
+ EXPORT_SYMBOL(phy_detach);
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 7be43a1eaefda..5b2bf75269033 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -582,6 +582,11 @@ static int phylink_register_sfp(struct phylink *pl,
+ return ret;
+ }
+
++ if (!fwnode_device_is_available(ref.fwnode)) {
++ fwnode_handle_put(ref.fwnode);
++ return 0;
++ }
++
+ pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
+ if (!pl->sfp_bus)
+ return -ENOMEM;
+diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
+index c655074c07c2e..ac50ed7577651 100644
+--- a/drivers/rpmsg/rpmsg_char.c
++++ b/drivers/rpmsg/rpmsg_char.c
+@@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
+ /* wake up any blocked readers */
+ wake_up_interruptible(&eptdev->readq);
+
+- device_del(&eptdev->dev);
++ cdev_device_del(&eptdev->cdev, &eptdev->dev);
+ put_device(&eptdev->dev);
+
+ return 0;
+@@ -336,7 +336,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
+
+ ida_simple_remove(&rpmsg_ept_ida, dev->id);
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
+- cdev_del(&eptdev->cdev);
+ kfree(eptdev);
+ }
+
+@@ -381,19 +380,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
+ dev->id = ret;
+ dev_set_name(dev, "rpmsg%d", ret);
+
+- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
++ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
+ if (ret)
+ goto free_ept_ida;
+
+ /* We can now rely on the release function for cleanup */
+ dev->release = rpmsg_eptdev_release_device;
+
+- ret = device_add(dev);
+- if (ret) {
+- dev_err(dev, "device_add failed: %d\n", ret);
+- put_device(dev);
+- }
+-
+ return ret;
+
+ free_ept_ida:
+@@ -462,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
+
+ ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+ ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+- cdev_del(&ctrldev->cdev);
+ kfree(ctrldev);
+ }
+
+@@ -497,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
+ dev->id = ret;
+ dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
+
+- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
++ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
+ if (ret)
+ goto free_ctrl_ida;
+
+ /* We can now rely on the release function for cleanup */
+ dev->release = rpmsg_ctrldev_release_device;
+
+- ret = device_add(dev);
+- if (ret) {
+- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
+- put_device(dev);
+- }
+-
+ dev_set_drvdata(&rpdev->dev, ctrldev);
+
+ return ret;
+@@ -535,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
+ if (ret)
+ dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
+
+- device_del(&ctrldev->dev);
++ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
+ put_device(&ctrldev->dev);
+ }
+
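The rpmsg_char.c changes collapse the separate cdev_add()/device_add() calls (and their teardown) into the combined helpers from <linux/cdev.h>, which register and unregister the char device and the struct device as one unit. The usual shape of that pattern, sketched with hypothetical names (ep, ep_fops):

/* setup */
device_initialize(&ep->dev);            /* devt, class, name set up here */
cdev_init(&ep->cdev, &ep_fops);
err = cdev_device_add(&ep->cdev, &ep->dev);
if (err)
        put_device(&ep->dev);           /* ->release() frees the object */

/* teardown */
cdev_device_del(&ep->cdev, &ep->dev);
put_device(&ep->dev);
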
+diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
+index b018b61bd168e..d4c2c44b863dd 100644
+--- a/drivers/s390/scsi/zfcp_fc.c
++++ b/drivers/s390/scsi/zfcp_fc.c
+@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
+ goto out;
+ }
+
++ /* re-init to undo drop from zfcp_fc_adisc() */
++ port->d_id = ntoh24(adisc_resp->adisc_port_id);
+ /* port is good, unblock rport without going through erp */
+ zfcp_scsi_schedule_rport_register(port);
+ out:
+@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+ struct zfcp_fc_req *fc_req;
+ struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
++ u32 d_id;
+ int ret;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
+
+- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
++ d_id = port->d_id; /* remember as destination for send els below */
++ /*
++ * Force fresh GID_PN lookup on next port recovery.
++ * Must happen after request setup and before sending request,
++ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
++ */
++ port->d_id = 0;
++
++ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
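
The zfcp_fc.c change is purely an ordering fix: the destination is snapshotted into a local d_id, port->d_id is zeroed before the ELS goes out, and the response handler re-initializes it from the ADISC payload. Sketched with hypothetical helper names:

static int adisc_send(struct zfcp_port *port)
{
        u32 d_id = port->d_id;  /* destination for this request */

        /*
         * Clear before sending: the handler can run as soon as the
         * request is on the wire and will rewrite port->d_id from the
         * response; clearing afterwards could overwrite that re-init.
         */
        port->d_id = 0;
        return send_els(port->adapter, d_id);
}
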
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index b4bfab5edf8ff..e6c3e7c070aaf 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ struct device *parent, int npiv);
+-static void bnx2fc_destroy_work(struct work_struct *work);
++static void bnx2fc_port_destroy(struct fcoe_port *port);
+
+ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+ static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+@@ -902,9 +902,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
+ __bnx2fc_destroy(interface);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+-
+- /* Ensure ALL destroy work has been completed before return */
+- flush_workqueue(bnx2fc_wq);
+ return;
+
+ default:
+@@ -1211,8 +1208,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
+ mutex_unlock(&n_port->lp_mutex);
+ bnx2fc_free_vport(interface->hba, port->lport);
+ bnx2fc_port_shutdown(port->lport);
++ bnx2fc_port_destroy(port);
+ bnx2fc_interface_put(interface);
+- queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+ }
+
+@@ -1521,7 +1518,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
+ port->lport = lport;
+ port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
+- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+@@ -1649,8 +1645,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
+ bnx2fc_interface_cleanup(interface);
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
++ bnx2fc_port_destroy(port);
+ bnx2fc_interface_put(interface);
+- queue_work(bnx2fc_wq, &port->destroy_work);
+ }
+
+ /**
+@@ -1691,15 +1687,12 @@ netdev_err:
+ return rc;
+ }
+
+-static void bnx2fc_destroy_work(struct work_struct *work)
++static void bnx2fc_port_destroy(struct fcoe_port *port)
+ {
+- struct fcoe_port *port;
+ struct fc_lport *lport;
+
+- port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+-
+- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
++ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
+
+ bnx2fc_if_destroy(lport);
+ }
+@@ -2553,9 +2546,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+ __bnx2fc_destroy(interface);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+- /* Ensure ALL destroy work has been completed before return */
+- flush_workqueue(bnx2fc_wq);
+-
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 38eb49ba361f0..3d3d616e58989 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -313,6 +313,7 @@ static struct tty_driver *gsm_tty_driver;
+ #define GSM1_ESCAPE_BITS 0x20
+ #define XON 0x11
+ #define XOFF 0x13
++#define ISO_IEC_646_MASK 0x7F
+
+ static const struct tty_port_operations gsm_port_ops;
+
+@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
+ int olen = 0;
+ while (len--) {
+ if (*input == GSM1_SOF || *input == GSM1_ESCAPE
+- || *input == XON || *input == XOFF) {
++ || (*input & ISO_IEC_646_MASK) == XON
++ || (*input & ISO_IEC_646_MASK) == XOFF) {
+ *output++ = GSM1_ESCAPE;
+ *output++ = *input++ ^ GSM1_ESCAPE_BITS;
+ olen++;
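
On the n_gsm.c hunk: ISO/IEC 646 control characters have to be recognized on their low seven bits, so XON/XOFF bytes that arrive with bit 7 set (for example due to parity) must be escaped too. A tiny standalone illustration of what the mask buys (userspace sketch, not driver code):

#include <assert.h>

#define XON              0x11
#define ISO_IEC_646_MASK 0x7F

int main(void)
{
        unsigned char c = 0x91; /* XON with the top bit set */

        assert(c != XON);                       /* the old test missed it */
        assert((c & ISO_IEC_646_MASK) == XON);  /* the masked test catches it */
        return 0;
}
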
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 9ba31701a372e..a9b0a84b1e433 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -105,8 +105,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
+ port->mapsize = resource_size(&resource);
+
+ /* Check for shifted address mapping */
+- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
++ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
++ if (prop >= port->mapsize) {
++ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
++ prop, &port->mapsize);
++ ret = -EINVAL;
++ goto err_unprepare;
++ }
++
+ port->mapbase += prop;
++ port->mapsize -= prop;
++ }
+
+ port->iotype = UPIO_MEM;
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index c82c7181348de..fd443bc4c2983 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5130,8 +5130,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
+ pbn_b2_4_115200 },
++ /* Brainboxes Devices */
+ /*
+- * BrainBoxes UC-260
++ * Brainboxes UC-101
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-235/246
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
++ /*
++ * Brainboxes UC-257
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-260/271/701/756
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+ PCI_ANY_ID, PCI_ANY_ID,
+@@ -5139,7 +5161,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+ PCI_ANY_ID, PCI_ANY_ID,
+- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-268
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-275/279
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_8_115200 },
++ /*
++ * Brainboxes UC-302
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-310
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-313
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-320/324
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_1_115200 },
++ /*
++ * Brainboxes UC-346
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-357
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_2_115200 },
++ /*
++ * Brainboxes UC-368
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
++ pbn_b2_4_115200 },
++ /*
++ * Brainboxes UC-420/431
++ */
++ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
++ PCI_ANY_ID, PCI_ANY_ID,
++ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Perle PCI-RAS cards
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 23b7bdae173c8..d517b911cd042 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -536,7 +536,7 @@ static void stm32_start_tx(struct uart_port *port)
+ {
+ struct circ_buf *xmit = &port->state->xmit;
+
+- if (uart_circ_empty(xmit))
++ if (uart_circ_empty(xmit) && !port->x_char)
+ return;
+
+ stm32_transmit_chars(port);
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index 9a2ab6751a23c..5a4d08de546fe 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
+ struct ulpi *ulpi = to_ulpi_dev(dev);
+ const struct ulpi_device_id *id;
+
+- /* Some ULPI devices don't have a vendor id so rely on OF match */
+- if (ulpi->id.vendor == 0)
++ /*
++ * Some ULPI devices don't have a vendor id
++ * or provide an id_table so rely on OF match.
++ */
++ if (ulpi->id.vendor == 0 || !drv->id_table)
+ return of_driver_match_device(dev, driver);
+
+ for (id = drv->id_table; id->vendor; id++)
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index fe9b392bffee3..39203f2ce6a19 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1567,6 +1567,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
+ urb->hcpriv = NULL;
+ INIT_LIST_HEAD(&urb->urb_list);
+ atomic_dec(&urb->use_count);
++ /*
++ * Order the write of urb->use_count above before the read
++ * of urb->reject below. Pairs with the memory barriers in
++ * usb_kill_urb() and usb_poison_urb().
++ */
++ smp_mb__after_atomic();
++
+ atomic_dec(&urb->dev->urbnum);
+ if (atomic_read(&urb->reject))
+ wake_up(&usb_kill_urb_queue);
+@@ -1662,6 +1669,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
+
+ usb_anchor_resume_wakeups(anchor);
+ atomic_dec(&urb->use_count);
++ /*
++ * Order the write of urb->use_count above before the read
++ * of urb->reject below. Pairs with the memory barriers in
++ * usb_kill_urb() and usb_poison_urb().
++ */
++ smp_mb__after_atomic();
++
+ if (unlikely(atomic_read(&urb->reject)))
+ wake_up(&usb_kill_urb_queue);
+ usb_put_urb(urb);
+diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
+index 31ca5abb4c12a..0045bbc3627dd 100644
+--- a/drivers/usb/core/urb.c
++++ b/drivers/usb/core/urb.c
+@@ -691,6 +691,12 @@ void usb_kill_urb(struct urb *urb)
+ if (!(urb && urb->dev && urb->ep))
+ return;
+ atomic_inc(&urb->reject);
++ /*
++ * Order the write of urb->reject above before the read
++ * of urb->use_count below. Pairs with the barriers in
++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++ */
++ smp_mb__after_atomic();
+
+ usb_hcd_unlink_urb(urb, -ENOENT);
+ wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
+@@ -732,6 +738,12 @@ void usb_poison_urb(struct urb *urb)
+ if (!urb)
+ return;
+ atomic_inc(&urb->reject);
++ /*
++ * Order the write of urb->reject above before the read
++ * of urb->use_count below. Pairs with the barriers in
++ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
++ */
++ smp_mb__after_atomic();
+
+ if (!urb->dev || !urb->ep)
+ return;
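
The four smp_mb__after_atomic() calls above are two halves of one pairing that closes a classic store-buffering race: usb_kill_urb()/usb_poison_urb() write reject then read use_count, while the failed-submit and giveback paths write use_count then read reject. Without full barriers both sides could read stale values and the killer would sleep forever. The core of the pattern, sketched outside the driver:

static atomic_t reject, use_count;
static DECLARE_WAIT_QUEUE_HEAD(kill_queue);

static void killer(void)        /* usb_kill_urb() side */
{
        atomic_inc(&reject);
        smp_mb__after_atomic(); /* order the inc before the read below */
        wait_event(kill_queue, atomic_read(&use_count) == 0);
}

static void completer(void)     /* giveback / failed-submit side */
{
        atomic_dec(&use_count);
        smp_mb__after_atomic(); /* order the dec before the read below */
        if (atomic_read(&reject))
                wake_up(&kill_queue);
}

The barriers guarantee at least one side observes the other's store, so a waiter that saw use_count != 0 is always woken.
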
+diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
+index 282737e4609ce..2c65a9bb3c81b 100644
+--- a/drivers/usb/gadget/function/f_sourcesink.c
++++ b/drivers/usb/gadget/function/f_sourcesink.c
+@@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
+
+ if (is_iso) {
+ switch (speed) {
++ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ size = ss->isoc_maxpacket *
+ (ss->isoc_mult + 1) *
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 3ba4e060fd051..66e7f5d123c46 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ US_FL_SCM_MULT_TARG ),
+
++/*
++ * Reported by DocMAX <mail@××××××××××.de>
++ * and Thomas Weißschuh <linux@××××××××××.net>
++ */
++UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
++ "VIA Labs, Inc.",
++ "VL817 SATA Bridge",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_IGNORE_UAS),
++
+ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
+ "ST",
+ "2A",
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 1316464cf2933..fb18264b702e6 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -3903,7 +3903,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
+ case SNK_TRYWAIT_DEBOUNCE:
+ break;
+ case SNK_ATTACH_WAIT:
+- tcpm_set_state(port, SNK_UNATTACHED, 0);
++ case SNK_DEBOUNCED:
++ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
+ break;
+
+ case SNK_NEGOTIATE_CAPABILITIES:
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index d772fce519057..0a38f98f78650 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -304,7 +304,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
+ if (status < 0)
+ return status;
+
+- if (!data)
++ if (!(data & DEV_INT))
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index e9d3eb7f0e2b8..675112aa998f2 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3027,10 +3027,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ inode_lock(inode);
+ err = btrfs_delete_subvolume(dir, dentry);
+ inode_unlock(inode);
+- if (!err) {
+- fsnotify_rmdir(dir, dentry);
+- d_delete(dentry);
+- }
++ if (!err)
++ d_delete_notify(dir, dentry);
+
+ out_dput:
+ dput(dentry);
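
d_delete_notify(), used in the btrfs and fs/namei.c hunks, is added by this same upstream fix in include/linux/fsnotify.h (that hunk falls outside this excerpt). Its point is to pin the victim inode across d_delete() so the delete event still has a live inode to report; upstream it looks roughly like:

static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        ihold(inode);           /* keep the inode alive for the event */
        d_delete(dentry);
        fsnotify_delete(dir, inode, dentry);
        iput(inode);
}
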
1467 +diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
1468 +index cb733652ecca6..2992cebb78661 100644
1469 +--- a/fs/configfs/dir.c
1470 ++++ b/fs/configfs/dir.c
1471 +@@ -1805,8 +1805,8 @@ void configfs_unregister_group(struct config_group *group)
1472 + configfs_detach_group(&group->cg_item);
1473 + d_inode(dentry)->i_flags |= S_DEAD;
1474 + dont_mount(dentry);
1475 ++ d_drop(dentry);
1476 + fsnotify_rmdir(d_inode(parent), dentry);
1477 +- d_delete(dentry);
1478 + inode_unlock(d_inode(parent));
1479 +
1480 + dput(dentry);
1481 +@@ -1947,10 +1947,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
1482 + configfs_detach_group(&group->cg_item);
1483 + d_inode(dentry)->i_flags |= S_DEAD;
1484 + dont_mount(dentry);
1485 +- fsnotify_rmdir(d_inode(root), dentry);
1486 + inode_unlock(d_inode(dentry));
1487 +
1488 +- d_delete(dentry);
1489 ++ d_drop(dentry);
1490 ++ fsnotify_rmdir(d_inode(root), dentry);
1491 +
1492 + inode_unlock(d_inode(root));
1493 +
1494 +diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
1495 +index 42e5a766d33c7..4f25015aa5342 100644
1496 +--- a/fs/devpts/inode.c
1497 ++++ b/fs/devpts/inode.c
1498 +@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
1499 +
1500 + dentry->d_fsdata = NULL;
1501 + drop_nlink(dentry->d_inode);
1502 +- fsnotify_unlink(d_inode(dentry->d_parent), dentry);
1503 + d_drop(dentry);
1504 ++ fsnotify_unlink(d_inode(dentry->d_parent), dentry);
1505 + dput(dentry); /* d_alloc_name() in devpts_pty_new() */
1506 + }
1507 +
1508 +diff --git a/fs/namei.c b/fs/namei.c
1509 +index 5b5759d708220..b952ecbd49c29 100644
1510 +--- a/fs/namei.c
1511 ++++ b/fs/namei.c
1512 +@@ -3878,13 +3878,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
1513 + dentry->d_inode->i_flags |= S_DEAD;
1514 + dont_mount(dentry);
1515 + detach_mounts(dentry);
1516 +- fsnotify_rmdir(dir, dentry);
1517 +
1518 + out:
1519 + inode_unlock(dentry->d_inode);
1520 + dput(dentry);
1521 + if (!error)
1522 +- d_delete(dentry);
1523 ++ d_delete_notify(dir, dentry);
1524 + return error;
1525 + }
1526 + EXPORT_SYMBOL(vfs_rmdir);
1527 +@@ -3995,7 +3994,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
1528 + if (!error) {
1529 + dont_mount(dentry);
1530 + detach_mounts(dentry);
1531 +- fsnotify_unlink(dir, dentry);
1532 + }
1533 + }
1534 + }
1535 +@@ -4003,9 +4001,11 @@ out:
1536 + inode_unlock(target);
1537 +
1538 + /* We don't d_delete() NFS sillyrenamed files--they still exist. */
1539 +- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
1540 ++ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
1541 ++ fsnotify_unlink(dir, dentry);
1542 ++ } else if (!error) {
1543 + fsnotify_link_count(target);
1544 +- d_delete(dentry);
1545 ++ d_delete_notify(dir, dentry);
1546 + }
1547 +
1548 + return error;
1549 +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1550 +index e7c0790308fe0..f1c99fe486c4d 100644
1551 +--- a/fs/nfs/dir.c
1552 ++++ b/fs/nfs/dir.c
1553 +@@ -1638,6 +1638,24 @@ out:
1554 +
1555 + no_open:
1556 + res = nfs_lookup(dir, dentry, lookup_flags);
1557 ++ if (!res) {
1558 ++ inode = d_inode(dentry);
1559 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1560 ++ !S_ISDIR(inode->i_mode))
1561 ++ res = ERR_PTR(-ENOTDIR);
1562 ++ else if (inode && S_ISREG(inode->i_mode))
1563 ++ res = ERR_PTR(-EOPENSTALE);
1564 ++ } else if (!IS_ERR(res)) {
1565 ++ inode = d_inode(res);
1566 ++ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1567 ++ !S_ISDIR(inode->i_mode)) {
1568 ++ dput(res);
1569 ++ res = ERR_PTR(-ENOTDIR);
1570 ++ } else if (inode && S_ISREG(inode->i_mode)) {
1571 ++ dput(res);
1572 ++ res = ERR_PTR(-EOPENSTALE);
1573 ++ }
1574 ++ }
1575 + if (switched) {
1576 + d_lookup_done(dentry);
1577 + if (!res)
1578 +@@ -2035,6 +2053,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1579 +
1580 + trace_nfs_link_enter(inode, dir, dentry);
1581 + d_drop(dentry);
1582 ++ if (S_ISREG(inode->i_mode))
1583 ++ nfs_sync_inode(inode);
1584 + error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
1585 + if (error == 0) {
1586 + ihold(inode);
1587 +@@ -2123,6 +2143,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1588 + }
1589 + }
1590 +
1591 ++ if (S_ISREG(old_inode->i_mode))
1592 ++ nfs_sync_inode(old_inode);
1593 + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
1594 + if (IS_ERR(task)) {
1595 + error = PTR_ERR(task);
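The two nfs/dir.c hunks above add an nfs_sync_inode() call so that dirty pages are flushed to the server before link() or rename() exposes the inode under a new name. A loosely analogous userspace discipline, sketched with hypothetical file names (not part of the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("tmp_payload", O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, "data\n", 5) != 5)
            perror("write");
        fsync(fd);                 /* flush before the namespace change,
                                    * as nfs_rename() now syncs old_inode */
        close(fd);
        if (rename("tmp_payload", "final_payload") < 0)
            perror("rename");
        return 0;
    }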
1596 +diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
1597 +index f6328ae9b2da4..055cc0458f270 100644
1598 +--- a/fs/nfsd/nfsctl.c
1599 ++++ b/fs/nfsd/nfsctl.c
1600 +@@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
1601 + clear_ncl(d_inode(dentry));
1602 + dget(dentry);
1603 + ret = simple_unlink(dir, dentry);
1604 +- d_delete(dentry);
1605 ++ d_drop(dentry);
1606 ++ fsnotify_unlink(dir, dentry);
1607 + dput(dentry);
1608 + WARN_ON_ONCE(ret);
1609 + }
1610 +@@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
1611 + dget(dentry);
1612 + ret = simple_rmdir(dir, dentry);
1613 + WARN_ON_ONCE(ret);
1614 ++ d_drop(dentry);
1615 + fsnotify_rmdir(dir, dentry);
1616 +- d_delete(dentry);
1617 + dput(dentry);
1618 + inode_unlock(dir);
1619 + }
1620 +diff --git a/fs/udf/inode.c b/fs/udf/inode.c
1621 +index 507f8f9103270..639aabf30eaf0 100644
1622 +--- a/fs/udf/inode.c
1623 ++++ b/fs/udf/inode.c
1624 +@@ -258,10 +258,6 @@ int udf_expand_file_adinicb(struct inode *inode)
1625 + char *kaddr;
1626 + struct udf_inode_info *iinfo = UDF_I(inode);
1627 + int err;
1628 +- struct writeback_control udf_wbc = {
1629 +- .sync_mode = WB_SYNC_NONE,
1630 +- .nr_to_write = 1,
1631 +- };
1632 +
1633 + WARN_ON_ONCE(!inode_is_locked(inode));
1634 + if (!iinfo->i_lenAlloc) {
1635 +@@ -305,8 +301,10 @@ int udf_expand_file_adinicb(struct inode *inode)
1636 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
1637 + /* from now on we have normal address_space methods */
1638 + inode->i_data.a_ops = &udf_aops;
1639 ++ set_page_dirty(page);
1640 ++ unlock_page(page);
1641 + up_write(&iinfo->i_data_sem);
1642 +- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
1643 ++ err = filemap_fdatawrite(inode->i_mapping);
1644 + if (err) {
1645 + /* Restore everything back so that we don't lose data... */
1646 + lock_page(page);
1647 +@@ -318,6 +316,7 @@ int udf_expand_file_adinicb(struct inode *inode)
1648 + unlock_page(page);
1649 + iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
1650 + inode->i_data.a_ops = &udf_adinicb_aops;
1651 ++ iinfo->i_lenAlloc = inode->i_size;
1652 + up_write(&iinfo->i_data_sem);
1653 + }
1654 + put_page(page);
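The rewritten expansion path above marks the page dirty and lets generic writeback (filemap_fdatawrite()) flush it, instead of invoking ->writepage directly with a private writeback_control; the error path now also restores i_lenAlloc. A loose userspace analogue of "dirty the page, then ask writeback to flush it", illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("blob", O_CREAT | O_RDWR | O_TRUNC, 0644);
        if (fd < 0 || ftruncate(fd, 4096) < 0) { perror("setup"); return 1; }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        memcpy(p, "expanded\n", 9);          /* dirties the page */
        if (msync(p, 4096, MS_SYNC) < 0)     /* flush via writeback, roughly
                                              * like filemap_fdatawrite() */
            perror("msync");
        munmap(p, 4096);
        close(fd);
        return 0;
    }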
1655 +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
1656 +index a2d5d175d3c15..e9d2024473b0f 100644
1657 +--- a/include/linux/fsnotify.h
1658 ++++ b/include/linux/fsnotify.h
1659 +@@ -188,6 +188,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
1660 + fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0);
1661 + }
1662 +
1663 ++/*
1664 ++ * fsnotify_delete - @dentry was unlinked and unhashed
1665 ++ *
1666 ++ * Caller must make sure that dentry->d_name is stable.
1667 ++ *
1668 ++ * Note: unlike fsnotify_unlink(), we must also pass the unlinked inode,
1669 ++ * as this may be called after d_delete(), when the dentry may be negative.
1670 ++ */
1671 ++static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
1672 ++ struct dentry *dentry)
1673 ++{
1674 ++ __u32 mask = FS_DELETE;
1675 ++
1676 ++ if (S_ISDIR(inode->i_mode))
1677 ++ mask |= FS_ISDIR;
1678 ++
1679 ++ fsnotify(dir, mask, inode, FSNOTIFY_EVENT_INODE, &dentry->d_name, 0);
1680 ++}
1681 ++
1682 ++/**
1683 ++ * d_delete_notify - delete a dentry and call fsnotify_delete()
1684 ++ * @dentry: The dentry to delete
1685 ++ *
1686 ++ * This helper is used to guarantee that the unlinked inode cannot be found
1687 ++ * by a lookup of this name after the fsnotify_delete() event has been delivered.
1688 ++ */
1689 ++static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
1690 ++{
1691 ++ struct inode *inode = d_inode(dentry);
1692 ++
1693 ++ ihold(inode);
1694 ++ d_delete(dentry);
1695 ++ fsnotify_delete(dir, inode, dentry);
1696 ++ iput(inode);
1697 ++}
1698 ++
1699 + /*
1700 + * fsnotify_unlink - 'name' was unlinked
1701 + *
1702 +@@ -195,10 +231,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
1703 + */
1704 + static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
1705 + {
1706 +- /* Expected to be called before d_delete() */
1707 +- WARN_ON_ONCE(d_is_negative(dentry));
1708 ++ if (WARN_ON_ONCE(d_is_negative(dentry)))
1709 ++ return;
1710 +
1711 +- fsnotify_dirent(dir, dentry, FS_DELETE);
1712 ++ fsnotify_delete(dir, d_inode(dentry), dentry);
1713 + }
1714 +
1715 + /*
1716 +@@ -218,10 +254,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
1717 + */
1718 + static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
1719 + {
1720 +- /* Expected to be called before d_delete() */
1721 +- WARN_ON_ONCE(d_is_negative(dentry));
1722 ++ if (WARN_ON_ONCE(d_is_negative(dentry)))
1723 ++ return;
1724 +
1725 +- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
1726 ++ fsnotify_delete(dir, d_inode(dentry), dentry);
1727 + }
1728 +
1729 + /*
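The new d_delete_notify() helper pins the inode, unhashes the dentry, and only then emits FS_DELETE, so a watcher that receives the event can no longer resolve the name to the unlinked inode. A small userspace check of that ordering via inotify (not part of the patch; the path is arbitrary):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/inotify.h>
    #include <sys/stat.h>

    int main(void)
    {
        int fd = inotify_init1(0);
        if (fd < 0) { perror("inotify_init1"); return 1; }

        system("mkdir -p /tmp/fsn && touch /tmp/fsn/victim");
        if (inotify_add_watch(fd, "/tmp/fsn", IN_DELETE) < 0) {
            perror("inotify_add_watch"); return 1;
        }
        unlink("/tmp/fsn/victim");

        char buf[4096];
        ssize_t len = read(fd, buf, sizeof(buf));  /* blocks for the event */
        if (len > 0) {
            struct inotify_event *ev = (struct inotify_event *)buf;
            struct stat st;
            /* with the new ordering, this lookup must fail */
            printf("IN_DELETE for %s, stat=%d (expect -1)\n",
                   ev->name, stat("/tmp/fsn/victim", &st));
        }
        return 0;
    }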
1730 +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1731 +index 4860944e936db..288a586782563 100644
1732 +--- a/include/linux/netdevice.h
1733 ++++ b/include/linux/netdevice.h
1734 +@@ -2397,6 +2397,7 @@ struct packet_type {
1735 + struct net_device *);
1736 + bool (*id_match)(struct packet_type *ptype,
1737 + struct sock *sk);
1738 ++ struct net *af_packet_net;
1739 + void *af_packet_priv;
1740 + struct list_head list;
1741 + };
1742 +diff --git a/include/net/ip.h b/include/net/ip.h
1743 +index 52abfc00b5e3d..3f3ea86b2173c 100644
1744 +--- a/include/net/ip.h
1745 ++++ b/include/net/ip.h
1746 +@@ -509,19 +509,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
1747 + {
1748 + struct iphdr *iph = ip_hdr(skb);
1749 +
1750 ++ /* We have had many attacks based on IPID; use the private
1751 ++ * generator as much as we can.
1752 ++ */
1753 ++ if (sk && inet_sk(sk)->inet_daddr) {
1754 ++ iph->id = htons(inet_sk(sk)->inet_id);
1755 ++ inet_sk(sk)->inet_id += segs;
1756 ++ return;
1757 ++ }
1758 + if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
1759 +- /* This is only to work around buggy Windows95/2000
1760 +- * VJ compression implementations. If the ID field
1761 +- * does not change, they drop every other packet in
1762 +- * a TCP stream using header compression.
1763 +- */
1764 +- if (sk && inet_sk(sk)->inet_daddr) {
1765 +- iph->id = htons(inet_sk(sk)->inet_id);
1766 +- inet_sk(sk)->inet_id += segs;
1767 +- } else {
1768 +- iph->id = 0;
1769 +- }
1770 ++ iph->id = 0;
1771 + } else {
1772 ++ /* Unfortunately we need the big hammer to get a suitable IPID */
1773 + __ip_select_ident(net, iph, segs);
1774 + }
1775 + }
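The reordering above means a connected socket's per-socket IPID counter now wins even when DF is set; only unconnected DF traffic gets a constant zero ID, and everything else falls back to the hashed generator. A compact stand-alone model of that decision order (names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    enum ipid_source { IPID_SOCKET_COUNTER, IPID_ZERO, IPID_HASHED };

    static enum ipid_source pick_ipid(bool connected, bool df_set, bool ignore_df)
    {
        if (connected)              /* private generator whenever possible */
            return IPID_SOCKET_COUNTER;
        if (df_set && !ignore_df)   /* atomic datagram: the ID is unused */
            return IPID_ZERO;
        return IPID_HASHED;         /* the "big hammer" */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               pick_ipid(true, true, false),    /* 0: counter wins despite DF */
               pick_ipid(false, true, false),   /* 1: zero */
               pick_ipid(false, false, false)); /* 2: hashed */
        return 0;
    }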
1776 +diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
1777 +index 05ecaefeb6322..780754b9cbcd4 100644
1778 +--- a/include/net/ip6_fib.h
1779 ++++ b/include/net/ip6_fib.h
1780 +@@ -247,7 +247,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
1781 + fn = rcu_dereference(f6i->fib6_node);
1782 +
1783 + if (fn) {
1784 +- *cookie = fn->fn_sernum;
1785 ++ *cookie = READ_ONCE(fn->fn_sernum);
1786 + /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
1787 + smp_rmb();
1788 + status = true;
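fn_sernum is read here without the table lock, so the series wraps all of its accesses in READ_ONCE()/WRITE_ONCE() while keeping the existing smp_wmb()/smp_rmb() pairing. A C11 userspace analogue of that publish/consume pattern (illustrative; the kernel's ONCE macros are not C11 atomics):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int sernum;
    static int payload;          /* published before sernum is bumped */

    static void *writer(void *arg)
    {
        (void)arg;
        payload = 42;
        atomic_thread_fence(memory_order_release);               /* smp_wmb() */
        atomic_store_explicit(&sernum, 1, memory_order_relaxed); /* WRITE_ONCE */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, writer, NULL);
        while (!atomic_load_explicit(&sernum, memory_order_relaxed))
            ;                                                    /* READ_ONCE */
        atomic_thread_fence(memory_order_acquire);               /* smp_rmb() */
        printf("payload=%d\n", payload);                         /* always 42 */
        pthread_join(t, NULL);
        return 0;
    }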
1789 +diff --git a/include/net/route.h b/include/net/route.h
1790 +index 6c516840380db..b85d1912d84fd 100644
1791 +--- a/include/net/route.h
1792 ++++ b/include/net/route.h
1793 +@@ -359,7 +359,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
1794 + {
1795 + struct neighbour *neigh;
1796 +
1797 +- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
1798 ++ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
1799 + if (unlikely(!neigh))
1800 + neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
1801 +
1802 +diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
1803 +index 105df4dfc7839..52571dcad768b 100644
1804 +--- a/kernel/power/wakelock.c
1805 ++++ b/kernel/power/wakelock.c
1806 +@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
1807 + {
1808 + struct rb_node *node;
1809 + struct wakelock *wl;
1810 +- char *str = buf;
1811 +- char *end = buf + PAGE_SIZE;
1812 ++ int len = 0;
1813 +
1814 + mutex_lock(&wakelocks_lock);
1815 +
1816 + for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
1817 + wl = rb_entry(node, struct wakelock, node);
1818 + if (wl->ws->active == show_active)
1819 +- str += scnprintf(str, end - str, "%s ", wl->name);
1820 ++ len += sysfs_emit_at(buf, len, "%s ", wl->name);
1821 + }
1822 +- if (str > buf)
1823 +- str--;
1824 +
1825 +- str += scnprintf(str, end - str, "\n");
1826 ++ len += sysfs_emit_at(buf, len, "\n");
1827 +
1828 + mutex_unlock(&wakelocks_lock);
1829 +- return (str - buf);
1830 ++ return len;
1831 + }
1832 +
1833 + #if CONFIG_PM_WAKELOCKS_LIMIT > 0
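sysfs_emit_at() validates the running offset against the PAGE_SIZE sysfs buffer on every call, which removes the hand-rolled end-pointer arithmetic of the old scnprintf() loop. A rough userspace model of that bounds discipline (assumed behaviour, not the kernel implementation):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static int emit_at(char *buf, int at, const char *s)
    {
        if (at < 0 || at >= PAGE_SIZE)   /* the kernel version WARNs here */
            return 0;
        return snprintf(buf + at, PAGE_SIZE - at, "%s", s);
    }

    int main(void)
    {
        char page[PAGE_SIZE];
        const char *names[] = { "wl_a", "wl_b", "wl_c" };
        int len = 0;

        for (int i = 0; i < 3; i++) {
            len += emit_at(page, len, names[i]);
            len += emit_at(page, len, " ");
        }
        len += emit_at(page, len, "\n");
        fwrite(page, 1, len, stdout);
        return 0;
    }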
1834 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1835 +index 54f5b2f080f53..5a4dfb55ba16b 100644
1836 +--- a/kernel/trace/trace.c
1837 ++++ b/kernel/trace/trace.c
1838 +@@ -6994,7 +6994,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
1839 + err = kzalloc(sizeof(*err), GFP_KERNEL);
1840 + if (!err)
1841 + err = ERR_PTR(-ENOMEM);
1842 +- tr->n_err_log_entries++;
1843 ++ else
1844 ++ tr->n_err_log_entries++;
1845 +
1846 + return err;
1847 + }
1848 +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
1849 +index 8b33a3c872750..3cef24c6391a5 100644
1850 +--- a/kernel/trace/trace_events_hist.c
1851 ++++ b/kernel/trace/trace_events_hist.c
1852 +@@ -4398,6 +4398,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
1853 +
1854 + var_ref_idx = find_var_ref_idx(hist_data, var_ref);
1855 + if (WARN_ON(var_ref_idx < 0)) {
1856 ++ kfree(p);
1857 + ret = var_ref_idx;
1858 + goto err;
1859 + }
1860 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1861 +index 40f1593651e84..082a262ab49c3 100644
1862 +--- a/net/bluetooth/hci_event.c
1863 ++++ b/net/bluetooth/hci_event.c
1864 +@@ -5506,6 +5506,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1865 + struct hci_ev_le_advertising_info *ev = ptr;
1866 + s8 rssi;
1867 +
1868 ++ if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
1869 ++ bt_dev_err(hdev, "Malicious advertising data.");
1870 ++ break;
1871 ++ }
1872 ++
1873 + if (ev->length <= HCI_MAX_AD_LENGTH &&
1874 + ev->data + ev->length <= skb_tail_pointer(skb)) {
1875 + rssi = ev->data[ev->length];
1876 +@@ -5517,11 +5522,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1877 + }
1878 +
1879 + ptr += sizeof(*ev) + ev->length + 1;
1880 +-
1881 +- if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
1882 +- bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
1883 +- break;
1884 +- }
1885 + }
1886 +
1887 + hci_dev_unlock(hdev);
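The fix above moves the size check to the top of the loop, so a full event header is validated before ev->length is ever dereferenced; the old code checked only after advancing ptr, one iteration too late. A self-contained sketch of the check-before-dereference loop (record layout simplified, not the real HCI advertising format):

    #include <stdint.h>
    #include <stdio.h>

    struct adv_info { uint8_t length; uint8_t data[]; };

    static void parse(const uint8_t *buf, size_t buflen)
    {
        const uint8_t *ptr = buf, *end = buf + buflen;

        while (ptr < end) {
            const struct adv_info *ev = (const void *)ptr;

            if ((size_t)(end - ptr) < sizeof(*ev)) {  /* header fits first */
                fprintf(stderr, "truncated header, stop\n");
                break;
            }
            if ((size_t)(end - ptr) < sizeof(*ev) + ev->length + 1) {
                fprintf(stderr, "truncated record, stop\n");
                break;
            }
            printf("report: %u payload bytes, rssi=%d\n",
                   ev->length, (int8_t)ev->data[ev->length]);
            ptr += sizeof(*ev) + ev->length + 1;
        }
    }

    int main(void)
    {
        uint8_t pkt[] = { 2, 0xaa, 0xbb, (uint8_t)-40, 1 /* truncated */ };
        parse(pkt, sizeof(pkt));
        return 0;
    }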
1888 +diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
1889 +index 36347933ec3af..61f5570645e38 100644
1890 +--- a/net/core/net-procfs.c
1891 ++++ b/net/core/net-procfs.c
1892 +@@ -182,12 +182,23 @@ static const struct seq_operations softnet_seq_ops = {
1893 + .show = softnet_seq_show,
1894 + };
1895 +
1896 +-static void *ptype_get_idx(loff_t pos)
1897 ++static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
1898 + {
1899 ++ struct list_head *ptype_list = NULL;
1900 + struct packet_type *pt = NULL;
1901 ++ struct net_device *dev;
1902 + loff_t i = 0;
1903 + int t;
1904 +
1905 ++ for_each_netdev_rcu(seq_file_net(seq), dev) {
1906 ++ ptype_list = &dev->ptype_all;
1907 ++ list_for_each_entry_rcu(pt, ptype_list, list) {
1908 ++ if (i == pos)
1909 ++ return pt;
1910 ++ ++i;
1911 ++ }
1912 ++ }
1913 ++
1914 + list_for_each_entry_rcu(pt, &ptype_all, list) {
1915 + if (i == pos)
1916 + return pt;
1917 +@@ -208,22 +219,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
1918 + __acquires(RCU)
1919 + {
1920 + rcu_read_lock();
1921 +- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
1922 ++ return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1923 + }
1924 +
1925 + static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1926 + {
1927 ++ struct net_device *dev;
1928 + struct packet_type *pt;
1929 + struct list_head *nxt;
1930 + int hash;
1931 +
1932 + ++*pos;
1933 + if (v == SEQ_START_TOKEN)
1934 +- return ptype_get_idx(0);
1935 ++ return ptype_get_idx(seq, 0);
1936 +
1937 + pt = v;
1938 + nxt = pt->list.next;
1939 ++ if (pt->dev) {
1940 ++ if (nxt != &pt->dev->ptype_all)
1941 ++ goto found;
1942 ++
1943 ++ dev = pt->dev;
1944 ++ for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
1945 ++ if (!list_empty(&dev->ptype_all)) {
1946 ++ nxt = dev->ptype_all.next;
1947 ++ goto found;
1948 ++ }
1949 ++ }
1950 ++
1951 ++ nxt = ptype_all.next;
1952 ++ goto ptype_all;
1953 ++ }
1954 ++
1955 + if (pt->type == htons(ETH_P_ALL)) {
1956 ++ptype_all:
1957 + if (nxt != &ptype_all)
1958 + goto found;
1959 + hash = 0;
1960 +@@ -252,7 +281,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
1961 +
1962 + if (v == SEQ_START_TOKEN)
1963 + seq_puts(seq, "Type Device Function\n");
1964 +- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
1965 ++ else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
1966 ++ (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
1967 + if (pt->type == htons(ETH_P_ALL))
1968 + seq_puts(seq, "ALL ");
1969 + else
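Paired with the af_packet.c hunk further down, which records the owning namespace in the new af_packet_net field, this makes /proc/net/ptype namespace-aware: a hook is listed only when both its recorded owner netns (if any) and its bound device's netns (if any) match the reader's. A toy model of the visibility test (stand-in types, illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    struct netns { int id; };
    struct netdev { struct netns *net; };
    struct hook { struct netns *af_packet_net; struct netdev *dev; };

    static bool visible(const struct hook *pt, const struct netns *cur)
    {
        return (!pt->af_packet_net || pt->af_packet_net == cur) &&
               (!pt->dev || pt->dev->net == cur);
    }

    int main(void)
    {
        struct netns init_ns = { 0 }, other_ns = { 1 };
        struct netdev dev = { .net = &other_ns };
        struct hook global = { 0 };                        /* no owner recorded */
        struct hook owned  = { .af_packet_net = &other_ns };
        struct hook bound  = { .af_packet_net = &other_ns, .dev = &dev };

        printf("%d %d %d\n",
               visible(&global, &init_ns),   /* 1: shown everywhere */
               visible(&owned, &init_ns),    /* 0: foreign namespace */
               visible(&bound, &other_ns));  /* 1: both match */
        return 0;
    }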
1970 +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1971 +index 0ec529d77a56e..418e939878004 100644
1972 +--- a/net/ipv4/ip_output.c
1973 ++++ b/net/ipv4/ip_output.c
1974 +@@ -161,12 +161,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
1975 + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
1976 + iph->saddr = saddr;
1977 + iph->protocol = sk->sk_protocol;
1978 +- if (ip_dont_fragment(sk, &rt->dst)) {
1979 ++ /* Do not bother generating IPID for small packets (eg SYNACK) */
1980 ++ if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
1981 + iph->frag_off = htons(IP_DF);
1982 + iph->id = 0;
1983 + } else {
1984 + iph->frag_off = 0;
1985 +- __ip_select_ident(net, iph, 1);
1986 ++ /* TCP packets here are SYNACK with fat IPv4/TCP options.
1987 ++ * Avoid using the hashed IP ident generator.
1988 ++ */
1989 ++ if (sk->sk_protocol == IPPROTO_TCP)
1990 ++ iph->id = (__force __be16)prandom_u32();
1991 ++ else
1992 ++ __ip_select_ident(net, iph, 1);
1993 + }
1994 +
1995 + if (opt && opt->opt.optlen) {
1996 +diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
1997 +index 1c3d5d3702a10..b0f51c7cc0d90 100644
1998 +--- a/net/ipv4/ping.c
1999 ++++ b/net/ipv4/ping.c
2000 +@@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
2001 + continue;
2002 + }
2003 +
2004 +- if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
2005 ++ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
2006 ++ sk->sk_bound_dev_if != inet_sdif(skb))
2007 + continue;
2008 +
2009 + sock_hold(sk);
2010 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2011 +index 3183413ebc6c2..ddc24e57dc555 100644
2012 +--- a/net/ipv4/raw.c
2013 ++++ b/net/ipv4/raw.c
2014 +@@ -720,6 +720,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2015 + int ret = -EINVAL;
2016 + int chk_addr_ret;
2017 +
2018 ++ lock_sock(sk);
2019 + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
2020 + goto out;
2021 +
2022 +@@ -739,7 +740,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2023 + inet->inet_saddr = 0; /* Use device */
2024 + sk_dst_reset(sk);
2025 + ret = 0;
2026 +-out: return ret;
2027 ++out:
2028 ++ release_sock(sk);
2029 ++ return ret;
2030 + }
2031 +
2032 + /*
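raw_bind() now runs under lock_sock(), the same lock other socket operations take, so the state check, address validation, and the final assignments happen atomically with respect to concurrent callers. Minimal shape of the pattern, with a pthread mutex standing in for the socket lock (sketch only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static int sk_saddr;

    static int toy_bind(int addr, int addr_ok)
    {
        int ret = -22;                    /* -EINVAL */

        pthread_mutex_lock(&sk_lock);     /* lock_sock(sk) */
        if (!addr_ok)
            goto out;
        sk_saddr = addr;                  /* atomic w.r.t. other socket ops */
        ret = 0;
    out:
        pthread_mutex_unlock(&sk_lock);   /* release_sock(sk) */
        return ret;
    }

    int main(void)
    {
        int a = toy_bind(1, 1);
        int b = toy_bind(2, 0);
        printf("%d %d (saddr=%d)\n", a, b, sk_saddr);
        return 0;
    }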
2033 +diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2034 +index 9a6f66e0e9a27..ef55489651f87 100644
2035 +--- a/net/ipv6/ip6_fib.c
2036 ++++ b/net/ipv6/ip6_fib.c
2037 +@@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
2038 + fn = rcu_dereference_protected(f6i->fib6_node,
2039 + lockdep_is_held(&f6i->fib6_table->tb6_lock));
2040 + if (fn)
2041 +- fn->fn_sernum = fib6_new_sernum(net);
2042 ++ WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
2043 + }
2044 +
2045 + /*
2046 +@@ -535,12 +535,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
2047 + spin_unlock_bh(&table->tb6_lock);
2048 + if (res > 0) {
2049 + cb->args[4] = 1;
2050 +- cb->args[5] = w->root->fn_sernum;
2051 ++ cb->args[5] = READ_ONCE(w->root->fn_sernum);
2052 + }
2053 + } else {
2054 +- if (cb->args[5] != w->root->fn_sernum) {
2055 ++ int sernum = READ_ONCE(w->root->fn_sernum);
2056 ++ if (cb->args[5] != sernum) {
2057 + /* Begin at the root if the tree changed */
2058 +- cb->args[5] = w->root->fn_sernum;
2059 ++ cb->args[5] = sernum;
2060 + w->state = FWS_INIT;
2061 + w->node = w->root;
2062 + w->skip = w->count;
2063 +@@ -1276,7 +1277,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
2064 + /* paired with smp_rmb() in rt6_get_cookie_safe() */
2065 + smp_wmb();
2066 + while (fn) {
2067 +- fn->fn_sernum = sernum;
2068 ++ WRITE_ONCE(fn->fn_sernum, sernum);
2069 + fn = rcu_dereference_protected(fn->parent,
2070 + lockdep_is_held(&rt->fib6_table->tb6_lock));
2071 + }
2072 +@@ -2068,8 +2069,8 @@ static int fib6_clean_node(struct fib6_walker *w)
2073 + };
2074 +
2075 + if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
2076 +- w->node->fn_sernum != c->sernum)
2077 +- w->node->fn_sernum = c->sernum;
2078 ++ READ_ONCE(w->node->fn_sernum) != c->sernum)
2079 ++ WRITE_ONCE(w->node->fn_sernum, c->sernum);
2080 +
2081 + if (!c->func) {
2082 + WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
2083 +@@ -2433,7 +2434,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
2084 + iter->w.state = FWS_INIT;
2085 + iter->w.node = iter->w.root;
2086 + iter->w.args = iter;
2087 +- iter->sernum = iter->w.root->fn_sernum;
2088 ++ iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
2089 + INIT_LIST_HEAD(&iter->w.lh);
2090 + fib6_walker_link(net, &iter->w);
2091 + }
2092 +@@ -2461,8 +2462,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
2093 +
2094 + static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
2095 + {
2096 +- if (iter->sernum != iter->w.root->fn_sernum) {
2097 +- iter->sernum = iter->w.root->fn_sernum;
2098 ++ int sernum = READ_ONCE(iter->w.root->fn_sernum);
2099 ++
2100 ++ if (iter->sernum != sernum) {
2101 ++ iter->sernum = sernum;
2102 + iter->w.state = FWS_INIT;
2103 + iter->w.node = iter->w.root;
2104 + WARN_ON(iter->w.skip);
2105 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2106 +index fd0d1cee2d3f5..878a08c40fffd 100644
2107 +--- a/net/ipv6/ip6_tunnel.c
2108 ++++ b/net/ipv6/ip6_tunnel.c
2109 +@@ -1000,14 +1000,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
2110 +
2111 + if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
2112 + 0, IFA_F_TENTATIVE)))
2113 +- pr_warn("%s xmit: Local address not yet configured!\n",
2114 +- p->name);
2115 ++ pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
2116 ++ p->name);
2117 + else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
2118 + !ipv6_addr_is_multicast(raddr) &&
2119 + unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
2120 + true, 0, IFA_F_TENTATIVE)))
2121 +- pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
2122 +- p->name);
2123 ++ pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
2124 ++ p->name);
2125 + else
2126 + ret = 1;
2127 + rcu_read_unlock();
2128 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2129 +index 5ef6e27e026e9..2a13394ab8541 100644
2130 +--- a/net/ipv6/route.c
2131 ++++ b/net/ipv6/route.c
2132 +@@ -2697,7 +2697,7 @@ static void ip6_link_failure(struct sk_buff *skb)
2133 + if (from) {
2134 + fn = rcu_dereference(from->fib6_node);
2135 + if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2136 +- fn->fn_sernum = -1;
2137 ++ WRITE_ONCE(fn->fn_sernum, -1);
2138 + }
2139 + }
2140 + rcu_read_unlock();
2141 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2142 +index 4bcc36e4b2ef0..d9b6f2001d006 100644
2143 +--- a/net/netfilter/nf_conntrack_core.c
2144 ++++ b/net/netfilter/nf_conntrack_core.c
2145 +@@ -1709,15 +1709,17 @@ repeat:
2146 + pr_debug("nf_conntrack_in: Can't track with proto module\n");
2147 + nf_conntrack_put(&ct->ct_general);
2148 + skb->_nfct = 0;
2149 +- NF_CT_STAT_INC_ATOMIC(state->net, invalid);
2150 +- if (ret == -NF_DROP)
2151 +- NF_CT_STAT_INC_ATOMIC(state->net, drop);
2152 + /* Special case: TCP tracker reports an attempt to reopen a
2153 + * closed/aborted connection. We have to go back and create a
2154 + * fresh conntrack.
2155 + */
2156 + if (ret == -NF_REPEAT)
2157 + goto repeat;
2158 ++
2159 ++ NF_CT_STAT_INC_ATOMIC(state->net, invalid);
2160 ++ if (ret == -NF_DROP)
2161 ++ NF_CT_STAT_INC_ATOMIC(state->net, drop);
2162 ++
2163 + ret = -ret;
2164 + goto out;
2165 + }
2166 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2167 +index 921f8f45b17f4..cf0512fc648e7 100644
2168 +--- a/net/netfilter/nft_payload.c
2169 ++++ b/net/netfilter/nft_payload.c
2170 +@@ -420,6 +420,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
2171 + struct sk_buff *skb,
2172 + unsigned int *l4csum_offset)
2173 + {
2174 ++ if (pkt->xt.fragoff)
2175 ++ return -1;
2176 ++
2177 + switch (pkt->tprot) {
2178 + case IPPROTO_TCP:
2179 + *l4csum_offset = offsetof(struct tcphdr, check);
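The added pkt->xt.fragoff guard matters because only the first fragment of an IPv4 datagram carries the transport header; computing an L4 checksum offset on a later fragment would patch payload bytes instead. A stand-alone model of the guard (struct layout simplified, not the kernel's struct tcphdr):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tcp_hdr_model { uint16_t source, dest; uint32_t seq; uint16_t check; };

    static int l4csum_offset(uint16_t fragoff, size_t *off)
    {
        if (fragoff)                 /* non-first fragment: payload only */
            return -1;
        *off = offsetof(struct tcp_hdr_model, check);
        return 0;
    }

    int main(void)
    {
        size_t off = 0;
        printf("first frag:  %d\n", l4csum_offset(0, &off));
        printf("later frag:  %d\n", l4csum_offset(185, &off));
        return 0;
    }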
2180 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2181 +index 6062bd5bf132b..839e1caa57a59 100644
2182 +--- a/net/packet/af_packet.c
2183 ++++ b/net/packet/af_packet.c
2184 +@@ -1715,6 +1715,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
2185 + match->prot_hook.dev = po->prot_hook.dev;
2186 + match->prot_hook.func = packet_rcv_fanout;
2187 + match->prot_hook.af_packet_priv = match;
2188 ++ match->prot_hook.af_packet_net = read_pnet(&match->net);
2189 + match->prot_hook.id_match = match_fanout_group;
2190 + list_add(&match->list, &fanout_list);
2191 + }
2192 +@@ -3294,6 +3295,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2193 + po->prot_hook.func = packet_rcv_spkt;
2194 +
2195 + po->prot_hook.af_packet_priv = sk;
2196 ++ po->prot_hook.af_packet_net = sock_net(sk);
2197 +
2198 + if (proto) {
2199 + po->prot_hook.type = proto;
2200 +diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
2201 +index 9ff85ee8337cd..80e15310f1b29 100644
2202 +--- a/net/rxrpc/call_event.c
2203 ++++ b/net/rxrpc/call_event.c
2204 +@@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
2205 + static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2206 + {
2207 + struct sk_buff *skb;
2208 +- unsigned long resend_at, rto_j;
2209 ++ unsigned long resend_at;
2210 + rxrpc_seq_t cursor, seq, top;
2211 + ktime_t now, max_age, oldest, ack_ts;
2212 + int ix;
2213 +@@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2214 +
2215 + _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
2216 +
2217 +- rto_j = call->peer->rto_j;
2218 +-
2219 + now = ktime_get_real();
2220 +- max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
2221 ++ max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
2222 +
2223 + spin_lock_bh(&call->lock);
2224 +
2225 +@@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2226 + }
2227 +
2228 + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
2229 +- resend_at += jiffies + rto_j;
2230 ++ resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
2231 + WRITE_ONCE(call->resend_at, resend_at);
2232 +
2233 + if (unacked)
2234 +diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
2235 +index f8b632a5c6197..a4a6f8ee07201 100644
2236 +--- a/net/rxrpc/output.c
2237 ++++ b/net/rxrpc/output.c
2238 +@@ -426,7 +426,7 @@ done:
2239 + if (call->peer->rtt_count > 1) {
2240 + unsigned long nowj = jiffies, ack_lost_at;
2241 +
2242 +- ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
2243 ++ ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
2244 + ack_lost_at += nowj;
2245 + WRITE_ONCE(call->ack_lost_at, ack_lost_at);
2246 + rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
2247 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
2248 +index 37792675ed571..3b825942e2f67 100644
2249 +--- a/net/sunrpc/rpc_pipe.c
2250 ++++ b/net/sunrpc/rpc_pipe.c
2251 +@@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
2252 +
2253 + dget(dentry);
2254 + ret = simple_rmdir(dir, dentry);
2255 ++ d_drop(dentry);
2256 + if (!ret)
2257 + fsnotify_rmdir(dir, dentry);
2258 +- d_delete(dentry);
2259 + dput(dentry);
2260 + return ret;
2261 + }
2262 +@@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
2263 +
2264 + dget(dentry);
2265 + ret = simple_unlink(dir, dentry);
2266 ++ d_drop(dentry);
2267 + if (!ret)
2268 + fsnotify_unlink(dir, dentry);
2269 +- d_delete(dentry);
2270 + dput(dentry);
2271 + return ret;
2272 + }