From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 05 Jan 2022 12:53:49
Message-Id: 1641387215.0efaded8af6a285602c5eafa7a14f16b15c8e93b.mpagano@gentoo
commit: 0efaded8af6a285602c5eafa7a14f16b15c8e93b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 5 12:53:35 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 5 12:53:35 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0efaded8

Linux patch 5.10.90

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1089_linux-5.10.90.patch | 2487 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2491 insertions(+)

diff --git a/0000_README b/0000_README
index aa52e9d4..46422e5d 100644
--- a/0000_README
+++ b/0000_README
@@ -399,6 +399,10 @@ Patch: 1088_linux-5.10.89.patch
From: http://www.kernel.org
Desc: Linux 5.10.89

+Patch: 1089_linux-5.10.90.patch
+From: http://www.kernel.org
+Desc: Linux 5.10.90
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1089_linux-5.10.90.patch b/1089_linux-5.10.90.patch
new file mode 100644
index 00000000..30c61e72
--- /dev/null
+++ b/1089_linux-5.10.90.patch
@@ -0,0 +1,2487 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index ccaa72562538e..d00618967854d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1617,6 +1617,8 @@
+ architectures force reset to be always executed
+ i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port
++ i8042.probe_defer
++ [HW] Allow deferred probing upon i8042 probe errors
+
+ i810= [HW,DRM]
+
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index d4b32cc32bb79..7d5e8a67c775f 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1457,11 +1457,22 @@ unprivileged_bpf_disabled
+ =========================
+
+ Writing 1 to this entry will disable unprivileged calls to ``bpf()``;
+-once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` will return
+-``-EPERM``.
++once disabled, calling ``bpf()`` without ``CAP_SYS_ADMIN`` or ``CAP_BPF``
++will return ``-EPERM``. Once set to 1, this can't be cleared from the
++running kernel anymore.
+
+-Once set, this can't be cleared.
++Writing 2 to this entry will also disable unprivileged calls to ``bpf()``,
++however, an admin can still change this setting later on, if needed, by
++writing 0 or 1 to this entry.
+
++If ``BPF_UNPRIV_DEFAULT_OFF`` is enabled in the kernel config, then this
++entry will default to 2 instead of 0.
++
++= =============================================================
++0 Unprivileged calls to ``bpf()`` are enabled
++1 Unprivileged calls to ``bpf()`` are disabled without recovery
++2 Unprivileged calls to ``bpf()`` are disabled
++= =============================================================
+
+ watchdog
+ ========
+diff --git a/Makefile b/Makefile
+index 1500ea340424d..556241a10821f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 89
++SUBLEVEL = 90
+ EXTRAVERSION =
+ NAME = Dare mighty things
+
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index a52c7abf2ca49..43f56335759a4 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -729,6 +729,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ }
+ mmap_read_unlock(current->mm);
+ }
++ /* CPU could not fetch instruction, so clear stale IIR value. */
++ regs->iir = 0xbaadf00d;
+ fallthrough;
+ case 27:
+ /* Data memory protection ID trap */
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 7caf74ad24053..95ca4f934d283 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -662,7 +662,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+- alloc->free_async_space += size + sizeof(struct binder_buffer);
++ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index bfb95143ba5e8..ec6bfa316daa3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -372,10 +372,15 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
+ return -EINVAL;
+ }
+
++union gc_info {
++ struct gc_info_v1_0 v1;
++ struct gc_info_v2_0 v2;
++};
++
+ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+ {
+ struct binary_header *bhdr;
+- struct gc_info_v1_0 *gc_info;
++ union gc_info *gc_info;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+@@ -383,27 +388,54 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+- gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
++ gc_info = (union gc_info *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[GC].offset));
+-
+- adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
+- adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
+- le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
+- adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
+- adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
+- adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
+- adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
+- adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
+- adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
+- adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
+- adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
+- adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
+- adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
+- adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
+- adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
+- adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
+- le32_to_cpu(gc_info->gc_num_sa_per_se);
+- adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
+-
++ switch (gc_info->v1.header.version_major) {
++ case 1:
++ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
++ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
++ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
++ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
++ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
++ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
++ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
++ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
++ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
++ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
++ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
++ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
++ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
++ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
++ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
++ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
++ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
++ break;
++ case 2:
++ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
++ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
++ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
++ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
++ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
++ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
++ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
++ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
++ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
++ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
++ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
++ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
++ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
++ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
++ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
++ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
++ break;
++ default:
++ dev_err(adev->dev,
++ "Unhandled GC info table %d.%d\n",
++ gc_info->v1.header.version_major,
++ gc_info->v1.header.version_minor);
++ return -EINVAL;
++ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index e8737fa438f06..7115f6dbb1372 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -254,6 +254,13 @@ static int vcn_v1_0_suspend(void *handle)
+ {
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ bool idle_work_unexecuted;
++
++ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
++ if (idle_work_unexecuted) {
++ if (adev->pm.dpm_enabled)
++ amdgpu_dpm_enable_uvd(adev, false);
++ }
+
+ r = vcn_v1_0_hw_fini(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
+index 7ec4331e67f26..a486769b66c6a 100644
+--- a/drivers/gpu/drm/amd/include/discovery.h
++++ b/drivers/gpu/drm/amd/include/discovery.h
+@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
+ uint32_t gc_num_gl2a;
+ };
+
++struct gc_info_v1_1 {
++ struct gpu_info_header header;
++
++ uint32_t gc_num_se;
++ uint32_t gc_num_wgp0_per_sa;
++ uint32_t gc_num_wgp1_per_sa;
++ uint32_t gc_num_rb_per_se;
++ uint32_t gc_num_gl2c;
++ uint32_t gc_num_gprs;
++ uint32_t gc_num_max_gs_thds;
++ uint32_t gc_gs_table_depth;
++ uint32_t gc_gsprim_buff_depth;
++ uint32_t gc_parameter_cache_depth;
++ uint32_t gc_double_offchip_lds_buffer;
++ uint32_t gc_wave_size;
++ uint32_t gc_max_waves_per_simd;
++ uint32_t gc_max_scratch_slots_per_cu;
++ uint32_t gc_lds_size;
++ uint32_t gc_num_sc_per_se;
++ uint32_t gc_num_sa_per_se;
++ uint32_t gc_num_packer_per_sc;
++ uint32_t gc_num_gl2a;
++ uint32_t gc_num_tcp_per_sa;
++ uint32_t gc_num_sdp_interface;
++ uint32_t gc_num_tcps;
++};
++
++struct gc_info_v2_0 {
++ struct gpu_info_header header;
++
++ uint32_t gc_num_se;
++ uint32_t gc_num_cu_per_sh;
++ uint32_t gc_num_sh_per_se;
++ uint32_t gc_num_rb_per_se;
++ uint32_t gc_num_tccs;
++ uint32_t gc_num_gprs;
++ uint32_t gc_num_max_gs_thds;
++ uint32_t gc_gs_table_depth;
++ uint32_t gc_gsprim_buff_depth;
++ uint32_t gc_parameter_cache_depth;
++ uint32_t gc_double_offchip_lds_buffer;
++ uint32_t gc_wave_size;
++ uint32_t gc_max_waves_per_simd;
++ uint32_t gc_max_scratch_slots_per_cu;
++ uint32_t gc_lds_size;
++ uint32_t gc_num_sc_per_se;
++ uint32_t gc_num_packer_per_sc;
++};
++
+ typedef struct harvest_info_header {
+ uint32_t signature; /* Table Signature */
+ uint32_t version; /* Table Version */
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index f358120d59b38..dafad891998ec 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -536,6 +536,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
++ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
++ return -EINVAL;
++
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
+index 429411c6c0a8e..a85a4f33aea8c 100644
+--- a/drivers/input/joystick/spaceball.c
++++ b/drivers/input/joystick/spaceball.c
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/input.h>
+ #include <linux/serio.h>
++#include <asm/unaligned.h>
+
+ #define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
+
+@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
+
+ case 'D': /* Ball data */
+ if (spaceball->idx != 15) return;
+- for (i = 0; i < 6; i++)
++ /*
++ * Skip first three bytes; read six axes worth of data.
++ * Axis values are signed 16-bit big-endian.
++ */
++ data += 3;
++ for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
+ input_report_abs(dev, spaceball_axes[i],
+- (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
++ (__s16)get_unaligned_be16(&data[i * 2]));
++ }
+ break;
+
+ case 'K': /* Button data */
+diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
+index bfa26651c0be7..627048bc6a12e 100644
+--- a/drivers/input/mouse/appletouch.c
++++ b/drivers/input/mouse/appletouch.c
+@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
+ set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ set_bit(BTN_LEFT, input_dev->keybit);
+
++ INIT_WORK(&dev->work, atp_reinit);
++
+ error = input_register_device(dev->input);
+ if (error)
+ goto err_free_buffer;
+@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(iface, dev);
+
+- INIT_WORK(&dev->work, atp_reinit);
+-
+ return 0;
+
+ err_free_buffer:
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index aedd055410443..148a7c5fd0e22 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ { }
+ };
+
++static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
++ {
++ /* ASUS ZenBook UX425UA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
++ },
++ },
++ {
++ /* ASUS ZenBook UM325UA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
++ },
++ },
++ { }
++};
++
+ #endif /* CONFIG_X86 */
+
+ #ifdef CONFIG_PNP
+@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
++ if (dmi_check_system(i8042_dmi_probe_defer_table))
++ i8042_probe_defer = true;
++
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index abae23af0791e..a9f68f535b727 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -45,6 +45,10 @@ static bool i8042_unlock;
+ module_param_named(unlock, i8042_unlock, bool, 0);
+ MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
+
++static bool i8042_probe_defer;
++module_param_named(probe_defer, i8042_probe_defer, bool, 0);
++MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
++
+ enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
+ * LCS/Telegraphics.
+ */
+
+-static int __init i8042_check_mux(void)
++static int i8042_check_mux(void)
+ {
+ unsigned char mux_version;
+
+@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
+ /*
+ * The following is used to test AUX IRQ delivery.
+ */
+-static struct completion i8042_aux_irq_delivered __initdata;
+-static bool i8042_irq_being_tested __initdata;
++static struct completion i8042_aux_irq_delivered;
++static bool i8042_irq_being_tested;
+
+-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
++static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
+ {
+ unsigned long flags;
+ unsigned char str, data;
+@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+ * verifies success by readinng CTR. Used when testing for presence of AUX
+ * port.
+ */
+-static int __init i8042_toggle_aux(bool on)
++static int i8042_toggle_aux(bool on)
+ {
+ unsigned char param;
+ int i;
+@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
+ * the presence of an AUX interface.
+ */
+
+-static int __init i8042_check_aux(void)
++static int i8042_check_aux(void)
+ {
+ int retval = -1;
+ bool irq_registered = false;
+@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
+
+ if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
+ pr_err("Can't read CTR while initializing i8042\n");
+- return -EIO;
++ return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
+ }
+
+ } while (n < 2 || ctr[0] != ctr[1]);
+@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
+ i8042_controller_reset(false);
+ }
+
+-static int __init i8042_create_kbd_port(void)
++static int i8042_create_kbd_port(void)
+ {
+ struct serio *serio;
+ struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
+@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
+ return 0;
+ }
+
+-static int __init i8042_create_aux_port(int idx)
++static int i8042_create_aux_port(int idx)
+ {
+ struct serio *serio;
+ int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
+@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
+ return 0;
+ }
+
+-static void __init i8042_free_kbd_port(void)
++static void i8042_free_kbd_port(void)
+ {
+ kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
+ i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
+ }
+
+-static void __init i8042_free_aux_ports(void)
++static void i8042_free_aux_ports(void)
+ {
+ int i;
+
+@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
+ }
+ }
+
+-static void __init i8042_register_ports(void)
++static void i8042_register_ports(void)
+ {
+ int i;
+
+@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
+ i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
+ }
+
+-static int __init i8042_setup_aux(void)
++static int i8042_setup_aux(void)
+ {
+ int (*aux_enable)(void);
+ int error;
+@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
+ return error;
+ }
+
+-static int __init i8042_setup_kbd(void)
++static int i8042_setup_kbd(void)
+ {
+ int error;
+
+@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+ return 0;
+ }
+
+-static int __init i8042_probe(struct platform_device *dev)
++static int i8042_probe(struct platform_device *dev)
+ {
+ int error;
+
+@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
+ .pm = &i8042_pm_ops,
+ #endif
+ },
++ .probe = i8042_probe,
+ .remove = i8042_remove,
+ .shutdown = i8042_shutdown,
+ };
+@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
+
+ static int __init i8042_init(void)
+ {
+- struct platform_device *pdev;
+ int err;
+
+ dbg_init();
+@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
+ /* Set this before creating the dev to allow i8042_command to work right away */
+ i8042_present = true;
+
+- pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
+- if (IS_ERR(pdev)) {
+- err = PTR_ERR(pdev);
++ err = platform_driver_register(&i8042_driver);
++ if (err)
+ goto err_platform_exit;
++
++ i8042_platform_device = platform_device_alloc("i8042", -1);
++ if (!i8042_platform_device) {
++ err = -ENOMEM;
++ goto err_unregister_driver;
+ }
+
++ err = platform_device_add(i8042_platform_device);
++ if (err)
++ goto err_free_device;
++
+ bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
+ panic_blink = i8042_panic_blink;
+
+ return 0;
+
++err_free_device:
++ platform_device_put(i8042_platform_device);
++err_unregister_driver:
++ platform_driver_unregister(&i8042_driver);
+ err_platform_exit:
+ i8042_platform_exit();
+ return err;
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index a60ce90305819..c26c9b0c00d8f 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -1904,15 +1904,12 @@ static int ag71xx_probe(struct platform_device *pdev)
+ ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
+ if (IS_ERR(ag->mac_reset)) {
+ netif_err(ag, probe, ndev, "missing mac reset\n");
+- err = PTR_ERR(ag->mac_reset);
+- goto err_free;
++ return PTR_ERR(ag->mac_reset);
+ }
+
+ ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+- if (!ag->mac_base) {
+- err = -ENOMEM;
+- goto err_free;
+- }
++ if (!ag->mac_base)
++ return -ENOMEM;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
+@@ -1920,7 +1917,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
+ ndev->irq);
+- goto err_free;
++ return err;
+ }
+
+ ndev->netdev_ops = &ag71xx_netdev_ops;
+@@ -1948,10 +1945,8 @@ static int ag71xx_probe(struct platform_device *pdev)
+ ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct ag71xx_desc),
+ &ag->stop_desc_dma, GFP_KERNEL);
+- if (!ag->stop_desc) {
+- err = -ENOMEM;
+- goto err_free;
+- }
++ if (!ag->stop_desc)
++ return -ENOMEM;
+
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+@@ -1968,7 +1963,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
+ netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
+- goto err_free;
++ return err;
+ }
+
+ netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+
+ err = clk_prepare_enable(ag->clk_eth);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
+- goto err_free;
++ return err;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
+@@ -2012,8 +2007,6 @@ err_mdio_remove:
+ ag71xx_mdio_remove(ag);
+ err_put_clk:
+ clk_disable_unprepare(ag->clk_eth);
+-err_free:
+- free_netdev(ndev);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
+index d9baac0dbc7d0..4c9d05c45c033 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_port.c
++++ b/drivers/net/ethernet/freescale/fman/fman_port.c
+@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ fman = dev_get_drvdata(&fm_pdev->dev);
+ if (!fman) {
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+
+ err = of_property_read_u32(port_node, "cell-index", &val);
+ if (err) {
+ dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
+ __func__, port_node);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+ port_id = (u8)val;
+ port->dts_params.id = port_id;
+@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ } else {
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+
+ port->dts_params.type = port_type;
+@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+ port->dts_params.qman_channel_id = qman_channel_id;
+ }
+@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
+ err = -ENOMEM;
+- goto return_err;
++ goto put_device;
+ }
+
+ port->dts_params.fman = fman;
+@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
+
+ return 0;
+
++put_device:
++ put_device(&fm_pdev->dev);
+ return_err:
+ of_node_put(port_node);
+ free_port:
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index cae090a072524..61cebb7df6bcb 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -4422,6 +4422,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++ if (icr & IGC_ICR_TS)
++ igc_tsync_interrupt(adapter);
++
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+@@ -4465,6 +4468,9 @@ static irqreturn_t igc_intr(int irq, void *data)
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++ if (icr & IGC_ICR_TS)
++ igc_tsync_interrupt(adapter);
++
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index 072075bc60ee9..500511b72ac60 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -209,7 +209,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_receive_skb(skb);
+ net_dev->stats.rx_packets++;
+- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
++ net_dev->stats.rx_bytes += len;
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 9da34f82d4668..73060b30fece3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -916,9 +916,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+ void mlx5e_close_rq(struct mlx5e_rq *rq);
+
+ struct mlx5e_sq_param;
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
+ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
+ struct mlx5e_xdpsq *sq, bool is_redirect);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index 8be6eaa3eeb14..13dd34c571b9f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -335,6 +335,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ }
+
++static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
++ void *ctx)
++{
++ struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
++
++ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
++}
++
+ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+ {
+@@ -418,7 +426,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+ to_ctx.sq = sq;
+ err_ctx.ctx = &to_ctx;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+- err_ctx.dump = mlx5e_tx_reporter_dump_sq;
++ err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
+ snprintf(err_str, sizeof(err_str),
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+ sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 6ec4b96497ffb..2f6c3a5813ed1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1051,9 +1051,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+ mlx5e_reporter_icosq_cqe_err(sq);
+ }
+
++static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
++{
++ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
++ recover_work);
++
++ /* Not implemented yet. */
++
++ netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
++}
++
+ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ struct mlx5e_sq_param *param,
+- struct mlx5e_icosq *sq)
++ struct mlx5e_icosq *sq,
++ work_func_t recover_work_func)
+ {
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+@@ -1073,7 +1084,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ if (err)
+ goto err_sq_wq_destroy;
+
+- INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
++ INIT_WORK(&sq->recover_work, recover_work_func);
+
+ return 0;
+
+@@ -1423,13 +1434,14 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
+ mlx5e_reporter_tx_err_cqe(sq);
+ }
+
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
++static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
++ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
++ work_func_t recover_work_func)
+ {
+ struct mlx5e_create_sq_param csp = {};
+ int err;
+
+- err = mlx5e_alloc_icosq(c, param, sq);
++ err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
+ if (err)
+ return err;
+
+@@ -1459,7 +1471,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+ synchronize_net(); /* Sync with NAPI. */
+ }
+
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
++static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+ {
+ struct mlx5e_channel *c = sq->channel;
+
+@@ -1862,11 +1874,13 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
+
+ spin_lock_init(&c->async_icosq_lock);
+
+- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
++ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
++ mlx5e_async_icosq_err_cqe_work);
+ if (err)
+ goto err_disable_napi;
+
+- err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
++ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
++ mlx5e_icosq_err_cqe_work);
+ if (err)
+ goto err_close_async_icosq;
+
+@@ -3921,12 +3935,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
+
+ static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
+- netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlx5e_feature_handler feature_handler)
+ {
+- netdev_features_t changes = wanted_features ^ netdev->features;
+- bool enable = !!(wanted_features & feature);
++ netdev_features_t changes = *features ^ netdev->features;
++ bool enable = !!(*features & feature);
+ int err;
+
+ if (!(changes & feature))
+@@ -3934,22 +3947,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
+
+ err = feature_handler(netdev, enable);
+ if (err) {
++ MLX5E_SET_FEATURE(features, feature, !enable);
+ netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+- MLX5E_SET_FEATURE(features, feature, enable);
+ return 0;
+ }
+
+ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
+ {
+- netdev_features_t oper_features = netdev->features;
++ netdev_features_t oper_features = features;
+ int err = 0;
+
+ #define MLX5E_HANDLE_FEATURE(feature, handler) \
+- mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
++ mlx5e_handle_feature(netdev, &oper_features, feature, handler)
+
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+index 00d861361428f..16a7c7ec5e138 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2019 Mellanox Technologies. */
+
+ #include <linux/mlx5/eswitch.h>
++#include <linux/err.h>
+ #include "dr_types.h"
+
+ #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
+@@ -69,9 +70,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
+ }
+
+ dmn->uar = mlx5_get_uars_page(dmn->mdev);
+- if (!dmn->uar) {
++ if (IS_ERR(dmn->uar)) {
+ mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+- ret = -ENOMEM;
++ ret = PTR_ERR(dmn->uar);
+ goto clean_pd;
+ }
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index 1b44155fa24b2..e95c09dc2c30d 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -2836,7 +2836,7 @@ int ionic_lif_init(struct ionic_lif *lif)
+ return -EINVAL;
+ }
+
+- lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
++ lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
+ if (!lif->dbid_inuse) {
+ dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
+ return -ENOMEM;
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 2a748a924f838..138279bbb544b 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -518,11 +518,11 @@ static void read_bulk_callback(struct urb *urb)
+ goto goon;
+
+ rx_status = buf[count - 2];
+- if (rx_status & 0x1e) {
++ if (rx_status & 0x1c) {
+ netif_dbg(pegasus, rx_err, net,
+ "RX packet error %x\n", rx_status);
+ net->stats.rx_errors++;
+- if (rx_status & 0x06) /* long or runt */
++ if (rx_status & 0x04) /* runt */
+ net->stats.rx_length_errors++;
+ if (rx_status & 0x08)
+ net->stats.rx_crc_errors++;
+diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
+index 23ed11f91213d..6ea59426ab0bf 100644
+--- a/drivers/nfc/st21nfca/i2c.c
++++ b/drivers/nfc/st21nfca/i2c.c
+@@ -533,7 +533,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+ phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(phy->gpiod_ena)) {
+ nfc_err(dev, "Unable to get ENABLE GPIO\n");
+- return PTR_ERR(phy->gpiod_ena);
++ r = PTR_ERR(phy->gpiod_ena);
++ goto out_free;
+ }
+
+ phy->se_status.is_ese_present =
+@@ -544,7 +545,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+ r = st21nfca_hci_platform_init(phy);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to reboot st21nfca\n");
+- return r;
++ goto out_free;
+ }
+
+ r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+@@ -553,15 +554,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
+ ST21NFCA_HCI_DRIVER_NAME, phy);
+ if (r < 0) {
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
+- return r;
++ goto out_free;
+ }
+
+- return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+- ST21NFCA_FRAME_HEADROOM,
+- ST21NFCA_FRAME_TAILROOM,
+- ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+- &phy->hdev,
+- &phy->se_status);
++ r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
++ ST21NFCA_FRAME_HEADROOM,
++ ST21NFCA_FRAME_TAILROOM,
++ ST21NFCA_HCI_LLC_MAX_PAYLOAD,
++ &phy->hdev,
++ &phy->se_status);
++ if (r)
++ goto out_free;
++
++ return 0;
++
++out_free:
++ kfree_skb(phy->pending_skb);
++ return r;
+ }
+
+ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+@@ -574,6 +583,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+
+ if (phy->powered)
+ st21nfca_hci_i2c_disable(phy);
++ if (phy->pending_skb)
++ kfree_skb(phy->pending_skb);
+
+ return 0;
+ }
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index 9aae45a452002..57553f9b4d1dc 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ }
+
+ gmux_data->iostart = res->start;
+- gmux_data->iolen = res->end - res->start;
++ gmux_data->iolen = resource_size(res);
+
+ if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
+ pr_err("gmux I/O region too small (%lu < %u)\n",
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index b89c5513243e8..beaf3a8d206f8 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -2956,8 +2956,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
+ char mybuf[64];
+ char *pbuf;
+
+- if (nbytes > 64)
+- nbytes = 64;
++ if (nbytes > 63)
++ nbytes = 63;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index 1421b1394d816..7d51ff4672d75 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -591,9 +591,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ * Commands like INQUIRY may transfer less data than
+ * requested by the initiator via bufflen. Set residual
+ * count to make upper layer aware of the actual amount
+- * of data returned.
++ * of data returned. There are cases when controller
++ * returns zero dataLen with non zero data - do not set
++ * residual count in that case.
+ */
+- scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
++ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
++ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ cmd->result = (DID_OK << 16);
+ break;
+
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 725e35167837e..cbb7947f366f9 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1772,11 +1772,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
+
+ BUG_ON(ffs->gadget);
+
+- if (ffs->epfiles)
++ if (ffs->epfiles) {
+ ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
++ ffs->epfiles = NULL;
++ }
+
+- if (ffs->ffs_eventfd)
++ if (ffs->ffs_eventfd) {
+ eventfd_ctx_put(ffs->ffs_eventfd);
++ ffs->ffs_eventfd = NULL;
++ }
+
+ kfree(ffs->raw_descs_data);
+ kfree(ffs->raw_strings);
+@@ -1789,7 +1793,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
+
+ ffs_data_clear(ffs);
+
+- ffs->epfiles = NULL;
+ ffs->raw_descs_data = NULL;
+ ffs->raw_descs = NULL;
+ ffs->raw_strings = NULL;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c9133df71e52b..dafb58f05c9fb 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -122,7 +122,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ /* Look for vendor-specific quirks */
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
+- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+ pdev->revision == 0x0) {
+@@ -157,6 +156,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+
++ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
++
+ if (pdev->vendor == PCI_VENDOR_ID_NEC)
+ xhci->quirks |= XHCI_NEC_HOST;
+
+diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
+index 0b3aa7c65857a..a3e1105c5c662 100644
+--- a/drivers/usb/mtu3/mtu3_gadget.c
++++ b/drivers/usb/mtu3/mtu3_gadget.c
+@@ -92,6 +92,13 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
+ interval = clamp_val(interval, 1, 16) - 1;
+ mult = usb_endpoint_maxp_mult(desc) - 1;
+ }
++ break;
++ case USB_SPEED_FULL:
++ if (usb_endpoint_xfer_isoc(desc))
++ interval = clamp_val(desc->bInterval, 1, 16);
++ else if (usb_endpoint_xfer_int(desc))
++ interval = clamp_val(desc->bInterval, 1, 255);
++
+ break;
+ default:
+ break; /*others are ignored */
+@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+ mreq->request.dma = DMA_ADDR_INVALID;
+ mreq->epnum = mep->epnum;
+ mreq->mep = mep;
++ INIT_LIST_HEAD(&mreq->list);
+ trace_mtu3_alloc_request(mreq);
+
+ return &mreq->request;
+diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
+index 3f414f91b5899..2ea3157ddb6e2 100644
+--- a/drivers/usb/mtu3/mtu3_qmu.c
++++ b/drivers/usb/mtu3/mtu3_qmu.c
+@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+ gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
+ }
+
++ /* prevent reorder, make sure GPD's HWO is set last */
++ mb();
+ gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
+
+ mreq->gpd = gpd;
+@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+ gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
+ ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
+ gpd->dw3_info = cpu_to_le32(ext_addr);
++ /* prevent reorder, make sure GPD's HWO is set last */
++ mb();
+ gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
+
+ mreq->gpd = gpd;
+@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
+ return;
+ }
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
+-
++ /* prevent reorder, make sure GPD's HWO is set last */
++ mb();
+ /* by pass the current GDP */
+ gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
+
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index 1a8d25f2e0412..3baea2ef33fbb 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -387,8 +387,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+ phys_addr_t end, int nid, bool exact_nid);
+ phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+- phys_addr_t align)
++static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
++ phys_addr_t align)
+ {
+ return memblock_phys_alloc_range(size, align, 0,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 4fc747b778eb6..33475d061823e 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -103,6 +103,7 @@ extern struct percpu_counter sctp_sockets_allocated;
+ int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+ struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+
++typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
+ void sctp_transport_walk_start(struct rhashtable_iter *iter);
+ void sctp_transport_walk_stop(struct rhashtable_iter *iter);
+ struct sctp_transport *sctp_transport_get_next(struct net *net,
+@@ -113,9 +114,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
+ struct net *net,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr, void *p);
+-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
+- int (*cb_done)(struct sctp_transport *, void *),
+- struct net *net, int *pos, void *p);
++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
++ struct net *net, int *pos, void *p);
+ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
+ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
+ struct sctp_info *info);
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 51d698f2656fc..be9ff0422c162 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1339,6 +1339,7 @@ struct sctp_endpoint {
+
+ u32 secid;
+ u32 peer_secid;
++ struct rcu_head rcu;
+ };
+
+ /* Recover the outter endpoint structure. */
+@@ -1354,7 +1355,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
+ struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
+ void sctp_endpoint_free(struct sctp_endpoint *);
+ void sctp_endpoint_put(struct sctp_endpoint *);
+-void sctp_endpoint_hold(struct sctp_endpoint *);
++int sctp_endpoint_hold(struct sctp_endpoint *ep);
+ void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
+ struct sctp_association *sctp_endpoint_lookup_assoc(
+ const struct sctp_endpoint *ep,
+diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
+index f6e3c8c9c7449..4fa4e979e948a 100644
+--- a/include/uapi/linux/nfc.h
++++ b/include/uapi/linux/nfc.h
+@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
+ #define NFC_SE_ENABLED 0x1
+
+ struct sockaddr_nfc {
+- sa_family_t sa_family;
++ __kernel_sa_family_t sa_family;
+ __u32 dev_idx;
+ __u32 target_idx;
+ __u32 nfc_protocol;
+@@ -271,14 +271,14 @@ struct sockaddr_nfc {
+
+ #define NFC_LLCP_MAX_SERVICE_NAME 63
+ struct sockaddr_nfc_llcp {
+- sa_family_t sa_family;
++ __kernel_sa_family_t sa_family;
+ __u32 dev_idx;
+ __u32 target_idx;
+ __u32 nfc_protocol;
+ __u8 dsap; /* Destination SAP, if known */
+ __u8 ssap; /* Source SAP to be bound to */
+ char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
+- size_t service_name_len;
++ __kernel_size_t service_name_len;
+ };
+
+ /* NFC socket protocols */
+diff --git a/init/Kconfig b/init/Kconfig
+index fc4c9f416fadb..13685bffef370 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1722,6 +1722,16 @@ config BPF_JIT_DEFAULT_ON
+ def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
+ depends on HAVE_EBPF_JIT && BPF_JIT
+
++config BPF_UNPRIV_DEFAULT_OFF
++ bool "Disable unprivileged BPF by default"
++ depends on BPF_SYSCALL
++ help
++ Disables unprivileged BPF by default by setting the corresponding
++ /proc/sys/kernel/unprivileged_bpf_disabled knob to 2. An admin can
++ still reenable it by setting it to 0 later on, or permanently
++ disable it by setting it to 1 (from which no other transition to
++ 0 is possible anymore).
++
+ source "kernel/bpf/preload/Kconfig"
+
+ config USERFAULTFD
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index bb9a9cb1f321e..209e6567cdab0 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -50,7 +50,8 @@ static DEFINE_SPINLOCK(map_idr_lock);
+ static DEFINE_IDR(link_idr);
+ static DEFINE_SPINLOCK(link_idr_lock);
+
+-int sysctl_unprivileged_bpf_disabled __read_mostly;
++int sysctl_unprivileged_bpf_disabled __read_mostly =
++ IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
+
+ static const struct bpf_map_ops * const bpf_map_types[] = {
+ #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index b9306d2bb4269..72ceb19574d0c 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -233,7 +233,27 @@ static int bpf_stats_handler(struct ctl_table *table, int write,
+ mutex_unlock(&bpf_stats_enabled_mutex);
+ return ret;
+ }
+-#endif
++
++static int bpf_unpriv_handler(struct ctl_table *table, int write,
++ void *buffer, size_t *lenp, loff_t *ppos)
++{
++ int ret, unpriv_enable = *(int *)table->data;
++ bool locked_state = unpriv_enable == 1;
++ struct ctl_table tmp = *table;
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ tmp.data = &unpriv_enable;
++ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
++ if (write && !ret) {
++ if (locked_state && unpriv_enable != 1)
++ return -EPERM;
++ *(int *)table->data = unpriv_enable;
++ }
++ return ret;
++}
++#endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */
+
+ /*
+ * /proc/sys support
+@@ -2626,10 +2646,9 @@ static struct ctl_table kern_table[] = {
+ .data = &sysctl_unprivileged_bpf_disabled,
+ .maxlen = sizeof(sysctl_unprivileged_bpf_disabled),
+ .mode = 0644,
+- /* only handle a transition from default "0" to "1" */
+- .proc_handler = proc_dointvec_minmax,
+- .extra1 = SYSCTL_ONE,
+- .extra2 = SYSCTL_ONE,
++ .proc_handler = bpf_unpriv_handler,
++ .extra1 = SYSCTL_ZERO,
++ .extra2 = &two,
+ },
+ {
+ .procname = "bpf_stats_enabled",
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 8267349afe231..e2f85a16fad9b 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -2003,6 +2003,10 @@ static int __init inet_init(void)
+
+ ip_init();
+
++ /* Initialise per-cpu ipv4 mibs */
++ if (init_ipv4_mibs())
++ panic("%s: Cannot init ipv4 mibs\n", __func__);
++
+ /* Setup TCP slab cache for open requests. */
+ tcp_init();
+
+@@ -2033,12 +2037,6 @@ static int __init inet_init(void)
+
+ if (init_inet_pernet_ops())
+ pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
+- /*
+- * Initialise per-cpu ipv4 mibs
+- */
+-
+- if (init_ipv4_mibs())
+- pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
+
+ ipv4_proc_init();
+
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8a1863146f34c..069551a04369e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1189,7 +1189,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
++ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
+index bb5f1650f11cb..c189b4c8a1823 100644
+--- a/net/ncsi/ncsi-netlink.c
++++ b/net/ncsi/ncsi-netlink.c
+@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
+ pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
+ if (!pnest)
+ return -ENOMEM;
+- nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
++ rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
++ if (rc) {
++ nla_nest_cancel(skb, pnest);
++ return rc;
++ }
+ if ((0x1 << np->id) == ndp->package_whitelist)
+ nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
+ cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
+diff --git a/net/sctp/diag.c b/net/sctp/diag.c
+index 493fc01e5d2b7..babadd6720a2b 100644
+--- a/net/sctp/diag.c
++++ b/net/sctp/diag.c
+@@ -292,9 +292,8 @@ out:
+ return err;
+ }
+
+-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
++static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+ {
+- struct sctp_endpoint *ep = tsp->asoc->ep;
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ struct sk_buff *skb = commp->skb;
+@@ -304,6 +303,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+ int err = 0;
+
+ lock_sock(sk);
++ if (ep != tsp->asoc->ep)
++ goto release;
+ list_for_each_entry(assoc, &ep->asocs, asocs) {
+ if (cb->args[4] < cb->args[1])
+ goto next;
+@@ -346,9 +347,8 @@ release:
+ return err;
+ }
+
+-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
++static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
+ {
+- struct sctp_endpoint *ep = tsp->asoc->ep;
+ struct sctp_comm_param *commp = p;
+ struct sock *sk = ep->base.sk;
+ const struct inet_diag_req_v2 *r = commp->r;
+@@ -507,8 +507,8 @@ skip:
+ if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
+ goto done;
+
+- sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
+- net, &pos, &commp);
++ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
++ net, &pos, &commp);
+ cb->args[2] = pos;
+
+ done:
1464 +diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
1465 +index 48c9c2c7602f7..efffde7f2328e 100644
1466 +--- a/net/sctp/endpointola.c
1467 ++++ b/net/sctp/endpointola.c
1468 +@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
1469 + }
1470 +
1471 + /* Final destructor for endpoint. */
1472 ++static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
1473 ++{
1474 ++ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
1475 ++ struct sock *sk = ep->base.sk;
1476 ++
1477 ++ sctp_sk(sk)->ep = NULL;
1478 ++ sock_put(sk);
1479 ++
1480 ++ kfree(ep);
1481 ++ SCTP_DBG_OBJCNT_DEC(ep);
1482 ++}
1483 ++
1484 + static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
1485 + {
1486 + struct sock *sk;
1487 +@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
1488 + if (sctp_sk(sk)->bind_hash)
1489 + sctp_put_port(sk);
1490 +
1491 +- sctp_sk(sk)->ep = NULL;
1492 +- /* Give up our hold on the sock */
1493 +- sock_put(sk);
1494 +-
1495 +- kfree(ep);
1496 +- SCTP_DBG_OBJCNT_DEC(ep);
1497 ++ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
1498 + }
1499 +
1500 + /* Hold a reference to an endpoint. */
1501 +-void sctp_endpoint_hold(struct sctp_endpoint *ep)
1502 ++int sctp_endpoint_hold(struct sctp_endpoint *ep)
1503 + {
1504 +- refcount_inc(&ep->base.refcnt);
1505 ++ return refcount_inc_not_zero(&ep->base.refcnt);
1506 + }
1507 +
1508 + /* Release a reference to an endpoint and clean up if there are
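
With destruction deferred through call_rcu(), a traverser may still find an endpoint whose refcount already hit zero, so sctp_endpoint_hold() now reports whether the reference was actually taken. The inc-not-zero idiom in portable C11 (userspace sketch; obj_get()/obj_put() are hypothetical stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj { atomic_int refcnt; };

    static bool obj_get(struct obj *o)   /* refcount_inc_not_zero() analogue */
    {
        int old = atomic_load(&o->refcnt);

        while (old != 0) {
            if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
                return true;             /* reference taken */
        }
        return false;                    /* already dying: do not resurrect */
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            puts("last ref: free deferred via call_rcu() in the kernel");
    }

    int main(void)
    {
        struct obj o;

        atomic_init(&o.refcnt, 1);
        printf("%d\n", obj_get(&o));     /* 1: alive, ref taken */
        obj_put(&o);
        obj_put(&o);                     /* count reaches zero */
        printf("%d\n", obj_get(&o));     /* 0: hold refused */
        return 0;
    }
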
1509 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1510 +index e872bc50bbe61..0a9e2c7d8e5f5 100644
1511 +--- a/net/sctp/socket.c
1512 ++++ b/net/sctp/socket.c
1513 +@@ -5223,11 +5223,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
1514 + }
1515 + EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
1516 +
1517 +-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
1518 +- int (*cb_done)(struct sctp_transport *, void *),
1519 +- struct net *net, int *pos, void *p) {
1520 ++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
1521 ++ struct net *net, int *pos, void *p)
1522 ++{
1523 + struct rhashtable_iter hti;
1524 + struct sctp_transport *tsp;
1525 ++ struct sctp_endpoint *ep;
1526 + int ret;
1527 +
1528 + again:
1529 +@@ -5236,26 +5237,32 @@ again:
1530 +
1531 + tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
1532 + for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
1533 +- ret = cb(tsp, p);
1534 +- if (ret)
1535 +- break;
1536 ++ ep = tsp->asoc->ep;
1537 ++ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
1538 ++ ret = cb(ep, tsp, p);
1539 ++ if (ret)
1540 ++ break;
1541 ++ sctp_endpoint_put(ep);
1542 ++ }
1543 + (*pos)++;
1544 + sctp_transport_put(tsp);
1545 + }
1546 + sctp_transport_walk_stop(&hti);
1547 +
1548 + if (ret) {
1549 +- if (cb_done && !cb_done(tsp, p)) {
1550 ++ if (cb_done && !cb_done(ep, tsp, p)) {
1551 + (*pos)++;
1552 ++ sctp_endpoint_put(ep);
1553 + sctp_transport_put(tsp);
1554 + goto again;
1555 + }
1556 ++ sctp_endpoint_put(ep);
1557 + sctp_transport_put(tsp);
1558 + }
1559 +
1560 + return ret;
1561 + }
1562 +-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
1563 ++EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
1564 +
1565 + /* 7.2.1 Association Status (SCTP_STATUS)
1566 +
1567 +diff --git a/net/smc/smc.h b/net/smc/smc.h
1568 +index d65e15f0c944c..e6919fe31617b 100644
1569 +--- a/net/smc/smc.h
1570 ++++ b/net/smc/smc.h
1571 +@@ -170,6 +170,11 @@ struct smc_connection {
1572 + u16 tx_cdc_seq; /* sequence # for CDC send */
1573 + u16 tx_cdc_seq_fin; /* sequence # - tx completed */
1574 + spinlock_t send_lock; /* protect wr_sends */
1575 ++ atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe
1576 ++ * - inc when post wqe,
1577 ++ * - dec on polled tx cqe
1578 ++ */
1579 ++ wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr */
1580 + struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
1581 + u32 tx_off; /* base offset in peer rmb */
1582 +
1583 +diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
1584 +index b1ce6ccbfaec8..0c490cdde6a49 100644
1585 +--- a/net/smc/smc_cdc.c
1586 ++++ b/net/smc/smc_cdc.c
1587 +@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
1588 + struct smc_sock *smc;
1589 + int diff;
1590 +
1591 +- if (!conn)
1592 +- /* already dismissed */
1593 +- return;
1594 +-
1595 + smc = container_of(conn, struct smc_sock, conn);
1596 + bh_lock_sock(&smc->sk);
1597 + if (!wc_status) {
1598 +@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
1599 + conn);
1600 + conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
1601 + }
1602 ++
1603 ++ if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
1604 ++ unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
1605 ++ wake_up(&conn->cdc_pend_tx_wq);
1606 ++ WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
1607 ++
1608 + smc_tx_sndbuf_nonfull(smc);
1609 + bh_unlock_sock(&smc->sk);
1610 + }
1611 +@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
1612 + conn->tx_cdc_seq++;
1613 + conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
1614 + smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
1615 ++
1616 ++ atomic_inc(&conn->cdc_pend_tx_wr);
1617 ++ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
1618 ++
1619 + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
1620 + if (!rc) {
1621 + smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
1622 +@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
1623 + } else {
1624 + conn->tx_cdc_seq--;
1625 + conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
1626 ++ atomic_dec(&conn->cdc_pend_tx_wr);
1627 + }
1628 +
1629 + return rc;
1630 +@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
1631 + peer->token = htonl(local->token);
1632 + peer->prod_flags.failover_validation = 1;
1633 +
1634 ++ /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
1635 ++ * can handle it properly
1636 ++ */
1637 ++ smc_cdc_add_pending_send(conn, pend);
1638 ++
1639 ++ atomic_inc(&conn->cdc_pend_tx_wr);
1640 ++ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
1641 ++
1642 + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
1643 ++ if (unlikely(rc))
1644 ++ atomic_dec(&conn->cdc_pend_tx_wr);
1645 ++
1646 + return rc;
1647 + }
1648 +
1649 +@@ -150,9 +168,11 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
1650 +
1651 + again:
1652 + link = conn->lnk;
1653 ++ if (!smc_wr_tx_link_hold(link))
1654 ++ return -ENOLINK;
1655 + rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend);
1656 + if (rc)
1657 +- return rc;
1658 ++ goto put_out;
1659 +
1660 + spin_lock_bh(&conn->send_lock);
1661 + if (link != conn->lnk) {
1662 +@@ -160,6 +180,7 @@ again:
1663 + spin_unlock_bh(&conn->send_lock);
1664 + smc_wr_tx_put_slot(link,
1665 + (struct smc_wr_tx_pend_priv *)pend);
1666 ++ smc_wr_tx_link_put(link);
1667 + if (again)
1668 + return -ENOLINK;
1669 + again = true;
1670 +@@ -167,6 +188,8 @@ again:
1671 + }
1672 + rc = smc_cdc_msg_send(conn, wr_buf, pend);
1673 + spin_unlock_bh(&conn->send_lock);
1674 ++put_out:
1675 ++ smc_wr_tx_link_put(link);
1676 + return rc;
1677 + }
1678 +
1679 +@@ -188,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
1680 + return rc;
1681 + }
1682 +
1683 +-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
1684 +- unsigned long data)
1685 ++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
1686 + {
1687 +- struct smc_connection *conn = (struct smc_connection *)data;
1688 +- struct smc_cdc_tx_pend *cdc_pend =
1689 +- (struct smc_cdc_tx_pend *)tx_pend;
1690 +-
1691 +- return cdc_pend->conn == conn;
1692 +-}
1693 +-
1694 +-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
1695 +-{
1696 +- struct smc_cdc_tx_pend *cdc_pend =
1697 +- (struct smc_cdc_tx_pend *)tx_pend;
1698 +-
1699 +- cdc_pend->conn = NULL;
1700 +-}
1701 +-
1702 +-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
1703 +-{
1704 +- struct smc_link *link = conn->lnk;
1705 +-
1706 +- smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
1707 +- smc_cdc_tx_filter, smc_cdc_tx_dismisser,
1708 +- (unsigned long)conn);
1709 ++ wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
1710 + }
1711 +
1712 + /* Send a SMC-D CDC header.
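
Rather than dismissing queued slots, each connection now counts in-flight CDC work requests and teardown simply waits for the count to drain to zero. The counter-plus-wakeup shape with pthreads (userspace sketch of the atomic_dec_and_test()/wake_up() pairing; names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
    static int pend_tx;                  /* plays the role of cdc_pend_tx_wr */

    static void tx_post(void)            /* before posting a work request */
    {
        pthread_mutex_lock(&lock);
        pend_tx++;
        pthread_mutex_unlock(&lock);
    }

    static void tx_complete(void)        /* from the completion handler */
    {
        pthread_mutex_lock(&lock);
        if (--pend_tx == 0)
            pthread_cond_broadcast(&drained); /* wake_up(&cdc_pend_tx_wq) */
        pthread_mutex_unlock(&lock);
    }

    static void wait_pend_tx(void)       /* smc_cdc_wait_pend_tx_wr() analogue */
    {
        pthread_mutex_lock(&lock);
        while (pend_tx)
            pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
    }

    static void *completer(void *arg)
    {
        (void)arg;
        tx_complete();
        return NULL;
    }

    int main(void)                       /* build with -lpthread */
    {
        pthread_t t;

        tx_post();
        pthread_create(&t, NULL, completer, NULL);
        wait_pend_tx();                  /* returns once the send completed */
        pthread_join(&t, NULL);
        puts("drained");
        return 0;
    }
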
1713 +diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
1714 +index 0a0a89abd38b2..696cc11f2303b 100644
1715 +--- a/net/smc/smc_cdc.h
1716 ++++ b/net/smc/smc_cdc.h
1717 +@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
1718 + struct smc_wr_buf **wr_buf,
1719 + struct smc_rdma_wr **wr_rdma_buf,
1720 + struct smc_cdc_tx_pend **pend);
1721 +-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
1722 ++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
1723 + int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
1724 + struct smc_cdc_tx_pend *pend);
1725 + int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
1726 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
1727 +index 3f1343dfa16ba..2a22dc85951ee 100644
1728 +--- a/net/smc/smc_core.c
1729 ++++ b/net/smc/smc_core.c
1730 +@@ -226,7 +226,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
1731 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1732 + struct smc_link *lnk = &lgr->lnk[i];
1733 +
1734 +- if (smc_link_usable(lnk))
1735 ++ if (smc_link_sendable(lnk))
1736 + lnk->state = SMC_LNK_INACTIVE;
1737 + }
1738 + wake_up_all(&lgr->llc_msg_waiter);
1739 +@@ -550,7 +550,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
1740 + to_lnk = &lgr->lnk[i];
1741 + break;
1742 + }
1743 +- if (!to_lnk) {
1744 ++ if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
1745 + smc_lgr_terminate_sched(lgr);
1746 + return NULL;
1747 + }
1748 +@@ -582,24 +582,26 @@ again:
1749 + read_unlock_bh(&lgr->conns_lock);
1750 + /* pre-fetch buffer outside of send_lock, might sleep */
1751 + rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
1752 +- if (rc) {
1753 +- smcr_link_down_cond_sched(to_lnk);
1754 +- return NULL;
1755 +- }
1756 ++ if (rc)
1757 ++ goto err_out;
1758 + /* avoid race with smcr_tx_sndbuf_nonempty() */
1759 + spin_lock_bh(&conn->send_lock);
1760 + conn->lnk = to_lnk;
1761 + rc = smc_switch_cursor(smc, pend, wr_buf);
1762 + spin_unlock_bh(&conn->send_lock);
1763 + sock_put(&smc->sk);
1764 +- if (rc) {
1765 +- smcr_link_down_cond_sched(to_lnk);
1766 +- return NULL;
1767 +- }
1768 ++ if (rc)
1769 ++ goto err_out;
1770 + goto again;
1771 + }
1772 + read_unlock_bh(&lgr->conns_lock);
1773 ++ smc_wr_tx_link_put(to_lnk);
1774 + return to_lnk;
1775 ++
1776 ++err_out:
1777 ++ smcr_link_down_cond_sched(to_lnk);
1778 ++ smc_wr_tx_link_put(to_lnk);
1779 ++ return NULL;
1780 + }
1781 +
1782 + static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
1783 +@@ -655,7 +657,7 @@ void smc_conn_free(struct smc_connection *conn)
1784 + smc_ism_unset_conn(conn);
1785 + tasklet_kill(&conn->rx_tsklet);
1786 + } else {
1787 +- smc_cdc_tx_dismiss_slots(conn);
1788 ++ smc_cdc_wait_pend_tx_wr(conn);
1789 + if (current_work() != &conn->abort_work)
1790 + cancel_work_sync(&conn->abort_work);
1791 + }
1792 +@@ -732,7 +734,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
1793 + smc_llc_link_clear(lnk, log);
1794 + smcr_buf_unmap_lgr(lnk);
1795 + smcr_rtoken_clear_link(lnk);
1796 +- smc_ib_modify_qp_reset(lnk);
1797 ++ smc_ib_modify_qp_error(lnk);
1798 + smc_wr_free_link(lnk);
1799 + smc_ib_destroy_queue_pair(lnk);
1800 + smc_ib_dealloc_protection_domain(lnk);
1801 +@@ -876,7 +878,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
1802 + else
1803 + tasklet_unlock_wait(&conn->rx_tsklet);
1804 + } else {
1805 +- smc_cdc_tx_dismiss_slots(conn);
1806 ++ smc_cdc_wait_pend_tx_wr(conn);
1807 + }
1808 + smc_lgr_unregister_conn(conn);
1809 + smc_close_active_abort(smc);
1810 +@@ -1000,11 +1002,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
1811 + /* Called when an SMCR device is removed or the smc module is unloaded.
1812 + * If smcibdev is given, all SMCR link groups using this device are terminated.
1813 + * If smcibdev is NULL, all SMCR link groups are terminated.
1814 ++ *
1815 ++ * We must wait here for the QPs to be destroyed before we destroy the
1816 ++ * CQs, or we won't receive any CQEs and cdc_pend_tx_wr cannot reach 0,
1817 ++ * so the smc_sock cannot be released.
1818 + */
1819 + void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1820 + {
1821 + struct smc_link_group *lgr, *lg;
1822 + LIST_HEAD(lgr_free_list);
1823 ++ LIST_HEAD(lgr_linkdown_list);
1824 + int i;
1825 +
1826 + spin_lock_bh(&smc_lgr_list.lock);
1827 +@@ -1016,7 +1023,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1828 + list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
1829 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1830 + if (lgr->lnk[i].smcibdev == smcibdev)
1831 +- smcr_link_down_cond_sched(&lgr->lnk[i]);
1832 ++ list_move_tail(&lgr->list, &lgr_linkdown_list);
1833 + }
1834 + }
1835 + }
1836 +@@ -1028,6 +1035,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
1837 + __smc_lgr_terminate(lgr, false);
1838 + }
1839 +
1840 ++ list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
1841 ++ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1842 ++ if (lgr->lnk[i].smcibdev == smcibdev) {
1843 ++ mutex_lock(&lgr->llc_conf_mutex);
1844 ++ smcr_link_down_cond(&lgr->lnk[i]);
1845 ++ mutex_unlock(&lgr->llc_conf_mutex);
1846 ++ }
1847 ++ }
1848 ++ }
1849 ++
1850 + if (smcibdev) {
1851 + if (atomic_read(&smcibdev->lnk_cnt))
1852 + wait_event(smcibdev->lnks_deleted,
1853 +@@ -1127,7 +1144,6 @@ static void smcr_link_down(struct smc_link *lnk)
1854 + if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
1855 + return;
1856 +
1857 +- smc_ib_modify_qp_reset(lnk);
1858 + to_lnk = smc_switch_conns(lgr, lnk, true);
1859 + if (!to_lnk) { /* no backup link available */
1860 + smcr_link_clear(lnk, true);
1861 +@@ -1355,6 +1371,7 @@ create:
1862 + conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
1863 + conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
1864 + conn->urg_state = SMC_URG_READ;
1865 ++ init_waitqueue_head(&conn->cdc_pend_tx_wq);
1866 + INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
1867 + if (ini->is_smcd) {
1868 + conn->rx_off = sizeof(struct smcd_cdc_msg);
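
smc_smcr_terminate_all() now collects affected link groups onto a private list under the spinlock and only afterwards takes the heavier llc_conf_mutex to bring the links down. A miniature of that collect-then-process pattern (hypothetical names; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    struct node { int id; struct node *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *live;            /* protected by list_lock */

    static void terminate_matching(int key)
    {
        struct node *work = NULL, *n, **pp;

        pthread_mutex_lock(&list_lock);  /* short critical section: collect */
        for (pp = &live; (n = *pp); ) {
            if (n->id == key) {
                *pp = n->next;
                n->next = work;          /* move onto the private list */
                work = n;
            } else {
                pp = &n->next;
            }
        }
        pthread_mutex_unlock(&list_lock);

        while ((n = work)) {             /* heavy work outside the lock */
            work = n->next;
            printf("tearing down %d\n", n->id);
        }
    }

    int main(void)
    {
        struct node b = { 2, NULL }, a = { 1, &b };

        live = &a;
        terminate_matching(1);           /* prints: tearing down 1 */
        return 0;
    }
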
1869 +diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
1870 +index 4745a9a5a28f5..9364d0f35ccec 100644
1871 +--- a/net/smc/smc_core.h
1872 ++++ b/net/smc/smc_core.h
1873 +@@ -359,6 +359,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
1874 + return true;
1875 + }
1876 +
1877 ++static inline bool smc_link_sendable(struct smc_link *lnk)
1878 ++{
1879 ++ return smc_link_usable(lnk) &&
1880 ++ lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
1881 ++}
1882 ++
1883 + static inline bool smc_link_active(struct smc_link *lnk)
1884 + {
1885 + return lnk->state == SMC_LNK_ACTIVE;
1886 +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
1887 +index fc766b537ac7a..f1ffbd414602e 100644
1888 +--- a/net/smc/smc_ib.c
1889 ++++ b/net/smc/smc_ib.c
1890 +@@ -100,12 +100,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
1891 + IB_QP_MAX_QP_RD_ATOMIC);
1892 + }
1893 +
1894 +-int smc_ib_modify_qp_reset(struct smc_link *lnk)
1895 ++int smc_ib_modify_qp_error(struct smc_link *lnk)
1896 + {
1897 + struct ib_qp_attr qp_attr;
1898 +
1899 + memset(&qp_attr, 0, sizeof(qp_attr));
1900 +- qp_attr.qp_state = IB_QPS_RESET;
1901 ++ qp_attr.qp_state = IB_QPS_ERR;
1902 + return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
1903 + }
1904 +
1905 +diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
1906 +index 2ce481187dd0b..f90d15eae2aab 100644
1907 +--- a/net/smc/smc_ib.h
1908 ++++ b/net/smc/smc_ib.h
1909 +@@ -74,6 +74,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
1910 + int smc_ib_ready_link(struct smc_link *lnk);
1911 + int smc_ib_modify_qp_rts(struct smc_link *lnk);
1912 + int smc_ib_modify_qp_reset(struct smc_link *lnk);
1913 ++int smc_ib_modify_qp_error(struct smc_link *lnk);
1914 + long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
1915 + int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
1916 + struct smc_buf_desc *buf_slot, u8 link_idx);
1917 +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
1918 +index d8fe4e1f24d1f..ee1f0fdba0855 100644
1919 +--- a/net/smc/smc_llc.c
1920 ++++ b/net/smc/smc_llc.c
1921 +@@ -383,9 +383,11 @@ int smc_llc_send_confirm_link(struct smc_link *link,
1922 + struct smc_wr_buf *wr_buf;
1923 + int rc;
1924 +
1925 ++ if (!smc_wr_tx_link_hold(link))
1926 ++ return -ENOLINK;
1927 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
1928 + if (rc)
1929 +- return rc;
1930 ++ goto put_out;
1931 + confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
1932 + memset(confllc, 0, sizeof(*confllc));
1933 + confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
1934 +@@ -402,6 +404,8 @@ int smc_llc_send_confirm_link(struct smc_link *link,
1935 + confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
1936 + /* send llc message */
1937 + rc = smc_wr_tx_send(link, pend);
1938 ++put_out:
1939 ++ smc_wr_tx_link_put(link);
1940 + return rc;
1941 + }
1942 +
1943 +@@ -415,9 +419,11 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
1944 + struct smc_link *link;
1945 + int i, rc, rtok_ix;
1946 +
1947 ++ if (!smc_wr_tx_link_hold(send_link))
1948 ++ return -ENOLINK;
1949 + rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
1950 + if (rc)
1951 +- return rc;
1952 ++ goto put_out;
1953 + rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
1954 + memset(rkeyllc, 0, sizeof(*rkeyllc));
1955 + rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
1956 +@@ -444,6 +450,8 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
1957 + (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
1958 + /* send llc message */
1959 + rc = smc_wr_tx_send(send_link, pend);
1960 ++put_out:
1961 ++ smc_wr_tx_link_put(send_link);
1962 + return rc;
1963 + }
1964 +
1965 +@@ -456,9 +464,11 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
1966 + struct smc_wr_buf *wr_buf;
1967 + int rc;
1968 +
1969 ++ if (!smc_wr_tx_link_hold(link))
1970 ++ return -ENOLINK;
1971 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
1972 + if (rc)
1973 +- return rc;
1974 ++ goto put_out;
1975 + rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
1976 + memset(rkeyllc, 0, sizeof(*rkeyllc));
1977 + rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
1978 +@@ -467,6 +477,8 @@ static int smc_llc_send_delete_rkey(struct smc_link *link,
1979 + rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
1980 + /* send llc message */
1981 + rc = smc_wr_tx_send(link, pend);
1982 ++put_out:
1983 ++ smc_wr_tx_link_put(link);
1984 + return rc;
1985 + }
1986 +
1987 +@@ -480,9 +492,11 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
1988 + struct smc_wr_buf *wr_buf;
1989 + int rc;
1990 +
1991 ++ if (!smc_wr_tx_link_hold(link))
1992 ++ return -ENOLINK;
1993 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
1994 + if (rc)
1995 +- return rc;
1996 ++ goto put_out;
1997 + addllc = (struct smc_llc_msg_add_link *)wr_buf;
1998 +
1999 + memset(addllc, 0, sizeof(*addllc));
2000 +@@ -504,6 +518,8 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
2001 + }
2002 + /* send llc message */
2003 + rc = smc_wr_tx_send(link, pend);
2004 ++put_out:
2005 ++ smc_wr_tx_link_put(link);
2006 + return rc;
2007 + }
2008 +
2009 +@@ -517,9 +533,11 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
2010 + struct smc_wr_buf *wr_buf;
2011 + int rc;
2012 +
2013 ++ if (!smc_wr_tx_link_hold(link))
2014 ++ return -ENOLINK;
2015 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
2016 + if (rc)
2017 +- return rc;
2018 ++ goto put_out;
2019 + delllc = (struct smc_llc_msg_del_link *)wr_buf;
2020 +
2021 + memset(delllc, 0, sizeof(*delllc));
2022 +@@ -536,6 +554,8 @@ int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
2023 + delllc->reason = htonl(reason);
2024 + /* send llc message */
2025 + rc = smc_wr_tx_send(link, pend);
2026 ++put_out:
2027 ++ smc_wr_tx_link_put(link);
2028 + return rc;
2029 + }
2030 +
2031 +@@ -547,9 +567,11 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
2032 + struct smc_wr_buf *wr_buf;
2033 + int rc;
2034 +
2035 ++ if (!smc_wr_tx_link_hold(link))
2036 ++ return -ENOLINK;
2037 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
2038 + if (rc)
2039 +- return rc;
2040 ++ goto put_out;
2041 + testllc = (struct smc_llc_msg_test_link *)wr_buf;
2042 + memset(testllc, 0, sizeof(*testllc));
2043 + testllc->hd.common.type = SMC_LLC_TEST_LINK;
2044 +@@ -557,6 +579,8 @@ static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
2045 + memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
2046 + /* send llc message */
2047 + rc = smc_wr_tx_send(link, pend);
2048 ++put_out:
2049 ++ smc_wr_tx_link_put(link);
2050 + return rc;
2051 + }
2052 +
2053 +@@ -567,13 +591,16 @@ static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
2054 + struct smc_wr_buf *wr_buf;
2055 + int rc;
2056 +
2057 +- if (!smc_link_usable(link))
2058 ++ if (!smc_wr_tx_link_hold(link))
2059 + return -ENOLINK;
2060 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
2061 + if (rc)
2062 +- return rc;
2063 ++ goto put_out;
2064 + memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
2065 +- return smc_wr_tx_send(link, pend);
2066 ++ rc = smc_wr_tx_send(link, pend);
2067 ++put_out:
2068 ++ smc_wr_tx_link_put(link);
2069 ++ return rc;
2070 + }
2071 +
2072 + /* schedule an llc send on link, may wait for buffers,
2073 +@@ -586,13 +613,16 @@ static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
2074 + struct smc_wr_buf *wr_buf;
2075 + int rc;
2076 +
2077 +- if (!smc_link_usable(link))
2078 ++ if (!smc_wr_tx_link_hold(link))
2079 + return -ENOLINK;
2080 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
2081 + if (rc)
2082 +- return rc;
2083 ++ goto put_out;
2084 + memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
2085 +- return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
2086 ++ rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
2087 ++put_out:
2088 ++ smc_wr_tx_link_put(link);
2089 ++ return rc;
2090 + }
2091 +
2092 + /********************************* receive ***********************************/
2093 +@@ -672,9 +702,11 @@ static int smc_llc_add_link_cont(struct smc_link *link,
2094 + struct smc_buf_desc *rmb;
2095 + u8 n;
2096 +
2097 ++ if (!smc_wr_tx_link_hold(link))
2098 ++ return -ENOLINK;
2099 + rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
2100 + if (rc)
2101 +- return rc;
2102 ++ goto put_out;
2103 + addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
2104 + memset(addc_llc, 0, sizeof(*addc_llc));
2105 +
2106 +@@ -706,7 +738,10 @@ static int smc_llc_add_link_cont(struct smc_link *link,
2107 + addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
2108 + if (lgr->role == SMC_CLNT)
2109 + addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
2110 +- return smc_wr_tx_send(link, pend);
2111 ++ rc = smc_wr_tx_send(link, pend);
2112 ++put_out:
2113 ++ smc_wr_tx_link_put(link);
2114 ++ return rc;
2115 + }
2116 +
2117 + static int smc_llc_cli_rkey_exchange(struct smc_link *link,
2118 +@@ -1323,7 +1358,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
2119 + delllc.reason = htonl(rsn);
2120 +
2121 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2122 +- if (!smc_link_usable(&lgr->lnk[i]))
2123 ++ if (!smc_link_sendable(&lgr->lnk[i]))
2124 + continue;
2125 + if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
2126 + break;
2127 +diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
2128 +index ff02952b3d03e..52ef1fca0b604 100644
2129 +--- a/net/smc/smc_tx.c
2130 ++++ b/net/smc/smc_tx.c
2131 +@@ -479,7 +479,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn,
2132 + /* Wakeup sndbuf consumers from any context (IRQ or process)
2133 + * since there is more data to transmit; usable snd_wnd as max transmit
2134 + */
2135 +-static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
2136 ++static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
2137 + {
2138 + struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
2139 + struct smc_link *link = conn->lnk;
2140 +@@ -488,8 +488,11 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
2141 + struct smc_wr_buf *wr_buf;
2142 + int rc;
2143 +
2144 ++ if (!link || !smc_wr_tx_link_hold(link))
2145 ++ return -ENOLINK;
2146 + rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
2147 + if (rc < 0) {
2148 ++ smc_wr_tx_link_put(link);
2149 + if (rc == -EBUSY) {
2150 + struct smc_sock *smc =
2151 + container_of(conn, struct smc_sock, conn);
2152 +@@ -530,22 +533,7 @@ static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
2153 +
2154 + out_unlock:
2155 + spin_unlock_bh(&conn->send_lock);
2156 +- return rc;
2157 +-}
2158 +-
2159 +-static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
2160 +-{
2161 +- struct smc_link *link = conn->lnk;
2162 +- int rc = -ENOLINK;
2163 +-
2164 +- if (!link)
2165 +- return rc;
2166 +-
2167 +- atomic_inc(&link->wr_tx_refcnt);
2168 +- if (smc_link_usable(link))
2169 +- rc = _smcr_tx_sndbuf_nonempty(conn);
2170 +- if (atomic_dec_and_test(&link->wr_tx_refcnt))
2171 +- wake_up_all(&link->wr_tx_wait);
2172 ++ smc_wr_tx_link_put(link);
2173 + return rc;
2174 + }
2175 +
2176 +diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
2177 +index 9dbe4804853e0..5a81f8c9ebf90 100644
2178 +--- a/net/smc/smc_wr.c
2179 ++++ b/net/smc/smc_wr.c
2180 +@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
2181 + }
2182 +
2183 + /* wait till all pending tx work requests on the given link are completed */
2184 +-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
2185 ++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
2186 + {
2187 +- if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
2188 +- SMC_WR_TX_WAIT_PENDING_TIME))
2189 +- return 0;
2190 +- else /* timeout */
2191 +- return -EPIPE;
2192 ++ wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
2193 + }
2194 +
2195 + static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
2196 +@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
2197 + struct smc_wr_tx_pend pnd_snd;
2198 + struct smc_link *link;
2199 + u32 pnd_snd_idx;
2200 +- int i;
2201 +
2202 + link = wc->qp->qp_context;
2203 +
2204 +@@ -115,14 +110,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
2205 + if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
2206 + return;
2207 + if (wc->status) {
2208 +- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
2209 +- /* clear full struct smc_wr_tx_pend including .priv */
2210 +- memset(&link->wr_tx_pends[i], 0,
2211 +- sizeof(link->wr_tx_pends[i]));
2212 +- memset(&link->wr_tx_bufs[i], 0,
2213 +- sizeof(link->wr_tx_bufs[i]));
2214 +- clear_bit(i, link->wr_tx_mask);
2215 +- }
2216 + /* terminate link */
2217 + smcr_link_down_cond_sched(link);
2218 + }
2219 +@@ -169,7 +156,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
2220 + static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
2221 + {
2222 + *idx = link->wr_tx_cnt;
2223 +- if (!smc_link_usable(link))
2224 ++ if (!smc_link_sendable(link))
2225 + return -ENOLINK;
2226 + for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
2227 + if (!test_and_set_bit(*idx, link->wr_tx_mask))
2228 +@@ -212,7 +199,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
2229 + } else {
2230 + rc = wait_event_interruptible_timeout(
2231 + link->wr_tx_wait,
2232 +- !smc_link_usable(link) ||
2233 ++ !smc_link_sendable(link) ||
2234 + lgr->terminating ||
2235 + (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
2236 + SMC_WR_TX_WAIT_FREE_SLOT_TIME);
2237 +@@ -288,18 +275,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
2238 + unsigned long timeout)
2239 + {
2240 + struct smc_wr_tx_pend *pend;
2241 ++ u32 pnd_idx;
2242 + int rc;
2243 +
2244 + pend = container_of(priv, struct smc_wr_tx_pend, priv);
2245 + pend->compl_requested = 1;
2246 +- init_completion(&link->wr_tx_compl[pend->idx]);
2247 ++ pnd_idx = pend->idx;
2248 ++ init_completion(&link->wr_tx_compl[pnd_idx]);
2249 +
2250 + rc = smc_wr_tx_send(link, priv);
2251 + if (rc)
2252 + return rc;
2253 + /* wait for completion by smc_wr_tx_process_cqe() */
2254 + rc = wait_for_completion_interruptible_timeout(
2255 +- &link->wr_tx_compl[pend->idx], timeout);
2256 ++ &link->wr_tx_compl[pnd_idx], timeout);
2257 + if (rc <= 0)
2258 + rc = -ENODATA;
2259 + if (rc > 0)
2260 +@@ -349,25 +338,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
2261 + return rc;
2262 + }
2263 +
2264 +-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
2265 +- smc_wr_tx_filter filter,
2266 +- smc_wr_tx_dismisser dismisser,
2267 +- unsigned long data)
2268 +-{
2269 +- struct smc_wr_tx_pend_priv *tx_pend;
2270 +- struct smc_wr_rx_hdr *wr_tx;
2271 +- int i;
2272 +-
2273 +- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
2274 +- wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
2275 +- if (wr_tx->type != wr_tx_hdr_type)
2276 +- continue;
2277 +- tx_pend = &link->wr_tx_pends[i].priv;
2278 +- if (filter(tx_pend, data))
2279 +- dismisser(tx_pend);
2280 +- }
2281 +-}
2282 +-
2283 + /****************************** receive queue ********************************/
2284 +
2285 + int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
2286 +@@ -572,10 +542,7 @@ void smc_wr_free_link(struct smc_link *lnk)
2287 + smc_wr_wakeup_reg_wait(lnk);
2288 + smc_wr_wakeup_tx_wait(lnk);
2289 +
2290 +- if (smc_wr_tx_wait_no_pending_sends(lnk))
2291 +- memset(lnk->wr_tx_mask, 0,
2292 +- BITS_TO_LONGS(SMC_WR_BUF_CNT) *
2293 +- sizeof(*lnk->wr_tx_mask));
2294 ++ smc_wr_tx_wait_no_pending_sends(lnk);
2295 + wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
2296 + wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
2297 +
2298 +diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
2299 +index 423b8709f1c9e..cb58e60078f57 100644
2300 +--- a/net/smc/smc_wr.h
2301 ++++ b/net/smc/smc_wr.h
2302 +@@ -22,7 +22,6 @@
2303 + #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
2304 +
2305 + #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
2306 +-#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ)
2307 +
2308 + #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
2309 +
2310 +@@ -60,6 +59,20 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
2311 + atomic_long_set(wr_tx_id, val);
2312 + }
2313 +
2314 ++static inline bool smc_wr_tx_link_hold(struct smc_link *link)
2315 ++{
2316 ++ if (!smc_link_sendable(link))
2317 ++ return false;
2318 ++ atomic_inc(&link->wr_tx_refcnt);
2319 ++ return true;
2320 ++}
2321 ++
2322 ++static inline void smc_wr_tx_link_put(struct smc_link *link)
2323 ++{
2324 ++ if (atomic_dec_and_test(&link->wr_tx_refcnt))
2325 ++ wake_up_all(&link->wr_tx_wait);
2326 ++}
2327 ++
2328 + static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
2329 + {
2330 + wake_up_all(&lnk->wr_tx_wait);
2331 +@@ -108,7 +121,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
2332 + smc_wr_tx_filter filter,
2333 + smc_wr_tx_dismisser dismisser,
2334 + unsigned long data);
2335 +-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
2336 ++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
2337 +
2338 + int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
2339 + int smc_wr_rx_post_init(struct smc_link *link);
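
The new hold/put inlines bracket every send path: a sender first checks that the link is still sendable, then pins it with wr_tx_refcnt, and the final put wakes anyone waiting in smc_wr_free_link(). A compact C11 analogue, assuming only what the inlines above show (the check-then-increment is not atomic here either; the kernel closes that window by also waiting for the refcount to drain during teardown):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct link {
        atomic_bool sendable;            /* stands in for smc_link_sendable() */
        atomic_int  tx_refcnt;           /* stands in for wr_tx_refcnt */
    };

    static bool link_hold(struct link *l)
    {
        if (!atomic_load(&l->sendable))
            return false;                /* callers return -ENOLINK */
        atomic_fetch_add(&l->tx_refcnt, 1);
        return true;
    }

    static void link_put(struct link *l)
    {
        if (atomic_fetch_sub(&l->tx_refcnt, 1) == 1)
            puts("last sender gone: wake_up_all(&link->wr_tx_wait)");
    }

    int main(void)
    {
        struct link l;

        atomic_init(&l.sendable, true);
        atomic_init(&l.tx_refcnt, 0);
        if (link_hold(&l)) {
            /* ... post the send while the link is pinned ... */
            link_put(&l);
        }
        return 0;
    }
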
2340 +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
2341 +index a4ca050815aba..dc1d3696af6b8 100755
2342 +--- a/scripts/recordmcount.pl
2343 ++++ b/scripts/recordmcount.pl
2344 +@@ -252,7 +252,7 @@ if ($arch eq "x86_64") {
2345 +
2346 + } elsif ($arch eq "s390" && $bits == 64) {
2347 + if ($cc =~ /-DCC_USING_HOTPATCH/) {
2348 +- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
2349 ++ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
2350 + $mcount_adjust = 0;
2351 + } else {
2352 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
2353 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2354 +index f32026bc96b42..ff2191ae53528 100644
2355 +--- a/security/selinux/hooks.c
2356 ++++ b/security/selinux/hooks.c
2357 +@@ -5665,7 +5665,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
2358 + struct common_audit_data ad;
2359 + struct lsm_network_audit net = {0,};
2360 + char *addrp;
2361 +- u8 proto;
2362 ++ u8 proto = 0;
2363 +
2364 + if (sk == NULL)
2365 + return NF_ACCEPT;
2366 +diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
2367 +index cd458e10cf2af..11dd8260c9cc7 100644
2368 +--- a/security/tomoyo/util.c
2369 ++++ b/security/tomoyo/util.c
2370 +@@ -1046,10 +1046,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
2371 + return false;
2372 + if (!domain)
2373 + return true;
2374 ++ if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
2375 ++ return false;
2376 + list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
2377 + srcu_read_lock_held(&tomoyo_ss)) {
2378 + u16 perm;
2379 +- u8 i;
2380 +
2381 + if (ptr->is_deleted)
2382 + continue;
2383 +@@ -1060,23 +1061,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
2384 + */
2385 + switch (ptr->type) {
2386 + case TOMOYO_TYPE_PATH_ACL:
2387 +- data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
2388 ++ perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
2389 + break;
2390 + case TOMOYO_TYPE_PATH2_ACL:
2391 +- data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
2392 ++ perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
2393 + break;
2394 + case TOMOYO_TYPE_PATH_NUMBER_ACL:
2395 +- data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
2396 ++ perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
2397 + ->perm);
2398 + break;
2399 + case TOMOYO_TYPE_MKDEV_ACL:
2400 +- data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
2401 ++ perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
2402 + break;
2403 + case TOMOYO_TYPE_INET_ACL:
2404 +- data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
2405 ++ perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
2406 + break;
2407 + case TOMOYO_TYPE_UNIX_ACL:
2408 +- data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
2409 ++ perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
2410 + break;
2411 + case TOMOYO_TYPE_MANUAL_TASK_ACL:
2412 + perm = 0;
2413 +@@ -1084,21 +1085,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
2414 + default:
2415 + perm = 1;
2416 + }
2417 +- for (i = 0; i < 16; i++)
2418 +- if (perm & (1 << i))
2419 +- count++;
2420 ++ count += hweight16(perm);
2421 + }
2422 + if (count < tomoyo_profile(domain->ns, domain->profile)->
2423 + pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
2424 + return true;
2425 +- if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
2426 +- domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
2427 +- /* r->granted = false; */
2428 +- tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
2429 ++ WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
2430 ++ /* r->granted = false; */
2431 ++ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
2432 + #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
2433 +- pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
2434 +- domain->domainname->name);
2435 ++ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
2436 ++ domain->domainname->name);
2437 + #endif
2438 +- }
2439 + return false;
2440 + }
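
Two independent cleanups above: the data_race() annotation now wraps only the racy load, with the assignment outside it, and the hand-rolled bit loop becomes hweight16(); the quota-warned flag is also checked early with READ_ONCE() so an already-warned domain skips the ACL walk entirely. For reference, a sketch equivalent to the loop hweight16() replaces:

    #include <stdio.h>
    #include <stdint.h>

    /* counts set bits the way the removed 16-iteration loop did */
    static unsigned int hweight16_sketch(uint16_t perm)
    {
        unsigned int count = 0;

        while (perm) {
            count += perm & 1;
            perm >>= 1;
        }
        return count;
    }

    int main(void)
    {
        printf("%u\n", hweight16_sketch(0x00ff)); /* 8 */
        printf("%u\n", hweight16_sketch(0x8001)); /* 2 */
        return 0;
    }
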
2441 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
2442 +index 1d727387cb205..5109d01619eed 100644
2443 +--- a/tools/perf/builtin-script.c
2444 ++++ b/tools/perf/builtin-script.c
2445 +@@ -2354,7 +2354,7 @@ static int process_switch_event(struct perf_tool *tool,
2446 + if (perf_event__process_switch(tool, event, sample, machine) < 0)
2447 + return -1;
2448 +
2449 +- if (scripting_ops && scripting_ops->process_switch)
2450 ++ if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
2451 + scripting_ops->process_switch(event, sample, machine);
2452 +
2453 + if (!script->show_switch_events)
2454 +diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
2455 +index c66da6ffd6d8d..7badaf215de28 100644
2456 +--- a/tools/testing/selftests/net/udpgso.c
2457 ++++ b/tools/testing/selftests/net/udpgso.c
2458 +@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
2459 + },
2460 + {
2461 + /* send max number of min sized segments */
2462 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
2463 ++ .tlen = UDP_MAX_SEGMENTS,
2464 + .gso_len = 1,
2465 +- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
2466 ++ .r_num_mss = UDP_MAX_SEGMENTS,
2467 + },
2468 + {
2469 + /* send max number + 1 of min sized segments: fail */
2470 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
2471 ++ .tlen = UDP_MAX_SEGMENTS + 1,
2472 + .gso_len = 1,
2473 + .tfail = true,
2474 + },
2475 +@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
2476 + },
2477 + {
2478 + /* send max number of min sized segments */
2479 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
2480 ++ .tlen = UDP_MAX_SEGMENTS,
2481 + .gso_len = 1,
2482 +- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
2483 ++ .r_num_mss = UDP_MAX_SEGMENTS,
2484 + },
2485 + {
2486 + /* send max number + 1 of min sized segments: fail */
2487 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
2488 ++ .tlen = UDP_MAX_SEGMENTS + 1,
2489 + .gso_len = 1,
2490 + .tfail = true,
2491 + },
2492 +diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
2493 +index 17512a43885e7..f1fdaa2702913 100644
2494 +--- a/tools/testing/selftests/net/udpgso_bench_tx.c
2495 ++++ b/tools/testing/selftests/net/udpgso_bench_tx.c
2496 +@@ -419,6 +419,7 @@ static void usage(const char *filepath)
2497 +
2498 + static void parse_opts(int argc, char **argv)
2499 + {
2500 ++ const char *bind_addr = NULL;
2501 + int max_len, hdrlen;
2502 + int c;
2503 +
2504 +@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
2505 + cfg_cpu = strtol(optarg, NULL, 0);
2506 + break;
2507 + case 'D':
2508 +- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
2509 ++ bind_addr = optarg;
2510 + break;
2511 + case 'l':
2512 + cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
2513 +@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
2514 + }
2515 + }
2516 +
2517 ++ if (!bind_addr)
2518 ++ bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
2519 ++
2520 ++ setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
2521 ++
2522 + if (optind != argc)
2523 + usage(argv[0]);
2524 +