Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.15 commit in: /
Date: Wed, 05 Jan 2022 12:52:19
Message-Id: 1641387126.872a55e784c136c677067309ffce18b18c089f41.mpagano@gentoo
commit: 872a55e784c136c677067309ffce18b18c089f41
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 5 12:52:06 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 5 12:52:06 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=872a55e7

Linux patch 5.15.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

1012_linux-5.15.13.patch | 3271 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 3271 insertions(+)

diff --git a/1012_linux-5.15.13.patch b/1012_linux-5.15.13.patch
new file mode 100644
index 00000000..eac4d342
--- /dev/null
+++ b/1012_linux-5.15.13.patch
@@ -0,0 +1,3271 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index a454f438bd621..8ff6dafafdf8d 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1690,6 +1690,8 @@
+ architectures force reset to be always executed
+ i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port
++ i8042.probe_defer
++ [HW] Allow deferred probing upon i8042 probe errors
+
+ i810= [HW,DRM]
+
+diff --git a/Makefile b/Makefile
+index 474b2a2292ca4..0964b940b8890 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Trick or Treat
+
+diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
+index a6f3b179e8a94..27218eabbf9a0 100644
+--- a/arch/arm/include/asm/efi.h
++++ b/arch/arm/include/asm/efi.h
+@@ -17,7 +17,6 @@
+
+ #ifdef CONFIG_EFI
+ void efi_init(void);
+-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index d3e1825337be3..ad55079abe476 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -14,7 +14,6 @@
+
+ #ifdef CONFIG_EFI
+ extern void efi_init(void);
+-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+ #else
+ #define efi_init()
+ #endif
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 747c328fb8862..197cb8480350c 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -729,6 +729,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ }
+ mmap_read_unlock(current->mm);
+ }
++ /* CPU could not fetch instruction, so clear stale IIR value. */
++ regs->iir = 0xbaadf00d;
+ fallthrough;
+ case 27:
+ /* Data memory protection ID trap */
+diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
+index bf251191e78d9..32bfb215c4858 100644
+--- a/arch/powerpc/mm/ptdump/ptdump.c
++++ b/arch/powerpc/mm/ptdump/ptdump.c
+@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
+ {
+ pte_t pte = __pte(st->current_flags);
+
+- if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
++ if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
+ return;
+
+ if (!pte_write(pte) || !pte_exec(pte))
+diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
+index 49b398fe99f1b..cc4f6787f9371 100644
+--- a/arch/riscv/include/asm/efi.h
++++ b/arch/riscv/include/asm/efi.h
+@@ -13,7 +13,6 @@
+
+ #ifdef CONFIG_EFI
+ extern void efi_init(void);
+-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+ #else
+ #define efi_init()
+ #endif
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 4d0b126835b8a..63158fd558567 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
+
+ extern void parse_efi_setup(u64 phys_addr, u32 data_len);
+
+-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+-
+ extern void efi_thunk_runtime_setup(void);
+ efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
+ unsigned long descriptor_size,
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index 340515f54498c..47bc74a8c7b6f 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
+ BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+- alloc->free_async_space += size + sizeof(struct binder_buffer);
++ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index ada7bc19118ac..a919f5daacd91 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -415,10 +415,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+ }
+ }
+
++union gc_info {
++ struct gc_info_v1_0 v1;
++ struct gc_info_v2_0 v2;
++};
++
+ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+ {
+ struct binary_header *bhdr;
+- struct gc_info_v1_0 *gc_info;
++ union gc_info *gc_info;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+@@ -426,27 +431,54 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+- gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
++ gc_info = (union gc_info *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[GC].offset));
+-
+- adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
+- adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
+- le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
+- adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
+- adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
+- adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
+- adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
+- adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
+- adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
+- adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
+- adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
+- adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
+- adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
+- adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
+- adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
+- adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
+- le32_to_cpu(gc_info->gc_num_sa_per_se);
+- adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
+-
++ switch (gc_info->v1.header.version_major) {
++ case 1:
++ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
++ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
++ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
++ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
++ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
++ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
++ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
++ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
++ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
++ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
++ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
++ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
++ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
++ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
++ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
++ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
++ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
++ break;
++ case 2:
++ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
++ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
++ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
++ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
++ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
++ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
++ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
++ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
++ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
++ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
++ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
++ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
++ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
++ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
++ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
++ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
++ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
++ break;
++ default:
++ dev_err(adev->dev,
++ "Unhandled GC info table %d.%d\n",
++ gc_info->v1.header.version_major,
++ gc_info->v1.header.version_minor);
++ return -EINVAL;
++ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 121ee9f2b8d16..462008d506904 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -253,6 +253,13 @@ static int vcn_v1_0_suspend(void *handle)
+ {
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++ bool idle_work_unexecuted;
++
++ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
++ if (idle_work_unexecuted) {
++ if (adev->pm.dpm_enabled)
++ amdgpu_dpm_enable_uvd(adev, false);
++ }
+
+ r = vcn_v1_0_hw_fini(adev);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+index 377c4e53a2b37..407e19412a949 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+@@ -157,6 +157,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+ union display_idle_optimization_u idle_info = { 0 };
+ idle_info.idle_info.df_request_disabled = 1;
+ idle_info.idle_info.phy_ref_clk_off = 1;
++ idle_info.idle_info.s0i2_rdy = 1;
+ dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index ede11eb120d4f..b01a21d8336cb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -1067,7 +1067,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index fbbdf99761838..92a308ad1213c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .min_disp_clk_khz = 100000,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index a0de309475a97..89a237b5864c8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+index 912285fdce18e..9e2f18a0c9483 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+@@ -863,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .disable_clock_gate = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+index 7d3ff5d444023..2292bb82026e2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+index dd38796ba30ad..589ddab61c2a9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+index ac8fb202fd5ee..4e9fe090b770a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
+ .z10_save_init = dcn31_z10_save_init,
+ .is_abm_supported = dcn31_is_abm_supported,
+ .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
++ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 79e92ecca96c1..6d8f26dada722 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -923,7 +923,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = false,
+- .pipe_split_policy = MPC_SPLIT_AVOID,
++ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
+index 7ec4331e67f26..a486769b66c6a 100644
+--- a/drivers/gpu/drm/amd/include/discovery.h
++++ b/drivers/gpu/drm/amd/include/discovery.h
+@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
+ uint32_t gc_num_gl2a;
+ };
+
++struct gc_info_v1_1 {
++ struct gpu_info_header header;
++
++ uint32_t gc_num_se;
++ uint32_t gc_num_wgp0_per_sa;
++ uint32_t gc_num_wgp1_per_sa;
++ uint32_t gc_num_rb_per_se;
++ uint32_t gc_num_gl2c;
++ uint32_t gc_num_gprs;
++ uint32_t gc_num_max_gs_thds;
++ uint32_t gc_gs_table_depth;
++ uint32_t gc_gsprim_buff_depth;
++ uint32_t gc_parameter_cache_depth;
++ uint32_t gc_double_offchip_lds_buffer;
++ uint32_t gc_wave_size;
++ uint32_t gc_max_waves_per_simd;
++ uint32_t gc_max_scratch_slots_per_cu;
++ uint32_t gc_lds_size;
++ uint32_t gc_num_sc_per_se;
++ uint32_t gc_num_sa_per_se;
++ uint32_t gc_num_packer_per_sc;
++ uint32_t gc_num_gl2a;
++ uint32_t gc_num_tcp_per_sa;
++ uint32_t gc_num_sdp_interface;
++ uint32_t gc_num_tcps;
++};
++
++struct gc_info_v2_0 {
++ struct gpu_info_header header;
++
++ uint32_t gc_num_se;
++ uint32_t gc_num_cu_per_sh;
++ uint32_t gc_num_sh_per_se;
++ uint32_t gc_num_rb_per_se;
++ uint32_t gc_num_tccs;
++ uint32_t gc_num_gprs;
++ uint32_t gc_num_max_gs_thds;
++ uint32_t gc_gs_table_depth;
++ uint32_t gc_gsprim_buff_depth;
++ uint32_t gc_parameter_cache_depth;
++ uint32_t gc_double_offchip_lds_buffer;
++ uint32_t gc_wave_size;
++ uint32_t gc_max_waves_per_simd;
++ uint32_t gc_max_scratch_slots_per_cu;
++ uint32_t gc_lds_size;
++ uint32_t gc_num_sc_per_se;
++ uint32_t gc_num_packer_per_sc;
++};
++
+ typedef struct harvest_info_header {
+ uint32_t signature; /* Table Signature */
+ uint32_t version; /* Table Version */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 05d0b3eb36904..0ae416aa76dcb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -353,15 +353,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
+
+ if (ret)
+ return ret;
+- }
+
+- fobj = dma_resv_shared_list(resv);
+- fence = dma_resv_excl_fence(resv);
++ fobj = NULL;
++ } else {
++ fobj = dma_resv_shared_list(resv);
++ }
+
+- if (fence) {
++ /* Waiting for the exclusive fence first causes performance regressions
++ * under some circumstances. So manually wait for the shared ones first.
++ */
++ for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
+ struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
+
++ fence = rcu_dereference_protected(fobj->shared[i],
++ dma_resv_held(resv));
++
+ f = nouveau_local_fence(fence, chan->drm);
+ if (f) {
+ rcu_read_lock();
+@@ -373,20 +380,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
+
+ if (must_wait)
+ ret = dma_fence_wait(fence, intr);
+-
+- return ret;
+ }
+
+- if (!exclusive || !fobj)
+- return ret;
+-
+- for (i = 0; i < fobj->shared_count && !ret; ++i) {
++ fence = dma_resv_excl_fence(resv);
++ if (fence) {
+ struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
+
+- fence = rcu_dereference_protected(fobj->shared[i],
+- dma_resv_held(resv));
+-
+ f = nouveau_local_fence(fence, chan->drm);
+ if (f) {
+ rcu_read_lock();
+@@ -398,6 +398,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
+
+ if (must_wait)
+ ret = dma_fence_wait(fence, intr);
++
++ return ret;
+ }
+
+ return ret;
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index bce0e8bb78520..cf5d049342ead 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
++ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
++ return -EINVAL;
++
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
+index 429411c6c0a8e..a85a4f33aea8c 100644
+--- a/drivers/input/joystick/spaceball.c
++++ b/drivers/input/joystick/spaceball.c
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/input.h>
+ #include <linux/serio.h>
++#include <asm/unaligned.h>
+
+ #define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
+
+@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
+
+ case 'D': /* Ball data */
+ if (spaceball->idx != 15) return;
+- for (i = 0; i < 6; i++)
++ /*
++ * Skip first three bytes; read six axes worth of data.
++ * Axis values are signed 16-bit big-endian.
++ */
++ data += 3;
++ for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
+ input_report_abs(dev, spaceball_axes[i],
+- (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
++ (__s16)get_unaligned_be16(&data[i * 2]));
++ }
+ break;
+
+ case 'K': /* Button data */
+diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
+index bfa26651c0be7..627048bc6a12e 100644
+--- a/drivers/input/mouse/appletouch.c
++++ b/drivers/input/mouse/appletouch.c
+@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
+ set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
+ set_bit(BTN_LEFT, input_dev->keybit);
+
++ INIT_WORK(&dev->work, atp_reinit);
++
+ error = input_register_device(dev->input);
+ if (error)
+ goto err_free_buffer;
+@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(iface, dev);
+
+- INIT_WORK(&dev->work, atp_reinit);
+-
+ return 0;
+
+ err_free_buffer:
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index aedd055410443..148a7c5fd0e22 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ { }
+ };
+
++static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
++ {
++ /* ASUS ZenBook UX425UA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
++ },
++ },
++ {
++ /* ASUS ZenBook UM325UA */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
++ },
++ },
++ { }
++};
++
+ #endif /* CONFIG_X86 */
+
+ #ifdef CONFIG_PNP
+@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
++ if (dmi_check_system(i8042_dmi_probe_defer_table))
++ i8042_probe_defer = true;
++
+ /*
+ * A20 was already enabled during early kernel init. But some buggy
+ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 0b9f1d0a8f8b0..3fc0a89cc785c 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -45,6 +45,10 @@ static bool i8042_unlock;
+ module_param_named(unlock, i8042_unlock, bool, 0);
+ MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
+
++static bool i8042_probe_defer;
++module_param_named(probe_defer, i8042_probe_defer, bool, 0);
++MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
++
+ enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
+ * LCS/Telegraphics.
+ */
+
+-static int __init i8042_check_mux(void)
++static int i8042_check_mux(void)
+ {
+ unsigned char mux_version;
+
+@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
+ /*
+ * The following is used to test AUX IRQ delivery.
+ */
+-static struct completion i8042_aux_irq_delivered __initdata;
+-static bool i8042_irq_being_tested __initdata;
++static struct completion i8042_aux_irq_delivered;
++static bool i8042_irq_being_tested;
+
+-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
++static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
+ {
+ unsigned long flags;
+ unsigned char str, data;
+@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+ * verifies success by readinng CTR. Used when testing for presence of AUX
+ * port.
+ */
+-static int __init i8042_toggle_aux(bool on)
++static int i8042_toggle_aux(bool on)
+ {
+ unsigned char param;
+ int i;
+@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
+ * the presence of an AUX interface.
+ */
+
+-static int __init i8042_check_aux(void)
++static int i8042_check_aux(void)
+ {
+ int retval = -1;
+ bool irq_registered = false;
+@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
+
+ if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
+ pr_err("Can't read CTR while initializing i8042\n");
+- return -EIO;
++ return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
+ }
+
+ } while (n < 2 || ctr[0] != ctr[1]);
+@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
+ i8042_controller_reset(false);
+ }
+
+-static int __init i8042_create_kbd_port(void)
++static int i8042_create_kbd_port(void)
+ {
+ struct serio *serio;
+ struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
+@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
+ return 0;
+ }
+
+-static int __init i8042_create_aux_port(int idx)
++static int i8042_create_aux_port(int idx)
+ {
+ struct serio *serio;
+ int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
+@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
+ return 0;
+ }
+
+-static void __init i8042_free_kbd_port(void)
++static void i8042_free_kbd_port(void)
+ {
+ kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
+ i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
+ }
+
+-static void __init i8042_free_aux_ports(void)
++static void i8042_free_aux_ports(void)
+ {
+ int i;
+
+@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
+ }
+ }
+
+-static void __init i8042_register_ports(void)
++static void i8042_register_ports(void)
+ {
+ int i;
+
+@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
+ i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
+ }
+
+-static int __init i8042_setup_aux(void)
++static int i8042_setup_aux(void)
+ {
+ int (*aux_enable)(void);
+ int error;
+@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
+ return error;
+ }
+
+-static int __init i8042_setup_kbd(void)
++static int i8042_setup_kbd(void)
+ {
+ int error;
+
+@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+ return 0;
+ }
+
+-static int __init i8042_probe(struct platform_device *dev)
++static int i8042_probe(struct platform_device *dev)
+ {
+ int error;
+
+@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
+ .pm = &i8042_pm_ops,
+ #endif
+ },
++ .probe = i8042_probe,
+ .remove = i8042_remove,
+ .shutdown = i8042_shutdown,
+ };
+@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
+
+ static int __init i8042_init(void)
+ {
+- struct platform_device *pdev;
+ int err;
+
+ dbg_init();
+@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
+ /* Set this before creating the dev to allow i8042_command to work right away */
+ i8042_present = true;
+
+- pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
+- if (IS_ERR(pdev)) {
+- err = PTR_ERR(pdev);
++ err = platform_driver_register(&i8042_driver);
++ if (err)
+ goto err_platform_exit;
++
++ i8042_platform_device = platform_device_alloc("i8042", -1);
++ if (!i8042_platform_device) {
++ err = -ENOMEM;
++ goto err_unregister_driver;
+ }
+
++ err = platform_device_add(i8042_platform_device);
++ if (err)
++ goto err_free_device;
++
+ bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
+ panic_blink = i8042_panic_blink;
+
+ return 0;
+
++err_free_device:
++ platform_device_put(i8042_platform_device);
++err_unregister_driver:
++ platform_driver_unregister(&i8042_driver);
+ err_platform_exit:
+ i8042_platform_exit();
+ return err;
+diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
+index 02ae98aabf91c..416a5c99db5a2 100644
+--- a/drivers/net/ethernet/atheros/ag71xx.c
++++ b/drivers/net/ethernet/atheros/ag71xx.c
+@@ -1915,15 +1915,12 @@ static int ag71xx_probe(struct platform_device *pdev)
+ ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
+ if (IS_ERR(ag->mac_reset)) {
+ netif_err(ag, probe, ndev, "missing mac reset\n");
+- err = PTR_ERR(ag->mac_reset);
+- goto err_free;
++ return PTR_ERR(ag->mac_reset);
+ }
+
+ ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+- if (!ag->mac_base) {
+- err = -ENOMEM;
+- goto err_free;
+- }
++ if (!ag->mac_base)
++ return -ENOMEM;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
+@@ -1931,7 +1928,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+ if (err) {
+ netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
+ ndev->irq);
+- goto err_free;
++ return err;
+ }
+
+ ndev->netdev_ops = &ag71xx_netdev_ops;
+@@ -1959,10 +1956,8 @@ static int ag71xx_probe(struct platform_device *pdev)
+ ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct ag71xx_desc),
+ &ag->stop_desc_dma, GFP_KERNEL);
+- if (!ag->stop_desc) {
+- err = -ENOMEM;
+- goto err_free;
+- }
++ if (!ag->stop_desc)
++ return -ENOMEM;
+
+ ag->stop_desc->data = 0;
+ ag->stop_desc->ctrl = 0;
+@@ -1977,7 +1972,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+ err = of_get_phy_mode(np, &ag->phy_if_mode);
+ if (err) {
+ netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
+- goto err_free;
++ return err;
+ }
+
+ netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+@@ -1985,7 +1980,7 @@ static int ag71xx_probe(struct platform_device *pdev)
+ err = clk_prepare_enable(ag->clk_eth);
+ if (err) {
+ netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
+- goto err_free;
++ return err;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
+@@ -2021,8 +2016,6 @@ err_mdio_remove:
+ ag71xx_mdio_remove(ag);
+ err_put_clk:
+ clk_disable_unprepare(ag->clk_eth);
+-err_free:
+- free_netdev(ndev);
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
+index d9baac0dbc7d0..4c9d05c45c033 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_port.c
++++ b/drivers/net/ethernet/freescale/fman/fman_port.c
+@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ fman = dev_get_drvdata(&fm_pdev->dev);
+ if (!fman) {
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+
+ err = of_property_read_u32(port_node, "cell-index", &val);
+@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
+ __func__, port_node);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+ port_id = (u8)val;
+ port->dts_params.id = port_id;
+@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ } else {
+ dev_err(port->dev, "%s: Illegal port type\n", __func__);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+
+ port->dts_params.type = port_type;
+@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ dev_err(port->dev, "%s: incorrect qman-channel-id\n",
+ __func__);
+ err = -EINVAL;
+- goto return_err;
++ goto put_device;
+ }
+ port->dts_params.qman_channel_id = qman_channel_id;
+ }
+@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
+ dev_err(port->dev, "%s: of_address_to_resource() failed\n",
+ __func__);
+ err = -ENOMEM;
+- goto return_err;
++ goto put_device;
+ }
+
+ port->dts_params.fman = fman;
+@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
+
+ return 0;
+
++put_device:
++ put_device(&fm_pdev->dev);
+ return_err:
+ of_node_put(port_node);
+ free_port:
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 0e19b4d02e628..0a96627391a8c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -5466,6 +5466,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++ if (icr & IGC_ICR_TS)
++ igc_tsync_interrupt(adapter);
++
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+@@ -5509,6 +5512,9 @@ static irqreturn_t igc_intr(int irq, void *data)
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
++ if (icr & IGC_ICR_TS)
++ igc_tsync_interrupt(adapter);
++
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 30568e3544cda..4f9245aa79a18 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
+ {
+- return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
++ if (!IS_ENABLED(CONFIG_X86_TSC))
++ return false;
++
++ /* FIXME: it was noticed that enabling support for PCIe PTM in
++ * some i225-V models could cause lockups when bringing the
++ * interface up/down. There should be no downsides to
++ * disabling crosstimestamping support for i225-V, as it
++ * doesn't have any PTP support. That way we gain some time
++ * while root causing the issue.
++ */
++ if (adapter->pdev->device == IGC_DEV_ID_I225_V)
++ return false;
++
++ return pcie_ptm_enabled(adapter->pdev);
+ }
+
+ static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
+diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
+index fb78f17d734fe..b02f796b5422f 100644
+--- a/drivers/net/ethernet/lantiq_xrx200.c
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -209,7 +209,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
+ skb->protocol = eth_type_trans(skb, net_dev);
+ netif_receive_skb(skb);
+ net_dev->stats.rx_packets++;
+- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
++ net_dev->stats.rx_bytes += len;
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index c10a107a3ea53..7204bc86e4741 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -727,6 +727,8 @@ struct mlx5e_channel {
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
+ int ix;
+ int cpu;
++ /* Sync between icosq recovery and XSK enable/disable. */
++ struct mutex icosq_recovery_lock;
+ };
+
+ struct mlx5e_ptp;
+@@ -954,9 +956,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+ void mlx5e_destroy_rq(struct mlx5e_rq *rq);
+
+ struct mlx5e_sq_param;
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
+ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
+ struct mlx5e_xdpsq *sq, bool is_redirect);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+index 018262d0164b3..3aaf3c2752feb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
+ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
+ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
+ void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
++void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
++void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
+
+ #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
+ #define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+index de03684528bbf..8451940c16ab9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+@@ -647,9 +647,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
+ "Failed to restore tunnel info for sampled packet\n");
+ return;
+ }
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5e_tc_sample_skb(skb, mapped_obj);
+-#endif /* CONFIG_MLX5_TC_SAMPLE */
+ mlx5_rep_tc_post_napi_receive(tc_priv);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+index 0eb125316fe20..e329158fdc555 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+@@ -59,6 +59,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
+
+ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+ {
++ struct mlx5e_rq *xskrq = NULL;
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_icosq *icosq;
+ struct net_device *dev;
+@@ -67,7 +68,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+ int err;
+
+ icosq = ctx;
++
++ mutex_lock(&icosq->channel->icosq_recovery_lock);
++
++ /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
+ rq = &icosq->channel->rq;
++ if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
++ xskrq = &icosq->channel->xskrq;
+ mdev = icosq->channel->mdev;
+ dev = icosq->channel->netdev;
+ err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
+@@ -81,6 +88,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
++ if (xskrq)
++ mlx5e_deactivate_rq(xskrq);
++
+ err = mlx5e_wait_for_icosq_flush(icosq);
+ if (err)
+ goto out;
+@@ -94,15 +104,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+ goto out;
+
+ mlx5e_reset_icosq_cc_pc(icosq);
++
+ mlx5e_free_rx_in_progress_descs(rq);
++ if (xskrq)
++ mlx5e_free_rx_in_progress_descs(xskrq);
++
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mlx5e_activate_icosq(icosq);
+- mlx5e_activate_rq(rq);
+
++ mlx5e_activate_rq(rq);
+ rq->stats->recover++;
++
++ if (xskrq) {
++ mlx5e_activate_rq(xskrq);
++ xskrq->stats->recover++;
++ }
++
++ mutex_unlock(&icosq->channel->icosq_recovery_lock);
++
+ return 0;
+ out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
++ mutex_unlock(&icosq->channel->icosq_recovery_lock);
+ return err;
+ }
+
+@@ -703,6 +726,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+ }
+
++void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
++{
++ mutex_lock(&c->icosq_recovery_lock);
++}
++
++void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
++{
++ mutex_unlock(&c->icosq_recovery_lock);
++}
++
+ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
+ .name = "rx",
+ .recover = mlx5e_rx_reporter_recover,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index bb682fd751c98..8024599994642 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -463,6 +463,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
+ return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ }
+
++static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
++ void *ctx)
++{
++ struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
++
++ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
++}
++
+ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+ {
+@@ -558,7 +566,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
+ to_ctx.sq = sq;
+ err_ctx.ctx = &to_ctx;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+- err_ctx.dump = mlx5e_tx_reporter_dump_sq;
++ err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
+ snprintf(err_str, sizeof(err_str),
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
+ sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+index db0146df9b303..9ef8a49d78014 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
+ struct mlx5e_sample_flow *sample_flow;
+ };
+
++#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
++
+ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
+
+ struct mlx5_flow_handle *
+@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
+ void
+ mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
+
++#else /* CONFIG_MLX5_TC_SAMPLE */
++
++static inline struct mlx5_flow_handle *
++mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
++ struct mlx5_flow_spec *spec,
++ struct mlx5_flow_attr *attr,
++ u32 tunnel_id)
++{ return ERR_PTR(-EOPNOTSUPP); }
++
++static inline void
++mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
++ struct mlx5_flow_handle *rule,
++ struct mlx5_flow_attr *attr) {}
++
++static inline struct mlx5e_tc_psample *
++mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
++{ return ERR_PTR(-EOPNOTSUPP); }
++
++static inline void
++mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
++
++static inline void
++mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
++
++#endif /* CONFIG_MLX5_TC_SAMPLE */
+ #endif /* __MLX5_EN_TC_SAMPLE_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+index 538bc2419bd83..8526a5fbbf0bf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+@@ -4,6 +4,7 @@
+ #include "setup.h"
+ #include "en/params.h"
+ #include "en/txrx.h"
++#include "en/health.h"
+
+ /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
+ * change unexpectedly, and mlx5e has a minimum valid stride size for striding
+@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
+
+ void mlx5e_activate_xsk(struct mlx5e_channel *c)
+ {
++ /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
++ * activating XSKRQ in the middle of recovery.
++ */
++ mlx5e_reporter_icosq_suspend_recovery(c);
+ set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
++ mlx5e_reporter_icosq_resume_recovery(c);
++
+ /* TX queue is created active. */
+
+ spin_lock_bh(&c->async_icosq_lock);
+@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
+
+ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
+ {
+- mlx5e_deactivate_rq(&c->xskrq);
++ /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
++ * middle of recovery. Suspend the recovery to avoid it.
++ */
++ mlx5e_reporter_icosq_suspend_recovery(c);
++ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
++ mlx5e_reporter_icosq_resume_recovery(c);
++ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
++
+ /* TX queue is disabled on close. */
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 8cf5fbebd674b..baa0d7d48fc0c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -911,8 +911,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
+ void mlx5e_close_rq(struct mlx5e_rq *rq)
+ {
+ cancel_work_sync(&rq->dim.work);
+- if (rq->icosq)
+- cancel_work_sync(&rq->icosq->recover_work);
+ cancel_work_sync(&rq->recover_work);
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+@@ -1038,9 +1036,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+ mlx5e_reporter_icosq_cqe_err(sq);
+ }
+
++static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
++{
++ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
++ recover_work);
++
++ /* Not implemented yet. */
++
++ netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
++}
++
+ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ struct mlx5e_sq_param *param,
+- struct mlx5e_icosq *sq)
++ struct mlx5e_icosq *sq,
++ work_func_t recover_work_func)
+ {
+ void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
+ struct mlx5_core_dev *mdev = c->mdev;
+@@ -1061,7 +1070,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
+ if (err)
+ goto err_sq_wq_destroy;
+
+- INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
++ INIT_WORK(&sq->recover_work, recover_work_func);
+
+ return 0;
+
+@@ -1399,13 +1408,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
+ mlx5e_reporter_tx_err_cqe(sq);
+ }
+
+-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
++static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
++ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
++ work_func_t recover_work_func)
+ {
+ struct mlx5e_create_sq_param csp = {};
+ int err;
+
+- err = mlx5e_alloc_icosq(c, param, sq);
++ err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
+ if (err)
+ return err;
+
+@@ -1444,7 +1454,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+ synchronize_net(); /* Sync with NAPI. */
+ }
+
+-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
++static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+ {
+ struct mlx5e_channel *c = sq->channel;
+
+@@ -1871,11 +1881,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
+
+ spin_lock_init(&c->async_icosq_lock);
+
+- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
++ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
++ mlx5e_async_icosq_err_cqe_work);
+ if (err)
+ goto err_close_xdpsq_cq;
+
+- err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
++ mutex_init(&c->icosq_recovery_lock);
++
++ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
++ mlx5e_icosq_err_cqe_work);
+ if (err)
+ goto err_close_async_icosq;
+
+@@ -1943,9 +1957,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
+ mlx5e_close_xdpsq(&c->xdpsq);
+ if (c->xdp)
+ mlx5e_close_xdpsq(&c->rq_xdpsq);
++ /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
++ cancel_work_sync(&c->icosq.recover_work);
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ mlx5e_close_icosq(&c->icosq);
++ mutex_destroy(&c->icosq_recovery_lock);
+ mlx5e_close_icosq(&c->async_icosq);
+ if (c->xdp)
+ mlx5e_close_cq(&c->rq_xdpsq.cq);
+@@ -3433,12 +3450,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
+
+ static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t *features,
+- netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlx5e_feature_handler feature_handler)
+ {
+- netdev_features_t changes = wanted_features ^ netdev->features;
+- bool enable = !!(wanted_features & feature);
++ netdev_features_t changes = *features ^ netdev->features;
++ bool enable = !!(*features & feature);
+ int err;
+
+ if (!(changes & feature))
+@@ -3446,22 +3462,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
+
+ err = feature_handler(netdev, enable);
+ if (err) {
++ MLX5E_SET_FEATURE(features, feature, !enable);
+ netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+ enable ? "Enable" : "Disable", &feature, err);
+ return err;
+ }
+
+- MLX5E_SET_FEATURE(features, feature, enable);
+ return 0;
+ }
+
+ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
+ {
+- netdev_features_t oper_features = netdev->features;
++ netdev_features_t oper_features = features;
+ int err = 0;
+
+ #define MLX5E_HANDLE_FEATURE(feature, handler) \
+- mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
++ mlx5e_handle_feature(netdev, &oper_features, feature, handler)
+
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index e3b320b6d85b9..fa461bc57baee 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -248,7 +248,6 @@ get_ct_priv(struct mlx5e_priv *priv)
+ return priv->fs.tc.ct;
+ }
+
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ static struct mlx5e_tc_psample *
+ get_sample_priv(struct mlx5e_priv *priv)
+ {
+@@ -265,7 +264,6 @@ get_sample_priv(struct mlx5e_priv *priv)
+
+ return NULL;
+ }
+-#endif
+
+ struct mlx5_flow_handle *
+ mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+@@ -1148,11 +1146,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+ rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ flow, spec, attr,
+ mod_hdr_acts);
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ } else if (flow_flag_test(flow, SAMPLE)) {
+ rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
+ mlx5e_tc_get_flow_tun_id(flow));
+-#endif
+ } else {
+ rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ }
+@@ -1183,23 +1179,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+ goto offload_rule_0;
+
+- if (flow_flag_test(flow, CT)) {
+- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+- return;
+- }
+-
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+- if (flow_flag_test(flow, SAMPLE)) {
+- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+- return;
+- }
+-#endif
+-
+ if (attr->esw_attr->split_count)
+ mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+
++ if (flow_flag_test(flow, CT))
++ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
++ else if (flow_flag_test(flow, SAMPLE))
++ mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
++ else
+ offload_rule_0:
+- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
++ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ }
+
+ struct mlx5_flow_handle *
+@@ -5014,9 +5003,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+ MLX5_FLOW_NAMESPACE_FDB,
+ uplink_priv->post_act);
+
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
+-#endif
+
+ mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+
+@@ -5060,9 +5047,7 @@ err_ht_init:
+ err_enc_opts_mapping:
+ mapping_destroy(uplink_priv->tunnel_mapping);
+ err_tun_mapping:
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
+-#endif
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ netdev_warn(priv->netdev,
+ "Failed to initialize tc (eswitch), err: %d", err);
+@@ -5082,9 +5067,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+ mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+ mapping_destroy(uplink_priv->tunnel_mapping);
+
+-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
+-#endif
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
+ mlx5e_tc_post_act_destroy(uplink_priv->post_act);
+ }
1450 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
1451 +index 97e5845b4cfdd..d5e47630e2849 100644
1452 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
1453 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
1454 +@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
1455 +
1456 + u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
1457 + {
1458 ++ if (!mlx5_chains_prios_supported(chains))
1459 ++ return 1;
1460 ++
1461 + if (mlx5_chains_ignore_flow_level_supported(chains))
1462 + return UINT_MAX;
1463 +
1464 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1465 +index 92b08fa07efae..92b01858d7f3e 100644
1466 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1467 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1468 +@@ -1775,12 +1775,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
1469 +
1470 + int mlx5_recover_device(struct mlx5_core_dev *dev)
1471 + {
1472 +- int ret = -EIO;
1473 ++ if (!mlx5_core_is_sf(dev)) {
1474 ++ mlx5_pci_disable_device(dev);
1475 ++ if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
1476 ++ return -EIO;
1477 ++ }
1478 +
1479 +- mlx5_pci_disable_device(dev);
1480 +- if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
1481 +- ret = mlx5_load_one(dev);
1482 +- return ret;
1483 ++ return mlx5_load_one(dev);
1484 + }
1485 +
1486 + static struct pci_driver mlx5_core_driver = {
1487 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
1488 +index 763c83a023809..11f3649fdaab1 100644
1489 +--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
1490 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
1491 +@@ -346,8 +346,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
1492 + new_irq = irq_pool_create_irq(pool, affinity);
1493 + if (IS_ERR(new_irq)) {
1494 + if (!least_loaded_irq) {
1495 +- mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
1496 +- cpumask_first(affinity));
1497 ++ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
1498 ++ PTR_ERR(new_irq));
1499 + mutex_unlock(&pool->lock);
1500 + return new_irq;
1501 + }
1502 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1503 +index 0fe159809ba15..ea1b8ca5bf3aa 100644
1504 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1505 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1506 +@@ -2,6 +2,7 @@
1507 + /* Copyright (c) 2019 Mellanox Technologies. */
1508 +
1509 + #include <linux/mlx5/eswitch.h>
1510 ++#include <linux/err.h>
1511 + #include "dr_types.h"
1512 +
1513 + #define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
1514 +@@ -75,9 +76,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
1515 + }
1516 +
1517 + dmn->uar = mlx5_get_uars_page(dmn->mdev);
1518 +- if (!dmn->uar) {
1519 ++ if (IS_ERR(dmn->uar)) {
1520 + mlx5dr_err(dmn, "Couldn't allocate UAR\n");
1521 +- ret = -ENOMEM;
1522 ++ ret = PTR_ERR(dmn->uar);
1523 + goto clean_pd;
1524 + }
1525 +
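The dr_domain.c hunk above fixes the error check: mlx5_get_uars_page() reports failure through an encoded error pointer, so testing for NULL never fires. A simplified userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention; the helpers are re-implemented here for illustration (the kernel's versions live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    /* error pointers occupy the top 4095 values of the address space */
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *get_uars_page(int fail)
{
    return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000; /* fake page address */
}

int main(void)
{
    void *uar = get_uars_page(1);

    if (IS_ERR(uar)) {          /* a NULL check would miss this entirely */
        fprintf(stderr, "couldn't allocate UAR: %ld\n", PTR_ERR(uar));
        return 1;
    }
    return 0;
}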
1526 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1527 +index 7f3322ce044c7..6ac507ddf09af 100644
1528 +--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1529 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
1530 +@@ -3283,7 +3283,7 @@ int ionic_lif_init(struct ionic_lif *lif)
1531 + return -EINVAL;
1532 + }
1533 +
1534 +- lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
1535 ++ lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
1536 + if (!lif->dbid_inuse) {
1537 + dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
1538 + return -ENOMEM;
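The ionic hunk above swaps bitmap_alloc() for bitmap_zalloc() because an ID-tracking bitmap must start all-clear; stale heap bits would make fresh doorbell IDs look in-use. A userspace sketch of why the zeroed variant matters, with bitmap_zalloc() modeled on calloc() and a simplified find-first-zero scan:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long *bitmap_zalloc(unsigned int nbits)
{
    size_t words = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

    return calloc(words, sizeof(unsigned long)); /* zeroed, unlike malloc */
}

static int find_first_zero_bit(const unsigned long *map, unsigned int nbits)
{
    for (unsigned int i = 0; i < nbits; i++)
        if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
            return i;
    return -1;
}

int main(void)
{
    unsigned long *dbid_inuse = bitmap_zalloc(64);

    if (!dbid_inuse)
        return 1;
    /* with uninitialized memory this could return any index, or -1 */
    printf("first free doorbell id: %d\n", find_first_zero_bit(dbid_inuse, 64));
    free(dbid_inuse);
    return 0;
}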
1539 +diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
1540 +index 6a92a3fef75e5..cd063f45785b7 100644
1541 +--- a/drivers/net/usb/pegasus.c
1542 ++++ b/drivers/net/usb/pegasus.c
1543 +@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
1544 + goto goon;
1545 +
1546 + rx_status = buf[count - 2];
1547 +- if (rx_status & 0x1e) {
1548 ++ if (rx_status & 0x1c) {
1549 + netif_dbg(pegasus, rx_err, net,
1550 + "RX packet error %x\n", rx_status);
1551 + net->stats.rx_errors++;
1552 +- if (rx_status & 0x06) /* long or runt */
1553 ++ if (rx_status & 0x04) /* runt */
1554 + net->stats.rx_length_errors++;
1555 + if (rx_status & 0x08)
1556 + net->stats.rx_crc_errors++;
1557 +diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
1558 +index 279d88128b2e4..d56bc24709b5c 100644
1559 +--- a/drivers/nfc/st21nfca/i2c.c
1560 ++++ b/drivers/nfc/st21nfca/i2c.c
1561 +@@ -528,7 +528,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
1562 + phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
1563 + if (IS_ERR(phy->gpiod_ena)) {
1564 + nfc_err(dev, "Unable to get ENABLE GPIO\n");
1565 +- return PTR_ERR(phy->gpiod_ena);
1566 ++ r = PTR_ERR(phy->gpiod_ena);
1567 ++ goto out_free;
1568 + }
1569 +
1570 + phy->se_status.is_ese_present =
1571 +@@ -539,7 +540,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
1572 + r = st21nfca_hci_platform_init(phy);
1573 + if (r < 0) {
1574 + nfc_err(&client->dev, "Unable to reboot st21nfca\n");
1575 +- return r;
1576 ++ goto out_free;
1577 + }
1578 +
1579 + r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
1580 +@@ -548,15 +549,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
1581 + ST21NFCA_HCI_DRIVER_NAME, phy);
1582 + if (r < 0) {
1583 + nfc_err(&client->dev, "Unable to register IRQ handler\n");
1584 +- return r;
1585 ++ goto out_free;
1586 + }
1587 +
1588 +- return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
1589 +- ST21NFCA_FRAME_HEADROOM,
1590 +- ST21NFCA_FRAME_TAILROOM,
1591 +- ST21NFCA_HCI_LLC_MAX_PAYLOAD,
1592 +- &phy->hdev,
1593 +- &phy->se_status);
1594 ++ r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
1595 ++ ST21NFCA_FRAME_HEADROOM,
1596 ++ ST21NFCA_FRAME_TAILROOM,
1597 ++ ST21NFCA_HCI_LLC_MAX_PAYLOAD,
1598 ++ &phy->hdev,
1599 ++ &phy->se_status);
1600 ++ if (r)
1601 ++ goto out_free;
1602 ++
1603 ++ return 0;
1604 ++
1605 ++out_free:
1606 ++ kfree_skb(phy->pending_skb);
1607 ++ return r;
1608 + }
1609 +
1610 + static int st21nfca_hci_i2c_remove(struct i2c_client *client)
1611 +@@ -567,6 +576,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
1612 +
1613 + if (phy->powered)
1614 + st21nfca_hci_i2c_disable(phy);
1615 ++ if (phy->pending_skb)
1616 ++ kfree_skb(phy->pending_skb);
1617 +
1618 + return 0;
1619 + }
1620 +diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
1621 +index 04bc3b50aa7a4..65b4a819f1bdf 100644
1622 +--- a/drivers/platform/mellanox/mlxbf-pmc.c
1623 ++++ b/drivers/platform/mellanox/mlxbf-pmc.c
1624 +@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
1625 + pmc->block[i].counters = info[2];
1626 + pmc->block[i].type = info[3];
1627 +
1628 +- if (IS_ERR(pmc->block[i].mmio_base))
1629 +- return PTR_ERR(pmc->block[i].mmio_base);
1630 ++ if (!pmc->block[i].mmio_base)
1631 ++ return -ENOMEM;
1632 +
1633 + ret = mlxbf_pmc_create_groups(dev, i);
1634 + if (ret)
1635 +diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
1636 +index 9aae45a452002..57553f9b4d1dc 100644
1637 +--- a/drivers/platform/x86/apple-gmux.c
1638 ++++ b/drivers/platform/x86/apple-gmux.c
1639 +@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
1640 + }
1641 +
1642 + gmux_data->iostart = res->start;
1643 +- gmux_data->iolen = res->end - res->start;
1644 ++ gmux_data->iolen = resource_size(res);
1645 +
1646 + if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
1647 + pr_err("gmux I/O region too small (%lu < %u)\n",
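The apple-gmux hunk above replaces an open-coded `res->end - res->start` with resource_size(). Kernel resources use an inclusive end address, so the bare subtraction undercounts by one byte and can make a minimally-sized I/O region fail the GMUX_MIN_IO_LEN test. A tiny sketch of the difference, with struct resource trimmed down for illustration:

#include <stdio.h>

struct resource { unsigned long start, end; }; /* end is inclusive */

static unsigned long resource_size(const struct resource *res)
{
    return res->end - res->start + 1;
}

int main(void)
{
    struct resource res = { .start = 0x700, .end = 0x7ff }; /* 256 ports */

    printf("end - start     = %lu (one short)\n", res.end - res.start);
    printf("resource_size() = %lu\n", resource_size(&res));
    return 0;
}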
1648 +diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
1649 +index bd6d459afce54..08b2e85dcd7d8 100644
1650 +--- a/drivers/scsi/lpfc/lpfc_debugfs.c
1651 ++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
1652 +@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
1653 + char mybuf[64];
1654 + char *pbuf;
1655 +
1656 +- if (nbytes > 64)
1657 +- nbytes = 64;
1658 ++ if (nbytes > 63)
1659 ++ nbytes = 63;
1660 +
1661 + memset(mybuf, 0, sizeof(mybuf));
1662 +
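The lpfc hunk above caps the copy at 63 bytes rather than 64, so the final byte of the on-stack buffer stays NUL and the later string parsing cannot run off the end. A userspace sketch of the same guard, with memcpy() standing in for copy_from_user():

#include <stdio.h>
#include <string.h>

static void take_input(const char *user_buf, size_t nbytes)
{
    char mybuf[64];

    if (nbytes > sizeof(mybuf) - 1)   /* was: > 64, leaving no room for NUL */
        nbytes = sizeof(mybuf) - 1;

    memset(mybuf, 0, sizeof(mybuf));
    memcpy(mybuf, user_buf, nbytes);  /* stands in for copy_from_user() */
    printf("parsed: %s\n", mybuf);    /* guaranteed NUL-terminated */
}

int main(void)
{
    char big[100];

    memset(big, 'A', sizeof(big));    /* oversized, unterminated input */
    take_input(big, sizeof(big));
    return 0;
}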
1663 +diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
1664 +index ce1ba1b936298..9419d6d1d8d26 100644
1665 +--- a/drivers/scsi/vmw_pvscsi.c
1666 ++++ b/drivers/scsi/vmw_pvscsi.c
1667 +@@ -586,9 +586,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
1668 + * Commands like INQUIRY may transfer less data than
1669 + * requested by the initiator via bufflen. Set residual
1670 + * count to make upper layer aware of the actual amount
1671 +- * of data returned.
1672 ++ * of data returned. There are cases when the controller
1673 ++ * returns zero dataLen with non-zero data - do not set
1674 ++ * the residual count in that case.
1675 + */
1676 +- scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
1677 ++ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
1678 ++ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
1679 + cmd->result = (DID_OK << 16);
1680 + break;
1681 +
1682 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
1683 +index 8260f38025b72..aac343f7d7d3d 100644
1684 +--- a/drivers/usb/gadget/function/f_fs.c
1685 ++++ b/drivers/usb/gadget/function/f_fs.c
1686 +@@ -1773,11 +1773,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
1687 +
1688 + BUG_ON(ffs->gadget);
1689 +
1690 +- if (ffs->epfiles)
1691 ++ if (ffs->epfiles) {
1692 + ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
1693 ++ ffs->epfiles = NULL;
1694 ++ }
1695 +
1696 +- if (ffs->ffs_eventfd)
1697 ++ if (ffs->ffs_eventfd) {
1698 + eventfd_ctx_put(ffs->ffs_eventfd);
1699 ++ ffs->ffs_eventfd = NULL;
1700 ++ }
1701 +
1702 + kfree(ffs->raw_descs_data);
1703 + kfree(ffs->raw_strings);
1704 +@@ -1790,7 +1794,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
1705 +
1706 + ffs_data_clear(ffs);
1707 +
1708 +- ffs->epfiles = NULL;
1709 + ffs->raw_descs_data = NULL;
1710 + ffs->raw_descs = NULL;
1711 + ffs->raw_strings = NULL;
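The f_fs.c hunk above moves the epfiles reset into ffs_data_clear() itself and also clears ffs_eventfd, making the routine safe to run more than once. A small sketch of the idempotent-teardown pattern; the struct is invented and free() stands in for the USB/eventfd teardown calls:

#include <stdlib.h>

struct state {
    void *epfiles;
    void *eventfd;
};

static void state_clear(struct state *s)
{
    if (s->epfiles) {
        free(s->epfiles);
        s->epfiles = NULL;   /* safe if state_clear() runs again */
    }
    if (s->eventfd) {
        free(s->eventfd);
        s->eventfd = NULL;
    }
}

int main(void)
{
    struct state s = { malloc(16), malloc(16) };

    state_clear(&s);
    state_clear(&s);  /* idempotent now; a double free before the fix */
    return 0;
}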
1712 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1713 +index 8c04a7d73388b..de9a9ea2cabc2 100644
1714 +--- a/drivers/usb/host/xhci-pci.c
1715 ++++ b/drivers/usb/host/xhci-pci.c
1716 +@@ -123,7 +123,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1717 + /* Look for vendor-specific quirks */
1718 + if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
1719 + (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
1720 +- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
1721 + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
1722 + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
1723 + pdev->revision == 0x0) {
1724 +@@ -158,6 +157,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1725 + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
1726 + xhci->quirks |= XHCI_BROKEN_STREAMS;
1727 +
1728 ++ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
1729 ++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
1730 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1731 ++
1732 + if (pdev->vendor == PCI_VENDOR_ID_NEC)
1733 + xhci->quirks |= XHCI_NEC_HOST;
1734 +
1735 +diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
1736 +index a9a65b4bbfede..0b21da4ee1836 100644
1737 +--- a/drivers/usb/mtu3/mtu3_gadget.c
1738 ++++ b/drivers/usb/mtu3/mtu3_gadget.c
1739 +@@ -92,6 +92,13 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
1740 + interval = clamp_val(interval, 1, 16) - 1;
1741 + mult = usb_endpoint_maxp_mult(desc) - 1;
1742 + }
1743 ++ break;
1744 ++ case USB_SPEED_FULL:
1745 ++ if (usb_endpoint_xfer_isoc(desc))
1746 ++ interval = clamp_val(desc->bInterval, 1, 16);
1747 ++ else if (usb_endpoint_xfer_int(desc))
1748 ++ interval = clamp_val(desc->bInterval, 1, 255);
1749 ++
1750 + break;
1751 + default:
1752 + break; /* others are ignored */
1753 +@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1754 + mreq->request.dma = DMA_ADDR_INVALID;
1755 + mreq->epnum = mep->epnum;
1756 + mreq->mep = mep;
1757 ++ INIT_LIST_HEAD(&mreq->list);
1758 + trace_mtu3_alloc_request(mreq);
1759 +
1760 + return &mreq->request;
1761 +diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
1762 +index 3f414f91b5899..2ea3157ddb6e2 100644
1763 +--- a/drivers/usb/mtu3/mtu3_qmu.c
1764 ++++ b/drivers/usb/mtu3/mtu3_qmu.c
1765 +@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
1766 + gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
1767 + }
1768 +
1769 ++ /* prevent reorder, make sure GPD's HWO is set last */
1770 ++ mb();
1771 + gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
1772 +
1773 + mreq->gpd = gpd;
1774 +@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
1775 + gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
1776 + ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
1777 + gpd->dw3_info = cpu_to_le32(ext_addr);
1778 ++ /* prevent reorder, make sure GPD's HWO is set last */
1779 ++ mb();
1780 + gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
1781 +
1782 + mreq->gpd = gpd;
1783 +@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
1784 + return;
1785 + }
1786 + mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
1787 +-
1788 ++ /* prevent reorder, make sure GPD's HWO is set last */
1789 ++ mb();
1790 + /* bypass the current GPD */
1791 + gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
1792 +
1793 +diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
1794 +index e21e1e86ad15f..fe7a8e4034097 100644
1795 +--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
1796 ++++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
1797 +@@ -886,8 +886,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
1798 + goto put_pages;
1799 + }
1800 +
1801 +- gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
1802 +- ne_mem_region->pages + i, NULL);
1803 ++ gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
1804 ++ ne_mem_region->pages + i, FOLL_GET);
1805 ++
1806 + if (gup_rc < 0) {
1807 + rc = gup_rc;
1808 +
1809 +diff --git a/fs/namespace.c b/fs/namespace.c
1810 +index 659a8f39c61af..b696543adab84 100644
1811 +--- a/fs/namespace.c
1812 ++++ b/fs/namespace.c
1813 +@@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
1814 + return err;
1815 +
1816 + err = user_path_at(dfd, path, kattr.lookup_flags, &target);
1817 +- if (err)
1818 +- return err;
1819 +-
1820 +- err = do_mount_setattr(&target, &kattr);
1821 ++ if (!err) {
1822 ++ err = do_mount_setattr(&target, &kattr);
1823 ++ path_put(&target);
1824 ++ }
1825 + finish_mount_kattr(&kattr);
1826 +- path_put(&target);
1827 + return err;
1828 + }
1829 +
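The namespace.c hunk above restructures mount_setattr() so path_put() runs only when user_path_at() actually produced a path, while finish_mount_kattr() still runs on every exit. A compact sketch of that acquire-conditionally, release-conditionally shape, with stand-in types and stub functions:

#include <stdio.h>
#include <stdlib.h>

struct path { void *dentry; };

static int user_path_at(struct path *p, int fail)
{
    if (fail)
        return -2;               /* stand-in for -ENOENT */
    p->dentry = malloc(1);
    return p->dentry ? 0 : -12;  /* stand-in for -ENOMEM */
}

static void path_put(struct path *p)         { free(p->dentry); }
static int  do_mount_setattr(struct path *p) { (void)p; return 0; }
static void finish_mount_kattr(void)         { /* runs on every exit */ }

static int mount_setattr(int fail_lookup)
{
    struct path target;
    int err = user_path_at(&target, fail_lookup);

    if (!err) {
        err = do_mount_setattr(&target);
        path_put(&target);       /* only release a path we actually hold */
    }
    finish_mount_kattr();
    return err;
}

int main(void)
{
    printf("ok lookup:  %d\n", mount_setattr(0));
    printf("bad lookup: %d\n", mount_setattr(1));
    return 0;
}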
1830 +diff --git a/include/linux/efi.h b/include/linux/efi.h
1831 +index 6b5d36babfcc4..3d8ddc5eca8ca 100644
1832 +--- a/include/linux/efi.h
1833 ++++ b/include/linux/efi.h
1834 +@@ -1282,4 +1282,10 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
1835 + }
1836 + #endif
1837 +
1838 ++#ifdef CONFIG_SYSFB
1839 ++extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
1840 ++#else
1841 ++static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
1842 ++#endif
1843 ++
1844 + #endif /* _LINUX_EFI_H */
1845 +diff --git a/include/linux/memblock.h b/include/linux/memblock.h
1846 +index 34de69b3b8bad..5df38332e4139 100644
1847 +--- a/include/linux/memblock.h
1848 ++++ b/include/linux/memblock.h
1849 +@@ -388,8 +388,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
1850 + phys_addr_t end, int nid, bool exact_nid);
1851 + phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
1852 +
1853 +-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
1854 +- phys_addr_t align)
1855 ++static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
1856 ++ phys_addr_t align)
1857 + {
1858 + return memblock_phys_alloc_range(size, align, 0,
1859 + MEMBLOCK_ALLOC_ACCESSIBLE);
1860 +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
1861 +index bf79f3a890af2..05f18e81f3e87 100644
1862 +--- a/include/net/pkt_sched.h
1863 ++++ b/include/net/pkt_sched.h
1864 +@@ -193,4 +193,19 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
1865 + skb->tstamp = ktime_set(0, 0);
1866 + }
1867 +
1868 ++struct tc_skb_cb {
1869 ++ struct qdisc_skb_cb qdisc_cb;
1870 ++
1871 ++ u16 mru;
1872 ++ bool post_ct;
1873 ++};
1874 ++
1875 ++static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
1876 ++{
1877 ++ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
1878 ++
1879 ++ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
1880 ++ return cb;
1881 ++}
1882 ++
1883 + #endif
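The pkt_sched.h hunk above introduces tc_skb_cb as a TC-private view of skb->cb, with qdisc_skb_cb embedded first so the existing casts keep working, and a BUILD_BUG_ON guarding against the overlay outgrowing the scratch area. A userspace sketch of the overlay-plus-compile-time-check pattern; the layouts are trimmed down and _Static_assert stands in for BUILD_BUG_ON:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sk_buff { char cb[48]; };   /* fixed scratch area, as in the kernel */

struct qdisc_skb_cb { uint16_t pkt_len; unsigned char data[20]; };

struct tc_skb_cb {
    struct qdisc_skb_cb qdisc_cb;  /* must stay first: code casts between them */
    uint16_t mru;
    bool post_ct;
};

static inline struct tc_skb_cb *tc_skb_cb(struct sk_buff *skb)
{
    _Static_assert(sizeof(struct tc_skb_cb) <= sizeof(((struct sk_buff *)0)->cb),
                   "tc_skb_cb overflows skb->cb");
    return (struct tc_skb_cb *)skb->cb;
}

int main(void)
{
    struct sk_buff skb = {0};

    tc_skb_cb(&skb)->mru = 1500;
    printf("mru=%u\n", tc_skb_cb(&skb)->mru);
    return 0;
}

Moving mru and post_ct out of qdisc_skb_cb and into this overlay is what lets the later hunks in dev.c, act_ct.c, cls_api.c, cls_flower.c and sch_frag.c keep those fields TC-only.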
1884 +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1885 +index 8c2d611639fca..6e7cd00333577 100644
1886 +--- a/include/net/sch_generic.h
1887 ++++ b/include/net/sch_generic.h
1888 +@@ -440,8 +440,6 @@ struct qdisc_skb_cb {
1889 + };
1890 + #define QDISC_CB_PRIV_LEN 20
1891 + unsigned char data[QDISC_CB_PRIV_LEN];
1892 +- u16 mru;
1893 +- bool post_ct;
1894 + };
1895 +
1896 + typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
1897 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
1898 +index 189fdb9db1622..d314a180ab93d 100644
1899 +--- a/include/net/sctp/sctp.h
1900 ++++ b/include/net/sctp/sctp.h
1901 +@@ -105,6 +105,7 @@ extern struct percpu_counter sctp_sockets_allocated;
1902 + int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
1903 + struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
1904 +
1905 ++typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
1906 + void sctp_transport_walk_start(struct rhashtable_iter *iter);
1907 + void sctp_transport_walk_stop(struct rhashtable_iter *iter);
1908 + struct sctp_transport *sctp_transport_get_next(struct net *net,
1909 +@@ -115,9 +116,8 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
1910 + struct net *net,
1911 + const union sctp_addr *laddr,
1912 + const union sctp_addr *paddr, void *p);
1913 +-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
1914 +- int (*cb_done)(struct sctp_transport *, void *),
1915 +- struct net *net, int *pos, void *p);
1916 ++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
1917 ++ struct net *net, int *pos, void *p);
1918 + int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
1919 + int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
1920 + struct sctp_info *info);
1921 +diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
1922 +index 651bba654d77d..8d2c3dd9f5953 100644
1923 +--- a/include/net/sctp/structs.h
1924 ++++ b/include/net/sctp/structs.h
1925 +@@ -1365,6 +1365,7 @@ struct sctp_endpoint {
1926 +
1927 + u32 secid;
1928 + u32 peer_secid;
1929 ++ struct rcu_head rcu;
1930 + };
1931 +
1932 + /* Recover the outer endpoint structure. */
1933 +@@ -1380,7 +1381,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
1934 + struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
1935 + void sctp_endpoint_free(struct sctp_endpoint *);
1936 + void sctp_endpoint_put(struct sctp_endpoint *);
1937 +-void sctp_endpoint_hold(struct sctp_endpoint *);
1938 ++int sctp_endpoint_hold(struct sctp_endpoint *ep);
1939 + void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
1940 + struct sctp_association *sctp_endpoint_lookup_assoc(
1941 + const struct sctp_endpoint *ep,
1942 +diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
1943 +index f6e3c8c9c7449..4fa4e979e948a 100644
1944 +--- a/include/uapi/linux/nfc.h
1945 ++++ b/include/uapi/linux/nfc.h
1946 +@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
1947 + #define NFC_SE_ENABLED 0x1
1948 +
1949 + struct sockaddr_nfc {
1950 +- sa_family_t sa_family;
1951 ++ __kernel_sa_family_t sa_family;
1952 + __u32 dev_idx;
1953 + __u32 target_idx;
1954 + __u32 nfc_protocol;
1955 +@@ -271,14 +271,14 @@ struct sockaddr_nfc {
1956 +
1957 + #define NFC_LLCP_MAX_SERVICE_NAME 63
1958 + struct sockaddr_nfc_llcp {
1959 +- sa_family_t sa_family;
1960 ++ __kernel_sa_family_t sa_family;
1961 + __u32 dev_idx;
1962 + __u32 target_idx;
1963 + __u32 nfc_protocol;
1964 + __u8 dsap; /* Destination SAP, if known */
1965 + __u8 ssap; /* Source SAP to be bound to */
1966 + char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
1967 +- size_t service_name_len;
1968 ++ __kernel_size_t service_name_len;
1969 + };
1970 +
1971 + /* NFC socket protocols */
1972 +diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
1973 +index d3bc110430f9d..36624990b5777 100644
1974 +--- a/mm/damon/dbgfs.c
1975 ++++ b/mm/damon/dbgfs.c
1976 +@@ -185,6 +185,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
1977 + const char __user *buf, size_t count, loff_t *ppos)
1978 + {
1979 + struct damon_ctx *ctx = file->private_data;
1980 ++ struct damon_target *t, *next_t;
1981 + char *kbuf, *nrs;
1982 + unsigned long *targets;
1983 + ssize_t nr_targets;
1984 +@@ -224,6 +225,13 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
1985 + goto unlock_out;
1986 + }
1987 +
1988 ++ /* remove previously set targets */
1989 ++ damon_for_each_target_safe(t, next_t, ctx) {
1990 ++ if (targetid_is_pid(ctx))
1991 ++ put_pid((struct pid *)t->id);
1992 ++ damon_destroy_target(t);
1993 ++ }
1994 ++
1995 + err = damon_set_targets(ctx, targets, nr_targets);
1996 + if (err) {
1997 + if (targetid_is_pid(ctx))
1998 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1999 +index f3d751105343c..de24098894897 100644
2000 +--- a/net/bridge/br_multicast.c
2001 ++++ b/net/bridge/br_multicast.c
2002 +@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
2003 + }
2004 + #endif
2005 +
2006 ++void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
2007 ++ unsigned long val)
2008 ++{
2009 ++ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
2010 ++
2011 ++ if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
2012 ++ br_info(brmctx->br,
2013 ++ "trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
2014 ++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
2015 ++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
2016 ++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
2017 ++ }
2018 ++
2019 ++ brmctx->multicast_query_interval = intvl_jiffies;
2020 ++}
2021 ++
2022 ++void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
2023 ++ unsigned long val)
2024 ++{
2025 ++ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
2026 ++
2027 ++ if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
2028 ++ br_info(brmctx->br,
2029 ++ "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
2030 ++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
2031 ++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
2032 ++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
2033 ++ }
2034 ++
2035 ++ brmctx->multicast_startup_query_interval = intvl_jiffies;
2036 ++}
2037 ++
2038 + /**
2039 + * br_multicast_list_adjacent - Returns snooped multicast addresses
2040 + * @dev: The bridge port adjacent to which to retrieve addresses
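The br_multicast.c hunk above adds setters that raise too-small query intervals to a one-second floor instead of storing them verbatim, since a near-zero interval would have the bridge flood queries. A minimal sketch of the clamp-and-log shape, using a fake 1000 Hz jiffies scale for illustration:

#include <stdio.h>

#define HZ 1000
#define QUERY_INTVL_MIN (1 * HZ)   /* one-second floor, in fake jiffies */

static unsigned long set_query_interval(unsigned long val)
{
    if (val < QUERY_INTVL_MIN) {
        fprintf(stderr,
                "query interval below minimum, clamping to %d jiffies\n",
                QUERY_INTVL_MIN);
        val = QUERY_INTVL_MIN;
    }
    return val;
}

int main(void)
{
    printf("stored: %lu\n", set_query_interval(3));     /* clamped */
    printf("stored: %lu\n", set_query_interval(5000));  /* accepted */
    return 0;
}

The br_netlink.c, br_sysfs_br.c and br_vlan_options.c hunks that follow simply route every user-facing write path through these setters.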
2041 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2042 +index 5c6c4305ed235..e365cf82f0615 100644
2043 +--- a/net/bridge/br_netlink.c
2044 ++++ b/net/bridge/br_netlink.c
2045 +@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
2046 + if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
2047 + u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
2048 +
2049 +- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
2050 ++ br_multicast_set_query_intvl(&br->multicast_ctx, val);
2051 + }
2052 +
2053 + if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
2054 +@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
2055 + if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
2056 + u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
2057 +
2058 +- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
2059 ++ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
2060 + }
2061 +
2062 + if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
2063 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
2064 +index fd5e7e74573ce..bd218c2b2cd97 100644
2065 +--- a/net/bridge/br_private.h
2066 ++++ b/net/bridge/br_private.h
2067 +@@ -28,6 +28,8 @@
2068 + #define BR_MAX_PORTS (1<<BR_PORT_BITS)
2069 +
2070 + #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
2071 ++#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
2072 ++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
2073 +
2074 + #define BR_HWDOM_MAX BITS_PER_LONG
2075 +
2076 +@@ -968,6 +970,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
2077 + int nest_attr);
2078 + size_t br_multicast_querier_state_size(void);
2079 + size_t br_rports_size(const struct net_bridge_mcast *brmctx);
2080 ++void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
2081 ++ unsigned long val);
2082 ++void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
2083 ++ unsigned long val);
2084 +
2085 + static inline bool br_group_is_l2(const struct br_ip *group)
2086 + {
2087 +@@ -1152,9 +1158,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
2088 + static inline bool
2089 + br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
2090 + {
2091 +- return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
2092 +- br_multicast_ctx_is_vlan(brmctx) &&
2093 +- !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
2094 ++ return br_multicast_ctx_is_vlan(brmctx) &&
2095 ++ (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
2096 ++ !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
2097 + }
2098 +
2099 + static inline bool
2100 +diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
2101 +index d9a89ddd03310..7b0c19772111c 100644
2102 +--- a/net/bridge/br_sysfs_br.c
2103 ++++ b/net/bridge/br_sysfs_br.c
2104 +@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
2105 + static int set_query_interval(struct net_bridge *br, unsigned long val,
2106 + struct netlink_ext_ack *extack)
2107 + {
2108 +- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
2109 ++ br_multicast_set_query_intvl(&br->multicast_ctx, val);
2110 + return 0;
2111 + }
2112 +
2113 +@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
2114 + static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
2115 + struct netlink_ext_ack *extack)
2116 + {
2117 +- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
2118 ++ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
2119 + return 0;
2120 + }
2121 +
2122 +diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
2123 +index 8ffd4ed2563c6..a6382973b3e70 100644
2124 +--- a/net/bridge/br_vlan_options.c
2125 ++++ b/net/bridge/br_vlan_options.c
2126 +@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
2127 + u64 val;
2128 +
2129 + val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
2130 +- v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
2131 ++ br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
2132 + *changed = true;
2133 + }
2134 + if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
2135 +@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
2136 + u64 val;
2137 +
2138 + val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
2139 +- v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
2140 ++ br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
2141 + *changed = true;
2142 + }
2143 + if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
2144 +diff --git a/net/core/dev.c b/net/core/dev.c
2145 +index 91f53eeb0e79f..e0878a500aa92 100644
2146 +--- a/net/core/dev.c
2147 ++++ b/net/core/dev.c
2148 +@@ -3934,8 +3934,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
2149 + return skb;
2150 +
2151 + /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
2152 +- qdisc_skb_cb(skb)->mru = 0;
2153 +- qdisc_skb_cb(skb)->post_ct = false;
2154 ++ tc_skb_cb(skb)->mru = 0;
2155 ++ tc_skb_cb(skb)->post_ct = false;
2156 + mini_qdisc_bstats_cpu_update(miniq, skb);
2157 +
2158 + switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
2159 +@@ -5088,8 +5088,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
2160 + }
2161 +
2162 + qdisc_skb_cb(skb)->pkt_len = skb->len;
2163 +- qdisc_skb_cb(skb)->mru = 0;
2164 +- qdisc_skb_cb(skb)->post_ct = false;
2165 ++ tc_skb_cb(skb)->mru = 0;
2166 ++ tc_skb_cb(skb)->post_ct = false;
2167 + skb->tc_at_ingress = 1;
2168 + mini_qdisc_bstats_cpu_update(miniq, skb);
2169 +
2170 +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
2171 +index 3a9422a5873eb..dcea653a5204a 100644
2172 +--- a/net/ipv4/af_inet.c
2173 ++++ b/net/ipv4/af_inet.c
2174 +@@ -2004,6 +2004,10 @@ static int __init inet_init(void)
2175 +
2176 + ip_init();
2177 +
2178 ++ /* Initialise per-cpu ipv4 mibs */
2179 ++ if (init_ipv4_mibs())
2180 ++ panic("%s: Cannot init ipv4 mibs\n", __func__);
2181 ++
2182 + /* Setup TCP slab cache for open requests. */
2183 + tcp_init();
2184 +
2185 +@@ -2034,12 +2038,6 @@ static int __init inet_init(void)
2186 +
2187 + if (init_inet_pernet_ops())
2188 + pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
2189 +- /*
2190 +- * Initialise per-cpu ipv4 mibs
2191 +- */
2192 +-
2193 +- if (init_ipv4_mibs())
2194 +- pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
2195 +
2196 + ipv4_proc_init();
2197 +
2198 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2199 +index 7bee95d8d2df0..8cd8c0bce0986 100644
2200 +--- a/net/ipv6/udp.c
2201 ++++ b/net/ipv6/udp.c
2202 +@@ -1204,7 +1204,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
2203 + kfree_skb(skb);
2204 + return -EINVAL;
2205 + }
2206 +- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
2207 ++ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
2208 + kfree_skb(skb);
2209 + return -EINVAL;
2210 + }
2211 +diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
2212 +index bb5f1650f11cb..c189b4c8a1823 100644
2213 +--- a/net/ncsi/ncsi-netlink.c
2214 ++++ b/net/ncsi/ncsi-netlink.c
2215 +@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
2216 + pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
2217 + if (!pnest)
2218 + return -ENOMEM;
2219 +- nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
2220 ++ rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
2221 ++ if (rc) {
2222 ++ nla_nest_cancel(skb, pnest);
2223 ++ return rc;
2224 ++ }
2225 + if ((0x1 << np->id) == ndp->package_whitelist)
2226 + nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
2227 + cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
2228 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
2229 +index 90866ae45573a..98e248b9c0b17 100644
2230 +--- a/net/sched/act_ct.c
2231 ++++ b/net/sched/act_ct.c
2232 +@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
2233 + u8 family, u16 zone, bool *defrag)
2234 + {
2235 + enum ip_conntrack_info ctinfo;
2236 +- struct qdisc_skb_cb cb;
2237 + struct nf_conn *ct;
2238 + int err = 0;
2239 + bool frag;
2240 ++ u16 mru;
2241 +
2242 + /* Previously seen (loopback)? Ignore. */
2243 + ct = nf_ct_get(skb, &ctinfo);
2244 +@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
2245 + return err;
2246 +
2247 + skb_get(skb);
2248 +- cb = *qdisc_skb_cb(skb);
2249 ++ mru = tc_skb_cb(skb)->mru;
2250 +
2251 + if (family == NFPROTO_IPV4) {
2252 + enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
2253 +@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
2254 +
2255 + if (!err) {
2256 + *defrag = true;
2257 +- cb.mru = IPCB(skb)->frag_max_size;
2258 ++ mru = IPCB(skb)->frag_max_size;
2259 + }
2260 + } else { /* NFPROTO_IPV6 */
2261 + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
2262 +@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
2263 +
2264 + if (!err) {
2265 + *defrag = true;
2266 +- cb.mru = IP6CB(skb)->frag_max_size;
2267 ++ mru = IP6CB(skb)->frag_max_size;
2268 + }
2269 + #else
2270 + err = -EOPNOTSUPP;
2271 +@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
2272 + }
2273 +
2274 + if (err != -EINPROGRESS)
2275 +- *qdisc_skb_cb(skb) = cb;
2276 ++ tc_skb_cb(skb)->mru = mru;
2277 + skb_clear_hash(skb);
2278 + skb->ignore_df = 1;
2279 + return err;
2280 +@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
2281 + tcf_action_update_bstats(&c->common, skb);
2282 +
2283 + if (clear) {
2284 +- qdisc_skb_cb(skb)->post_ct = false;
2285 ++ tc_skb_cb(skb)->post_ct = false;
2286 + ct = nf_ct_get(skb, &ctinfo);
2287 + if (ct) {
2288 + nf_conntrack_put(&ct->ct_general);
2289 +@@ -1048,7 +1048,7 @@ do_nat:
2290 + out_push:
2291 + skb_push_rcsum(skb, nh_ofs);
2292 +
2293 +- qdisc_skb_cb(skb)->post_ct = true;
2294 ++ tc_skb_cb(skb)->post_ct = true;
2295 + out_clear:
2296 + if (defrag)
2297 + qdisc_skb_cb(skb)->pkt_len = skb->len;
2298 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
2299 +index e54f0a42270c1..ff8a9383bf1c4 100644
2300 +--- a/net/sched/cls_api.c
2301 ++++ b/net/sched/cls_api.c
2302 +@@ -1617,12 +1617,14 @@ int tcf_classify(struct sk_buff *skb,
2303 +
2304 + /* If we missed on some chain */
2305 + if (ret == TC_ACT_UNSPEC && last_executed_chain) {
2306 ++ struct tc_skb_cb *cb = tc_skb_cb(skb);
2307 ++
2308 + ext = tc_skb_ext_alloc(skb);
2309 + if (WARN_ON_ONCE(!ext))
2310 + return TC_ACT_SHOT;
2311 + ext->chain = last_executed_chain;
2312 +- ext->mru = qdisc_skb_cb(skb)->mru;
2313 +- ext->post_ct = qdisc_skb_cb(skb)->post_ct;
2314 ++ ext->mru = cb->mru;
2315 ++ ext->post_ct = cb->post_ct;
2316 + }
2317 +
2318 + return ret;
2319 +diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2320 +index eb6345a027e13..161bd91c8c6b0 100644
2321 +--- a/net/sched/cls_flower.c
2322 ++++ b/net/sched/cls_flower.c
2323 +@@ -19,6 +19,7 @@
2324 +
2325 + #include <net/sch_generic.h>
2326 + #include <net/pkt_cls.h>
2327 ++#include <net/pkt_sched.h>
2328 + #include <net/ip.h>
2329 + #include <net/flow_dissector.h>
2330 + #include <net/geneve.h>
2331 +@@ -309,7 +310,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2332 + struct tcf_result *res)
2333 + {
2334 + struct cls_fl_head *head = rcu_dereference_bh(tp->root);
2335 +- bool post_ct = qdisc_skb_cb(skb)->post_ct;
2336 ++ bool post_ct = tc_skb_cb(skb)->post_ct;
2337 + struct fl_flow_key skb_key;
2338 + struct fl_flow_mask *mask;
2339 + struct cls_fl_filter *f;
2340 +diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
2341 +index 8c06381391d6f..5ded4c8672a64 100644
2342 +--- a/net/sched/sch_frag.c
2343 ++++ b/net/sched/sch_frag.c
2344 +@@ -1,6 +1,7 @@
2345 + // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2346 + #include <net/netlink.h>
2347 + #include <net/sch_generic.h>
2348 ++#include <net/pkt_sched.h>
2349 + #include <net/dst.h>
2350 + #include <net/ip.h>
2351 + #include <net/ip6_fib.h>
2352 +@@ -137,7 +138,7 @@ err:
2353 +
2354 + int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
2355 + {
2356 +- u16 mru = qdisc_skb_cb(skb)->mru;
2357 ++ u16 mru = tc_skb_cb(skb)->mru;
2358 + int err;
2359 +
2360 + if (mru && skb->len > mru + skb->dev->hard_header_len)
2361 +diff --git a/net/sctp/diag.c b/net/sctp/diag.c
2362 +index 760b367644c12..a7d6231715013 100644
2363 +--- a/net/sctp/diag.c
2364 ++++ b/net/sctp/diag.c
2365 +@@ -290,9 +290,8 @@ out:
2366 + return err;
2367 + }
2368 +
2369 +-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
2370 ++static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
2371 + {
2372 +- struct sctp_endpoint *ep = tsp->asoc->ep;
2373 + struct sctp_comm_param *commp = p;
2374 + struct sock *sk = ep->base.sk;
2375 + struct sk_buff *skb = commp->skb;
2376 +@@ -302,6 +301,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
2377 + int err = 0;
2378 +
2379 + lock_sock(sk);
2380 ++ if (ep != tsp->asoc->ep)
2381 ++ goto release;
2382 + list_for_each_entry(assoc, &ep->asocs, asocs) {
2383 + if (cb->args[4] < cb->args[1])
2384 + goto next;
2385 +@@ -344,9 +345,8 @@ release:
2386 + return err;
2387 + }
2388 +
2389 +-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
2390 ++static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
2391 + {
2392 +- struct sctp_endpoint *ep = tsp->asoc->ep;
2393 + struct sctp_comm_param *commp = p;
2394 + struct sock *sk = ep->base.sk;
2395 + const struct inet_diag_req_v2 *r = commp->r;
2396 +@@ -505,8 +505,8 @@ skip:
2397 + if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
2398 + goto done;
2399 +
2400 +- sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
2401 +- net, &pos, &commp);
2402 ++ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
2403 ++ net, &pos, &commp);
2404 + cb->args[2] = pos;
2405 +
2406 + done:
2407 +diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
2408 +index 48c9c2c7602f7..efffde7f2328e 100644
2409 +--- a/net/sctp/endpointola.c
2410 ++++ b/net/sctp/endpointola.c
2411 +@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
2412 + }
2413 +
2414 + /* Final destructor for endpoint. */
2415 ++static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
2416 ++{
2417 ++ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
2418 ++ struct sock *sk = ep->base.sk;
2419 ++
2420 ++ sctp_sk(sk)->ep = NULL;
2421 ++ sock_put(sk);
2422 ++
2423 ++ kfree(ep);
2424 ++ SCTP_DBG_OBJCNT_DEC(ep);
2425 ++}
2426 ++
2427 + static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
2428 + {
2429 + struct sock *sk;
2430 +@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
2431 + if (sctp_sk(sk)->bind_hash)
2432 + sctp_put_port(sk);
2433 +
2434 +- sctp_sk(sk)->ep = NULL;
2435 +- /* Give up our hold on the sock */
2436 +- sock_put(sk);
2437 +-
2438 +- kfree(ep);
2439 +- SCTP_DBG_OBJCNT_DEC(ep);
2440 ++ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
2441 + }
2442 +
2443 + /* Hold a reference to an endpoint. */
2444 +-void sctp_endpoint_hold(struct sctp_endpoint *ep)
2445 ++int sctp_endpoint_hold(struct sctp_endpoint *ep)
2446 + {
2447 +- refcount_inc(&ep->base.refcnt);
2448 ++ return refcount_inc_not_zero(&ep->base.refcnt);
2449 + }
2450 +
2451 + /* Release a reference to an endpoint and clean up if there are
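The endpointola.c hunks above defer the final teardown through call_rcu() and turn sctp_endpoint_hold() into a conditional grab, so a lookup racing with the last put either gets a live reference or cleanly fails instead of resurrecting a dying endpoint. A userspace sketch of the refcount_inc_not_zero() idea using C11 atomics; the RCU grace period itself is not modeled:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct endpoint { atomic_int refcnt; };

static bool endpoint_hold(struct endpoint *ep)
{
    int old = atomic_load(&ep->refcnt);

    /* increment only while the count is still non-zero */
    while (old != 0)
        if (atomic_compare_exchange_weak(&ep->refcnt, &old, old + 1))
            return true;
    return false;
}

static void endpoint_put(struct endpoint *ep)
{
    if (atomic_fetch_sub(&ep->refcnt, 1) == 1)
        printf("last ref dropped: free deferred via call_rcu()\n");
}

int main(void)
{
    struct endpoint ep = { 1 };

    if (endpoint_hold(&ep))       /* succeeds: 1 -> 2 */
        endpoint_put(&ep);
    endpoint_put(&ep);            /* final put */

    printf("hold after death: %s\n", endpoint_hold(&ep) ? "yes" : "no");
    return 0;
}

This is why sctp_transport_traverse_process() in the socket.c hunk below checks the return value of sctp_endpoint_hold() before invoking the callback.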
2452 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2453 +index 6b937bfd47515..d2215d24634e8 100644
2454 +--- a/net/sctp/socket.c
2455 ++++ b/net/sctp/socket.c
2456 +@@ -5338,11 +5338,12 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
2457 + }
2458 + EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
2459 +
2460 +-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
2461 +- int (*cb_done)(struct sctp_transport *, void *),
2462 +- struct net *net, int *pos, void *p) {
2463 ++int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
2464 ++ struct net *net, int *pos, void *p)
2465 ++{
2466 + struct rhashtable_iter hti;
2467 + struct sctp_transport *tsp;
2468 ++ struct sctp_endpoint *ep;
2469 + int ret;
2470 +
2471 + again:
2472 +@@ -5351,26 +5352,32 @@ again:
2473 +
2474 + tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
2475 + for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
2476 +- ret = cb(tsp, p);
2477 +- if (ret)
2478 +- break;
2479 ++ ep = tsp->asoc->ep;
2480 ++ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
2481 ++ ret = cb(ep, tsp, p);
2482 ++ if (ret)
2483 ++ break;
2484 ++ sctp_endpoint_put(ep);
2485 ++ }
2486 + (*pos)++;
2487 + sctp_transport_put(tsp);
2488 + }
2489 + sctp_transport_walk_stop(&hti);
2490 +
2491 + if (ret) {
2492 +- if (cb_done && !cb_done(tsp, p)) {
2493 ++ if (cb_done && !cb_done(ep, tsp, p)) {
2494 + (*pos)++;
2495 ++ sctp_endpoint_put(ep);
2496 + sctp_transport_put(tsp);
2497 + goto again;
2498 + }
2499 ++ sctp_endpoint_put(ep);
2500 + sctp_transport_put(tsp);
2501 + }
2502 +
2503 + return ret;
2504 + }
2505 +-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
2506 ++EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
2507 +
2508 + /* 7.2.1 Association Status (SCTP_STATUS)
2509 +
2510 +diff --git a/net/smc/smc.h b/net/smc/smc.h
2511 +index d65e15f0c944c..e6919fe31617b 100644
2512 +--- a/net/smc/smc.h
2513 ++++ b/net/smc/smc.h
2514 +@@ -170,6 +170,11 @@ struct smc_connection {
2515 + u16 tx_cdc_seq; /* sequence # for CDC send */
2516 + u16 tx_cdc_seq_fin; /* sequence # - tx completed */
2517 + spinlock_t send_lock; /* protect wr_sends */
2518 ++ atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe
2519 ++ * - inc when posting a wqe,
2520 ++ * - dec on the polled tx cqe
2521 ++ */
2522 ++ wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr */
2523 + struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
2524 + u32 tx_off; /* base offset in peer rmb */
2525 +
2526 +diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
2527 +index 99acd337ba90d..84c8a4374fddd 100644
2528 +--- a/net/smc/smc_cdc.c
2529 ++++ b/net/smc/smc_cdc.c
2530 +@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
2531 + struct smc_sock *smc;
2532 + int diff;
2533 +
2534 +- if (!conn)
2535 +- /* already dismissed */
2536 +- return;
2537 +-
2538 + smc = container_of(conn, struct smc_sock, conn);
2539 + bh_lock_sock(&smc->sk);
2540 + if (!wc_status) {
2541 +@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
2542 + conn);
2543 + conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
2544 + }
2545 ++
2546 ++ if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
2547 ++ unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
2548 ++ wake_up(&conn->cdc_pend_tx_wq);
2549 ++ WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
2550 ++
2551 + smc_tx_sndbuf_nonfull(smc);
2552 + bh_unlock_sock(&smc->sk);
2553 + }
2554 +@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
2555 + conn->tx_cdc_seq++;
2556 + conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
2557 + smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
2558 ++
2559 ++ atomic_inc(&conn->cdc_pend_tx_wr);
2560 ++ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
2561 ++
2562 + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
2563 + if (!rc) {
2564 + smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
2565 +@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
2566 + } else {
2567 + conn->tx_cdc_seq--;
2568 + conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
2569 ++ atomic_dec(&conn->cdc_pend_tx_wr);
2570 + }
2571 +
2572 + return rc;
2573 +@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
2574 + peer->token = htonl(local->token);
2575 + peer->prod_flags.failover_validation = 1;
2576 +
2577 ++ /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
2578 ++ * can handle it properly
2579 ++ */
2580 ++ smc_cdc_add_pending_send(conn, pend);
2581 ++
2582 ++ atomic_inc(&conn->cdc_pend_tx_wr);
2583 ++ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
2584 ++
2585 + rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
2586 ++ if (unlikely(rc))
2587 ++ atomic_dec(&conn->cdc_pend_tx_wr);
2588 ++
2589 + return rc;
2590 + }
2591 +
2592 +@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
2593 + return rc;
2594 + }
2595 +
2596 +-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
2597 +- unsigned long data)
2598 ++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
2599 + {
2600 +- struct smc_connection *conn = (struct smc_connection *)data;
2601 +- struct smc_cdc_tx_pend *cdc_pend =
2602 +- (struct smc_cdc_tx_pend *)tx_pend;
2603 +-
2604 +- return cdc_pend->conn == conn;
2605 +-}
2606 +-
2607 +-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
2608 +-{
2609 +- struct smc_cdc_tx_pend *cdc_pend =
2610 +- (struct smc_cdc_tx_pend *)tx_pend;
2611 +-
2612 +- cdc_pend->conn = NULL;
2613 +-}
2614 +-
2615 +-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
2616 +-{
2617 +- struct smc_link *link = conn->lnk;
2618 +-
2619 +- smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
2620 +- smc_cdc_tx_filter, smc_cdc_tx_dismisser,
2621 +- (unsigned long)conn);
2622 ++ wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
2623 + }
2624 +
2625 + /* Send a SMC-D CDC header.
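The smc_cdc.c hunks above replace the filter/dismiss machinery with plain accounting: bump cdc_pend_tx_wr before posting a CDC send, undo the bump if the post fails, drop it from the completion handler, and let teardown wait for the counter to hit zero. A single-threaded sketch of that protocol with invented names; the wait queue wakeup is reduced to a printf:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pend_tx_wr;

static int post_send(int fail)
{
    atomic_fetch_add(&pend_tx_wr, 1);     /* before the post, as in the patch */
    if (fail) {
        atomic_fetch_sub(&pend_tx_wr, 1); /* undo on a failed post */
        return -1;
    }
    return 0;
}

static void on_tx_completion(void)
{
    if (atomic_fetch_sub(&pend_tx_wr, 1) == 1)
        printf("counter hit zero: wake wait_pend_tx_wr() sleeper\n");
}

int main(void)
{
    assert(post_send(0) == 0);
    assert(post_send(1) == -1);   /* the failed post leaks nothing */
    on_tx_completion();           /* completes the successful send */
    assert(atomic_load(&pend_tx_wr) == 0);
    return 0;
}

Incrementing before the post is essential: once the work request is handed to the hardware, the completion can race with the submitter, so the counter must already account for it.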
2626 +diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
2627 +index 0a0a89abd38b2..696cc11f2303b 100644
2628 +--- a/net/smc/smc_cdc.h
2629 ++++ b/net/smc/smc_cdc.h
2630 +@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
2631 + struct smc_wr_buf **wr_buf,
2632 + struct smc_rdma_wr **wr_rdma_buf,
2633 + struct smc_cdc_tx_pend **pend);
2634 +-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
2635 ++void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
2636 + int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
2637 + struct smc_cdc_tx_pend *pend);
2638 + int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
2639 +diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
2640 +index 5a9c22ee75fa4..506b8498623b0 100644
2641 +--- a/net/smc/smc_core.c
2642 ++++ b/net/smc/smc_core.c
2643 +@@ -604,7 +604,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
2644 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2645 + struct smc_link *lnk = &lgr->lnk[i];
2646 +
2647 +- if (smc_link_usable(lnk))
2648 ++ if (smc_link_sendable(lnk))
2649 + lnk->state = SMC_LNK_INACTIVE;
2650 + }
2651 + wake_up_all(&lgr->llc_msg_waiter);
2652 +@@ -1056,7 +1056,7 @@ void smc_conn_free(struct smc_connection *conn)
2653 + smc_ism_unset_conn(conn);
2654 + tasklet_kill(&conn->rx_tsklet);
2655 + } else {
2656 +- smc_cdc_tx_dismiss_slots(conn);
2657 ++ smc_cdc_wait_pend_tx_wr(conn);
2658 + if (current_work() != &conn->abort_work)
2659 + cancel_work_sync(&conn->abort_work);
2660 + }
2661 +@@ -1133,7 +1133,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
2662 + smc_llc_link_clear(lnk, log);
2663 + smcr_buf_unmap_lgr(lnk);
2664 + smcr_rtoken_clear_link(lnk);
2665 +- smc_ib_modify_qp_reset(lnk);
2666 ++ smc_ib_modify_qp_error(lnk);
2667 + smc_wr_free_link(lnk);
2668 + smc_ib_destroy_queue_pair(lnk);
2669 + smc_ib_dealloc_protection_domain(lnk);
2670 +@@ -1264,7 +1264,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
2671 + else
2672 + tasklet_unlock_wait(&conn->rx_tsklet);
2673 + } else {
2674 +- smc_cdc_tx_dismiss_slots(conn);
2675 ++ smc_cdc_wait_pend_tx_wr(conn);
2676 + }
2677 + smc_lgr_unregister_conn(conn);
2678 + smc_close_active_abort(smc);
2679 +@@ -1387,11 +1387,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
2680 + /* Called when an SMCR device is removed or the smc module is unloaded.
2681 + * If smcibdev is given, all SMCR link groups using this device are terminated.
2682 + * If smcibdev is NULL, all SMCR link groups are terminated.
2683 ++ *
2684 ++ * We must wait here for the QPs to be destroyed before we destroy the
2685 ++ * CQs, or we won't receive any CQEs and cdc_pend_tx_wr cannot reach 0,
2686 ++ * so the smc_sock cannot be released.
2687 + */
2688 + void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
2689 + {
2690 + struct smc_link_group *lgr, *lg;
2691 + LIST_HEAD(lgr_free_list);
2692 ++ LIST_HEAD(lgr_linkdown_list);
2693 + int i;
2694 +
2695 + spin_lock_bh(&smc_lgr_list.lock);
2696 +@@ -1403,7 +1408,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
2697 + list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
2698 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2699 + if (lgr->lnk[i].smcibdev == smcibdev)
2700 +- smcr_link_down_cond_sched(&lgr->lnk[i]);
2701 ++ list_move_tail(&lgr->list, &lgr_linkdown_list);
2702 + }
2703 + }
2704 + }
2705 +@@ -1415,6 +1420,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
2706 + __smc_lgr_terminate(lgr, false);
2707 + }
2708 +
2709 ++ list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
2710 ++ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2711 ++ if (lgr->lnk[i].smcibdev == smcibdev) {
2712 ++ mutex_lock(&lgr->llc_conf_mutex);
2713 ++ smcr_link_down_cond(&lgr->lnk[i]);
2714 ++ mutex_unlock(&lgr->llc_conf_mutex);
2715 ++ }
2716 ++ }
2717 ++ }
2718 ++
2719 + if (smcibdev) {
2720 + if (atomic_read(&smcibdev->lnk_cnt))
2721 + wait_event(smcibdev->lnks_deleted,
2722 +@@ -1514,7 +1529,6 @@ static void smcr_link_down(struct smc_link *lnk)
2723 + if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
2724 + return;
2725 +
2726 +- smc_ib_modify_qp_reset(lnk);
2727 + to_lnk = smc_switch_conns(lgr, lnk, true);
2728 + if (!to_lnk) { /* no backup link available */
2729 + smcr_link_clear(lnk, true);
2730 +@@ -1742,6 +1756,7 @@ create:
2731 + conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
2732 + conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
2733 + conn->urg_state = SMC_URG_READ;
2734 ++ init_waitqueue_head(&conn->cdc_pend_tx_wq);
2735 + INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
2736 + if (ini->is_smcd) {
2737 + conn->rx_off = sizeof(struct smcd_cdc_msg);
2738 +diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
2739 +index c043ecdca5c44..51a3e8248ade2 100644
2740 +--- a/net/smc/smc_core.h
2741 ++++ b/net/smc/smc_core.h
2742 +@@ -366,6 +366,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
2743 + return true;
2744 + }
2745 +
2746 ++static inline bool smc_link_sendable(struct smc_link *lnk)
2747 ++{
2748 ++ return smc_link_usable(lnk) &&
2749 ++ lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
2750 ++}
2751 ++
2752 + static inline bool smc_link_active(struct smc_link *lnk)
2753 + {
2754 + return lnk->state == SMC_LNK_ACTIVE;
2755 +diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
2756 +index a8845343d183e..f0ec1f1d50fac 100644
2757 +--- a/net/smc/smc_ib.c
2758 ++++ b/net/smc/smc_ib.c
2759 +@@ -101,12 +101,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
2760 + IB_QP_MAX_QP_RD_ATOMIC);
2761 + }
2762 +
2763 +-int smc_ib_modify_qp_reset(struct smc_link *lnk)
2764 ++int smc_ib_modify_qp_error(struct smc_link *lnk)
2765 + {
2766 + struct ib_qp_attr qp_attr;
2767 +
2768 + memset(&qp_attr, 0, sizeof(qp_attr));
2769 +- qp_attr.qp_state = IB_QPS_RESET;
2770 ++ qp_attr.qp_state = IB_QPS_ERR;
2771 + return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
2772 + }
2773 +
2774 +diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
2775 +index 3085f5180da79..6967c3d52b03e 100644
2776 +--- a/net/smc/smc_ib.h
2777 ++++ b/net/smc/smc_ib.h
2778 +@@ -79,6 +79,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
2779 + int smc_ib_ready_link(struct smc_link *lnk);
2780 + int smc_ib_modify_qp_rts(struct smc_link *lnk);
2781 + int smc_ib_modify_qp_reset(struct smc_link *lnk);
2782 ++int smc_ib_modify_qp_error(struct smc_link *lnk);
2783 + long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
2784 + int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
2785 + struct smc_buf_desc *buf_slot, u8 link_idx);
2786 +diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
2787 +index f1d323439a2af..ee1f0fdba0855 100644
2788 +--- a/net/smc/smc_llc.c
2789 ++++ b/net/smc/smc_llc.c
2790 +@@ -1358,7 +1358,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
2791 + delllc.reason = htonl(rsn);
2792 +
2793 + for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
2794 +- if (!smc_link_usable(&lgr->lnk[i]))
2795 ++ if (!smc_link_sendable(&lgr->lnk[i]))
2796 + continue;
2797 + if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
2798 + break;
2799 +diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
2800 +index a419e9af36b98..59ca1a2d5c650 100644
2801 +--- a/net/smc/smc_wr.c
2802 ++++ b/net/smc/smc_wr.c
2803 +@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
2804 + }
2805 +
2806 + /* wait till all pending tx work requests on the given link are completed */
2807 +-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
2808 ++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
2809 + {
2810 +- if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
2811 +- SMC_WR_TX_WAIT_PENDING_TIME))
2812 +- return 0;
2813 +- else /* timeout */
2814 +- return -EPIPE;
2815 ++ wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
2816 + }
2817 +
2818 + static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
2819 +@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
2820 + struct smc_wr_tx_pend pnd_snd;
2821 + struct smc_link *link;
2822 + u32 pnd_snd_idx;
2823 +- int i;
2824 +
2825 + link = wc->qp->qp_context;
2826 +
2827 +@@ -115,14 +110,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
2828 + if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
2829 + return;
2830 + if (wc->status) {
2831 +- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
2832 +- /* clear full struct smc_wr_tx_pend including .priv */
2833 +- memset(&link->wr_tx_pends[i], 0,
2834 +- sizeof(link->wr_tx_pends[i]));
2835 +- memset(&link->wr_tx_bufs[i], 0,
2836 +- sizeof(link->wr_tx_bufs[i]));
2837 +- clear_bit(i, link->wr_tx_mask);
2838 +- }
2839 + /* terminate link */
2840 + smcr_link_down_cond_sched(link);
2841 + }
2842 +@@ -169,7 +156,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
2843 + static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
2844 + {
2845 + *idx = link->wr_tx_cnt;
2846 +- if (!smc_link_usable(link))
2847 ++ if (!smc_link_sendable(link))
2848 + return -ENOLINK;
2849 + for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
2850 + if (!test_and_set_bit(*idx, link->wr_tx_mask))
2851 +@@ -212,7 +199,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
2852 + } else {
2853 + rc = wait_event_interruptible_timeout(
2854 + link->wr_tx_wait,
2855 +- !smc_link_usable(link) ||
2856 ++ !smc_link_sendable(link) ||
2857 + lgr->terminating ||
2858 + (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
2859 + SMC_WR_TX_WAIT_FREE_SLOT_TIME);
2860 +@@ -288,18 +275,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
2861 + unsigned long timeout)
2862 + {
2863 + struct smc_wr_tx_pend *pend;
2864 ++ u32 pnd_idx;
2865 + int rc;
2866 +
2867 + pend = container_of(priv, struct smc_wr_tx_pend, priv);
2868 + pend->compl_requested = 1;
2869 +- init_completion(&link->wr_tx_compl[pend->idx]);
2870 ++ pnd_idx = pend->idx;
2871 ++ init_completion(&link->wr_tx_compl[pnd_idx]);
2872 +
2873 + rc = smc_wr_tx_send(link, priv);
2874 + if (rc)
2875 + return rc;
2876 + /* wait for completion by smc_wr_tx_process_cqe() */
2877 + rc = wait_for_completion_interruptible_timeout(
2878 +- &link->wr_tx_compl[pend->idx], timeout);
2879 ++ &link->wr_tx_compl[pnd_idx], timeout);
2880 + if (rc <= 0)
2881 + rc = -ENODATA;
2882 + if (rc > 0)
2883 +@@ -349,25 +338,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
2884 + return rc;
2885 + }
2886 +
2887 +-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
2888 +- smc_wr_tx_filter filter,
2889 +- smc_wr_tx_dismisser dismisser,
2890 +- unsigned long data)
2891 +-{
2892 +- struct smc_wr_tx_pend_priv *tx_pend;
2893 +- struct smc_wr_rx_hdr *wr_tx;
2894 +- int i;
2895 +-
2896 +- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
2897 +- wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
2898 +- if (wr_tx->type != wr_tx_hdr_type)
2899 +- continue;
2900 +- tx_pend = &link->wr_tx_pends[i].priv;
2901 +- if (filter(tx_pend, data))
2902 +- dismisser(tx_pend);
2903 +- }
2904 +-}
2905 +-
2906 + /****************************** receive queue ********************************/
2907 +
2908 + int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
2909 +@@ -572,10 +542,7 @@ void smc_wr_free_link(struct smc_link *lnk)
2910 + smc_wr_wakeup_reg_wait(lnk);
2911 + smc_wr_wakeup_tx_wait(lnk);
2912 +
2913 +- if (smc_wr_tx_wait_no_pending_sends(lnk))
2914 +- memset(lnk->wr_tx_mask, 0,
2915 +- BITS_TO_LONGS(SMC_WR_BUF_CNT) *
2916 +- sizeof(*lnk->wr_tx_mask));
2917 ++ smc_wr_tx_wait_no_pending_sends(lnk);
2918 + wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
2919 + wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
2920 +
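Two details in the smc_wr.c hunks are worth spelling out. First, smc_wr_tx_wait_no_pending_sends() can now wait without a timeout precisely because the QP is flushed via the error state, so every pending send is guaranteed a (possibly errored) completion. Second, smc_wr_tx_send_wait() caches pend->idx before posting: once smc_wr_tx_send() returns, the completion handler may recycle the slot that pend points into, so dereferencing it again would race. A condensed sketch of that copy-then-wait pattern, taken from the hunk above:

    u32 pnd_idx = pend->idx;          /* copy while the slot is ours */
    rc = smc_wr_tx_send(link, priv);  /* slot may be reused from here on */
    if (!rc)
            rc = wait_for_completion_interruptible_timeout(
                    &link->wr_tx_compl[pnd_idx], timeout); /* use the copy */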
2921 +diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
2922 +index 2bc626f230a56..cb58e60078f57 100644
2923 +--- a/net/smc/smc_wr.h
2924 ++++ b/net/smc/smc_wr.h
2925 +@@ -22,7 +22,6 @@
2926 + #define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
2927 +
2928 + #define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
2929 +-#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ)
2930 +
2931 + #define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
2932 +
2933 +@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
2934 +
2935 + static inline bool smc_wr_tx_link_hold(struct smc_link *link)
2936 + {
2937 +- if (!smc_link_usable(link))
2938 ++ if (!smc_link_sendable(link))
2939 + return false;
2940 + atomic_inc(&link->wr_tx_refcnt);
2941 + return true;
2942 +@@ -122,7 +121,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
2943 + smc_wr_tx_filter filter,
2944 + smc_wr_tx_dismisser dismisser,
2945 + unsigned long data);
2946 +-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
2947 ++void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
2948 +
2949 + int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
2950 + int smc_wr_rx_post_init(struct smc_link *link);
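smc_wr_tx_link_hold() now also keys off smc_link_sendable(), so a caller can no longer pin a link whose QP has already been flushed. A minimal sketch of the hold/put pairing, assuming the put side (defined alongside in this header upstream) drops wr_tx_refcnt and wakes wr_tx_wait; do_send() is hypothetical:

    if (!smc_wr_tx_link_hold(link))
            return -ENOLINK;          /* link is no longer sendable */
    rc = do_send(link);               /* safe: refcount pins the link */
    smc_wr_tx_link_put(link);         /* drop ref, may wake wr_tx_wait */

smc_wr_free_link() above waits for that refcount to reach zero before tearing the link down.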
2951 +diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
2952 +index 52a000b057a57..3ccb2c70add4d 100755
2953 +--- a/scripts/recordmcount.pl
2954 ++++ b/scripts/recordmcount.pl
2955 +@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
2956 +
2957 + } elsif ($arch eq "s390" && $bits == 64) {
2958 + if ($cc =~ /-DCC_USING_HOTPATCH/) {
2959 +- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
2960 ++ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
2961 + $mcount_adjust = 0;
2962 + }
2963 + $alignment = 8;
2964 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2965 +index 51432ea74044e..9309e62d46eda 100644
2966 +--- a/security/selinux/hooks.c
2967 ++++ b/security/selinux/hooks.c
2968 +@@ -5812,7 +5812,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
2969 + struct common_audit_data ad;
2970 + struct lsm_network_audit net = {0,};
2971 + char *addrp;
2972 +- u8 proto;
2973 ++ u8 proto = 0;
2974 +
2975 + if (sk == NULL)
2976 + return NF_ACCEPT;
2977 +diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
2978 +index 1da2e3722b126..6799b1122c9d8 100644
2979 +--- a/security/tomoyo/util.c
2980 ++++ b/security/tomoyo/util.c
2981 +@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
2982 + return false;
2983 + if (!domain)
2984 + return true;
2985 ++ if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
2986 ++ return false;
2987 + list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
2988 + srcu_read_lock_held(&tomoyo_ss)) {
2989 + u16 perm;
2990 +- u8 i;
2991 +
2992 + if (ptr->is_deleted)
2993 + continue;
2994 +@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
2995 + */
2996 + switch (ptr->type) {
2997 + case TOMOYO_TYPE_PATH_ACL:
2998 +- data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
2999 ++ perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
3000 + break;
3001 + case TOMOYO_TYPE_PATH2_ACL:
3002 +- data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
3003 ++ perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
3004 + break;
3005 + case TOMOYO_TYPE_PATH_NUMBER_ACL:
3006 +- data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
3007 ++ perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
3008 + ->perm);
3009 + break;
3010 + case TOMOYO_TYPE_MKDEV_ACL:
3011 +- data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
3012 ++ perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
3013 + break;
3014 + case TOMOYO_TYPE_INET_ACL:
3015 +- data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
3016 ++ perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
3017 + break;
3018 + case TOMOYO_TYPE_UNIX_ACL:
3019 +- data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
3020 ++ perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
3021 + break;
3022 + case TOMOYO_TYPE_MANUAL_TASK_ACL:
3023 + perm = 0;
3024 +@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
3025 + default:
3026 + perm = 1;
3027 + }
3028 +- for (i = 0; i < 16; i++)
3029 +- if (perm & (1 << i))
3030 +- count++;
3031 ++ count += hweight16(perm);
3032 + }
3033 + if (count < tomoyo_profile(domain->ns, domain->profile)->
3034 + pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
3035 + return true;
3036 +- if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
3037 +- domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
3038 +- /* r->granted = false; */
3039 +- tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
3040 ++ WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
3041 ++ /* r->granted = false; */
3042 ++ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
3043 + #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
3044 +- pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
3045 +- domain->domainname->name);
3046 ++ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
3047 ++ domain->domainname->name);
3048 + #endif
3049 +- }
3050 + return false;
3051 + }
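Three independent cleanups meet in this TOMOYO hunk. The data_race() macro evaluates its expression with KCSAN checking suppressed and yields the value, so the idiomatic form annotates only the lockless read and assigns outside the macro; the open-coded 16-bit popcount loop becomes hweight16(); and the quota-warned flag is read early with READ_ONCE() so the whole ACL walk is skipped once the warning has fired. A minimal sketch of the first two, with acl standing in for any of the container_of() results above:

    u16 perm = data_race(acl->perm);   /* annotate just the racy read */
    count += hweight16(perm);          /* popcount of the 16-bit mask */

The WRITE_ONCE()/READ_ONCE() pair on flags[TOMOYO_DIF_QUOTA_WARNED] makes the flag handoff itself tolerable to concurrent readers.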
3052 +diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
3053 +index c0123bc31c0dd..b7758dbe23714 100644
3054 +--- a/sound/hda/intel-sdw-acpi.c
3055 ++++ b/sound/hda/intel-sdw-acpi.c
3056 +@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
3057 + return AE_NOT_FOUND;
3058 + }
3059 +
3060 +- info->handle = handle;
3061 +-
3062 + /*
3063 + * On some Intel platforms, multiple children of the HDAS
3064 + * device can be found, but only one of them is the SoundWire
3065 +@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
3066 + if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
3067 + return AE_OK; /* keep going */
3068 +
3069 ++ /* found the correct SoundWire controller */
3070 ++ info->handle = handle;
3071 ++
3072 + /* device found, stop namespace walk */
3073 + return AE_CTRL_TERMINATE;
3074 + }
3075 +@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
3076 + acpi_status status;
3077 +
3078 + info->handle = NULL;
3079 ++ /*
3080 ++ * In the HDAS ACPI scope, 'SNDW' may be either the child of
3081 ++ * 'HDAS' or the grandchild of 'HDAS'. So let's go through
3082 ++ * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
3083 ++ * device.
3084 ++ */
3085 + status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
3086 +- parent_handle, 1,
3087 ++ parent_handle, 2,
3088 + sdw_intel_acpi_cb,
3089 + NULL, info, NULL);
3090 + if (ACPI_FAILURE(status) || info->handle == NULL)
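The depth bump matters because acpi_walk_namespace()'s third argument is the maximum depth below the start object: 1 visits only direct children of HDAS, while 2 also reaches grandchildren, covering both placements of the SNDW device. The callback protocol is the usual ACPICA one, sketched here (the predicate name is hypothetical):

    static acpi_status find_cb(acpi_handle handle, u32 level,
                               void *ctx, void **ret)
    {
            if (!is_wanted(handle))          /* hypothetical test */
                    return AE_OK;            /* keep walking */
            *(acpi_handle *)ctx = handle;    /* record the match */
            return AE_CTRL_TERMINATE;        /* stop the walk early */
    }

Moving the info->handle assignment after the link-type check also guarantees the handle is only recorded for the SoundWire controller actually matched, not for the last child visited.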
3091 +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
3092 +index c32c2eb16d7df..18b56256bb6ff 100644
3093 +--- a/tools/perf/builtin-script.c
3094 ++++ b/tools/perf/builtin-script.c
3095 +@@ -2463,7 +2463,7 @@ static int process_switch_event(struct perf_tool *tool,
3096 + if (perf_event__process_switch(tool, event, sample, machine) < 0)
3097 + return -1;
3098 +
3099 +- if (scripting_ops && scripting_ops->process_switch)
3100 ++ if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
3101 + scripting_ops->process_switch(event, sample, machine);
3102 +
3103 + if (!script->show_switch_events)
3104 +diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
3105 +index 1d3a189a9a547..66452a8ec3586 100644
3106 +--- a/tools/perf/scripts/python/intel-pt-events.py
3107 ++++ b/tools/perf/scripts/python/intel-pt-events.py
3108 +@@ -32,8 +32,7 @@ try:
3109 + except:
3110 + broken_pipe_exception = IOError
3111 +
3112 +-glb_switch_str = None
3113 +-glb_switch_printed = True
3114 ++glb_switch_str = {}
3115 + glb_insn = False
3116 + glb_disassembler = None
3117 + glb_src = False
3118 +@@ -70,6 +69,7 @@ def trace_begin():
3119 + ap = argparse.ArgumentParser(usage = "", add_help = False)
3120 + ap.add_argument("--insn-trace", action='store_true')
3121 + ap.add_argument("--src-trace", action='store_true')
3122 ++ ap.add_argument("--all-switch-events", action='store_true')
3123 + global glb_args
3124 + global glb_insn
3125 + global glb_src
3126 +@@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
3127 + print(start_str, src_str)
3128 +
3129 + def do_process_event(param_dict):
3130 +- global glb_switch_printed
3131 +- if not glb_switch_printed:
3132 +- print(glb_switch_str)
3133 +- glb_switch_printed = True
3134 + event_attr = param_dict["attr"]
3135 + sample = param_dict["sample"]
3136 + raw_buf = param_dict["raw_buf"]
3137 +@@ -274,6 +270,11 @@ def do_process_event(param_dict):
3138 + dso = get_optional(param_dict, "dso")
3139 + symbol = get_optional(param_dict, "symbol")
3140 +
3141 ++ cpu = sample["cpu"]
3142 ++ if cpu in glb_switch_str:
3143 ++ print(glb_switch_str[cpu])
3144 ++ del glb_switch_str[cpu]
3145 ++
3146 + if name[0:12] == "instructions":
3147 + if glb_src:
3148 + print_srccode(comm, param_dict, sample, symbol, dso, True)
3149 +@@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
3150 + sys.exit(1)
3151 +
3152 + def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
3153 +- global glb_switch_printed
3154 +- global glb_switch_str
3155 + if out:
3156 + out_str = "Switch out "
3157 + else:
3158 +@@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
3159 + machine_str = ""
3160 + else:
3161 + machine_str = "machine PID %d" % machine_pid
3162 +- glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
3163 ++ switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
3164 + (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
3165 +- glb_switch_printed = False
3166 ++ if glb_args.all_switch_events:
3167 ++ print(switch_str);
3168 ++ else:
3169 ++ global glb_switch_str
3170 ++ glb_switch_str[cpu] = switch_str
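The script previously kept a single pending switch line, so back-to-back switches on different CPUs lost events. It now defers at most one line per CPU and emits it just before the next sample from that CPU, unless --all-switch-events asks for every line immediately. The same pattern restated in C, under the assumption of a fixed CPU bound (MAX_CPUS and both function names are hypothetical):

    #define MAX_CPUS 4096
    static const char *pending[MAX_CPUS];   /* one deferred line per CPU */

    static void on_switch(int cpu, const char *line)
    {
            pending[cpu] = line;             /* overwrite: newest wins */
    }

    static void on_sample(int cpu)
    {
            if (pending[cpu]) {              /* flush the deferred line */
                    puts(pending[cpu]);
                    pending[cpu] = NULL;
            }
            /* ... then print the sample itself ... */
    }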
3171 +diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
3172 +index 824bceb063bfe..c3ceac1388106 100644
3173 +--- a/tools/perf/util/intel-pt.c
3174 ++++ b/tools/perf/util/intel-pt.c
3175 +@@ -3540,6 +3540,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
3176 + *args = p;
3177 + return 0;
3178 + }
3179 ++ p += 1;
3180 + while (1) {
3181 + vmcs = strtoull(p, &p, 0);
3182 + if (errno)
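Without the added p += 1, strtoull() is handed the delimiter it just stopped at, parses nothing, and the loop spins on the same position. The general rule, in a self-contained sketch:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *arg = "0x123:0x456";   /* hypothetical input */
            char *end;
            unsigned long long a = strtoull(arg, &end, 0);     /* stops at ':' */
            unsigned long long b = strtoull(end + 1, &end, 0); /* skip the ':' */
            printf("%llx %llx\n", a, b);       /* prints: 123 456 */
            return 0;
    }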
3183 +diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
3184 +index 7f26591f236b9..3ea73013d9568 100755
3185 +--- a/tools/testing/selftests/net/udpgro_fwd.sh
3186 ++++ b/tools/testing/selftests/net/udpgro_fwd.sh
3187 +@@ -132,7 +132,7 @@ run_test() {
3188 + local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
3189 + sed -e 's/\[//' -e 's/:.*//'`
3190 + if [ $rcv != $pkts ]; then
3191 +- echo " fail - received $rvs packets, expected $pkts"
3192 ++ echo " fail - received $rcv packets, expected $pkts"
3193 + ret=1
3194 + return
3195 + fi
3196 +@@ -185,6 +185,7 @@ for family in 4 6; do
3197 + IPT=iptables
3198 + SUFFIX=24
3199 + VXDEV=vxlan
3200 ++ PING=ping
3201 +
3202 + if [ $family = 6 ]; then
3203 + BM_NET=$BM_NET_V6
3204 +@@ -192,6 +193,7 @@ for family in 4 6; do
3205 + SUFFIX="64 nodad"
3206 + VXDEV=vxlan6
3207 + IPT=ip6tables
3208 ++ PING="ping6"
3209 + fi
3210 +
3211 + echo "IPv$family"
3212 +@@ -237,7 +239,7 @@ for family in 4 6; do
3213 +
3214 + # load arp cache before running the test to reduce the amount of
3215 + # stray traffic on top of the UDP tunnel
3216 +- ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
3217 ++ ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
3218 + run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
3219 + cleanup
3220 +
3221 +diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
3222 +index c66da6ffd6d8d..7badaf215de28 100644
3223 +--- a/tools/testing/selftests/net/udpgso.c
3224 ++++ b/tools/testing/selftests/net/udpgso.c
3225 +@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
3226 + },
3227 + {
3228 + /* send max number of min sized segments */
3229 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
3230 ++ .tlen = UDP_MAX_SEGMENTS,
3231 + .gso_len = 1,
3232 +- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
3233 ++ .r_num_mss = UDP_MAX_SEGMENTS,
3234 + },
3235 + {
3236 + /* send max number + 1 of min sized segments: fail */
3237 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
3238 ++ .tlen = UDP_MAX_SEGMENTS + 1,
3239 + .gso_len = 1,
3240 + .tfail = true,
3241 + },
3242 +@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
3243 + },
3244 + {
3245 + /* send max number of min sized segments */
3246 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
3247 ++ .tlen = UDP_MAX_SEGMENTS,
3248 + .gso_len = 1,
3249 +- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
3250 ++ .r_num_mss = UDP_MAX_SEGMENTS,
3251 + },
3252 + {
3253 + /* send max number + 1 of min sized segments: fail */
3254 +- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
3255 ++ .tlen = UDP_MAX_SEGMENTS + 1,
3256 + .gso_len = 1,
3257 + .tfail = true,
3258 + },
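The old expressions conflated two units: tlen counts payload bytes, but the kernel cap is on segment count. With gso_len = 1 each payload byte becomes its own segment, so the boundary sits exactly at UDP_MAX_SEGMENTS bytes and the IP header length never enters the calculation. Assuming UDP_MAX_SEGMENTS is 64, as in the uapi header:

    /* segments = DIV_ROUND_UP(tlen, gso_len); here gso_len == 1 */
    /* tlen = 64  ->  64 segments  -> accepted (at the cap)      */
    /* tlen = 65  ->  65 segments  -> rejected (.tfail = true)   */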
3259 +diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
3260 +index 17512a43885e7..f1fdaa2702913 100644
3261 +--- a/tools/testing/selftests/net/udpgso_bench_tx.c
3262 ++++ b/tools/testing/selftests/net/udpgso_bench_tx.c
3263 +@@ -419,6 +419,7 @@ static void usage(const char *filepath)
3264 +
3265 + static void parse_opts(int argc, char **argv)
3266 + {
3267 ++ const char *bind_addr = NULL;
3268 + int max_len, hdrlen;
3269 + int c;
3270 +
3271 +@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
3272 + cfg_cpu = strtol(optarg, NULL, 0);
3273 + break;
3274 + case 'D':
3275 +- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
3276 ++ bind_addr = optarg;
3277 + break;
3278 + case 'l':
3279 + cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
3280 +@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
3281 + }
3282 + }
3283 +
3284 ++ if (!bind_addr)
3285 ++ bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
3286 ++
3287 ++ setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
3288 ++
3289 + if (optind != argc)
3290 + usage(argv[0]);
3291 +