From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Sun, 07 Feb 2021 15:20:34
Message-Id: 1612711171.ac7c8645fd308101aed87ff91a355aac99de9053.alicef@gentoo
1 commit: ac7c8645fd308101aed87ff91a355aac99de9053
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Sun Feb 7 15:19:10 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Sun Feb 7 15:19:31 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ac7c8645
7
8 Linux patch 5.10.14
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1013_linux-5.10.14.patch | 1728 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1732 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 0a7ffef..897c945 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -95,6 +95,10 @@ Patch: 1012_linux-5.10.13.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.13
23
24 +Patch: 1013_linux-5.10.14.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.14
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1013_linux-5.10.14.patch b/1013_linux-5.10.14.patch
33 new file mode 100644
34 index 0000000..0533261
35 --- /dev/null
36 +++ b/1013_linux-5.10.14.patch
37 @@ -0,0 +1,1728 @@
38 +diff --git a/Makefile b/Makefile
39 +index a2d5e953ea40a..bb3770be9779d 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 5
45 + PATCHLEVEL = 10
46 +-SUBLEVEL = 13
47 ++SUBLEVEL = 14
48 + EXTRAVERSION =
49 + NAME = Kleptomaniac Octopus
50 +
51 +diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
52 +index 65e4482e38498..02692fbe2db5c 100644
53 +--- a/arch/arm/mm/Kconfig
54 ++++ b/arch/arm/mm/Kconfig
55 +@@ -743,6 +743,7 @@ config SWP_EMULATE
56 + config CPU_BIG_ENDIAN
57 + bool "Build big-endian kernel"
58 + depends on ARCH_SUPPORTS_BIG_ENDIAN
59 ++ depends on !LD_IS_LLD
60 + help
61 + Say Y if you plan on running a kernel in big-endian mode.
62 + Note that your board must be properly built and your board
63 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
64 +index 9b8548e5f6e51..ee8fcae9f9f00 100644
65 +--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
66 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
67 +@@ -135,3 +135,7 @@
68 + };
69 + };
70 + };
71 ++
72 ++&mali {
73 ++ dma-coherent;
74 ++};
75 +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
76 +index cd61239bae8c2..75c8e9a350cc7 100644
77 +--- a/arch/arm64/include/asm/memory.h
78 ++++ b/arch/arm64/include/asm/memory.h
79 +@@ -238,11 +238,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
80 +
81 +
82 + /*
83 +- * The linear kernel range starts at the bottom of the virtual address
84 +- * space. Testing the top bit for the start of the region is a
85 +- * sufficient check and avoids having to worry about the tag.
86 ++ * Check whether an arbitrary address is within the linear map, which
87 ++ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
88 ++ * kernel's TTBR1 address range.
89 + */
90 +-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
91 ++#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
92 +
93 + #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
94 + #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
95 +@@ -323,7 +323,7 @@ static inline void *phys_to_virt(phys_addr_t x)
96 + #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
97 +
98 + #define virt_addr_valid(addr) ({ \
99 +- __typeof__(addr) __addr = addr; \
100 ++ __typeof__(addr) __addr = __tag_reset(addr); \
101 + __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
102 + })
103 +
104 +diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
105 +index 67a9ba9eaa96b..cde44c13dda1b 100644
106 +--- a/arch/arm64/mm/physaddr.c
107 ++++ b/arch/arm64/mm/physaddr.c
108 +@@ -9,7 +9,7 @@
109 +
110 + phys_addr_t __virt_to_phys(unsigned long x)
111 + {
112 +- WARN(!__is_lm_address(x),
113 ++ WARN(!__is_lm_address(__tag_reset(x)),
114 + "virt_to_phys used for non-linear address: %pK (%pS)\n",
115 + (void *)x,
116 + (void *)x);
117 +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
118 +index 5e658ba2654a7..9abe842dbd843 100644
119 +--- a/arch/x86/include/asm/intel-family.h
120 ++++ b/arch/x86/include/asm/intel-family.h
121 +@@ -97,6 +97,7 @@
122 +
123 + #define INTEL_FAM6_LAKEFIELD 0x8A
124 + #define INTEL_FAM6_ALDERLAKE 0x97
125 ++#define INTEL_FAM6_ALDERLAKE_L 0x9A
126 +
127 + /* "Small Core" Processors (Atom) */
128 +
129 +diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
130 +index 0b4920a7238e3..e16cccdd04207 100644
131 +--- a/arch/x86/include/asm/msr.h
132 ++++ b/arch/x86/include/asm/msr.h
133 +@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
134 + * think of extending them - you will be slapped with a stinking trout or a frozen
135 + * shark will reach you, wherever you are! You've been warned.
136 + */
137 +-static inline unsigned long long notrace __rdmsr(unsigned int msr)
138 ++static __always_inline unsigned long long __rdmsr(unsigned int msr)
139 + {
140 + DECLARE_ARGS(val, low, high);
141 +
142 +@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
143 + return EAX_EDX_VAL(val, low, high);
144 + }
145 +
146 +-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
147 ++static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
148 + {
149 + asm volatile("1: wrmsr\n"
150 + "2:\n"
151 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
152 +index 098015b739993..84f581c91db45 100644
153 +--- a/arch/x86/kernel/setup.c
154 ++++ b/arch/x86/kernel/setup.c
155 +@@ -665,6 +665,17 @@ static void __init trim_platform_memory_ranges(void)
156 +
157 + static void __init trim_bios_range(void)
158 + {
159 ++ /*
160 ++ * A special case is the first 4Kb of memory;
161 ++ * This is a BIOS owned area, not kernel ram, but generally
162 ++ * not listed as such in the E820 table.
163 ++ *
164 ++ * This typically reserves additional memory (64KiB by default)
165 ++ * since some BIOSes are known to corrupt low memory. See the
166 ++ * Kconfig help text for X86_RESERVE_LOW.
167 ++ */
168 ++ e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
169 ++
170 + /*
171 + * special case: Some BIOSes report the PC BIOS
172 + * area (640Kb -> 1Mb) as RAM even though it is not.
173 +@@ -722,15 +733,6 @@ early_param("reservelow", parse_reservelow);
174 +
175 + static void __init trim_low_memory_range(void)
176 + {
177 +- /*
178 +- * A special case is the first 4Kb of memory;
179 +- * This is a BIOS owned area, not kernel ram, but generally
180 +- * not listed as such in the E820 table.
181 +- *
182 +- * This typically reserves additional memory (64KiB by default)
183 +- * since some BIOSes are known to corrupt low memory. See the
184 +- * Kconfig help text for X86_RESERVE_LOW.
185 +- */
186 + memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
187 + }
188 +
189 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
190 +index b0e9b0509568c..95d883482227e 100644
191 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
192 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
193 +@@ -239,6 +239,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
194 + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
195 + bool force_reset = false;
196 + bool update_uclk = false;
197 ++ bool p_state_change_support;
198 +
199 + if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
200 + return;
201 +@@ -279,8 +280,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
202 + clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
203 +
204 + clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
205 +- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
206 +- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
207 ++ p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
208 ++ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
209 ++ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
210 +
211 + /* to disable P-State switching, set UCLK min = max */
212 + if (!clk_mgr_base->clks.p_state_change_support)
213 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
214 +index 98464886341f6..17e6fd8201395 100644
215 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
216 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
217 +@@ -2375,6 +2375,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
218 + initial_link_setting;
219 + uint32_t link_bw;
220 +
221 ++ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
222 ++ return false;
223 ++
224 + /* search for the minimum link setting that:
225 + * 1. is supported according to the link training result
226 + * 2. could support the b/w requested by the timing
227 +@@ -3020,14 +3023,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
228 + for (i = 0; i < MAX_PIPES; i++) {
229 + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
230 + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
231 +- pipe_ctx->stream->link == link)
232 ++ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
233 + core_link_disable_stream(pipe_ctx);
234 + }
235 +
236 + for (i = 0; i < MAX_PIPES; i++) {
237 + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
238 + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
239 +- pipe_ctx->stream->link == link)
240 ++ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
241 + core_link_enable_stream(link->dc->current_state, pipe_ctx);
242 + }
243 +
244 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
245 +index d0f3bf953d027..0d1e7b56fb395 100644
246 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
247 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
248 +@@ -646,8 +646,13 @@ static void power_on_plane(
249 + if (REG(DC_IP_REQUEST_CNTL)) {
250 + REG_SET(DC_IP_REQUEST_CNTL, 0,
251 + IP_REQUEST_EN, 1);
252 +- hws->funcs.dpp_pg_control(hws, plane_id, true);
253 +- hws->funcs.hubp_pg_control(hws, plane_id, true);
254 ++
255 ++ if (hws->funcs.dpp_pg_control)
256 ++ hws->funcs.dpp_pg_control(hws, plane_id, true);
257 ++
258 ++ if (hws->funcs.hubp_pg_control)
259 ++ hws->funcs.hubp_pg_control(hws, plane_id, true);
260 ++
261 + REG_SET(DC_IP_REQUEST_CNTL, 0,
262 + IP_REQUEST_EN, 0);
263 + DC_LOG_DEBUG(
264 +@@ -1079,8 +1084,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
265 + if (REG(DC_IP_REQUEST_CNTL)) {
266 + REG_SET(DC_IP_REQUEST_CNTL, 0,
267 + IP_REQUEST_EN, 1);
268 +- hws->funcs.dpp_pg_control(hws, dpp->inst, false);
269 +- hws->funcs.hubp_pg_control(hws, hubp->inst, false);
270 ++
271 ++ if (hws->funcs.dpp_pg_control)
272 ++ hws->funcs.dpp_pg_control(hws, dpp->inst, false);
273 ++
274 ++ if (hws->funcs.hubp_pg_control)
275 ++ hws->funcs.hubp_pg_control(hws, hubp->inst, false);
276 ++
277 + dpp->funcs->dpp_reset(dpp);
278 + REG_SET(DC_IP_REQUEST_CNTL, 0,
279 + IP_REQUEST_EN, 0);
280 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
281 +index 01530e686f437..f1e9b3b06b924 100644
282 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
283 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
284 +@@ -1069,8 +1069,13 @@ static void dcn20_power_on_plane(
285 + if (REG(DC_IP_REQUEST_CNTL)) {
286 + REG_SET(DC_IP_REQUEST_CNTL, 0,
287 + IP_REQUEST_EN, 1);
288 +- dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
289 +- dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
290 ++
291 ++ if (hws->funcs.dpp_pg_control)
292 ++ hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
293 ++
294 ++ if (hws->funcs.hubp_pg_control)
295 ++ hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
296 ++
297 + REG_SET(DC_IP_REQUEST_CNTL, 0,
298 + IP_REQUEST_EN, 0);
299 + DC_LOG_DEBUG(
300 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
301 +index e73785e74cba8..20441127783ba 100644
302 +--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
303 ++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
304 +@@ -295,7 +295,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
305 + .num_banks = 8,
306 + .num_chans = 4,
307 + .vmm_page_size_bytes = 4096,
308 +- .dram_clock_change_latency_us = 23.84,
309 ++ .dram_clock_change_latency_us = 11.72,
310 + .return_bus_width_bytes = 64,
311 + .dispclk_dppclk_vco_speed_mhz = 3600,
312 + .xfc_bus_transport_time_us = 4,
313 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
314 +index 67f9f66904be2..597cf1459b0a8 100644
315 +--- a/drivers/gpu/drm/panfrost/panfrost_device.h
316 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.h
317 +@@ -88,6 +88,7 @@ struct panfrost_device {
318 + /* pm_domains for devices with more than one. */
319 + struct device *pm_domain_devs[MAX_PM_DOMAINS];
320 + struct device_link *pm_domain_links[MAX_PM_DOMAINS];
321 ++ bool coherent;
322 +
323 + struct panfrost_features features;
324 + const struct panfrost_compatible *comp;
325 +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
326 +index 0fc084110e5ba..689be734ed200 100644
327 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
328 ++++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
329 +@@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev)
330 + if (!pfdev->comp)
331 + return -ENODEV;
332 +
333 ++ pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
334 ++
335 + /* Allocate and initialze the DRM device. */
336 + ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
337 + if (IS_ERR(ddev))
338 +diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
339 +index 62d4d710a5711..57a31dd0ffed1 100644
340 +--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
341 ++++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
342 +@@ -218,6 +218,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
343 + */
344 + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
345 + {
346 ++ struct panfrost_device *pfdev = dev->dev_private;
347 + struct panfrost_gem_object *obj;
348 +
349 + obj = kzalloc(sizeof(*obj), GFP_KERNEL);
350 +@@ -227,6 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
351 + INIT_LIST_HEAD(&obj->mappings.list);
352 + mutex_init(&obj->mappings.lock);
353 + obj->base.base.funcs = &panfrost_gem_funcs;
354 ++ obj->base.map_cached = pfdev->coherent;
355 +
356 + return &obj->base.base;
357 + }
358 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
359 +index 776448c527ea9..be8d68fb0e11e 100644
360 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
361 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
362 +@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
363 + .pgsize_bitmap = SZ_4K | SZ_2M,
364 + .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
365 + .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
366 ++ .coherent_walk = pfdev->coherent,
367 + .tlb = &mmu_tlb_ops,
368 + .iommu_dev = pfdev->dev,
369 + };
370 +diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
371 +index 0727383f49402..8b113ae32dc71 100644
372 +--- a/drivers/i2c/busses/i2c-tegra.c
373 ++++ b/drivers/i2c/busses/i2c-tegra.c
374 +@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
375 + /* read back register to make sure that register writes completed */
376 + if (reg != I2C_TX_FIFO)
377 + readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
378 ++ else if (i2c_dev->is_vi)
379 ++ readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
380 + }
381 +
382 + static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
383 +@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
384 + writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
385 + }
386 +
387 ++static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
388 ++ unsigned int reg, unsigned int len)
389 ++{
390 ++ u32 *data32 = data;
391 ++
392 ++ /*
393 ++ * VI I2C controller has known hardware bug where writes get stuck
394 ++ * when immediate multiple writes happen to TX_FIFO register.
395 ++ * Recommended software work around is to read I2C register after
396 ++ * each write to TX_FIFO register to flush out the data.
397 ++ */
398 ++ while (len--)
399 ++ i2c_writel(i2c_dev, *data32++, reg);
400 ++}
401 ++
402 + static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
403 + unsigned int reg, unsigned int len)
404 + {
405 +@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
406 + i2c_dev->msg_buf_remaining = buf_remaining;
407 + i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
408 +
409 +- i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
410 ++ if (i2c_dev->is_vi)
411 ++ i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
412 ++ else
413 ++ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
414 +
415 + buf += words_to_transfer * BYTES_PER_FIFO_WORD;
416 + }
417 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
418 +index 151243fa01ba5..7e3db4c0324d3 100644
419 +--- a/drivers/iommu/intel/iommu.c
420 ++++ b/drivers/iommu/intel/iommu.c
421 +@@ -3350,6 +3350,11 @@ static int __init init_dmars(void)
422 +
423 + if (!ecap_pass_through(iommu->ecap))
424 + hw_pass_through = 0;
425 ++
426 ++ if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
427 ++ pr_warn("Disable batched IOTLB flush due to virtualization");
428 ++ intel_iommu_strict = 1;
429 ++ }
430 + intel_svm_check(iommu);
431 + }
432 +
433 +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
434 +index a7a9bc08dcd11..bcfbd0e44a4a0 100644
435 +--- a/drivers/iommu/io-pgtable-arm.c
436 ++++ b/drivers/iommu/io-pgtable-arm.c
437 +@@ -417,7 +417,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
438 + << ARM_LPAE_PTE_ATTRINDX_SHIFT);
439 + }
440 +
441 +- if (prot & IOMMU_CACHE)
442 ++ /*
443 ++ * Also Mali has its own notions of shareability wherein its Inner
444 ++ * domain covers the cores within the GPU, and its Outer domain is
445 ++ * "outside the GPU" (i.e. either the Inner or System domain in CPU
446 ++ * terms, depending on coherency).
447 ++ */
448 ++ if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
449 + pte |= ARM_LPAE_PTE_SH_IS;
450 + else
451 + pte |= ARM_LPAE_PTE_SH_OS;
452 +@@ -1021,6 +1027,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
453 + cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
454 + ARM_MALI_LPAE_TTBR_READ_INNER |
455 + ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
456 ++ if (cfg->coherent_walk)
457 ++ cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
458 ++
459 + return &data->iop;
460 +
461 + out_free_data:
462 +diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
463 +index 09c328ee65da8..71b3a4d5adc65 100644
464 +--- a/drivers/misc/habanalabs/common/device.c
465 ++++ b/drivers/misc/habanalabs/common/device.c
466 +@@ -1425,6 +1425,15 @@ void hl_device_fini(struct hl_device *hdev)
467 + }
468 + }
469 +
470 ++ /* Disable PCI access from device F/W so it won't send us additional
471 ++ * interrupts. We disable MSI/MSI-X at the halt_engines function and we
472 ++ * can't have the F/W sending us interrupts after that. We need to
473 ++ * disable the access here because if the device is marked disable, the
474 ++ * message won't be send. Also, in case of heartbeat, the device CPU is
475 ++ * marked as disable so this message won't be sent
476 ++ */
477 ++ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
478 ++
479 + /* Mark device as disabled */
480 + hdev->disabled = true;
481 +
482 +diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
483 +index cd41c7ceb0e78..13c6eebd4fa63 100644
484 +--- a/drivers/misc/habanalabs/common/firmware_if.c
485 ++++ b/drivers/misc/habanalabs/common/firmware_if.c
486 +@@ -385,6 +385,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
487 + }
488 + counters->rx_throughput = result;
489 +
490 ++ memset(&pkt, 0, sizeof(pkt));
491 ++ pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
492 ++ CPUCP_PKT_CTL_OPCODE_SHIFT);
493 ++
494 + /* Fetch PCI tx counter */
495 + pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
496 + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
497 +@@ -397,6 +401,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
498 + counters->tx_throughput = result;
499 +
500 + /* Fetch PCI replay counter */
501 ++ memset(&pkt, 0, sizeof(pkt));
502 + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
503 + CPUCP_PKT_CTL_OPCODE_SHIFT);
504 +
505 +diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
506 +index 07317ea491295..35401148969f5 100644
507 +--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
508 ++++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
509 +@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
510 +
511 + hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
512 + &hw_idle.busy_engines_mask_ext, NULL);
513 ++ hw_idle.busy_engines_mask =
514 ++ lower_32_bits(hw_idle.busy_engines_mask_ext);
515 +
516 + return copy_to_user(out, &hw_idle,
517 + min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
518 +diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
519 +index ed1bd41262ecd..68f661aca3ff2 100644
520 +--- a/drivers/misc/habanalabs/gaudi/gaudi.c
521 ++++ b/drivers/misc/habanalabs/gaudi/gaudi.c
522 +@@ -3119,7 +3119,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
523 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
524 + VM_DONTCOPY | VM_NORESERVE;
525 +
526 +- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
527 ++ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
528 ++ (dma_addr - HOST_PHYS_BASE), size);
529 + if (rc)
530 + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
531 +
532 +diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
533 +index 235d47b2420f5..986ed3c072088 100644
534 +--- a/drivers/misc/habanalabs/goya/goya.c
535 ++++ b/drivers/misc/habanalabs/goya/goya.c
536 +@@ -2675,7 +2675,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
537 + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
538 + VM_DONTCOPY | VM_NORESERVE;
539 +
540 +- rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
541 ++ rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
542 ++ (dma_addr - HOST_PHYS_BASE), size);
543 + if (rc)
544 + dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
545 +
546 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
547 +index 1e9a0adda2d69..445226720ff29 100644
548 +--- a/drivers/net/dsa/bcm_sf2.c
549 ++++ b/drivers/net/dsa/bcm_sf2.c
550 +@@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
551 + /* Find our integrated MDIO bus node */
552 + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
553 + priv->master_mii_bus = of_mdio_find_bus(dn);
554 +- if (!priv->master_mii_bus)
555 ++ if (!priv->master_mii_bus) {
556 ++ of_node_put(dn);
557 + return -EPROBE_DEFER;
558 ++ }
559 +
560 + get_device(&priv->master_mii_bus->dev);
561 + priv->master_mii_dn = dn;
562 +
563 + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
564 +- if (!priv->slave_mii_bus)
565 ++ if (!priv->slave_mii_bus) {
566 ++ of_node_put(dn);
567 + return -ENOMEM;
568 ++ }
569 +
570 + priv->slave_mii_bus->priv = priv;
571 + priv->slave_mii_bus->name = "sf2 slave mii";
572 +diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
573 +index 0ef854911f215..d4a64dbde3157 100644
574 +--- a/drivers/net/dsa/microchip/ksz_common.c
575 ++++ b/drivers/net/dsa/microchip/ksz_common.c
576 +@@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev,
577 + gpiod_set_value_cansleep(dev->reset_gpio, 1);
578 + usleep_range(10000, 12000);
579 + gpiod_set_value_cansleep(dev->reset_gpio, 0);
580 +- usleep_range(100, 1000);
581 ++ msleep(100);
582 + }
583 +
584 + mutex_init(&dev->dev_mutex);
585 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
586 +index 04f24c66cf366..55c28fbc5f9ea 100644
587 +--- a/drivers/net/ethernet/freescale/fec_main.c
588 ++++ b/drivers/net/ethernet/freescale/fec_main.c
589 +@@ -2165,9 +2165,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
590 + fep->mii_bus->parent = &pdev->dev;
591 +
592 + err = of_mdiobus_register(fep->mii_bus, node);
593 +- of_node_put(node);
594 + if (err)
595 + goto err_out_free_mdiobus;
596 ++ of_node_put(node);
597 +
598 + mii_cnt++;
599 +
600 +@@ -2180,6 +2180,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
601 + err_out_free_mdiobus:
602 + mdiobus_free(fep->mii_bus);
603 + err_out:
604 ++ of_node_put(node);
605 + return err;
606 + }
607 +
608 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
609 +index e2540cc00d34e..627ce1a20473a 100644
610 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
611 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
612 +@@ -4979,6 +4979,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
613 + while (!done) {
614 + /* Pull all the valid messages off the CRQ */
615 + while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
616 ++ /* This barrier makes sure ibmvnic_next_crq()'s
617 ++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
618 ++ * before ibmvnic_handle_crq()'s
619 ++ * switch(gen_crq->first) and switch(gen_crq->cmd).
620 ++ */
621 ++ dma_rmb();
622 + ibmvnic_handle_crq(crq, adapter);
623 + crq->generic.first = 0;
624 + }
625 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
626 +index d2581090f9a40..df238e46e2aeb 100644
627 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
628 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
629 +@@ -473,10 +473,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
630 + dma_addr_t iova;
631 + u8 *buf;
632 +
633 +- buf = napi_alloc_frag(pool->rbsize);
634 ++ buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
635 + if (unlikely(!buf))
636 + return -ENOMEM;
637 +
638 ++ buf = PTR_ALIGN(buf, OTX2_ALIGN);
639 + iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
640 + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
641 + if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
642 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
643 +index c6c5826aba41e..1892cea05ee7c 100644
644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
645 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
646 +@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
647 +
648 + static const
649 + struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
650 ++ .is_static = true,
651 + .can_handle = mlxsw_sp1_span_cpu_can_handle,
652 + .parms_set = mlxsw_sp1_span_entry_cpu_parms,
653 + .configure = mlxsw_sp1_span_entry_cpu_configure,
654 +@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
655 +
656 + static const
657 + struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
658 ++ .is_static = true,
659 + .can_handle = mlxsw_sp_port_dev_check,
660 + .parms_set = mlxsw_sp_span_entry_phys_parms,
661 + .configure = mlxsw_sp_span_entry_phys_configure,
662 +@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
663 +
664 + static const
665 + struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
666 ++ .is_static = true,
667 + .can_handle = mlxsw_sp2_span_cpu_can_handle,
668 + .parms_set = mlxsw_sp2_span_entry_cpu_parms,
669 + .configure = mlxsw_sp2_span_entry_cpu_configure,
670 +@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
671 + if (!refcount_read(&curr->ref_count))
672 + continue;
673 +
674 ++ if (curr->ops->is_static)
675 ++ continue;
676 ++
677 + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
678 + if (err)
679 + continue;
680 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
681 +index d907718bc8c58..aa1cd409c0e2e 100644
682 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
683 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
684 +@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
685 + };
686 +
687 + struct mlxsw_sp_span_entry_ops {
688 ++ bool is_static;
689 + bool (*can_handle)(const struct net_device *to_dev);
690 + int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
691 + const struct net_device *to_dev,
692 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
693 +index 82b1c7a5a7a94..ba0e4d2b256a4 100644
694 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
695 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
696 +@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
697 + if (ret) {
698 + dev_err(&pdev->dev,
699 + "Failed to set tx_clk\n");
700 +- return ret;
701 ++ goto err_remove_config_dt;
702 + }
703 + }
704 + }
705 +@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
706 + if (ret) {
707 + dev_err(&pdev->dev,
708 + "Failed to set clk_ptp_ref\n");
709 +- return ret;
710 ++ goto err_remove_config_dt;
711 + }
712 + }
713 + }
714 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
715 +index 9a6a519426a08..103d2448e9e0d 100644
716 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
717 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
718 +@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
719 + struct plat_stmmacenet_data *plat)
720 + {
721 + plat->bus_id = 2;
722 ++ plat->addr64 = 32;
723 + return ehl_common_data(pdev, plat);
724 + }
725 +
726 +@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
727 + struct plat_stmmacenet_data *plat)
728 + {
729 + plat->bus_id = 3;
730 ++ plat->addr64 = 32;
731 + return ehl_common_data(pdev, plat);
732 + }
733 +
734 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
735 +index 34cb59b2fcd67..4ec5f05dabe1d 100644
736 +--- a/drivers/nvme/host/core.c
737 ++++ b/drivers/nvme/host/core.c
738 +@@ -1489,8 +1489,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
739 + }
740 +
741 + length = (io.nblocks + 1) << ns->lba_shift;
742 +- meta_len = (io.nblocks + 1) * ns->ms;
743 +- metadata = nvme_to_user_ptr(io.metadata);
744 ++
745 ++ if ((io.control & NVME_RW_PRINFO_PRACT) &&
746 ++ ns->ms == sizeof(struct t10_pi_tuple)) {
747 ++ /*
748 ++ * Protection information is stripped/inserted by the
749 ++ * controller.
750 ++ */
751 ++ if (nvme_to_user_ptr(io.metadata))
752 ++ return -EINVAL;
753 ++ meta_len = 0;
754 ++ metadata = NULL;
755 ++ } else {
756 ++ meta_len = (io.nblocks + 1) * ns->ms;
757 ++ metadata = nvme_to_user_ptr(io.metadata);
758 ++ }
759 +
760 + if (ns->features & NVME_NS_EXT_LBAS) {
761 + length += meta_len;
762 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
763 +index 77f615568194d..a3486c1c27f0c 100644
764 +--- a/drivers/nvme/host/pci.c
765 ++++ b/drivers/nvme/host/pci.c
766 +@@ -23,6 +23,7 @@
767 + #include <linux/t10-pi.h>
768 + #include <linux/types.h>
769 + #include <linux/io-64-nonatomic-lo-hi.h>
770 ++#include <linux/io-64-nonatomic-hi-lo.h>
771 + #include <linux/sed-opal.h>
772 + #include <linux/pci-p2pdma.h>
773 +
774 +@@ -1825,6 +1826,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
775 + if (dev->cmb_size)
776 + return;
777 +
778 ++ if (NVME_CAP_CMBS(dev->ctrl.cap))
779 ++ writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
780 ++
781 + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
782 + if (!dev->cmbsz)
783 + return;
784 +@@ -1838,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
785 + if (offset > bar_size)
786 + return;
787 +
788 ++ /*
789 ++ * Tell the controller about the host side address mapping the CMB,
790 ++ * and enable CMB decoding for the NVMe 1.4+ scheme:
791 ++ */
792 ++ if (NVME_CAP_CMBS(dev->ctrl.cap)) {
793 ++ hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
794 ++ (pci_bus_address(pdev, bar) + offset),
795 ++ dev->bar + NVME_REG_CMBMSC);
796 ++ }
797 ++
798 + /*
799 + * Controllers may support a CMB size larger than their BAR,
800 + * for example, due to being behind a bridge. Reduce the CMB to
801 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
802 +index 65e3d0ef36e1a..493ed7ba86ed2 100644
803 +--- a/drivers/nvme/host/rdma.c
804 ++++ b/drivers/nvme/host/rdma.c
805 +@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
806 + struct completion cm_done;
807 + bool pi_support;
808 + int cq_size;
809 ++ struct mutex queue_lock;
810 + };
811 +
812 + struct nvme_rdma_ctrl {
813 +@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
814 + int ret;
815 +
816 + queue = &ctrl->queues[idx];
817 ++ mutex_init(&queue->queue_lock);
818 + queue->ctrl = ctrl;
819 + if (idx && ctrl->ctrl.max_integrity_segments)
820 + queue->pi_support = true;
821 +@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
822 + if (IS_ERR(queue->cm_id)) {
823 + dev_info(ctrl->ctrl.device,
824 + "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
825 +- return PTR_ERR(queue->cm_id);
826 ++ ret = PTR_ERR(queue->cm_id);
827 ++ goto out_destroy_mutex;
828 + }
829 +
830 + if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
831 +@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
832 + out_destroy_cm_id:
833 + rdma_destroy_id(queue->cm_id);
834 + nvme_rdma_destroy_queue_ib(queue);
835 ++out_destroy_mutex:
836 ++ mutex_destroy(&queue->queue_lock);
837 + return ret;
838 + }
839 +
840 +@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
841 +
842 + static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
843 + {
844 +- if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
845 +- return;
846 +- __nvme_rdma_stop_queue(queue);
847 ++ mutex_lock(&queue->queue_lock);
848 ++ if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
849 ++ __nvme_rdma_stop_queue(queue);
850 ++ mutex_unlock(&queue->queue_lock);
851 + }
852 +
853 + static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
854 +@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
855 +
856 + nvme_rdma_destroy_queue_ib(queue);
857 + rdma_destroy_id(queue->cm_id);
858 ++ mutex_destroy(&queue->queue_lock);
859 + }
860 +
861 + static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
862 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
863 +index 81db2331f6d78..6487b7897d1fb 100644
864 +--- a/drivers/nvme/host/tcp.c
865 ++++ b/drivers/nvme/host/tcp.c
866 +@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
867 + struct work_struct io_work;
868 + int io_cpu;
869 +
870 ++ struct mutex queue_lock;
871 + struct mutex send_mutex;
872 + struct llist_head req_list;
873 + struct list_head send_list;
874 +@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
875 +
876 + sock_release(queue->sock);
877 + kfree(queue->pdu);
878 ++ mutex_destroy(&queue->queue_lock);
879 + }
880 +
881 + static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
882 +@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
883 + struct nvme_tcp_queue *queue = &ctrl->queues[qid];
884 + int ret, rcv_pdu_size;
885 +
886 ++ mutex_init(&queue->queue_lock);
887 + queue->ctrl = ctrl;
888 + init_llist_head(&queue->req_list);
889 + INIT_LIST_HEAD(&queue->send_list);
890 +@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
891 + if (ret) {
892 + dev_err(nctrl->device,
893 + "failed to create socket: %d\n", ret);
894 +- return ret;
895 ++ goto err_destroy_mutex;
896 + }
897 +
898 + /* Single syn retry */
899 +@@ -1507,6 +1510,8 @@ err_crypto:
900 + err_sock:
901 + sock_release(queue->sock);
902 + queue->sock = NULL;
903 ++err_destroy_mutex:
904 ++ mutex_destroy(&queue->queue_lock);
905 + return ret;
906 + }
907 +
908 +@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
909 + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
910 + struct nvme_tcp_queue *queue = &ctrl->queues[qid];
911 +
912 +- if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
913 +- return;
914 +- __nvme_tcp_stop_queue(queue);
915 ++ mutex_lock(&queue->queue_lock);
916 ++ if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
917 ++ __nvme_tcp_stop_queue(queue);
918 ++ mutex_unlock(&queue->queue_lock);
919 + }
920 +
921 + static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
922 +diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
923 +index dca34489a1dc9..92ca23bc8dbfc 100644
924 +--- a/drivers/nvme/target/admin-cmd.c
925 ++++ b/drivers/nvme/target/admin-cmd.c
926 +@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
927 +
928 + /* return an all zeroed buffer if we can't find an active namespace */
929 + ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
930 +- if (!ns)
931 ++ if (!ns) {
932 ++ status = NVME_SC_INVALID_NS;
933 + goto done;
934 ++ }
935 +
936 + nvmet_ns_revalidate(ns);
937 +
938 +@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
939 + id->nsattr |= (1 << 0);
940 + nvmet_put_namespace(ns);
941 + done:
942 +- status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
943 ++ if (!status)
944 ++ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
945 ++
946 + kfree(id);
947 + out:
948 + nvmet_req_complete(req, status);
949 +diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
950 +index 442522ba487f0..4728e2bff6620 100644
951 +--- a/drivers/phy/motorola/phy-cpcap-usb.c
952 ++++ b/drivers/phy/motorola/phy-cpcap-usb.c
953 +@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
954 + generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
955 + if (IS_ERR(generic_phy)) {
956 + error = PTR_ERR(generic_phy);
957 +- return PTR_ERR(generic_phy);
958 ++ goto out_reg_disable;
959 + }
960 +
961 + phy_set_drvdata(generic_phy, ddata);
962 +
963 + phy_provider = devm_of_phy_provider_register(ddata->dev,
964 + of_phy_simple_xlate);
965 +- if (IS_ERR(phy_provider))
966 +- return PTR_ERR(phy_provider);
967 ++ if (IS_ERR(phy_provider)) {
968 ++ error = PTR_ERR(phy_provider);
969 ++ goto out_reg_disable;
970 ++ }
971 +
972 + error = cpcap_usb_init_optional_pins(ddata);
973 + if (error)
974 +- return error;
975 ++ goto out_reg_disable;
976 +
977 + cpcap_usb_init_optional_gpios(ddata);
978 +
979 + error = cpcap_usb_init_iio(ddata);
980 + if (error)
981 +- return error;
982 ++ goto out_reg_disable;
983 +
984 + error = cpcap_usb_init_interrupts(pdev, ddata);
985 + if (error)
986 +- return error;
987 ++ goto out_reg_disable;
988 +
989 + usb_add_phy_dev(&ddata->phy);
990 + atomic_set(&ddata->active, 1);
991 + schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
992 +
993 + return 0;
994 ++
995 ++out_reg_disable:
996 ++ regulator_disable(ddata->vusb);
997 ++
998 ++ return error;
999 + }
1000 +
1001 + static int cpcap_usb_phy_remove(struct platform_device *pdev)
1002 +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
1003 +index 65fb3a3031470..30a9062d2b4b8 100644
1004 +--- a/drivers/platform/x86/intel-vbtn.c
1005 ++++ b/drivers/platform/x86/intel-vbtn.c
1006 +@@ -216,6 +216,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
1007 + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
1008 + },
1009 + },
1010 ++ {
1011 ++ .matches = {
1012 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1013 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
1014 ++ },
1015 ++ },
1016 + {} /* Array terminator */
1017 + };
1018 +
1019 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
1020 +index c404706379d92..69402758b99c3 100644
1021 +--- a/drivers/platform/x86/thinkpad_acpi.c
1022 ++++ b/drivers/platform/x86/thinkpad_acpi.c
1023 +@@ -8782,6 +8782,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
1024 + TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */
1025 + TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */
1026 + TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */
1027 ++ TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */
1028 + TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
1029 + TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
1030 + TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
1031 +diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
1032 +index 5783139d0a119..c4de932302d6b 100644
1033 +--- a/drivers/platform/x86/touchscreen_dmi.c
1034 ++++ b/drivers/platform/x86/touchscreen_dmi.c
1035 +@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
1036 + .properties = digma_citi_e200_props,
1037 + };
1038 +
1039 ++static const struct property_entry estar_beauty_hd_props[] = {
1040 ++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
1041 ++ { }
1042 ++};
1043 ++
1044 ++static const struct ts_dmi_data estar_beauty_hd_data = {
1045 ++ .acpi_name = "GDIX1001:00",
1046 ++ .properties = estar_beauty_hd_props,
1047 ++};
1048 ++
1049 + static const struct property_entry gp_electronic_t701_props[] = {
1050 + PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
1051 + PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
1052 +@@ -942,6 +952,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
1053 + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
1054 + },
1055 + },
1056 ++ {
1057 ++ /* Estar Beauty HD (MID 7316R) */
1058 ++ .driver_data = (void *)&estar_beauty_hd_data,
1059 ++ .matches = {
1060 ++ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
1061 ++ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
1062 ++ },
1063 ++ },
1064 + {
1065 + /* GP-electronic T701 */
1066 + .driver_data = (void *)&gp_electronic_t701_data,
1067 +diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
1068 +index a2beee6e09f06..5988c300cc82e 100644
1069 +--- a/drivers/scsi/fnic/vnic_dev.c
1070 ++++ b/drivers/scsi/fnic/vnic_dev.c
1071 +@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
1072 + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
1073 + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
1074 + pr_err("error in devcmd2 init");
1075 +- return -ENODEV;
1076 ++ err = -ENODEV;
1077 ++ goto err_free_wq;
1078 + }
1079 +
1080 + /*
1081 +@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
1082 + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
1083 + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
1084 + if (err)
1085 +- goto err_free_wq;
1086 ++ goto err_disable_wq;
1087 +
1088 + vdev->devcmd2->result =
1089 + (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
1090 +@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
1091 +
1092 + err_free_desc_ring:
1093 + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
1094 +-err_free_wq:
1095 ++err_disable_wq:
1096 + vnic_wq_disable(&vdev->devcmd2->wq);
1097 ++err_free_wq:
1098 + vnic_wq_free(&vdev->devcmd2->wq);
1099 + err_free_devcmd2:
1100 + kfree(vdev->devcmd2);
1101 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
1102 +index 070cf516b98fe..57c9a71fa33a7 100644
1103 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
1104 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
1105 +@@ -2957,8 +2957,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
1106 + unsigned long flags = 0;
1107 +
1108 + spin_lock_irqsave(shost->host_lock, flags);
1109 +- if (sdev->type == TYPE_DISK)
1110 ++ if (sdev->type == TYPE_DISK) {
1111 + sdev->allow_restart = 1;
1112 ++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
1113 ++ }
1114 + spin_unlock_irqrestore(shost->host_lock, flags);
1115 + return 0;
1116 + }
1117 +diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
1118 +index 96a2952cf626b..a50f1eef0e0cd 100644
1119 +--- a/drivers/scsi/libfc/fc_exch.c
1120 ++++ b/drivers/scsi/libfc/fc_exch.c
1121 +@@ -1624,8 +1624,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1122 + rc = fc_exch_done_locked(ep);
1123 + WARN_ON(fc_seq_exch(sp) != ep);
1124 + spin_unlock_bh(&ep->ex_lock);
1125 +- if (!rc)
1126 ++ if (!rc) {
1127 + fc_exch_delete(ep);
1128 ++ } else {
1129 ++ FC_EXCH_DBG(ep, "ep is completed already,"
1130 ++ "hence skip calling the resp\n");
1131 ++ goto skip_resp;
1132 ++ }
1133 + }
1134 +
1135 + /*
1136 +@@ -1644,6 +1649,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1137 + if (!fc_invoke_resp(ep, sp, fp))
1138 + fc_frame_free(fp);
1139 +
1140 ++skip_resp:
1141 + fc_exch_release(ep);
1142 + return;
1143 + rel:
1144 +@@ -1900,10 +1906,16 @@ static void fc_exch_reset(struct fc_exch *ep)
1145 +
1146 + fc_exch_hold(ep);
1147 +
1148 +- if (!rc)
1149 ++ if (!rc) {
1150 + fc_exch_delete(ep);
1151 ++ } else {
1152 ++ FC_EXCH_DBG(ep, "ep is completed already,"
1153 ++ "hence skip calling the resp\n");
1154 ++ goto skip_resp;
1155 ++ }
1156 +
1157 + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
1158 ++skip_resp:
1159 + fc_seq_set_resp(sp, NULL, ep->arg);
1160 + fc_exch_release(ep);
1161 + }
1162 +diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
1163 +index cba1cf6a1c12d..1e939a2a387f3 100644
1164 +--- a/drivers/scsi/scsi_transport_srp.c
1165 ++++ b/drivers/scsi/scsi_transport_srp.c
1166 +@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
1167 + res = mutex_lock_interruptible(&rport->mutex);
1168 + if (res)
1169 + goto out;
1170 +- scsi_target_block(&shost->shost_gendev);
1171 ++ if (rport->state != SRP_RPORT_FAIL_FAST)
1172 ++ /*
1173 ++ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
1174 ++ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
1175 ++ * later is ok though, scsi_internal_device_unblock_nowait()
1176 ++ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
1177 ++ */
1178 ++ scsi_target_block(&shost->shost_gendev);
1179 + res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
1180 + pr_debug("%s (state %d): transport.reconnect() returned %d\n",
1181 + dev_name(&shost->shost_gendev), rport->state, res);
1182 +diff --git a/fs/udf/super.c b/fs/udf/super.c
1183 +index 5bef3a68395d8..d0df217f4712a 100644
1184 +--- a/fs/udf/super.c
1185 ++++ b/fs/udf/super.c
1186 +@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
1187 + struct buffer_head *bh = NULL;
1188 + int nsr = 0;
1189 + struct udf_sb_info *sbi;
1190 ++ loff_t session_offset;
1191 +
1192 + sbi = UDF_SB(sb);
1193 + if (sb->s_blocksize < sizeof(struct volStructDesc))
1194 +@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
1195 + else
1196 + sectorsize = sb->s_blocksize;
1197 +
1198 +- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
1199 ++ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
1200 ++ sector += session_offset;
1201 +
1202 + udf_debug("Starting at sector %u (%lu byte sectors)\n",
1203 + (unsigned int)(sector >> sb->s_blocksize_bits),
1204 +@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
1205 +
1206 + if (nsr > 0)
1207 + return 1;
1208 +- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
1209 +- VSD_FIRST_SECTOR_OFFSET)
1210 ++ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
1211 + return -1;
1212 + else
1213 + return 0;
1214 +diff --git a/include/linux/kthread.h b/include/linux/kthread.h
1215 +index 65b81e0c494d2..2484ed97e72f5 100644
1216 +--- a/include/linux/kthread.h
1217 ++++ b/include/linux/kthread.h
1218 +@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
1219 + unsigned int cpu,
1220 + const char *namefmt);
1221 +
1222 ++void kthread_set_per_cpu(struct task_struct *k, int cpu);
1223 ++bool kthread_is_per_cpu(struct task_struct *k);
1224 ++
1225 + /**
1226 + * kthread_run - create and wake a thread.
1227 + * @threadfn: the function to run until signal_pending(current).
1228 +diff --git a/include/linux/nvme.h b/include/linux/nvme.h
1229 +index d925359976873..bfed36e342ccb 100644
1230 +--- a/include/linux/nvme.h
1231 ++++ b/include/linux/nvme.h
1232 +@@ -116,6 +116,9 @@ enum {
1233 + NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
1234 + * Location
1235 + */
1236 ++ NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
1237 ++ * Space Control
1238 ++ */
1239 + NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
1240 + NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
1241 + NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
1242 +@@ -135,6 +138,7 @@ enum {
1243 + #define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff)
1244 + #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
1245 + #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
1246 ++#define NVME_CAP_CMBS(cap) (((cap) >> 57) & 0x1)
1247 +
1248 + #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
1249 + #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
1250 +@@ -192,6 +196,8 @@ enum {
1251 + NVME_CSTS_SHST_OCCUR = 1 << 2,
1252 + NVME_CSTS_SHST_CMPLT = 2 << 2,
1253 + NVME_CSTS_SHST_MASK = 3 << 2,
1254 ++ NVME_CMBMSC_CRE = 1 << 0,
1255 ++ NVME_CMBMSC_CMSE = 1 << 1,
1256 + };
1257 +
1258 + struct nvme_id_power_state {
1259 +diff --git a/kernel/kthread.c b/kernel/kthread.c
1260 +index 933a625621b8d..5edf7e19ab262 100644
1261 +--- a/kernel/kthread.c
1262 ++++ b/kernel/kthread.c
1263 +@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
1264 + return p;
1265 + kthread_bind(p, cpu);
1266 + /* CPU hotplug need to bind once again when unparking the thread. */
1267 +- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
1268 + to_kthread(p)->cpu = cpu;
1269 + return p;
1270 + }
1271 +
1272 ++void kthread_set_per_cpu(struct task_struct *k, int cpu)
1273 ++{
1274 ++ struct kthread *kthread = to_kthread(k);
1275 ++ if (!kthread)
1276 ++ return;
1277 ++
1278 ++ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
1279 ++
1280 ++ if (cpu < 0) {
1281 ++ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
1282 ++ return;
1283 ++ }
1284 ++
1285 ++ kthread->cpu = cpu;
1286 ++ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
1287 ++}
1288 ++
1289 ++bool kthread_is_per_cpu(struct task_struct *k)
1290 ++{
1291 ++ struct kthread *kthread = to_kthread(k);
1292 ++ if (!kthread)
1293 ++ return false;
1294 ++
1295 ++ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
1296 ++}
1297 ++
1298 + /**
1299 + * kthread_unpark - unpark a thread created by kthread_create().
1300 + * @k: thread created by kthread_create().
1301 +diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
1302 +index 02bc5b8f1eb27..bdaf4829098c0 100644
1303 +--- a/kernel/locking/lockdep.c
1304 ++++ b/kernel/locking/lockdep.c
1305 +@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
1306 + /*
1307 + * Check whether we follow the irq-flags state precisely:
1308 + */
1309 +-static void check_flags(unsigned long flags)
1310 ++static noinstr void check_flags(unsigned long flags)
1311 + {
1312 + #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
1313 + if (!debug_locks)
1314 + return;
1315 +
1316 ++ /* Get the warning out.. */
1317 ++ instrumentation_begin();
1318 ++
1319 + if (irqs_disabled_flags(flags)) {
1320 + if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
1321 + printk("possible reason: unannotated irqs-off.\n");
1322 +@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
1323 +
1324 + if (!debug_locks)
1325 + print_irqtrace_events(current);
1326 ++
1327 ++ instrumentation_end();
1328 + #endif
1329 + }
1330 +
1331 +diff --git a/kernel/smpboot.c b/kernel/smpboot.c
1332 +index 2efe1e206167c..f25208e8df836 100644
1333 +--- a/kernel/smpboot.c
1334 ++++ b/kernel/smpboot.c
1335 +@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
1336 + kfree(td);
1337 + return PTR_ERR(tsk);
1338 + }
1339 ++ kthread_set_per_cpu(tsk, cpu);
1340 + /*
1341 + * Park the thread so that it could start right on the CPU
1342 + * when it is available.
1343 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1344 +index 0695c7895c892..1d99c52cc99a6 100644
1345 +--- a/kernel/workqueue.c
1346 ++++ b/kernel/workqueue.c
1347 +@@ -1845,12 +1845,6 @@ static void worker_attach_to_pool(struct worker *worker,
1348 + {
1349 + mutex_lock(&wq_pool_attach_mutex);
1350 +
1351 +- /*
1352 +- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1353 +- * online CPUs. It'll be re-applied when any of the CPUs come up.
1354 +- */
1355 +- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1356 +-
1357 + /*
1358 + * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1359 + * stable across this function. See the comments above the flag
1360 +@@ -1859,6 +1853,9 @@ static void worker_attach_to_pool(struct worker *worker,
1361 + if (pool->flags & POOL_DISASSOCIATED)
1362 + worker->flags |= WORKER_UNBOUND;
1363 +
1364 ++ if (worker->rescue_wq)
1365 ++ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1366 ++
1367 + list_add_tail(&worker->node, &pool->workers);
1368 + worker->pool = pool;
1369 +
1370 +diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
1371 +index 90470392fdaa7..de5cd3818690c 100644
1372 +--- a/net/mac80211/debugfs.c
1373 ++++ b/net/mac80211/debugfs.c
1374 +@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
1375 + {
1376 + struct ieee80211_local *local = file->private_data;
1377 + char buf[100];
1378 +- size_t len;
1379 +
1380 +- if (count > sizeof(buf))
1381 ++ if (count >= sizeof(buf))
1382 + return -EINVAL;
1383 +
1384 + if (copy_from_user(buf, user_buf, count))
1385 + return -EFAULT;
1386 +
1387 +- buf[sizeof(buf) - 1] = '\0';
1388 +- len = strlen(buf);
1389 +- if (len > 0 && buf[len-1] == '\n')
1390 +- buf[len-1] = 0;
1391 ++ if (count && buf[count - 1] == '\n')
1392 ++ buf[count - 1] = '\0';
1393 ++ else
1394 ++ buf[count] = '\0';
1395 +
1396 + if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
1397 + return count;
1398 +@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
1399 + {
1400 + struct ieee80211_local *local = file->private_data;
1401 + char buf[16];
1402 +- size_t len;
1403 +
1404 +- if (count > sizeof(buf))
1405 ++ if (count >= sizeof(buf))
1406 + return -EINVAL;
1407 +
1408 + if (copy_from_user(buf, user_buf, count))
1409 + return -EFAULT;
1410 +
1411 +- buf[sizeof(buf) - 1] = 0;
1412 +- len = strlen(buf);
1413 +- if (len > 0 && buf[len - 1] == '\n')
1414 +- buf[len - 1] = 0;
1415 ++ if (count && buf[count - 1] == '\n')
1416 ++ buf[count - 1] = '\0';
1417 ++ else
1418 ++ buf[count] = '\0';
1419 +
1420 + if (kstrtou16(buf, 0, &local->airtime_flags))
1421 + return -EINVAL;
1422 +@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
1423 + {
1424 + struct ieee80211_local *local = file->private_data;
1425 + char buf[100];
1426 +- size_t len;
1427 + u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
1428 + struct sta_info *sta;
1429 +
1430 +- if (count > sizeof(buf))
1431 ++ if (count >= sizeof(buf))
1432 + return -EINVAL;
1433 +
1434 + if (copy_from_user(buf, user_buf, count))
1435 + return -EFAULT;
1436 +
1437 +- buf[sizeof(buf) - 1] = 0;
1438 +- len = strlen(buf);
1439 +- if (len > 0 && buf[len - 1] == '\n')
1440 +- buf[len - 1] = 0;
1441 ++ if (count && buf[count - 1] == '\n')
1442 ++ buf[count - 1] = '\0';
1443 ++ else
1444 ++ buf[count] = '\0';
1445 +
1446 + if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
1447 + return -EINVAL;
1448 +@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
1449 + {
1450 + struct ieee80211_local *local = file->private_data;
1451 + char buf[3];
1452 +- size_t len;
1453 +
1454 +- if (count > sizeof(buf))
1455 ++ if (count >= sizeof(buf))
1456 + return -EINVAL;
1457 +
1458 + if (copy_from_user(buf, user_buf, count))
1459 + return -EFAULT;
1460 +
1461 +- buf[sizeof(buf) - 1] = '\0';
1462 +- len = strlen(buf);
1463 +- if (len > 0 && buf[len - 1] == '\n')
1464 +- buf[len - 1] = 0;
1465 ++ if (count && buf[count - 1] == '\n')
1466 ++ buf[count - 1] = '\0';
1467 ++ else
1468 ++ buf[count] = '\0';
1469 +
1470 + if (buf[0] == '0' && buf[1] == '\0')
1471 + local->force_tx_status = 0;
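The four mac80211 debugfs hunks above share one fix: `count > sizeof(buf)` let a write fill the whole buffer, and terminating at buf[sizeof(buf) - 1] before strlen() meant parsing could also walk over uninitialized stack bytes whenever count was smaller than the buffer. The rewrite rejects writes that leave no room for a NUL, terminates at exactly count, and strips one trailing newline. A self-contained userspace sketch of the same idiom (names hypothetical; memcpy stands in for copy_from_user):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int parse_write(const char *user_data, size_t count)
    {
        char buf[16];
        unsigned int value;

        if (count >= sizeof(buf))       /* leave room for the NUL */
            return -EINVAL;

        memcpy(buf, user_data, count);

        if (count && buf[count - 1] == '\n')
            buf[count - 1] = '\0';
        else
            buf[count] = '\0';

        if (sscanf(buf, "limit %u", &value) == 1)
            printf("limit set to %u\n", value);
        return (int)count;
    }

    int main(void)
    {
        const char line[] = "limit 42\n";

        parse_write(line, sizeof(line) - 1);
        return 0;
    }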
1472 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1473 +index 2a5a11f92b03e..98517423b0b76 100644
1474 +--- a/net/mac80211/rx.c
1475 ++++ b/net/mac80211/rx.c
1476 +@@ -4191,6 +4191,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
1477 +
1478 + rcu_read_lock();
1479 + key = rcu_dereference(sta->ptk[sta->ptk_idx]);
1480 ++ if (!key)
1481 ++ key = rcu_dereference(sdata->default_unicast_key);
1482 + if (key) {
1483 + switch (key->conf.cipher) {
1484 + case WLAN_CIPHER_SUITE_TKIP:
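The fast-rx hunk falls back to the interface's default unicast key when the station has no pairwise key (e.g. WEP or other static-key setups), so the cipher checks that follow see the key actually in use. The selection reduces to a plain fallback (sketch, hypothetical types):

    #include <stdio.h>

    struct key { const char *name; };

    static const struct key *select_key(const struct key *pairwise,
                                        const struct key *default_unicast)
    {
        const struct key *key = pairwise;

        if (!key)
            key = default_unicast;
        return key;
    }

    int main(void)
    {
        struct key def = { "default-unicast" };
        const struct key *k = select_key(NULL, &def);

        printf("using %s key\n", k ? k->name : "no");
        return 0;
    }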
1485 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1486 +index ca1e9de388910..88868bf300513 100644
1487 +--- a/net/mac80211/tx.c
1488 ++++ b/net/mac80211/tx.c
1489 +@@ -4278,7 +4278,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
1490 + struct ethhdr *ehdr = (struct ethhdr *)skb->data;
1491 + struct ieee80211_key *key;
1492 + struct sta_info *sta;
1493 +- bool offload = true;
1494 +
1495 + if (unlikely(skb->len < ETH_HLEN)) {
1496 + kfree_skb(skb);
1497 +@@ -4294,18 +4293,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
1498 +
1499 + if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
1500 + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
1501 +- sdata->control_port_protocol == ehdr->h_proto))
1502 +- offload = false;
1503 +- else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
1504 +- (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
1505 +- key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
1506 +- offload = false;
1507 +-
1508 +- if (offload)
1509 +- ieee80211_8023_xmit(sdata, dev, sta, key, skb);
1510 +- else
1511 +- ieee80211_subif_start_xmit(skb, dev);
1512 ++ sdata->control_port_protocol == ehdr->h_proto))
1513 ++ goto skip_offload;
1514 ++
1515 ++ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
1516 ++ if (!key)
1517 ++ key = rcu_dereference(sdata->default_unicast_key);
1518 ++
1519 ++ if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
1520 ++ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
1521 ++ goto skip_offload;
1522 ++
1523 ++ ieee80211_8023_xmit(sdata, dev, sta, key, skb);
1524 ++ goto out;
1525 +
1526 ++skip_offload:
1527 ++ ieee80211_subif_start_xmit(skb, dev);
1528 + out:
1529 + rcu_read_unlock();
1530 +
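The 802.3 transmit path drops the `offload` flag in favor of straight-line code: each disqualifying condition (unauthorized station, TKIP, key not uploaded to hardware) jumps directly to the software path, and the same default-unicast-key fallback as on the receive side runs before those checks. A compact sketch of that goto-based control flow (illustrative only):

    #include <stdio.h>
    #include <stdbool.h>

    static void xmit(bool authorized, bool key_ok)
    {
        if (!authorized)
            goto skip_offload;
        if (!key_ok)
            goto skip_offload;

        printf("hardware offload path\n");
        goto out;

    skip_offload:
        printf("software path\n");
    out:
        ;   /* unlock/cleanup would go here */
    }

    int main(void)
    {
        xmit(true, true);
        xmit(true, false);
        return 0;
    }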
1531 +diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
1532 +index 23d8685453627..2c1ffc9ba2eb2 100644
1533 +--- a/net/switchdev/switchdev.c
1534 ++++ b/net/switchdev/switchdev.c
1535 +@@ -460,10 +460,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
1536 + extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
1537 +
1538 + if (check_cb(dev)) {
1539 +- /* This flag is only checked if the return value is success. */
1540 +- port_obj_info->handled = true;
1541 +- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
1542 +- extack);
1543 ++ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
1544 ++ extack);
1545 ++ if (err != -EOPNOTSUPP)
1546 ++ port_obj_info->handled = true;
1547 ++ return err;
1548 + }
1549 +
1550 + /* Switch ports might be stacked under e.g. a LAG. Ignore the
1551 +@@ -515,9 +516,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
1552 + int err = -EOPNOTSUPP;
1553 +
1554 + if (check_cb(dev)) {
1555 +- /* This flag is only checked if the return value is success. */
1556 +- port_obj_info->handled = true;
1557 +- return del_cb(dev, port_obj_info->obj);
1558 ++ err = del_cb(dev, port_obj_info->obj);
1559 ++ if (err != -EOPNOTSUPP)
1560 ++ port_obj_info->handled = true;
1561 ++ return err;
1562 + }
1563 +
1564 + /* Switch ports might be stacked under e.g. a LAG. Ignore the
1565 +@@ -568,9 +570,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
1566 + int err = -EOPNOTSUPP;
1567 +
1568 + if (check_cb(dev)) {
1569 +- port_attr_info->handled = true;
1570 +- return set_cb(dev, port_attr_info->attr,
1571 +- port_attr_info->trans);
1572 ++ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
1573 ++ if (err != -EOPNOTSUPP)
1574 ++ port_attr_info->handled = true;
1575 ++ return err;
1576 + }
1577 +
1578 + /* Switch ports might be stacked under e.g. a LAG. Ignore the
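All three switchdev dispatch helpers had the same flaw: `handled` was set before the driver callback ran, so a driver returning -EOPNOTSUPP was still recorded as having consumed the event. The fix marks the event handled only when the callback returns anything other than -EOPNOTSUPP. A self-contained sketch of the pattern (hypothetical names):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct event { bool handled; };

    static int dispatch(struct event *ev, int (*cb)(void))
    {
        int err = cb();

        if (err != -EOPNOTSUPP)
            ev->handled = true;
        return err;
    }

    static int unsupported_cb(void) { return -EOPNOTSUPP; }
    static int ok_cb(void)          { return 0; }

    int main(void)
    {
        struct event ev = { false };

        dispatch(&ev, unsupported_cb);
        printf("after unsupported: handled=%d\n", ev.handled);
        dispatch(&ev, ok_cb);
        printf("after ok: handled=%d\n", ev.handled);
        return 0;
    }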
1579 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1580 +index 770ad25f1907c..d393401db1ec5 100644
1581 +--- a/sound/pci/hda/hda_intel.c
1582 ++++ b/sound/pci/hda/hda_intel.c
1583 +@@ -2484,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = {
1584 + /* CometLake-S */
1585 + { PCI_DEVICE(0x8086, 0xa3f0),
1586 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1587 ++ /* CometLake-R */
1588 ++ { PCI_DEVICE(0x8086, 0xf0c8),
1589 ++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1590 + /* Icelake */
1591 + { PCI_DEVICE(0x8086, 0x34c8),
1592 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1593 +@@ -2507,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = {
1594 + /* Alderlake-S */
1595 + { PCI_DEVICE(0x8086, 0x7ad0),
1596 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1597 ++ /* Alderlake-P */
1598 ++ { PCI_DEVICE(0x8086, 0x51c8),
1599 ++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1600 + /* Elkhart Lake */
1601 + { PCI_DEVICE(0x8086, 0x4b55),
1602 + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
1603 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
1604 +index d12b4799c3cb7..dc1ab4fc93a5b 100644
1605 +--- a/sound/pci/hda/patch_hdmi.c
1606 ++++ b/sound/pci/hda/patch_hdmi.c
1607 +@@ -4349,6 +4349,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
1608 + HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
1609 + HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
1610 + HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
1611 ++HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
1612 + HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
1613 + HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
1614 + HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
1615 +diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
1616 +index 8b0ddc4b8227b..8d65004c917a1 100644
1617 +--- a/sound/soc/sof/intel/hda-codec.c
1618 ++++ b/sound/soc/sof/intel/hda-codec.c
1619 +@@ -93,8 +93,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev)
1620 + * has been recorded in STATESTS
1621 + */
1622 + if (codec->jacktbl.used)
1623 +- schedule_delayed_work(&codec->jackpoll_work,
1624 +- codec->jackpoll_interval);
1625 ++ pm_request_resume(&codec->core.dev);
1626 + }
1627 + #else
1628 + void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
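Instead of scheduling the codec's jack-poll work directly, hda_codec_jack_check() now issues pm_request_resume() and lets the codec's runtime-resume path perform the jack check. A stripped-down sketch of that inversion (hypothetical names; synchronous where the kernel call is asynchronous):

    #include <stdio.h>

    struct device {
        void (*resume)(struct device *dev);
    };

    static void request_resume(struct device *dev)
    {
        /* pm_request_resume() queues this asynchronously in the kernel. */
        dev->resume(dev);
    }

    static void codec_resume(struct device *dev)
    {
        (void)dev;
        printf("resumed: running jack check\n");
    }

    int main(void)
    {
        struct device codec = { .resume = codec_resume };

        request_resume(&codec);
        return 0;
    }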
1629 +diff --git a/tools/objtool/check.c b/tools/objtool/check.c
1630 +index c6ab44543c92a..956383d5fa62e 100644
1631 +--- a/tools/objtool/check.c
1632 ++++ b/tools/objtool/check.c
1633 +@@ -2921,14 +2921,10 @@ int check(struct objtool_file *file)
1634 + warnings += ret;
1635 +
1636 + out:
1637 +- if (ret < 0) {
1638 +- /*
1639 +- * Fatal error. The binary is corrupt or otherwise broken in
1640 +- * some way, or objtool itself is broken. Fail the kernel
1641 +- * build.
1642 +- */
1643 +- return ret;
1644 +- }
1645 +-
1646 ++ /*
1647 ++ * For now, don't fail the kernel build on fatal warnings. These
1648 ++ * errors are still fairly common due to the growing matrix of
1649 ++ * supported toolchains and their recent pace of change.
1650 ++ */
1651 + return 0;
1652 + }
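The early `return ret` on fatal errors is gone, so objtool reports problems but no longer fails the kernel build; the new comment cites toolchain churn as the rationale. A trivial sketch of the resulting "warn, don't fail" exit policy:

    #include <stdio.h>

    int main(void)
    {
        int warnings = 2;   /* pretend two checks failed */

        if (warnings)
            fprintf(stderr, "%d warning(s) found\n", warnings);

        return 0;           /* never fail the surrounding build */
    }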
1653 +diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
1654 +index 9452cfb01ef19..f4f3e8d995930 100644
1655 +--- a/tools/objtool/elf.c
1656 ++++ b/tools/objtool/elf.c
1657 +@@ -425,6 +425,13 @@ static int read_symbols(struct elf *elf)
1658 + list_add(&sym->list, entry);
1659 + elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
1660 + elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
1661 ++
1662 ++ /*
1663 ++ * Don't store empty STT_NOTYPE symbols in the rbtree. They
1664 ++ * can exist within a function, confusing the sorting.
1665 ++ */
1666 ++ if (!sym->len)
1667 ++ rb_erase(&sym->node, &sym->sec->symbol_tree);
1668 + }
1669 +
1670 + if (stats)
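Zero-length STT_NOTYPE symbols (local labels and the like) can sit inside a function body; left in the per-section rbtree they confuse lookups by address, so they remain hashed by name and index but are erased from the tree. A small sketch of why a by-address lookup must skip such entries (hypothetical, linear search in place of the rbtree):

    #include <stdio.h>

    struct symbol {
        const char *name;
        unsigned long addr;
        unsigned long len;
    };

    static const struct symbol *find_by_addr(const struct symbol *tab,
                                             int n, unsigned long addr)
    {
        for (int i = 0; i < n; i++) {
            if (!tab[i].len)    /* skip empty STT_NOTYPE-style entries */
                continue;
            if (addr >= tab[i].addr && addr < tab[i].addr + tab[i].len)
                return &tab[i];
        }
        return NULL;
    }

    int main(void)
    {
        const struct symbol tab[] = {
            { "func",  0x1000, 0x40 },
            { "label", 0x1010, 0 },     /* inside func, length 0 */
        };
        const struct symbol *s = find_by_addr(tab, 2, 0x1010);

        printf("0x1010 belongs to %s\n", s ? s->name : "nothing");
        return 0;
    }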
1671 +diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
1672 +index cd089a5058594..ead9e51f75ada 100644
1673 +--- a/tools/power/x86/intel-speed-select/isst-config.c
1674 ++++ b/tools/power/x86/intel-speed-select/isst-config.c
1675 +@@ -1245,6 +1245,8 @@ static void dump_isst_config(int arg)
1676 + isst_ctdp_display_information_end(outf);
1677 + }
1678 +
1679 ++static void adjust_scaling_max_from_base_freq(int cpu);
1680 ++
1681 + static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1682 + void *arg4)
1683 + {
1684 +@@ -1263,6 +1265,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1685 + int pkg_id = get_physical_package_id(cpu);
1686 + int die_id = get_physical_die_id(cpu);
1687 +
1688 ++ /* Wait for updated base frequencies */
1689 ++ usleep(2000);
1690 ++
1691 + fprintf(stderr, "Option is set to online/offline\n");
1692 + ctdp_level.core_cpumask_size =
1693 + alloc_cpu_set(&ctdp_level.core_cpumask);
1694 +@@ -1279,6 +1284,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
1695 + if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
1696 + fprintf(stderr, "online cpu %d\n", i);
1697 + set_cpu_online_offline(i, 1);
1698 ++ adjust_scaling_max_from_base_freq(i);
1699 + } else {
1700 + fprintf(stderr, "offline cpu %d\n", i);
1701 + set_cpu_online_offline(i, 0);
1702 +@@ -1436,6 +1442,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
1703 + return 0;
1704 + }
1705 +
1706 ++static int no_turbo(void)
1707 ++{
1708 ++ return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
1709 ++}
1710 ++
1711 ++static void adjust_scaling_max_from_base_freq(int cpu)
1712 ++{
1713 ++ int base_freq, scaling_max_freq;
1714 ++
1715 ++ scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
1716 ++ base_freq = get_cpufreq_base_freq(cpu);
1717 ++ if (scaling_max_freq < base_freq || no_turbo())
1718 ++ set_cpufreq_scaling_min_max(cpu, 1, base_freq);
1719 ++}
1720 ++
1721 ++static void adjust_scaling_min_from_base_freq(int cpu)
1722 ++{
1723 ++ int base_freq, scaling_min_freq;
1724 ++
1725 ++ scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
1726 ++ base_freq = get_cpufreq_base_freq(cpu);
1727 ++ if (scaling_min_freq < base_freq)
1728 ++ set_cpufreq_scaling_min_max(cpu, 0, base_freq);
1729 ++}
1730 ++
1731 + static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
1732 + {
1733 + struct isst_pkg_ctdp_level_info *ctdp_level;
1734 +@@ -1533,6 +1564,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu)
1735 + continue;
1736 +
1737 + set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
1738 ++ adjust_scaling_min_from_base_freq(i);
1739 + }
1740 + }
1741 +
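The new helpers re-read scaling_max_freq/scaling_min_freq from sysfs and raise them to the CPU's base frequency where needed (honoring intel_pstate's no_turbo), and set_tdp_level_for_cpu() sleeps 2 ms so cpufreq can publish updated base frequencies first. A minimal userspace sketch of the parse_int_file()-style sysfs reader these helpers lean on (hypothetical simplification; errors collapse to a default value):

    #include <stdarg.h>
    #include <stdio.h>

    static int read_sysfs_int(int def, const char *fmt, ...)
    {
        char path[256];
        va_list ap;
        FILE *f;
        int val = def;

        va_start(ap, fmt);
        vsnprintf(path, sizeof(path), fmt, ap);
        va_end(ap);

        f = fopen(path, "r");
        if (!f)
            return def;
        if (fscanf(f, "%d", &val) != 1)
            val = def;
        fclose(f);
        return val;
    }

    int main(void)
    {
        int no_turbo = read_sysfs_int(0,
            "/sys/devices/system/cpu/intel_pstate/no_turbo");

        printf("no_turbo = %d\n", no_turbo);
        return 0;
    }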
1742 +diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
1743 +index cb53a8b777e68..c25cf7cd45e9f 100644
1744 +--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
1745 ++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
1746 +@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
1747 + LOAD_DFORM_TEST(ldu);
1748 + LOAD_XFORM_TEST(ldx);
1749 + LOAD_XFORM_TEST(ldux);
1750 +- LOAD_DFORM_TEST(lmw);
1751 + STORE_DFORM_TEST(stb);
1752 + STORE_XFORM_TEST(stbx);
1753 + STORE_DFORM_TEST(stbu);
1754 +@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
1755 + STORE_XFORM_TEST(stdx);
1756 + STORE_DFORM_TEST(stdu);
1757 + STORE_XFORM_TEST(stdux);
1758 ++
1759 ++#ifdef __BIG_ENDIAN__
1760 ++ LOAD_DFORM_TEST(lmw);
1761 + STORE_DFORM_TEST(stmw);
1762 ++#endif
1763 +
1764 + return rc;
1765 + }
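The lmw/stmw tests now build only under __BIG_ENDIAN__: the multiple-word load/store forms are not supported in little-endian mode, so exercising them there is invalid. A tiny sketch of the compile-time guard (GCC predefines __BIG_ENDIAN__ on big-endian powerpc targets):

    #include <stdio.h>

    int main(void)
    {
    #ifdef __BIG_ENDIAN__
        printf("big-endian build: multiple-word tests enabled\n");
    #else
        printf("little-endian build: multiple-word tests skipped\n");
    #endif
        return 0;
    }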