From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Sun, 07 Feb 2021 15:24:35
Message-Id: 1612711455.23d16d39e34beaf6a11c475c8bbd854fd9014184.alicef@gentoo
commit: 23d16d39e34beaf6a11c475c8bbd854fd9014184
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 7 15:24:04 2021 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sun Feb 7 15:24:15 2021 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=23d16d39

Linux patch 5.4.96

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README | 4 +
 1095_linux-5.4.96.patch | 1218 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1222 insertions(+)

diff --git a/0000_README b/0000_README
index 6caa3b9..8aa848a 100644
--- a/0000_README
+++ b/0000_README
@@ -423,6 +423,10 @@ Patch: 1094_linux-5.4.95.patch
 From: http://www.kernel.org
 Desc: Linux 5.4.95
 
+Patch: 1095_linux-5.4.96.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.96
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1095_linux-5.4.96.patch b/1095_linux-5.4.96.patch
new file mode 100644
index 0000000..5e7c4fa
--- /dev/null
+++ b/1095_linux-5.4.96.patch
@@ -0,0 +1,1218 @@
+diff --git a/Makefile b/Makefile
+index aa3c2e834442e..7a47a2594f957 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 95
++SUBLEVEL = 96
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index 51d867cf146c1..6c295a231882a 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -247,11 +247,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
+
+
+ /*
+- * The linear kernel range starts at the bottom of the virtual address
+- * space. Testing the top bit for the start of the region is a
+- * sufficient check and avoids having to worry about the tag.
++ * Check whether an arbitrary address is within the linear map, which
++ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
++ * kernel's TTBR1 address range.
+ */
+-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
++#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+
+ #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+ #define __kimg_to_phys(addr) ((addr) - kimage_voffset)
+@@ -332,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
+
+ #define virt_addr_valid(addr) ({ \
+- __typeof__(addr) __addr = addr; \
++ __typeof__(addr) __addr = __tag_reset(addr); \
+ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+ })
+
+diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
+index 67a9ba9eaa96b..cde44c13dda1b 100644
+--- a/arch/arm64/mm/physaddr.c
++++ b/arch/arm64/mm/physaddr.c
+@@ -9,7 +9,7 @@
+
+ phys_addr_t __virt_to_phys(unsigned long x)
+ {
+- WARN(!__is_lm_address(x),
++ WARN(!__is_lm_address(__tag_reset(x)),
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ (void *)x,
+ (void *)x);
+diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
+index 86f20d520a079..b40d0295d8129 100644
+--- a/arch/x86/include/asm/msr.h
++++ b/arch/x86/include/asm/msr.h
+@@ -88,7 +88,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
+ * think of extending them - you will be slapped with a stinking trout or a frozen
+ * shark will reach you, wherever you are! You've been warned.
+ */
+-static inline unsigned long long notrace __rdmsr(unsigned int msr)
++static __always_inline unsigned long long __rdmsr(unsigned int msr)
+ {
+ DECLARE_ARGS(val, low, high);
+
+@@ -100,7 +100,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
+ return EAX_EDX_VAL(val, low, high);
+ }
+
+-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
++static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
+ {
+ asm volatile("1: wrmsr\n"
+ "2:\n"
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d2213220099d3..5808baa950c35 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -886,11 +886,14 @@ generic_make_request_checks(struct bio *bio)
+ }
+
+ /*
+- * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+- * if queue is not a request based queue.
++ * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
++ * with BLK_STS_AGAIN status in order to catch -EAGAIN and
++ * to give a chance to the caller to repeat request gracefully.
+ */
+- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
+- goto not_supported;
++ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
++ status = BLK_STS_AGAIN;
++ goto end_io;
++ }
+
+ if (should_fail_bio(bio))
+ goto end_io;
+diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
+index d831a61e0010e..383c7029d3cee 100644
+--- a/drivers/acpi/thermal.c
++++ b/drivers/acpi/thermal.c
+@@ -174,6 +174,8 @@ struct acpi_thermal {
+ int tz_enabled;
+ int kelvin_offset;
+ struct work_struct thermal_check_work;
++ struct mutex thermal_check_lock;
++ refcount_t thermal_check_count;
+ };
+
+ /* --------------------------------------------------------------------------
+@@ -494,17 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
+ return 0;
+ }
+
+-static void acpi_thermal_check(void *data)
+-{
+- struct acpi_thermal *tz = data;
+-
+- if (!tz->tz_enabled)
+- return;
+-
+- thermal_zone_device_update(tz->thermal_zone,
+- THERMAL_EVENT_UNSPECIFIED);
+-}
+-
+ /* sys I/F for generic thermal sysfs support */
+
+ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
+@@ -538,6 +529,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
+ return 0;
+ }
+
++static void acpi_thermal_check_fn(struct work_struct *work);
++
+ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+ {
+@@ -563,7 +556,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s kernel ACPI thermal control\n",
+ tz->tz_enabled ? "Enable" : "Disable"));
+- acpi_thermal_check(tz);
++ acpi_thermal_check_fn(&tz->thermal_check_work);
+ }
+ return 0;
+ }
+@@ -932,6 +925,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+ Driver Interface
+ -------------------------------------------------------------------------- */
+
++static void acpi_queue_thermal_check(struct acpi_thermal *tz)
++{
++ if (!work_pending(&tz->thermal_check_work))
++ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++}
++
+ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+ {
+ struct acpi_thermal *tz = acpi_driver_data(device);
+@@ -942,17 +941,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+
+ switch (event) {
+ case ACPI_THERMAL_NOTIFY_TEMPERATURE:
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ break;
+ case ACPI_THERMAL_NOTIFY_THRESHOLDS:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+ case ACPI_THERMAL_NOTIFY_DEVICES:
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+- acpi_thermal_check(tz);
++ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
+ break;
+@@ -1052,7 +1051,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
+ {
+ struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+ thermal_check_work);
+- acpi_thermal_check(tz);
++
++ if (!tz->tz_enabled)
++ return;
++ /*
++ * In general, it is not sufficient to check the pending bit, because
++ * subsequent instances of this function may be queued after one of them
++ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
++ * one of them is running, though, because it may have done the actual
++ * check some time ago, so allow at least one of them to block on the
++ * mutex while another one is running the update.
++ */
++ if (!refcount_dec_not_one(&tz->thermal_check_count))
++ return;
++
++ mutex_lock(&tz->thermal_check_lock);
++
++ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
++
++ refcount_inc(&tz->thermal_check_count);
++
++ mutex_unlock(&tz->thermal_check_lock);
+ }
+
+ static int acpi_thermal_add(struct acpi_device *device)
+@@ -1084,6 +1103,8 @@ static int acpi_thermal_add(struct acpi_device *device)
+ if (result)
+ goto free_memory;
+
++ refcount_set(&tz->thermal_check_count, 3);
++ mutex_init(&tz->thermal_check_lock);
+ INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
+ pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+@@ -1149,7 +1170,7 @@ static int acpi_thermal_resume(struct device *dev)
+ tz->state.active |= tz->trips.active[i].flags.enabled;
+ }
+
+- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
++ acpi_queue_thermal_check(tz);
+
+ return AE_OK;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 959eb075d11ed..c18f39271b034 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1914,6 +1914,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
+ initial_link_setting;
+ uint32_t link_bw;
+
++ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
++ return false;
++
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index bb7add5ea2273..a6d5beada6634 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -257,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ .num_banks = 8,
+ .num_chans = 4,
+ .vmm_page_size_bytes = 4096,
+- .dram_clock_change_latency_us = 23.84,
++ .dram_clock_change_latency_us = 11.72,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3600,
+ .xfc_bus_transport_time_us = 4,
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index b16aea0e39992..6dd29bad1609f 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -421,15 +421,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+- if (!priv->master_mii_bus)
++ if (!priv->master_mii_bus) {
++ of_node_put(dn);
+ return -EPROBE_DEFER;
++ }
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+- if (!priv->slave_mii_bus)
++ if (!priv->slave_mii_bus) {
++ of_node_put(dn);
+ return -ENOMEM;
++ }
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 9040340fad198..c3079f436f6d7 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -4752,6 +4752,12 @@ static void ibmvnic_tasklet(void *data)
+ while (!done) {
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
++ /* This barrier makes sure ibmvnic_next_crq()'s
++ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
++ * before ibmvnic_handle_crq()'s
++ * switch(gen_crq->first) and switch(gen_crq->cmd).
++ */
++ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7a964271959d8..c2cabd77884bf 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1295,8 +1295,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+ }
+
+ length = (io.nblocks + 1) << ns->lba_shift;
+- meta_len = (io.nblocks + 1) * ns->ms;
+- metadata = nvme_to_user_ptr(io.metadata);
++
++ if ((io.control & NVME_RW_PRINFO_PRACT) &&
++ ns->ms == sizeof(struct t10_pi_tuple)) {
++ /*
++ * Protection information is stripped/inserted by the
++ * controller.
++ */
++ if (nvme_to_user_ptr(io.metadata))
++ return -EINVAL;
++ meta_len = 0;
++ metadata = NULL;
++ } else {
++ meta_len = (io.nblocks + 1) * ns->ms;
++ metadata = nvme_to_user_ptr(io.metadata);
++ }
+
+ if (ns->ext) {
+ length += meta_len;
+diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
+index 5baf64dfb24de..1bebad36bf2e5 100644
+--- a/drivers/phy/motorola/phy-cpcap-usb.c
++++ b/drivers/phy/motorola/phy-cpcap-usb.c
+@@ -625,35 +625,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
+ generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
+ if (IS_ERR(generic_phy)) {
+ error = PTR_ERR(generic_phy);
+- return PTR_ERR(generic_phy);
++ goto out_reg_disable;
+ }
+
+ phy_set_drvdata(generic_phy, ddata);
+
+ phy_provider = devm_of_phy_provider_register(ddata->dev,
+ of_phy_simple_xlate);
+- if (IS_ERR(phy_provider))
+- return PTR_ERR(phy_provider);
++ if (IS_ERR(phy_provider)) {
++ error = PTR_ERR(phy_provider);
++ goto out_reg_disable;
++ }
+
+ error = cpcap_usb_init_optional_pins(ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ cpcap_usb_init_optional_gpios(ddata);
+
+ error = cpcap_usb_init_iio(ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ error = cpcap_usb_init_interrupts(pdev, ddata);
+ if (error)
+- return error;
++ goto out_reg_disable;
+
+ usb_add_phy_dev(&ddata->phy);
+ atomic_set(&ddata->active, 1);
+ schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
+
+ return 0;
++
++out_reg_disable:
++ regulator_disable(ddata->vusb);
++
++ return error;
+ }
+
+ static int cpcap_usb_phy_remove(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
+index 37035dca469cf..d4fc2cbf78703 100644
+--- a/drivers/platform/x86/intel-vbtn.c
++++ b/drivers/platform/x86/intel-vbtn.c
+@@ -203,6 +203,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+ },
+ },
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
++ },
++ },
+ {} /* Array terminator */
+ };
+
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 1e072dbba30d6..7ed1189a7200c 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -231,6 +231,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
+ .properties = digma_citi_e200_props,
+ };
+
++static const struct property_entry estar_beauty_hd_props[] = {
++ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
++ { }
++};
++
++static const struct ts_dmi_data estar_beauty_hd_data = {
++ .acpi_name = "GDIX1001:00",
++ .properties = estar_beauty_hd_props,
++};
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -747,6 +757,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
++ {
++ /* Estar Beauty HD (MID 7316R) */
++ .driver_data = (void *)&estar_beauty_hd_data,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
++ },
++ },
+ {
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
+diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
+index 522636e946282..c8bf8c7ada6a7 100644
+--- a/drivers/scsi/fnic/vnic_dev.c
++++ b/drivers/scsi/fnic/vnic_dev.c
+@@ -444,7 +444,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
+ pr_err("error in devcmd2 init");
+- return -ENODEV;
++ err = -ENODEV;
++ goto err_free_wq;
+ }
+
+ /*
+@@ -460,7 +461,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+- goto err_free_wq;
++ goto err_disable_wq;
+
+ vdev->devcmd2->result =
+ (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
+@@ -481,8 +482,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+
+ err_free_desc_ring:
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+-err_free_wq:
++err_disable_wq:
+ vnic_wq_disable(&vdev->devcmd2->wq);
++err_free_wq:
+ vnic_wq_free(&vdev->devcmd2->wq);
+ err_free_devcmd2:
+ kfree(vdev->devcmd2);
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index 8a76284b59b08..523809a8a2323 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -2881,8 +2881,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- if (sdev->type == TYPE_DISK)
++ if (sdev->type == TYPE_DISK) {
+ sdev->allow_restart = 1;
++ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
++ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return 0;
+ }
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 52e8666598531..e5b18e5d46dac 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1619,8 +1619,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+ }
+
+ /*
+@@ -1639,6 +1644,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
++skip_resp:
+ fc_exch_release(ep);
+ return;
+ rel:
+@@ -1895,10 +1901,16 @@ static void fc_exch_reset(struct fc_exch *ep)
+
+ fc_exch_hold(ep);
+
+- if (!rc)
++ if (!rc) {
+ fc_exch_delete(ep);
++ } else {
++ FC_EXCH_DBG(ep, "ep is completed already,"
++ "hence skip calling the resp\n");
++ goto skip_resp;
++ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
++skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+ }
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index d4d1104fac991..8cd0a87764dfd 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
+ res = mutex_lock_interruptible(&rport->mutex);
+ if (res)
+ goto out;
+- scsi_target_block(&shost->shost_gendev);
++ if (rport->state != SRP_RPORT_FAIL_FAST)
++ /*
++ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
++ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
++ * later is ok though, scsi_internal_device_unblock_nowait()
++ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
++ */
++ scsi_target_block(&shost->shost_gendev);
+ res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
+ pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ dev_name(&shost->shost_gendev), rport->state, res);
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 86e280edf8040..7f644a58db511 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -347,33 +347,10 @@ static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
+ return -ENOMEM;
+
+ ref->root_id = root_id;
+- if (key) {
++ if (key)
+ ref->key_for_search = *key;
+- /*
+- * We can often find data backrefs with an offset that is too
+- * large (>= LLONG_MAX, maximum allowed file offset) due to
+- * underflows when subtracting a file's offset with the data
+- * offset of its corresponding extent data item. This can
+- * happen for example in the clone ioctl.
+- * So if we detect such case we set the search key's offset to
+- * zero to make sure we will find the matching file extent item
+- * at add_all_parents(), otherwise we will miss it because the
+- * offset taken form the backref is much larger then the offset
+- * of the file extent item. This can make us scan a very large
+- * number of file extent items, but at least it will not make
+- * us miss any.
+- * This is an ugly workaround for a behaviour that should have
+- * never existed, but it does and a fix for the clone ioctl
+- * would touch a lot of places, cause backwards incompatibility
+- * and would not fix the problem for extents cloned with older
+- * kernels.
+- */
+- if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
+- ref->key_for_search.offset >= LLONG_MAX)
+- ref->key_for_search.offset = 0;
+- } else {
++ else
+ memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
+- }
+
+ ref->inode_list = NULL;
+ ref->level = level;
+@@ -409,10 +386,36 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
+ wanted_disk_byte, count, sc, gfp_mask);
+ }
+
++static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
++{
++ struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
++ struct rb_node *parent = NULL;
++ struct prelim_ref *ref = NULL;
++ struct prelim_ref target = {0};
++ int result;
++
++ target.parent = bytenr;
++
++ while (*p) {
++ parent = *p;
++ ref = rb_entry(parent, struct prelim_ref, rbnode);
++ result = prelim_ref_compare(ref, &target);
++
++ if (result < 0)
++ p = &(*p)->rb_left;
++ else if (result > 0)
++ p = &(*p)->rb_right;
++ else
++ return 1;
++ }
++ return 0;
++}
++
+ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+- struct ulist *parents, struct prelim_ref *ref,
++ struct ulist *parents,
++ struct preftrees *preftrees, struct prelim_ref *ref,
+ int level, u64 time_seq, const u64 *extent_item_pos,
+- u64 total_refs, bool ignore_offset)
++ bool ignore_offset)
+ {
+ int ret = 0;
+ int slot;
+@@ -424,6 +427,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ u64 disk_byte;
+ u64 wanted_disk_byte = ref->wanted_disk_byte;
+ u64 count = 0;
++ u64 data_offset;
+
+ if (level != 0) {
+ eb = path->nodes[level];
+@@ -434,18 +438,26 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ }
+
+ /*
+- * We normally enter this function with the path already pointing to
+- * the first item to check. But sometimes, we may enter it with
+- * slot==nritems. In that case, go to the next leaf before we continue.
++ * 1. We normally enter this function with the path already pointing to
++ * the first item to check. But sometimes, we may enter it with
++ * slot == nritems.
++ * 2. We are searching for normal backref but bytenr of this leaf
++ * matches shared data backref
++ * 3. The leaf owner is not equal to the root we are searching
++ *
++ * For these cases, go to the next leaf before we continue.
+ */
+- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++ eb = path->nodes[0];
++ if (path->slots[0] >= btrfs_header_nritems(eb) ||
++ is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb)) {
+ if (time_seq == SEQ_LAST)
+ ret = btrfs_next_leaf(root, path);
+ else
+ ret = btrfs_next_old_leaf(root, path, time_seq);
+ }
+
+- while (!ret && count < total_refs) {
++ while (!ret && count < ref->count) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+
+@@ -455,13 +467,31 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+ key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+
++ /*
++ * We are searching for normal backref but bytenr of this leaf
++ * matches shared data backref, OR
++ * the leaf owner is not equal to the root we are searching for
++ */
++ if (slot == 0 &&
++ (is_shared_data_backref(preftrees, eb->start) ||
++ ref->root_id != btrfs_header_owner(eb))) {
++ if (time_seq == SEQ_LAST)
++ ret = btrfs_next_leaf(root, path);
++ else
++ ret = btrfs_next_old_leaf(root, path, time_seq);
++ continue;
++ }
+ fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
++ data_offset = btrfs_file_extent_offset(eb, fi);
+
+ if (disk_byte == wanted_disk_byte) {
+ eie = NULL;
+ old = NULL;
+- count++;
++ if (ref->key_for_search.offset == key.offset - data_offset)
++ count++;
++ else
++ goto next;
+ if (extent_item_pos) {
+ ret = check_extent_in_eb(&key, eb, fi,
+ *extent_item_pos,
+@@ -502,9 +532,9 @@ next:
+ */
+ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 time_seq,
++ struct preftrees *preftrees,
+ struct prelim_ref *ref, struct ulist *parents,
+- const u64 *extent_item_pos, u64 total_refs,
+- bool ignore_offset)
++ const u64 *extent_item_pos, bool ignore_offset)
+ {
+ struct btrfs_root *root;
+ struct btrfs_key root_key;
+@@ -513,6 +543,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ int root_level;
+ int level = ref->level;
+ int index;
++ struct btrfs_key search_key = ref->key_for_search;
+
+ root_key.objectid = ref->root_id;
+ root_key.type = BTRFS_ROOT_ITEM_KEY;
+@@ -545,13 +576,33 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ goto out;
+ }
+
++ /*
++ * We can often find data backrefs with an offset that is too large
++ * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
++ * subtracting a file's offset with the data offset of its
++ * corresponding extent data item. This can happen for example in the
++ * clone ioctl.
++ *
++ * So if we detect such case we set the search key's offset to zero to
++ * make sure we will find the matching file extent item at
++ * add_all_parents(), otherwise we will miss it because the offset
++ * taken form the backref is much larger then the offset of the file
++ * extent item. This can make us scan a very large number of file
++ * extent items, but at least it will not make us miss any.
++ *
++ * This is an ugly workaround for a behaviour that should have never
++ * existed, but it does and a fix for the clone ioctl would touch a lot
++ * of places, cause backwards incompatibility and would not fix the
++ * problem for extents cloned with older kernels.
++ */
++ if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
++ search_key.offset >= LLONG_MAX)
++ search_key.offset = 0;
+ path->lowest_level = level;
+ if (time_seq == SEQ_LAST)
+- ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
+- 0, 0);
++ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+ else
+- ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
+- time_seq);
++ ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
+
+ /* root node has been locked, we can release @subvol_srcu safely here */
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+@@ -574,8 +625,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+ eb = path->nodes[level];
+ }
+
+- ret = add_all_parents(root, path, parents, ref, level, time_seq,
+- extent_item_pos, total_refs, ignore_offset);
++ ret = add_all_parents(root, path, parents, preftrees, ref, level,
++ time_seq, extent_item_pos, ignore_offset);
+ out:
+ path->lowest_level = 0;
+ btrfs_release_path(path);
+@@ -609,7 +660,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
+ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 time_seq,
+ struct preftrees *preftrees,
+- const u64 *extent_item_pos, u64 total_refs,
++ const u64 *extent_item_pos,
+ struct share_check *sc, bool ignore_offset)
+ {
+ int err;
+@@ -653,9 +704,9 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+ ret = BACKREF_FOUND_SHARED;
+ goto out;
+ }
+- err = resolve_indirect_ref(fs_info, path, time_seq, ref,
+- parents, extent_item_pos,
+- total_refs, ignore_offset);
++ err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
++ ref, parents, extent_item_pos,
++ ignore_offset);
+ /*
+ * we can only tolerate ENOENT,otherwise,we should catch error
+ * and return directly.
+@@ -758,8 +809,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
+ */
+ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head, u64 seq,
+- struct preftrees *preftrees, u64 *total_refs,
+- struct share_check *sc)
++ struct preftrees *preftrees, struct share_check *sc)
+ {
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+@@ -793,7 +843,6 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ default:
+ BUG();
+ }
+- *total_refs += count;
+ switch (node->type) {
+ case BTRFS_TREE_BLOCK_REF_KEY: {
+ /* NORMAL INDIRECT METADATA backref */
+@@ -876,7 +925,7 @@ out:
+ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_path *path, u64 bytenr,
+ int *info_level, struct preftrees *preftrees,
+- u64 *total_refs, struct share_check *sc)
++ struct share_check *sc)
+ {
+ int ret = 0;
+ int slot;
+@@ -900,7 +949,6 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ flags = btrfs_extent_flags(leaf, ei);
+- *total_refs += btrfs_extent_refs(leaf, ei);
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+
+ ptr = (unsigned long)(ei + 1);
+@@ -1125,8 +1173,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
+ struct prelim_ref *ref;
+ struct rb_node *node;
+ struct extent_inode_elem *eie = NULL;
+- /* total of both direct AND indirect refs! */
+- u64 total_refs = 0;
+ struct preftrees preftrees = {
+ .direct = PREFTREE_INIT,
+ .indirect = PREFTREE_INIT,
+@@ -1195,7 +1241,7 @@ again:
+ }
+ spin_unlock(&delayed_refs->lock);
+ ret = add_delayed_refs(fs_info, head, time_seq,
+- &preftrees, &total_refs, sc);
++ &preftrees, sc);
+ mutex_unlock(&head->mutex);
+ if (ret)
+ goto out;
+@@ -1216,8 +1262,7 @@ again:
+ (key.type == BTRFS_EXTENT_ITEM_KEY ||
+ key.type == BTRFS_METADATA_ITEM_KEY)) {
+ ret = add_inline_refs(fs_info, path, bytenr,
+- &info_level, &preftrees,
+- &total_refs, sc);
++ &info_level, &preftrees, sc);
+ if (ret)
+ goto out;
+ ret = add_keyed_refs(fs_info, path, bytenr, info_level,
+@@ -1236,7 +1281,7 @@ again:
+ WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
+
+ ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
+- extent_item_pos, total_refs, sc, ignore_offset);
++ extent_item_pos, sc, ignore_offset);
+ if (ret)
+ goto out;
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 4aba4878ed967..8bb001c7927f0 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
+ struct buffer_head *bh = NULL;
+ int nsr = 0;
+ struct udf_sb_info *sbi;
++ loff_t session_offset;
+
+ sbi = UDF_SB(sb);
+ if (sb->s_blocksize < sizeof(struct volStructDesc))
+@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
+ else
+ sectorsize = sb->s_blocksize;
+
+- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
++ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
++ sector += session_offset;
+
+ udf_debug("Starting at sector %u (%lu byte sectors)\n",
+ (unsigned int)(sector >> sb->s_blocksize_bits),
+@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
+
+ if (nsr > 0)
+ return 1;
+- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
+- VSD_FIRST_SECTOR_OFFSET)
++ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
+ return -1;
+ else
+ return 0;
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index 0f9da966934e2..c7108ce5a051c 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -31,6 +31,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ unsigned int cpu,
+ const char *namefmt);
+
++void kthread_set_per_cpu(struct task_struct *k, int cpu);
++bool kthread_is_per_cpu(struct task_struct *k);
++
+ /**
+ * kthread_run - create and wake a thread.
+ * @threadfn: the function to run until signal_pending(current).
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 4b38ba101b9b7..37b51456784f8 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -619,6 +619,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
+
+ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+ unsigned int tcp_current_mss(struct sock *sk);
++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
+
+ /* Bound MSS / TSO packet size with the half of the window */
+ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index e51f0006057df..1d4c98a19043f 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -469,11 +469,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ return p;
+ kthread_bind(p, cpu);
+ /* CPU hotplug need to bind once again when unparking the thread. */
+- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
+ to_kthread(p)->cpu = cpu;
+ return p;
+ }
+
++void kthread_set_per_cpu(struct task_struct *k, int cpu)
++{
++ struct kthread *kthread = to_kthread(k);
++ if (!kthread)
++ return;
++
++ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
++
++ if (cpu < 0) {
++ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++ return;
++ }
++
++ kthread->cpu = cpu;
++ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++}
++
++bool kthread_is_per_cpu(struct task_struct *k)
++{
++ struct kthread *kthread = to_kthread(k);
++ if (!kthread)
++ return false;
++
++ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
++}
++
+ /**
+ * kthread_unpark - unpark a thread created by kthread_create().
+ * @k: thread created by kthread_create().
+diff --git a/kernel/smpboot.c b/kernel/smpboot.c
+index 2efe1e206167c..f25208e8df836 100644
+--- a/kernel/smpboot.c
++++ b/kernel/smpboot.c
+@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
+ kfree(td);
+ return PTR_ERR(tsk);
+ }
++ kthread_set_per_cpu(tsk, cpu);
+ /*
+ * Park the thread so that it could start right on the CPU
+ * when it is available.
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 28e52657e0930..29c36c0290623 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1847,12 +1847,6 @@ static void worker_attach_to_pool(struct worker *worker,
+ {
+ mutex_lock(&wq_pool_attach_mutex);
+
+- /*
+- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
+- * online CPUs. It'll be re-applied when any of the CPUs come up.
+- */
+- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+-
+ /*
+ * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
+ * stable across this function. See the comments above the flag
+@@ -1861,6 +1855,9 @@ static void worker_attach_to_pool(struct worker *worker,
+ if (pool->flags & POOL_DISASSOCIATED)
+ worker->flags |= WORKER_UNBOUND;
+
++ if (worker->rescue_wq)
++ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
++
+ list_add_tail(&worker->node, &pool->workers);
+ worker->pool = pool;
+
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index bfe7bdd4c3406..98c396769be94 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
+ u64 rate, brate;
+
+ est_fetch_counters(est, &b);
+- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
+- brate -= (est->avbps >> est->ewma_log);
++ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
++ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
+
+- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
+- rate -= (est->avpps >> est->ewma_log);
++ rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log);
++ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
+
+ write_seqcount_begin(&est->seq);
+ est->avbps += brate;
+@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ if (parm->interval < -2 || parm->interval > 3)
+ return -EINVAL;
+
++ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
++ return -EINVAL;
++
+ est = kzalloc(sizeof(*est), GFP_KERNEL);
+ if (!est)
+ return -ENOBUFS;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 26305aa88651f..a1768ded2d545 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3295,6 +3295,7 @@ static void tcp_ack_probe(struct sock *sk)
+ } else {
+ unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
+
++ when = tcp_clamp_probe0_to_user_timeout(sk, when);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ when, TCP_RTO_MAX, NULL);
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5da6ffce390c2..d0774b4e934d6 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3850,6 +3850,8 @@ void tcp_send_probe0(struct sock *sk)
+ */
+ timeout = TCP_RESOURCE_PROBE_INTERVAL;
+ }
++
++ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
+ }
+
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 7fcd116fbd378..fa2ae96ecdc40 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
+ return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
+ }
+
++u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++ u32 remaining;
++ s32 elapsed;
++
++ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
++ return when;
++
++ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
++ if (unlikely(elapsed < 0))
++ elapsed = 0;
++ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
++ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
++
++ return min_t(u32, remaining, when);
++}
++
+ /**
+ * tcp_write_err() - close socket and save error info
+ * @sk: The socket the error has appeared on.
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3ab85e1e38d82..1a15e7bae106a 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4080,6 +4080,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
+
+ rcu_read_lock();
+ key = rcu_dereference(sta->ptk[sta->ptk_idx]);
++ if (!key)
++ key = rcu_dereference(sdata->default_unicast_key);
+ if (key) {
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 3a1d428c13369..ea9ddea35a886 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -461,10 +461,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+
+ if (check_cb(dev)) {
+- /* This flag is only checked if the return value is success. */
+- port_obj_info->handled = true;
+- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+- extack);
++ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
++ extack);
++ if (err != -EOPNOTSUPP)
++ port_obj_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+@@ -513,9 +514,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+- /* This flag is only checked if the return value is success. */
+- port_obj_info->handled = true;
+- return del_cb(dev, port_obj_info->obj);
++ err = del_cb(dev, port_obj_info->obj);
++ if (err != -EOPNOTSUPP)
++ port_obj_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+@@ -563,9 +565,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+- port_attr_info->handled = true;
+- return set_cb(dev, port_attr_info->attr,
+- port_attr_info->trans);
++ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
++ if (err != -EOPNOTSUPP)
++ port_attr_info->handled = true;
++ return err;
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5f515a29668c8..b3667a5efdc1f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2450,6 +2450,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* CometLake-S */
+ { PCI_DEVICE(0x8086, 0xa3f0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++ /* CometLake-R */
++ { PCI_DEVICE(0x8086, 0xf0c8),
++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* Icelake */
+ { PCI_DEVICE(0x8086, 0x34c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index 9e8233c10d860..df38616c431a6 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -68,8 +68,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev)
+ * has been recorded in STATESTS
+ */
+ if (codec->jacktbl.used)
+- schedule_delayed_work(&codec->jackpoll_work,
+- codec->jackpoll_interval);
++ pm_request_resume(&codec->core.dev);
+ }
+ #else
+ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index edba4745f25a9..693d740107a8b 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -214,8 +214,11 @@ static int read_symbols(struct elf *elf)
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (!symtab) {
+- WARN("missing symbol table");
+- return -1;
++ /*
++ * A missing symbol table is actually possible if it's an empty
++ * .o file. This can happen for thunk_64.o.
++ */
++ return 0;
+ }
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+index 0453c50c949cb..0725239bbd85c 100644
+--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
++++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+@@ -380,7 +380,6 @@ int test_alignment_handler_integer(void)
+ LOAD_DFORM_TEST(ldu);
+ LOAD_XFORM_TEST(ldx);
+ LOAD_XFORM_TEST(ldux);
+- LOAD_DFORM_TEST(lmw);
+ STORE_DFORM_TEST(stb);
+ STORE_XFORM_TEST(stbx);
+ STORE_DFORM_TEST(stbu);
+@@ -399,7 +398,11 @@ int test_alignment_handler_integer(void)
+ STORE_XFORM_TEST(stdx);
+ STORE_DFORM_TEST(stdu);
+ STORE_XFORM_TEST(stdux);
++
++#ifdef __BIG_ENDIAN__
++ LOAD_DFORM_TEST(lmw);
+ STORE_DFORM_TEST(stmw);
++#endif
+
+ return rc;
+ }