From: Mike Pagano <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 16 Jan 2019 23:32:42
Message-Id: 1547681533.d15efad0d3fb3fd3b56902ecb343a9830a113c3e.mpagano@gentoo
commit:     d15efad0d3fb3fd3b56902ecb343a9830a113c3e
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Jan 16 23:32:13 2019 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Jan 16 23:32:13 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d15efad0

proj/linux-patches: Linux patch 4.19.16

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README              |    4 +
 1015_linux-4.19.16.patch | 1744 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1748 insertions(+)
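The numbered patches in this repository apply in ascending order on top of a
vanilla kernel tree; 1015_linux-4.19.16.patch assumes the earlier 10xx
incrementals have already been applied. A minimal sketch of that workflow,
assuming a checkout of this branch in linux-patches/ and an unpacked vanilla
tree in linux-4.19/ (both paths are illustrative; the gentoo-sources ebuild
normally does this for you via unipatch):

import glob
import os
import subprocess

KERNEL_TREE = "linux-4.19"     # assumed: unpacked vanilla source tree
PATCH_DIR = "linux-patches"    # assumed: checkout of this repo's 4.19 branch

for p in sorted(glob.glob(os.path.join(PATCH_DIR, "*.patch"))):
    # The four-digit prefixes (1000..., 1500..., ...) sort lexicographically
    # in the intended application order; -p1 strips the a/ and b/ prefixes
    # used by the diffs below. Requires the patch(1) utility.
    subprocess.run(["patch", "-p1", "-d", KERNEL_TREE,
                    "-i", os.path.abspath(p)], check=True)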

diff --git a/0000_README b/0000_README
index 2970cec..3ddd86b 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-4.19.15.patch
 From: http://www.kernel.org
 Desc: Linux 4.19.15
 
+Patch: 1015_linux-4.19.16.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.16
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1015_linux-4.19.16.patch b/1015_linux-4.19.16.patch
new file mode 100644
index 0000000..0f57c61
--- /dev/null
+++ b/1015_linux-4.19.16.patch
@@ -0,0 +1,1744 @@
+diff --git a/Makefile b/Makefile
+index 0e30d48274fa..e8cb4875b86d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
+index 7be666062c7c..010212d35700 100644
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+ /*
+ * Handle all unrecognised system calls.
+ */
+-long compat_arm_syscall(struct pt_regs *regs)
++long compat_arm_syscall(struct pt_regs *regs, int scno)
+ {
+ siginfo_t info;
+- unsigned int no = regs->regs[7];
+
+- switch (no) {
++ switch (scno) {
+ /*
+ * Flush a region from virtual address 'r0' to virtual address 'r1'
+ * _exclusive_. There is no alignment requirement on either address;
+@@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs *regs)
+ * way the calling program can gracefully determine whether
+ * a feature is supported.
+ */
+- if (no < __ARM_NR_COMPAT_END)
++ if (scno < __ARM_NR_COMPAT_END)
+ return -ENOSYS;
+ break;
+ }
+@@ -119,6 +118,6 @@ long compat_arm_syscall(struct pt_regs *regs)
+ info.si_addr = (void __user *)instruction_pointer(regs) -
+ (compat_thumb_mode(regs) ? 2 : 4);
+
+- arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no);
++ arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, scno);
+ return 0;
+ }
+diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
+index 032d22312881..5610ac01c1ec 100644
+--- a/arch/arm64/kernel/syscall.c
++++ b/arch/arm64/kernel/syscall.c
+@@ -13,16 +13,15 @@
+ #include <asm/thread_info.h>
+ #include <asm/unistd.h>
+
+-long compat_arm_syscall(struct pt_regs *regs);
+-
++long compat_arm_syscall(struct pt_regs *regs, int scno);
+ long sys_ni_syscall(void);
+
+-asmlinkage long do_ni_syscall(struct pt_regs *regs)
++static long do_ni_syscall(struct pt_regs *regs, int scno)
+ {
+ #ifdef CONFIG_COMPAT
+ long ret;
+ if (is_compat_task()) {
+- ret = compat_arm_syscall(regs);
++ ret = compat_arm_syscall(regs, scno);
+ if (ret != -ENOSYS)
+ return ret;
+ }
+@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
+ syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
+ ret = __invoke_syscall(regs, syscall_fn);
+ } else {
+- ret = do_ni_syscall(regs);
++ ret = do_ni_syscall(regs, scno);
+ }
+
+ regs->regs[0] = ret;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index abb92c341693..807d06a7acac 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -213,7 +213,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ static bool spectre_v2_bad_module;
+
+ bool retpoline_module_ok(bool has_retpoline)
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index e938576e58cb..e48eebc27b81 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -951,9 +951,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
+ {
+ struct acpi_iort_node *node;
+ struct acpi_iort_root_complex *rc;
++ struct pci_bus *pbus = to_pci_dev(dev)->bus;
+
+ node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+- iort_match_node_callback, dev);
++ iort_match_node_callback, &pbus->dev);
+ if (!node || node->revision < 1)
+ return -ENODEV;
+
+diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
+index 316e55174aa9..bb5391f59b8b 100644
+--- a/drivers/acpi/pmic/intel_pmic_xpower.c
++++ b/drivers/acpi/pmic/intel_pmic_xpower.c
+@@ -27,8 +27,11 @@
+ #define GPI1_LDO_ON (3 << 0)
+ #define GPI1_LDO_OFF (4 << 0)
+
+-#define AXP288_ADC_TS_PIN_GPADC 0xf2
+-#define AXP288_ADC_TS_PIN_ON 0xf3
++#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
++#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
++#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
++#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
++#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
+
+ static struct pmic_table power_table[] = {
+ {
+@@ -211,22 +214,44 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
+ */
+ static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
+ {
++ int ret, adc_ts_pin_ctrl;
+ u8 buf[2];
+- int ret;
+
+- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
+- AXP288_ADC_TS_PIN_GPADC);
++ /*
++ * The current-source used for the battery temp-sensor (TS) is shared
++ * with the GPADC. For proper fuel-gauge and charger operation the TS
++ * current-source needs to be permanently on. But to read the GPADC we
++ * need to temporary switch the TS current-source to ondemand, so that
++ * the GPADC can use it, otherwise we will always read an all 0 value.
++ *
++ * Note that the switching from on to on-ondemand is not necessary
++ * when the TS current-source is off (this happens on devices which
++ * do not use the TS-pin).
++ */
++ ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
+ if (ret)
+ return ret;
+
+- /* After switching to the GPADC pin give things some time to settle */
+- usleep_range(6000, 10000);
++ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
++ ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
++ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
++ AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
++ if (ret)
++ return ret;
++
++ /* Wait a bit after switching the current-source */
++ usleep_range(6000, 10000);
++ }
+
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ if (ret == 0)
+ ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
+
+- regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
++ if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
++ regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
++ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
++ AXP288_ADC_TS_CURRENT_ON);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
+index 1b475bc1ae16..665e93ca0b40 100644
+--- a/drivers/acpi/power.c
++++ b/drivers/acpi/power.c
+@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
+ }
+ }
+
++static bool acpi_power_resource_is_dup(union acpi_object *package,
++ unsigned int start, unsigned int i)
++{
++ acpi_handle rhandle, dup;
++ unsigned int j;
++
++ /* The caller is expected to check the package element types */
++ rhandle = package->package.elements[i].reference.handle;
++ for (j = start; j < i; j++) {
++ dup = package->package.elements[j].reference.handle;
++ if (dup == rhandle)
++ return true;
++ }
++
++ return false;
++}
++
+ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ struct list_head *list)
+ {
+@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
+ err = -ENODEV;
+ break;
+ }
++
++ /* Some ACPI tables contain duplicate power resource references */
++ if (acpi_power_resource_is_dup(package, start, i))
++ continue;
++
+ err = acpi_add_power_resource(rhandle);
+ if (err)
+ break;
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 73ed5f3a862d..585378bc988c 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5982,7 +5982,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
+ struct list_head *tmp;
+ int dev_id;
+ char opt_buf[6];
+- bool already = false;
+ bool force = false;
+ int ret;
+
+@@ -6015,13 +6014,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
+ spin_lock_irq(&rbd_dev->lock);
+ if (rbd_dev->open_count && !force)
+ ret = -EBUSY;
+- else
+- already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+- &rbd_dev->flags);
++ else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
++ &rbd_dev->flags))
++ ret = -EINPROGRESS;
+ spin_unlock_irq(&rbd_dev->lock);
+ }
+ spin_unlock(&rbd_dev_list_lock);
+- if (ret < 0 || already)
++ if (ret)
+ return ret;
+
+ if (force) {
+diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
+index 50b1551ba894..3f0693439486 100644
+--- a/drivers/cpufreq/scmi-cpufreq.c
++++ b/drivers/cpufreq/scmi-cpufreq.c
+@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+ int ret;
+ struct scmi_data *priv = policy->driver_data;
+ struct scmi_perf_ops *perf_ops = handle->perf_ops;
+- u64 freq = policy->freq_table[index].frequency * 1000;
++ u64 freq = policy->freq_table[index].frequency;
+
+- ret = perf_ops->freq_set(handle, priv->domain_id, freq, false);
++ ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
+ if (!ret)
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index cb88528e7b10..e44e567bd789 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -110,6 +110,26 @@ config DRM_FBDEV_OVERALLOC
+ is 100. Typical values for double buffering will be 200,
+ triple buffering 300.
+
++config DRM_FBDEV_LEAK_PHYS_SMEM
++ bool "Shamelessly allow leaking of fbdev physical address (DANGEROUS)"
++ depends on DRM_FBDEV_EMULATION && EXPERT
++ default n
++ help
++ In order to keep user-space compatibility, we want in certain
++ use-cases to keep leaking the fbdev physical address to the
++ user-space program handling the fbdev buffer.
++ This affects, not only, Amlogic, Allwinner or Rockchip devices
++ with ARM Mali GPUs using an userspace Blob.
++ This option is not supported by upstream developers and should be
++ removed as soon as possible and be considered as a broken and
++ legacy behaviour from a modern fbdev device driver.
++
++ Please send any bug reports when using this to your proprietary
++ software vendor that requires this.
++
++ If in doubt, say "N" or spread the word to your closed source
++ library vendor.
++
+ config DRM_LOAD_EDID_FIRMWARE
+ bool "Allow to specify an EDID data set instead of probing for it"
+ depends on DRM
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 8e26e1ca14c6..b40e9c76af0c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -753,6 +753,7 @@ static const struct pci_device_id pciidlist[] = {
+ /* VEGAM */
+ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
++ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ /* Vega 10 */
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index d792735f1365..a851bb07443f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -565,22 +565,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
++ struct drm_dp_mst_topology_mgr *mgr;
++ int ret;
++ bool need_hotplug = false;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+- aconnector = to_amdgpu_dm_connector(connector);
+- if (aconnector->dc_link->type == dc_connection_mst_branch &&
+- !aconnector->mst_port) {
++ list_for_each_entry(connector, &dev->mode_config.connector_list,
++ head) {
++ aconnector = to_amdgpu_dm_connector(connector);
++ if (aconnector->dc_link->type != dc_connection_mst_branch ||
++ aconnector->mst_port)
++ continue;
+
+- if (suspend)
+- drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+- else
+- drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+- }
++ mgr = &aconnector->mst_mgr;
++
++ if (suspend) {
++ drm_dp_mst_topology_mgr_suspend(mgr);
++ } else {
++ ret = drm_dp_mst_topology_mgr_resume(mgr);
++ if (ret < 0) {
++ drm_dp_mst_topology_mgr_set_mst(mgr, false);
++ need_hotplug = true;
++ }
++ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
++
++ if (need_hotplug)
++ drm_kms_helper_hotplug_event(dev);
+ }
+
+ static int dm_hw_init(void *handle)
+@@ -736,7 +750,6 @@ static int dm_resume(void *handle)
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+- int ret;
+ int i;
+
+ /* power on hardware */
+@@ -809,13 +822,13 @@ static int dm_resume(void *handle)
+ }
+ }
+
+- ret = drm_atomic_helper_resume(ddev, dm->cached_state);
++ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+- return ret;
++ return 0;
+ }
+
+ static const struct amd_ip_funcs amdgpu_dm_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index fced3c1c2ef5..7c89785fd731 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2457,11 +2457,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+ {
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
++ core_dc->hwss.blank_stream(pipe_ctx);
++
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+
+- core_dc->hwss.blank_stream(pipe_ctx);
+-
+ core_dc->hwss.disable_stream(pipe_ctx, option);
+
+ disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 9214c8b02484..b5b9f15549c2 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -56,6 +56,25 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
+ "Overallocation of the fbdev buffer (%) [default="
+ __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]");
+
++/*
++ * In order to keep user-space compatibility, we want in certain use-cases
++ * to keep leaking the fbdev physical address to the user-space program
++ * handling the fbdev buffer.
++ * This is a bad habit essentially kept into closed source opengl driver
++ * that should really be moved into open-source upstream projects instead
++ * of using legacy physical addresses in user space to communicate with
++ * other out-of-tree kernel modules.
++ *
++ * This module_param *should* be removed as soon as possible and be
++ * considered as a broken and legacy behaviour from a modern fbdev device.
++ */
++#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
++static bool drm_leak_fbdev_smem = false;
++module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
++MODULE_PARM_DESC(fbdev_emulation,
++ "Allow unsafe leaking fbdev physical smem address [default=false]");
++#endif
++
+ static LIST_HEAD(kernel_fb_helper_list);
+ static DEFINE_MUTEX(kernel_fb_helper_lock);
+
+@@ -1602,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
+ var_1->transp.msb_right == var_2->transp.msb_right;
+ }
+
++static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
++ u8 depth)
++{
++ switch (depth) {
++ case 8:
++ var->red.offset = 0;
++ var->green.offset = 0;
++ var->blue.offset = 0;
++ var->red.length = 8; /* 8bit DAC */
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.offset = 0;
++ var->transp.length = 0;
++ break;
++ case 15:
++ var->red.offset = 10;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 5;
++ var->blue.length = 5;
++ var->transp.offset = 15;
++ var->transp.length = 1;
++ break;
++ case 16:
++ var->red.offset = 11;
++ var->green.offset = 5;
++ var->blue.offset = 0;
++ var->red.length = 5;
++ var->green.length = 6;
++ var->blue.length = 5;
++ var->transp.offset = 0;
++ break;
++ case 24:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.offset = 0;
++ var->transp.length = 0;
++ break;
++ case 32:
++ var->red.offset = 16;
++ var->green.offset = 8;
++ var->blue.offset = 0;
++ var->red.length = 8;
++ var->green.length = 8;
++ var->blue.length = 8;
++ var->transp.offset = 24;
++ var->transp.length = 8;
++ break;
++ default:
++ break;
++ }
++}
++
+ /**
+ * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
+ * @var: screeninfo to check
+@@ -1631,6 +1708,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ return -EINVAL;
+ }
+
++ /*
++ * Workaround for SDL 1.2, which is known to be setting all pixel format
++ * fields values to zero in some cases. We treat this situation as a
++ * kind of "use some reasonable autodetected values".
++ */
++ if (!var->red.offset && !var->green.offset &&
++ !var->blue.offset && !var->transp.offset &&
++ !var->red.length && !var->green.length &&
++ !var->blue.length && !var->transp.length &&
++ !var->red.msb_right && !var->green.msb_right &&
++ !var->blue.msb_right && !var->transp.msb_right) {
++ drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
++ }
++
+ /*
+ * drm fbdev emulation doesn't support changing the pixel format at all,
+ * so reject all pixel format changing requests.
+@@ -1942,59 +2033,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
+ info->var.yoffset = 0;
+ info->var.activate = FB_ACTIVATE_NOW;
+
+- switch (fb->format->depth) {
+- case 8:
+- info->var.red.offset = 0;
+- info->var.green.offset = 0;
+- info->var.blue.offset = 0;
+- info->var.red.length = 8; /* 8bit DAC */
+- info->var.green.length = 8;
+- info->var.blue.length = 8;
+- info->var.transp.offset = 0;
+- info->var.transp.length = 0;
+- break;
+- case 15:
+- info->var.red.offset = 10;
+- info->var.green.offset = 5;
+- info->var.blue.offset = 0;
+- info->var.red.length = 5;
+- info->var.green.length = 5;
+- info->var.blue.length = 5;
+- info->var.transp.offset = 15;
+- info->var.transp.length = 1;
+- break;
+- case 16:
+- info->var.red.offset = 11;
+- info->var.green.offset = 5;
+- info->var.blue.offset = 0;
+- info->var.red.length = 5;
+- info->var.green.length = 6;
+- info->var.blue.length = 5;
+- info->var.transp.offset = 0;
+- break;
+- case 24:
+- info->var.red.offset = 16;
+- info->var.green.offset = 8;
+- info->var.blue.offset = 0;
+- info->var.red.length = 8;
+- info->var.green.length = 8;
+- info->var.blue.length = 8;
+- info->var.transp.offset = 0;
+- info->var.transp.length = 0;
+- break;
+- case 32:
+- info->var.red.offset = 16;
+- info->var.green.offset = 8;
+- info->var.blue.offset = 0;
+- info->var.red.length = 8;
+- info->var.green.length = 8;
+- info->var.blue.length = 8;
+- info->var.transp.offset = 24;
+- info->var.transp.length = 8;
+- break;
+- default:
+- break;
+- }
++ drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
+
+ info->var.xres = fb_width;
+ info->var.yres = fb_height;
+@@ -3041,6 +3080,12 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ fbi->screen_size = fb->height * fb->pitches[0];
+ fbi->fix.smem_len = fbi->screen_size;
+ fbi->screen_buffer = buffer->vaddr;
++ /* Shamelessly leak the physical address to user-space */
++#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
++ if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
++ fbi->fix.smem_start =
++ page_to_phys(virt_to_page(fbi->screen_buffer));
++#endif
+ strcpy(fbi->fix.id, "DRM emulated");
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 5f57f4e1fbc8..87411a5aba77 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -2128,6 +2128,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+ {
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
++ int err;
+
+ /*
+ * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
+@@ -2143,9 +2144,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+ * allocator works in address space sizes, so it's multiplied by page
+ * size. We allocate at the top of the GTT to avoid fragmentation.
+ */
+- return i915_vma_pin(ppgtt->vma,
+- 0, GEN6_PD_ALIGN,
+- PIN_GLOBAL | PIN_HIGH);
++ err = i915_vma_pin(ppgtt->vma,
++ 0, GEN6_PD_ALIGN,
++ PIN_GLOBAL | PIN_HIGH);
++ if (err)
++ goto unpin;
++
++ return 0;
++
++unpin:
++ ppgtt->pin_count = 0;
++ return err;
+ }
+
+ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 1aca742fde4a..ccd76c71af09 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ data_arg.data);
+ }
+ case I2C_RETRIES:
++ if (arg > INT_MAX)
++ return -EINVAL;
++
+ client->adapter->retries = arg;
+ break;
+ case I2C_TIMEOUT:
++ if (arg > INT_MAX)
++ return -EINVAL;
++
+ /* For historical reasons, user-space sets the timeout
+ * value in units of 10 ms.
+ */
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 8815f3e2b718..880e75f63a19 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2839,6 +2839,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
+ if (ret)
+ return ret;
+
++ if (nandc->props->is_bam) {
++ free_bam_transaction(nandc);
++ nandc->bam_txn = alloc_bam_transaction(nandc);
++ if (!nandc->bam_txn) {
++ dev_err(nandc->dev,
++ "failed to allocate bam transaction\n");
++ return -ENOMEM;
++ }
++ }
++
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+@@ -2853,16 +2863,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
+ struct qcom_nand_host *host;
+ int ret;
+
+- if (nandc->props->is_bam) {
+- free_bam_transaction(nandc);
+- nandc->bam_txn = alloc_bam_transaction(nandc);
+- if (!nandc->bam_txn) {
+- dev_err(nandc->dev,
+- "failed to allocate bam transaction\n");
+- return -ENOMEM;
+- }
+- }
+-
+ for_each_available_child_of_node(dn, child) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 29a05759a294..0fa9e8fdce66 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -99,9 +99,6 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
+ generic_handle_irq(irq);
+- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+- (i * MSI_REG_CTRL_BLOCK_SIZE),
+- 4, 1 << pos);
+ pos++;
+ }
+ }
+@@ -168,8 +165,8 @@ static void dw_pci_bottom_mask(struct irq_data *data)
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_status[ctrl] &= ~(1 << bit);
+- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+- pp->irq_status[ctrl]);
++ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
++ ~pp->irq_status[ctrl]);
+ }
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+@@ -191,8 +188,8 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
+ bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+
+ pp->irq_status[ctrl] |= 1 << bit;
+- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
+- pp->irq_status[ctrl]);
++ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
++ ~pp->irq_status[ctrl]);
+ }
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+@@ -200,13 +197,22 @@ static void dw_pci_bottom_unmask(struct irq_data *data)
+
+ static void dw_pci_bottom_ack(struct irq_data *d)
+ {
+- struct msi_desc *msi = irq_data_get_msi_desc(d);
+- struct pcie_port *pp;
++ struct pcie_port *pp = irq_data_get_irq_chip_data(d);
++ unsigned int res, bit, ctrl;
++ unsigned long flags;
++
++ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
++ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
++ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
++
++ raw_spin_lock_irqsave(&pp->lock, flags);
+
+- pp = msi_desc_to_pci_sysdata(msi);
++ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
+
+ if (pp->ops->msi_irq_ack)
+ pp->ops->msi_irq_ack(d->hwirq, pp);
++
++ raw_spin_unlock_irqrestore(&pp->lock, flags);
+ }
+
+ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
+@@ -658,10 +664,15 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+- for (ctrl = 0; ctrl < num_ctrls; ctrl++)
+- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
++ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
++ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+- 4, &pp->irq_status[ctrl]);
++ 4, ~0);
++ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
++ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
++ 4, ~0);
++ pp->irq_status[ctrl] = 0;
++ }
+
+ /* Setup RC BARs */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
+diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
+index 2a48b09ea9ae..470ea2c0c433 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_security.c
++++ b/drivers/staging/rtl8188eu/core/rtw_security.c
+@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
+
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+- crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
++ crypto_ops = lib80211_get_crypto_ops("WEP");
+
+ if (!crypto_ops)
+ return;
+@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
+ void *crypto_private = NULL;
+ int status = _SUCCESS;
+ const int keyindex = prxattrib->key_index;
+- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep");
++ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
+ char iv[4], icv[4];
+
+ if (!crypto_ops) {
+@@ -1292,7 +1292,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
+ struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
+ void *crypto_private = NULL;
+ u8 *key, *pframe = skb->data;
+- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp");
++ struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ char iv[8], icv[8];
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 2886b7b477c7..08b8aa5299b5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1880,6 +1880,13 @@ static const struct usb_device_id acm_ids[] = {
+ .driver_info = IGNORE_DEVICE,
+ },
+
++ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
++ .driver_info = SEND_ZERO_PACKET,
++ },
++ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
++ .driver_info = SEND_ZERO_PACKET,
++ },
++
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 514c5214ddb2..8bc35d53408b 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
+
+ /* Corsair K70 RGB */
+- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
++ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
++ USB_QUIRK_DELAY_CTRL_MSG },
+
+ /* Corsair Strafe */
+ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index e227bb5b794f..101ebac43c87 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
+ if (!(us->fflags & US_FL_NEEDS_CAP16))
+ sdev->try_rc_10_first = 1;
+
+- /* assume SPC3 or latter devices support sense size > 18 */
+- if (sdev->scsi_level > SCSI_SPC_2)
++ /*
++ * assume SPC3 or latter devices support sense size > 18
++ * unless US_FL_BAD_SENSE quirk is specified.
++ */
++ if (sdev->scsi_level > SCSI_SPC_2 &&
++ !(us->fflags & US_FL_BAD_SENSE))
+ us->fflags |= US_FL_SANE_SENSE;
+
+ /*
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index f7f83b21dc74..ea0d27a94afe 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1265,6 +1265,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
++/*
++ * Reported by Icenowy Zheng <icenowy@××××.io>
++ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
++ * that do not process read/write command if a long sense is requested,
++ * so force to use 18-byte sense.
++ */
++UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
++ "SMI",
++ "SM3350 UFS-to-USB-Mass-Storage bridge",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_BAD_SENSE ),
++
+ /*
+ * Reported by Paul Hartman <paul.hartman+linux@×××××.com>
+ * This card reader returns "Illegal Request, Logical Block Address
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index d9fd3188615d..64cbc2d007c9 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
+ return -EINVAL;
+ if (!unmap->size || unmap->size & mask)
+ return -EINVAL;
+- if (unmap->iova + unmap->size < unmap->iova ||
++ if (unmap->iova + unmap->size - 1 < unmap->iova ||
+ unmap->size > SIZE_MAX)
+ return -EINVAL;
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index fa18520529f3..7ad6f2eec711 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1051,19 +1051,21 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ parent_start = parent->start;
+
+ /*
+- * If we are COWing a node/leaf from the extent, chunk or device trees,
+- * make sure that we do not finish block group creation of pending block
+- * groups. We do this to avoid a deadlock.
++ * If we are COWing a node/leaf from the extent, chunk, device or free
++ * space trees, make sure that we do not finish block group creation of
++ * pending block groups. We do this to avoid a deadlock.
+ * COWing can result in allocation of a new chunk, and flushing pending
+ * block groups (btrfs_create_pending_block_groups()) can be triggered
+ * when finishing allocation of a new chunk. Creation of a pending block
+- * group modifies the extent, chunk and device trees, therefore we could
+- * deadlock with ourselves since we are holding a lock on an extent
+- * buffer that btrfs_create_pending_block_groups() may try to COW later.
++ * group modifies the extent, chunk, device and free space trees,
++ * therefore we could deadlock with ourselves since we are holding a
++ * lock on an extent buffer that btrfs_create_pending_block_groups() may
++ * try to COW later.
+ */
+ if (root == fs_info->extent_root ||
+ root == fs_info->chunk_root ||
+- root == fs_info->dev_root)
++ root == fs_info->dev_root ||
++ root == fs_info->free_space_root)
+ trans->can_flush_pending_bgs = false;
+
+ cow = btrfs_alloc_tree_block(trans, root, parent_start,
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index ff434663d65b..e1fcb28ad4cc 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -1013,16 +1013,22 @@ out_add_root:
+ btrfs_abort_transaction(trans, ret);
+ goto out_free_path;
+ }
+- spin_lock(&fs_info->qgroup_lock);
+- fs_info->quota_root = quota_root;
+- set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+- spin_unlock(&fs_info->qgroup_lock);
+
+ ret = btrfs_commit_transaction(trans);
+ trans = NULL;
+ if (ret)
+ goto out_free_path;
+
++ /*
++ * Set quota enabled flag after committing the transaction, to avoid
++ * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
++ * creation.
++ */
++ spin_lock(&fs_info->qgroup_lock);
++ fs_info->quota_root = quota_root;
++ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
++ spin_unlock(&fs_info->qgroup_lock);
++
+ ret = qgroup_rescan_init(fs_info, 0, 1);
+ if (!ret) {
+ qgroup_rescan_zero_tracking(fs_info);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f4405e430da6..223334f08530 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -3712,6 +3712,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ int ret;
+ u64 num_devices;
+ unsigned seq;
++ bool reducing_integrity;
+
+ if (btrfs_fs_closing(fs_info) ||
+ atomic_read(&fs_info->balance_pause_req) ||
+@@ -3796,24 +3797,30 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
+ !(bctl->sys.target & allowed)) ||
+ ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (fs_info->avail_metadata_alloc_bits & allowed) &&
+- !(bctl->meta.target & allowed))) {
+- if (bctl->flags & BTRFS_BALANCE_FORCE) {
+- btrfs_info(fs_info,
+- "balance: force reducing metadata integrity");
+- } else {
+- btrfs_err(fs_info,
+- "balance: reduces metadata integrity, use --force if you want this");
+- ret = -EINVAL;
+- goto out;
+- }
+- }
++ !(bctl->meta.target & allowed)))
++ reducing_integrity = true;
++ else
++ reducing_integrity = false;
++
++ /* if we're not converting, the target field is uninitialized */
++ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
++ bctl->meta.target : fs_info->avail_metadata_alloc_bits;
++ data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
++ bctl->data.target : fs_info->avail_data_alloc_bits;
+ } while (read_seqretry(&fs_info->profiles_lock, seq));
+
+- /* if we're not converting, the target field is uninitialized */
+- meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+- bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+- data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+- bctl->data.target : fs_info->avail_data_alloc_bits;
++ if (reducing_integrity) {
++ if (bctl->flags & BTRFS_BALANCE_FORCE) {
++ btrfs_info(fs_info,
++ "balance: force reducing metadata integrity");
++ } else {
++ btrfs_err(fs_info,
++ "balance: reduces metadata integrity, use --force if you want this");
++ ret = -EINVAL;
++ goto out;
++ }
++ }
++
+ if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
+ btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
+ int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index ea78c3d6dcfc..f141b45ce349 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -11,6 +11,7 @@
+ #include <linux/security.h>
+ #include <linux/posix_acl_xattr.h>
+ #include <linux/iversion.h>
++#include <linux/sched/mm.h>
+ #include "ctree.h"
+ #include "btrfs_inode.h"
+ #include "transaction.h"
+@@ -422,9 +423,15 @@ static int btrfs_initxattrs(struct inode *inode,
+ {
+ const struct xattr *xattr;
+ struct btrfs_trans_handle *trans = fs_info;
++ unsigned int nofs_flag;
+ char *name;
+ int err = 0;
+
++ /*
++ * We're holding a transaction handle, so use a NOFS memory allocation
++ * context to avoid deadlock if reclaim happens.
++ */
++ nofs_flag = memalloc_nofs_save();
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
+ strlen(xattr->name) + 1, GFP_KERNEL);
+@@ -440,6 +447,7 @@ static int btrfs_initxattrs(struct inode *inode,
+ if (err < 0)
+ break;
+ }
++ memalloc_nofs_restore(nofs_flag);
+ return err;
+ }
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 8d41ca7bfcf1..7b637fc27990 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1120,10 +1120,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+
+ /*
+ * Accessing maxBuf is racy with cifs_reconnect - need to store value
+- * and check it for zero before using.
++ * and check it before using.
+ */
+ max_buf = tcon->ses->server->maxBuf;
+- if (!max_buf) {
++ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
+ free_xid(xid);
+ return -EINVAL;
+ }
+@@ -1460,10 +1460,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+
+ /*
+ * Accessing maxBuf is racy with cifs_reconnect - need to store value
+- * and check it for zero before using.
++ * and check it before using.
+ */
+ max_buf = tcon->ses->server->maxBuf;
+- if (!max_buf)
++ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
+ return -EINVAL;
+
+ max_num = (max_buf - sizeof(struct smb_hdr)) /
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 4ed10dd086e6..2fc3d31967ee 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -122,10 +122,10 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+
+ /*
+ * Accessing maxBuf is racy with cifs_reconnect - need to store value
+- * and check it for zero before using.
++ * and check it before using.
+ */
+ max_buf = tcon->ses->server->maxBuf;
+- if (!max_buf)
++ if (max_buf < sizeof(struct smb2_lock_element))
+ return -EINVAL;
+
+ max_num = max_buf / sizeof(struct smb2_lock_element);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index f54d07bda067..dba986524917 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -3185,12 +3185,14 @@ smb2_async_readv(struct cifs_readdata *rdata)
+ if (rdata->credits) {
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
+- shdr->CreditRequest = shdr->CreditCharge;
++ shdr->CreditRequest =
++ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
+ spin_lock(&server->req_lock);
+ server->credits += rdata->credits -
+ le16_to_cpu(shdr->CreditCharge);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
++ rdata->credits = le16_to_cpu(shdr->CreditCharge);
+ flags |= CIFS_HAS_CREDITS;
+ }
+
+@@ -3462,12 +3464,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ if (wdata->credits) {
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ SMB2_MAX_BUFFER_SIZE));
+- shdr->CreditRequest = shdr->CreditCharge;
++ shdr->CreditRequest =
++ cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
+ spin_lock(&server->req_lock);
+ server->credits += wdata->credits -
+ le16_to_cpu(shdr->CreditCharge);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
++ wdata->credits = le16_to_cpu(shdr->CreditCharge);
+ flags |= CIFS_HAS_CREDITS;
+ }
+
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 333729cf46cd..66348b3d28e6 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -378,7 +378,7 @@ smbd_done:
+ if (rc < 0 && rc != -EINTR)
+ cifs_dbg(VFS, "Error %d sending data on socket to server\n",
+ rc);
+- else
++ else if (rc > 0)
+ rc = 0;
+
+ return rc;
+@@ -786,7 +786,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ int i, j, rc = 0;
+ int timeout, optype;
+ struct mid_q_entry *midQ[MAX_COMPOUND];
+- unsigned int credits = 0;
++ bool cancelled_mid[MAX_COMPOUND] = {false};
++ unsigned int credits[MAX_COMPOUND] = {0};
+ char *buf;
+
+ timeout = flags & CIFS_TIMEOUT_MASK;
+@@ -804,13 +805,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ return -ENOENT;
+
+ /*
+- * Ensure that we do not send more than 50 overlapping requests
+- * to the same server. We may make this configurable later or
+- * use ses->maxReq.
++ * Ensure we obtain 1 credit per request in the compound chain.
++ * It can be optimized further by waiting for all the credits
++ * at once but this can wait long enough if we don't have enough
++ * credits due to some heavy operations in progress or the server
++ * not granting us much, so a fallback to the current approach is
++ * needed anyway.
+ */
+- rc = wait_for_free_request(ses->server, timeout, optype);
+- if (rc)
+- return rc;
++ for (i = 0; i < num_rqst; i++) {
++ rc = wait_for_free_request(ses->server, timeout, optype);
++ if (rc) {
++ /*
++ * We haven't sent an SMB packet to the server yet but
++ * we already obtained credits for i requests in the
++ * compound chain - need to return those credits back
++ * for future use. Note that we need to call add_credits
++ * multiple times to match the way we obtained credits
++ * in the first place and to account for in flight
++ * requests correctly.
++ */
++ for (j = 0; j < i; j++)
++ add_credits(ses->server, 1, optype);
++ return rc;
++ }
++ credits[i] = 1;
++ }
+
+ /*
+ * Make sure that we sign in the same order that we send on this socket
+@@ -826,8 +845,10 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ for (j = 0; j < i; j++)
+ cifs_delete_mid(midQ[j]);
+ mutex_unlock(&ses->server->srv_mutex);
++
+ /* Update # of requests on wire to server */
+- add_credits(ses->server, 1, optype);
++ for (j = 0; j < num_rqst; j++)
++ add_credits(ses->server, credits[j], optype);
+ return PTR_ERR(midQ[i]);
+ }
+
+@@ -874,19 +895,16 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+ midQ[i]->callback = DeleteMidQEntry;
+- spin_unlock(&GlobalMid_Lock);
+- add_credits(ses->server, 1, optype);
+- return rc;
++ cancelled_mid[i] = true;
+ }
+ spin_unlock(&GlobalMid_Lock);
+ }
+ }
+
+ for (i = 0; i < num_rqst; i++)
+- if (midQ[i]->resp_buf)
+- credits += ses->server->ops->get_credits(midQ[i]);
+- if (!credits)
+- credits = 1;
++ if (!cancelled_mid[i] && midQ[i]->resp_buf
++ && (midQ[i]->mid_state == MID_RESPONSE_RECEIVED))
++ credits[i] = ses->server->ops->get_credits(midQ[i]);
+
+ for (i = 0; i < num_rqst; i++) {
+ if (rc < 0)
+
+ rc = cifs_sync_mid_result(midQ[i], ses->server);
+ if (rc != 0) {
+- add_credits(ses->server, credits, optype);
+- return rc;
++ /* mark this mid as cancelled to not free it below */
++ cancelled_mid[i] = true;
++ goto out;
+ }
+
+ if (!midQ[i]->resp_buf ||
+@@ -942,9 +961,11 @@ out:
+ * This is prevented above by using a noop callback that will not
+ * wake this thread except for the very last PDU.
+ */
+- for (i = 0; i < num_rqst; i++)
+- cifs_delete_mid(midQ[i]);
+- add_credits(ses->server, credits, optype);
++ for (i = 0; i < num_rqst; i++) {
++ if (!cancelled_mid[i])
++ cifs_delete_mid(midQ[i]);
++ add_credits(ses->server, credits[i], optype);
++ }
+
+ return rc;
+ }
+diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
+index 26a7fe5c4fd3..712f00995390 100644
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -116,8 +116,16 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ goto out;
+ }
+
++ ret = file_write_and_wait_range(file, start, end);
++ if (ret)
++ return ret;
++
+ if (!journal) {
+- ret = __generic_file_fsync(file, start, end, datasync);
++ struct writeback_control wbc = {
++ .sync_mode = WB_SYNC_ALL
++ };
++
++ ret = ext4_write_inode(inode, &wbc);
+ if (!ret)
+ ret = ext4_sync_parent(inode);
+ if (test_opt(inode->i_sb, BARRIER))
+@@ -125,9 +133,6 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ goto out;
+ }
+
+- ret = file_write_and_wait_range(file, start, end);
+- if (ret)
+- return ret;
+ /*
+ * data=writeback,ordered:
+ * The caller's filemap_fdatawrite()/wait will sync the data.
+@@ -159,6 +164,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ ret = err;
+ }
+ out:
++ err = file_check_and_advance_wb_err(file);
++ if (ret == 0)
++ ret = err;
+ trace_ext4_sync_file_exit(inode, ret);
+ return ret;
+ }
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 27373d88b5f0..56f6e1782d5f 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1890,12 +1890,12 @@ int ext4_inline_data_fiemap(struct inode *inode,
+ physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
+ physical += offsetof(struct ext4_inode, i_block);
+
+- if (physical)
+- error = fiemap_fill_next_extent(fieinfo, start, physical,
+- inline_len, flags);
+ brelse(iloc.bh);
+ out:
+ up_read(&EXT4_I(inode)->xattr_sem);
++ if (physical)
++ error = fiemap_fill_next_extent(fieinfo, start, physical,
++ inline_len, flags);
+ return (error < 0 ? error : 0);
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 36abbdafb26e..2c43c5b92229 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2748,7 +2748,8 @@ static int ext4_writepages(struct address_space *mapping,
+ * We may need to convert up to one extent per block in
+ * the page and we may dirty the inode.
+ */
+- rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
++ rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
++ PAGE_SIZE >> inode->i_blkbits);
+ }
+
+ /*
+@@ -4802,7 +4803,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ gid_t i_gid;
+ projid_t i_projid;
+
+- if (((flags & EXT4_IGET_NORMAL) &&
++ if ((!(flags & EXT4_IGET_SPECIAL) &&
+ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
+ (ino < EXT4_ROOT_INO) ||
+ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ee0f30852835..a1cf7d68b4f0 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4904,7 +4904,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+ ext4_superblock_csum_set(sb);
+ if (sync)
+ lock_buffer(sbh);
+- if (buffer_write_io_error(sbh)) {
++ if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 4d36b27214fd..0242f6eec4ea 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -75,7 +75,7 @@
+ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+ #endif
+
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ #define __noretpoline __attribute__((indirect_branch("keep")))
+ #endif
+
+diff --git a/include/linux/module.h b/include/linux/module.h
+index e19ae08c7fb8..904f94628132 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -818,7 +818,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
+ static inline void module_bug_cleanup(struct module *mod) {}
+ #endif /* CONFIG_GENERIC_BUG */
+
+-#ifdef RETPOLINE
++#ifdef CONFIG_RETPOLINE
+ extern bool retpoline_module_ok(bool has_retpoline);
+ #else
+ static inline bool retpoline_module_ok(bool has_retpoline)
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 73e130a840ce..fdb6b317d974 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -295,9 +295,12 @@ struct svc_rqst {
+ struct svc_cacherep * rq_cacherep; /* cache info */
+ struct task_struct *rq_task; /* service thread */
+ spinlock_t rq_lock; /* per-request lock */
++ struct net *rq_bc_net; /* pointer to backchannel's
++ * net namespace
++ */
+ };
+
+-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
++#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
+
+ /*
+ * Rigorous type checking on sockaddr type conversions
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index bbb08a3ef5cc..a2644c494a9c 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -582,7 +582,8 @@ TRACE_EVENT(svc_process,
+ __field(u32, vers)
+ __field(u32, proc)
+ __string(service, name)
+- __string(addr, rqst->rq_xprt->xpt_remotebuf)
++ __string(addr, rqst->rq_xprt ?
++ rqst->rq_xprt->xpt_remotebuf : "(null)")
+ ),
+
+ TP_fast_assign(
+@@ -590,7 +591,8 @@ TRACE_EVENT(svc_process,
+ __entry->vers = rqst->rq_vers;
+ __entry->proc = rqst->rq_proc;
+ __assign_str(service, name);
+- __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
++ __assign_str(addr, rqst->rq_xprt ?
++ rqst->rq_xprt->xpt_remotebuf : "(null)");
+ ),
+
+ TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
1451 +diff --git a/mm/memory.c b/mm/memory.c
1452 +index 5c5df53dbdf9..281172540a9c 100644
1453 +--- a/mm/memory.c
1454 ++++ b/mm/memory.c
1455 +@@ -3237,6 +3237,29 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
1456 + struct vm_area_struct *vma = vmf->vma;
1457 + vm_fault_t ret;
1458 +
1459 ++ /*
1460 ++ * Preallocate pte before we take page_lock because this might lead to
1461 ++ * deadlocks for memcg reclaim which waits for pages under writeback:
1462 ++ * lock_page(A)
1463 ++ * SetPageWriteback(A)
1464 ++ * unlock_page(A)
1465 ++ * lock_page(B)
1466 ++ * lock_page(B)
1467 ++ * pte_alloc_pne
1468 ++ * shrink_page_list
1469 ++ * wait_on_page_writeback(A)
1470 ++ * SetPageWriteback(B)
1471 ++ * unlock_page(B)
1472 ++ * # flush A, B to clear the writeback
1473 ++ */
1474 ++ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
1475 ++ vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
1476 ++ vmf->address);
1477 ++ if (!vmf->prealloc_pte)
1478 ++ return VM_FAULT_OOM;
1479 ++ smp_wmb(); /* See comment in __pte_alloc() */
1480 ++ }
1481 ++
1482 + ret = vma->vm_ops->fault(vmf);
1483 + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
1484 + VM_FAULT_DONE_COW)))
1485 +diff --git a/mm/slab.c b/mm/slab.c
1486 +index d73c7a4820a4..fad6839e8eab 100644
1487 +--- a/mm/slab.c
1488 ++++ b/mm/slab.c
1489 +@@ -679,8 +679,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
1490 + struct alien_cache *alc = NULL;
1491 +
1492 + alc = kmalloc_node(memsize, gfp, node);
1493 +- init_arraycache(&alc->ac, entries, batch);
1494 +- spin_lock_init(&alc->lock);
1495 ++ if (alc) {
1496 ++ init_arraycache(&alc->ac, entries, batch);
1497 ++ spin_lock_init(&alc->lock);
1498 ++ }
1499 + return alc;
1500 + }
1501 +
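
The mm/slab.c hunk is the classic allocate-then-check pattern: only initialize through the pointer once kmalloc_node() is known to have succeeded. A standalone illustration (struct cache and alloc_cache are made-up stand-ins):

    #include <stdlib.h>
    #include <string.h>

    struct cache { size_t entries; };

    static struct cache *alloc_cache(size_t entries)
    {
            struct cache *c = malloc(sizeof(*c));

            if (c) {                        /* touch the object only on success */
                    memset(c, 0, sizeof(*c));
                    c->entries = entries;
            }
            return c;                       /* caller must still handle NULL */
    }

    int main(void)
    {
            struct cache *c = alloc_cache(32);
            free(c);
            return 0;
    }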
1502 +diff --git a/mm/usercopy.c b/mm/usercopy.c
1503 +index 852eb4e53f06..14faadcedd06 100644
1504 +--- a/mm/usercopy.c
1505 ++++ b/mm/usercopy.c
1506 +@@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
1507 + /*
1508 + * Validates that the given object is:
1509 + * - not bogus address
1510 +- * - known-safe heap or stack object
1511 ++ * - fully contained by stack (or stack frame, when available)
1512 ++ * - fully within SLAB object (or object whitelist area, when available)
1513 + * - not in kernel text
1514 + */
1515 + void __check_object_size(const void *ptr, unsigned long n, bool to_user)
1516 +@@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
1517 + /* Check for invalid addresses. */
1518 + check_bogus_address((const unsigned long)ptr, n, to_user);
1519 +
1520 +- /* Check for bad heap object. */
1521 +- check_heap_object(ptr, n, to_user);
1522 +-
1523 + /* Check for bad stack object. */
1524 + switch (check_stack_object(ptr, n)) {
1525 + case NOT_STACK:
1526 +@@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
1527 + usercopy_abort("process stack", NULL, to_user, 0, n);
1528 + }
1529 +
1530 ++ /* Check for bad heap object. */
1531 ++ check_heap_object(ptr, n, to_user);
1532 ++
1533 + /* Check for object in kernel to avoid text exposure. */
1534 + check_kernel_text_object((const unsigned long)ptr, n, to_user);
1535 + }
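
The usercopy reorder moves the stack check ahead of the heap check, so a span already proven to sit entirely on the stack never reaches the heap-object logic at all. A toy model of that short-circuit ordering, with fabricated address ranges standing in for the real stack and heap bounds:

    #include <stdint.h>

    #define STACK_LO 0x1000u
    #define STACK_HI 0x2000u
    #define HEAP_LO  0x8000u
    #define HEAP_HI  0x9000u

    enum region { REGION_STACK, REGION_HEAP, REGION_BAD };

    /* classify against the stack first, mirroring the reordered checks */
    static enum region classify(uintptr_t p, unsigned long n)
    {
            if (p >= STACK_LO && p + n <= STACK_HI)
                    return REGION_STACK;   /* done: heap checks never run */
            if (p >= HEAP_LO && p + n <= HEAP_HI)
                    return REGION_HEAP;
            return REGION_BAD;
    }

    int main(void)
    {
            return classify(0x1800u, 16) == REGION_STACK ? 0 : 1;
    }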
1536 +diff --git a/mm/util.c b/mm/util.c
1537 +index 9e3ebd2ef65f..6a24a1025d77 100644
1538 +--- a/mm/util.c
1539 ++++ b/mm/util.c
1540 +@@ -485,7 +485,7 @@ bool page_mapped(struct page *page)
1541 + return true;
1542 + if (PageHuge(page))
1543 + return false;
1544 +- for (i = 0; i < hpage_nr_pages(page); i++) {
1545 ++ for (i = 0; i < (1 << compound_order(page)); i++) {
1546 + if (atomic_read(&page[i]._mapcount) >= 0)
1547 + return true;
1548 + }
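
The mm/util.c change widens the loop bound from hpage_nr_pages(), which only counts transparent huge pages, to 1 << compound_order(page), which covers the tail pages of any compound page. A toy version of the widened walk ('order' plays the role of compound_order(), struct page is reduced to a mapcount):

    #include <stdio.h>

    struct page { int mapcount; }; /* >= 0 means mapped, like _mapcount */

    static int any_subpage_mapped(const struct page *head, unsigned int order)
    {
            unsigned long i, nr = 1UL << order; /* subpages in the compound page */

            for (i = 0; i < nr; i++)
                    if (head[i].mapcount >= 0)
                            return 1;
            return 0;
    }

    int main(void)
    {
            struct page pages[4] = { { -1 }, { -1 }, { 0 }, { -1 } };
            printf("%d\n", any_subpage_mapped(pages, 2)); /* 1: third subpage */
            return 0;
    }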
1549 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
1550 +index d13e05f1a990..d65f8d35de87 100644
1551 +--- a/net/sunrpc/svc.c
1552 ++++ b/net/sunrpc/svc.c
1553 +@@ -1144,6 +1144,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
1554 + static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
1555 + #endif
1556 +
1557 ++extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
1558 ++
1559 + /*
1560 + * Common routine for processing the RPC request.
1561 + */
1562 +@@ -1172,7 +1174,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1563 + clear_bit(RQ_DROPME, &rqstp->rq_flags);
1564 +
1565 + /* Setup reply header */
1566 +- rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
1567 ++ if (rqstp->rq_prot == IPPROTO_TCP)
1568 ++ svc_tcp_prep_reply_hdr(rqstp);
1569 +
1570 + svc_putu32(resv, rqstp->rq_xid);
1571 +
1572 +@@ -1244,7 +1247,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1573 + * for lower versions. RPC_PROG_MISMATCH seems to be the closest
1574 + * fit.
1575 + */
1576 +- if (versp->vs_need_cong_ctrl &&
1577 ++ if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
1578 + !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
1579 + goto err_bad_vers;
1580 +
1581 +@@ -1336,7 +1339,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1582 + return 0;
1583 +
1584 + close:
1585 +- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1586 ++ if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1587 + svc_close_xprt(rqstp->rq_xprt);
1588 + dprintk("svc: svc_process close\n");
1589 + return 0;
1590 +@@ -1459,10 +1462,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1591 + dprintk("svc: %s(%p)\n", __func__, req);
1592 +
1593 + /* Build the svc_rqst used by the common processing routine */
1594 +- rqstp->rq_xprt = serv->sv_bc_xprt;
1595 + rqstp->rq_xid = req->rq_xid;
1596 + rqstp->rq_prot = req->rq_xprt->prot;
1597 + rqstp->rq_server = serv;
1598 ++ rqstp->rq_bc_net = req->rq_xprt->xprt_net;
1599 +
1600 + rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1601 + memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
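
Taken together, the svc.c hunks stop faking a transport for backchannel requests: rq_xprt stays NULL, rq_bc_net carries the namespace, every remaining rq_xprt use is guarded, and the one transport op the common path needed, xpo_prep_reply_hdr, becomes a direct call keyed off rq_prot. A compressed sketch of that last substitution, replacing an ops-table dispatch with a protocol test (struct reply and the helpers are invented for illustration):

    #include <netinet/in.h> /* IPPROTO_TCP */

    struct reply { unsigned int len; /* bytes reserved at the head */ };

    static void tcp_prep_reply_hdr(struct reply *rep)
    {
            rep->len = 4; /* reserve TCP's 4-byte record-length marker */
    }

    static void prep_reply(struct reply *rep, int prot)
    {
            if (prot == IPPROTO_TCP) /* keyed off rq_prot, no transport deref */
                    tcp_prep_reply_hdr(rep);
    }

    int main(void)
    {
            struct reply r = { .len = 0 };
            prep_reply(&r, IPPROTO_TCP);
            return r.len == 4 ? 0 : 1;
    }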
1602 +diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
1603 +index 83ccd0221c98..6cf0fd37cbf0 100644
1604 +--- a/net/sunrpc/svc_xprt.c
1605 ++++ b/net/sunrpc/svc_xprt.c
1606 +@@ -469,10 +469,11 @@ out:
1607 + */
1608 + void svc_reserve(struct svc_rqst *rqstp, int space)
1609 + {
1610 ++ struct svc_xprt *xprt = rqstp->rq_xprt;
1611 ++
1612 + space += rqstp->rq_res.head[0].iov_len;
1613 +
1614 +- if (space < rqstp->rq_reserved) {
1615 +- struct svc_xprt *xprt = rqstp->rq_xprt;
1616 ++ if (xprt && space < rqstp->rq_reserved) {
1617 + atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
1618 + rqstp->rq_reserved = space;
1619 +
1620 +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
1621 +index fc1c0d9ef57d..97a8282955a8 100644
1622 +--- a/net/sunrpc/svcsock.c
1623 ++++ b/net/sunrpc/svcsock.c
1624 +@@ -1198,7 +1198,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
1625 + /*
1626 + * Setup response header. TCP has a 4B record length field.
1627 + */
1628 +-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
1629 ++void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
1630 + {
1631 + struct kvec *resv = &rqstp->rq_res.head[0];
1632 +
1633 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
1634 +index 0d998c54564d..5a5b3780456f 100644
1635 +--- a/scripts/mod/modpost.c
1636 ++++ b/scripts/mod/modpost.c
1637 +@@ -2157,7 +2157,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
1638 + /* Cannot check for assembler */
1639 + static void add_retpoline(struct buffer *b)
1640 + {
1641 +- buf_printf(b, "\n#ifdef RETPOLINE\n");
1642 ++ buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
1643 + buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
1644 + buf_printf(b, "#endif\n");
1645 + }
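
The modpost hunk changes only the text the tool emits into each module's generated .mod.c, so the guard evaluates against CONFIG_RETPOLINE at module build time. A standalone toy emitter producing the same three lines as the patched add_retpoline():

    #include <stdio.h>

    /* fprintf stands in for modpost's buf_printf(); output text matches
     * the hunk above verbatim */
    static void emit_retpoline_info(FILE *out)
    {
            fprintf(out, "\n#ifdef CONFIG_RETPOLINE\n");
            fprintf(out, "MODULE_INFO(retpoline, \"Y\");\n");
            fprintf(out, "#endif\n");
    }

    int main(void)
    {
            emit_retpoline_info(stdout);
            return 0;
    }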
1646 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1647 +index 854d63c01dd2..8b9f2487969b 100644
1648 +--- a/sound/pci/hda/patch_realtek.c
1649 ++++ b/sound/pci/hda/patch_realtek.c
1650 +@@ -4102,6 +4102,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
1651 + case 0x10ec0295:
1652 + case 0x10ec0289:
1653 + case 0x10ec0299:
1654 ++ alc_process_coef_fw(codec, alc225_pre_hsmode);
1655 + alc_process_coef_fw(codec, coef0225);
1656 + break;
1657 + case 0x10ec0867:
1658 +@@ -5380,6 +5381,13 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
1659 + snd_hda_override_wcaps(codec, 0x03, 0);
1660 + }
1661 +
1662 ++static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
1663 ++ const struct hda_fixup *fix, int action)
1664 ++{
1665 ++ if (action == HDA_FIXUP_ACT_PRE_PROBE)
1666 ++ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
1667 ++}
1668 ++
1669 + /* for hda_fixup_thinkpad_acpi() */
1670 + #include "thinkpad_helper.c"
1671 +
1672 +@@ -5492,6 +5500,7 @@ enum {
1673 + ALC293_FIXUP_LENOVO_SPK_NOISE,
1674 + ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
1675 + ALC255_FIXUP_DELL_SPK_NOISE,
1676 ++ ALC225_FIXUP_DISABLE_MIC_VREF,
1677 + ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
1678 + ALC295_FIXUP_DISABLE_DAC3,
1679 + ALC280_FIXUP_HP_HEADSET_MIC,
1680 +@@ -6191,6 +6200,12 @@ static const struct hda_fixup alc269_fixups[] = {
1681 + .chained = true,
1682 + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
1683 + },
1684 ++ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
1685 ++ .type = HDA_FIXUP_FUNC,
1686 ++ .v.func = alc_fixup_disable_mic_vref,
1687 ++ .chained = true,
1688 ++ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
1689 ++ },
1690 + [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
1691 + .type = HDA_FIXUP_VERBS,
1692 + .v.verbs = (const struct hda_verb[]) {
1693 +@@ -6200,7 +6215,7 @@ static const struct hda_fixup alc269_fixups[] = {
1694 + {}
1695 + },
1696 + .chained = true,
1697 +- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
1698 ++ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
1699 + },
1700 + [ALC280_FIXUP_HP_HEADSET_MIC] = {
1701 + .type = HDA_FIXUP_FUNC,
1702 +@@ -6503,6 +6518,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
1703 + SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
1704 + SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
1705 + SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
1706 ++ SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
1707 + SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1708 + SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
1709 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
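
The Realtek hunks splice a new link into a fixup chain: the ALC225 mic-no-presence entry now chains to ALC225_FIXUP_DISABLE_MIC_VREF, which in turn chains to the original ALC269 entry, so the vref fixup runs without duplicating either neighbor. A toy walk of that chained-table pattern (names and indices are fabricated):

    #include <stdio.h>

    struct fixup {
            const char *name;
            int chained;
            int chain_id; /* index of the next fixup when chained */
    };

    static const struct fixup fixups[] = {
            [0] = { "DELL1_MIC_NO_PRESENCE",  0, 0 },
            [1] = { "DISABLE_MIC_VREF",       1, 0 }, /* new link -> entry 0 */
            [2] = { "ALC225_MIC_NO_PRESENCE", 1, 1 }, /* now routed via 1 */
    };

    static void apply_fixup(int id)
    {
            for (;;) {
                    const struct fixup *f = &fixups[id];

                    printf("apply %s\n", f->name);
                    if (!f->chained)
                            break;
                    id = f->chain_id;
            }
    }

    int main(void)
    {
            apply_fixup(2); /* entry 2 -> vref fixup -> mic-no-presence */
            return 0;
    }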
1710 +diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
1711 +index 8fb31a7cc22c..91495045ad5a 100644
1712 +--- a/virt/kvm/arm/arm.c
1713 ++++ b/virt/kvm/arm/arm.c
1714 +@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
1715 + static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
1716 + static u32 kvm_next_vmid;
1717 + static unsigned int kvm_vmid_bits __read_mostly;
1718 +-static DEFINE_RWLOCK(kvm_vmid_lock);
1719 ++static DEFINE_SPINLOCK(kvm_vmid_lock);
1720 +
1721 + static bool vgic_present;
1722 +
1723 +@@ -482,7 +482,9 @@ void force_vm_exit(const cpumask_t *mask)
1724 + */
1725 + static bool need_new_vmid_gen(struct kvm *kvm)
1726 + {
1727 +- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
1728 ++ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
1729 ++ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
1730 ++ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
1731 + }
1732 +
1733 + /**
1734 +@@ -497,16 +499,11 @@ static void update_vttbr(struct kvm *kvm)
1735 + {
1736 + phys_addr_t pgd_phys;
1737 + u64 vmid;
1738 +- bool new_gen;
1739 +
1740 +- read_lock(&kvm_vmid_lock);
1741 +- new_gen = need_new_vmid_gen(kvm);
1742 +- read_unlock(&kvm_vmid_lock);
1743 +-
1744 +- if (!new_gen)
1745 ++ if (!need_new_vmid_gen(kvm))
1746 + return;
1747 +
1748 +- write_lock(&kvm_vmid_lock);
1749 ++ spin_lock(&kvm_vmid_lock);
1750 +
1751 + /*
1752 + * We need to re-check the vmid_gen here to ensure that if another vcpu
1753 +@@ -514,7 +511,7 @@ static void update_vttbr(struct kvm *kvm)
1754 + * use the same vmid.
1755 + */
1756 + if (!need_new_vmid_gen(kvm)) {
1757 +- write_unlock(&kvm_vmid_lock);
1758 ++ spin_unlock(&kvm_vmid_lock);
1759 + return;
1760 + }
1761 +
1762 +@@ -537,7 +534,6 @@ static void update_vttbr(struct kvm *kvm)
1763 + kvm_call_hyp(__kvm_flush_vm_context);
1764 + }
1765 +
1766 +- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
1767 + kvm->arch.vmid = kvm_next_vmid;
1768 + kvm_next_vmid++;
1769 + kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
1770 +@@ -548,7 +544,10 @@ static void update_vttbr(struct kvm *kvm)
1771 + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
1772 + kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
1773 +
1774 +- write_unlock(&kvm_vmid_lock);
1775 ++ smp_wmb();
1776 ++ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
1777 ++
1778 ++ spin_unlock(&kvm_vmid_lock);
1779 + }
1780 +
1781 + static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
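
The KVM hunks replace the rwlock with a plain spinlock plus a lock-free fast path: readers compare generations with smp_rmb() ordering, and the writer updates the vmid before publishing the new generation with smp_wmb(), so a reader that observes the generation is guaranteed to observe the vmid that goes with it. A userspace analogue of that publish/subscribe ordering, using C11 acquire/release atomics in place of the kernel barriers (all names are illustrative):

    #include <stdatomic.h>
    #include <pthread.h>

    static _Atomic unsigned long global_gen = 1;
    static pthread_mutex_t gen_lock = PTHREAD_MUTEX_INITIALIZER;

    struct vm {
            unsigned long vmid;             /* payload, written first */
            _Atomic unsigned long vmid_gen; /* publication flag, written last */
    };

    static int needs_update(struct vm *vm)
    {
            /* acquire pairs with the release store in refresh() */
            return atomic_load_explicit(&vm->vmid_gen, memory_order_acquire)
                   != atomic_load(&global_gen);
    }

    static void refresh(struct vm *vm, unsigned long new_vmid)
    {
            pthread_mutex_lock(&gen_lock);
            if (needs_update(vm)) {         /* re-check under the lock */
                    vm->vmid = new_vmid;    /* update data first */
                    atomic_store_explicit(&vm->vmid_gen,
                                          atomic_load(&global_gen),
                                          memory_order_release); /* then publish */
            }
            pthread_mutex_unlock(&gen_lock);
    }

    int main(void)
    {
            struct vm vm = { .vmid = 0 };

            atomic_init(&vm.vmid_gen, 0);
            refresh(&vm, 42);
            return needs_update(&vm); /* 0: generation and vmid are consistent */
    }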