
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.20 commit in: /
Date: Fri, 15 Feb 2019 12:36:15
Message-Id: 1550234128.cb73cbeaefb5ee7da3fbe79ec7b7674456f2c557.mpagano@gentoo
1 commit: cb73cbeaefb5ee7da3fbe79ec7b7674456f2c557
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Feb 15 12:35:28 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Feb 15 12:35:28 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb73cbea
7
8 proj/linux-patches: Linux patches 4.20.9 and 4.20.10
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +
13 1008_linux-4.20.9.patch | 1532 ++++++++++++++++++++++++++++++++++++++++++++++
14 1009_linux-4.20.10.patch | 35 ++
15 3 files changed, 1575 insertions(+)
16
17 diff --git a/0000_README b/0000_README
18 index 16edf0d..e40abc7 100644
19 --- a/0000_README
20 +++ b/0000_README
21 @@ -75,6 +75,14 @@ Patch: 1007_linux-4.20.8.patch
22 From: http://www.kernel.org
23 Desc: Linux 4.20.8
24
25 +Patch: 1008_linux-4.20.9.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 4.20.9
28 +
29 +Patch: 1009_linux-4.20.10.patch
30 +From: http://www.kernel.org
31 +Desc: Linux 4.20.10
32 +
33 Patch: 1500_XATTR_USER_PREFIX.patch
34 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
35 Desc: Support for namespace user.pax.* on tmpfs.
36
37 diff --git a/1008_linux-4.20.9.patch b/1008_linux-4.20.9.patch
38 new file mode 100644
39 index 0000000..8d94fa7
40 --- /dev/null
41 +++ b/1008_linux-4.20.9.patch
42 @@ -0,0 +1,1532 @@
43 +diff --git a/Makefile b/Makefile
44 +index d7d190781010..c9b831f5e873 100644
45 +--- a/Makefile
46 ++++ b/Makefile
47 +@@ -1,7 +1,7 @@
48 + # SPDX-License-Identifier: GPL-2.0
49 + VERSION = 4
50 + PATCHLEVEL = 20
51 +-SUBLEVEL = 8
52 ++SUBLEVEL = 9
53 + EXTRAVERSION =
54 + NAME = Shy Crocodile
55 +
56 +diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
57 +index 47aa53ba6b92..559659b399d0 100644
58 +--- a/arch/arm/boot/dts/da850.dtsi
59 ++++ b/arch/arm/boot/dts/da850.dtsi
60 +@@ -476,7 +476,7 @@
61 + clocksource: timer@20000 {
62 + compatible = "ti,da830-timer";
63 + reg = <0x20000 0x1000>;
64 +- interrupts = <12>, <13>;
65 ++ interrupts = <21>, <22>;
66 + interrupt-names = "tint12", "tint34";
67 + clocks = <&pll0_auxclk>;
68 + };
69 +diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
70 +index 3b73813c6b04..23e8c93515d4 100644
71 +--- a/arch/arm/mach-iop32x/n2100.c
72 ++++ b/arch/arm/mach-iop32x/n2100.c
73 +@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
74 + /*
75 + * N2100 PCI.
76 + */
77 +-static int __init
78 +-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
79 ++static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
80 + {
81 + int irq;
82 +
83 +diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
84 +index 028e50c6383f..a32c3b631484 100644
85 +--- a/arch/arm/mach-tango/pm.c
86 ++++ b/arch/arm/mach-tango/pm.c
87 +@@ -3,6 +3,7 @@
88 + #include <linux/suspend.h>
89 + #include <asm/suspend.h>
90 + #include "smc.h"
91 ++#include "pm.h"
92 +
93 + static int tango_pm_powerdown(unsigned long arg)
94 + {
95 +@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
96 + .valid = suspend_valid_only_mem,
97 + };
98 +
99 +-static int __init tango_pm_init(void)
100 ++void __init tango_pm_init(void)
101 + {
102 + suspend_set_ops(&tango_pm_ops);
103 +- return 0;
104 + }
105 +-
106 +-late_initcall(tango_pm_init);
107 +diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
108 +new file mode 100644
109 +index 000000000000..35ea705a0ee2
110 +--- /dev/null
111 ++++ b/arch/arm/mach-tango/pm.h
112 +@@ -0,0 +1,7 @@
113 ++/* SPDX-License-Identifier: GPL-2.0 */
114 ++
115 ++#ifdef CONFIG_SUSPEND
116 ++void __init tango_pm_init(void);
117 ++#else
118 ++#define tango_pm_init NULL
119 ++#endif
120 +diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
121 +index 677dd7b5efd9..824f90737b04 100644
122 +--- a/arch/arm/mach-tango/setup.c
123 ++++ b/arch/arm/mach-tango/setup.c
124 +@@ -2,6 +2,7 @@
125 + #include <asm/mach/arch.h>
126 + #include <asm/hardware/cache-l2x0.h>
127 + #include "smc.h"
128 ++#include "pm.h"
129 +
130 + static void tango_l2c_write(unsigned long val, unsigned int reg)
131 + {
132 +@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
133 + .dt_compat = tango_dt_compat,
134 + .l2c_aux_mask = ~0,
135 + .l2c_write_sec = tango_l2c_write,
136 ++ .init_late = tango_pm_init,
137 + MACHINE_END
138 +diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
139 +index 2152b7ba65fb..cc8dbea0911f 100644
140 +--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
141 ++++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
142 +@@ -90,11 +90,11 @@
143 + interrupts = <0>;
144 + };
145 +
146 +- axi_i2c: i2c@10A00000 {
147 ++ axi_i2c: i2c@10a00000 {
148 + compatible = "xlnx,xps-iic-2.00.a";
149 + interrupt-parent = <&axi_intc>;
150 + interrupts = <4>;
151 +- reg = < 0x10A00000 0x10000 >;
152 ++ reg = < 0x10a00000 0x10000 >;
153 + clocks = <&ext>;
154 + xlnx,clk-freq = <0x5f5e100>;
155 + xlnx,family = "Artix7";
156 +@@ -106,9 +106,9 @@
157 + #address-cells = <1>;
158 + #size-cells = <0>;
159 +
160 +- ad7420@4B {
161 ++ ad7420@4b {
162 + compatible = "adi,adt7420";
163 +- reg = <0x4B>;
164 ++ reg = <0x4b>;
165 + };
166 + } ;
167 + };
168 +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
169 +index 8f5bd04f320a..7f3f136572de 100644
170 +--- a/arch/mips/kernel/mips-cm.c
171 ++++ b/arch/mips/kernel/mips-cm.c
172 +@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
173 + }
174 +
175 + /* reprime cause register */
176 +- write_gcr_error_cause(0);
177 ++ write_gcr_error_cause(cm_error);
178 + }
179 +diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
180 +index a60715e11306..b26892ce871c 100644
181 +--- a/arch/mips/loongson64/common/reset.c
182 ++++ b/arch/mips/loongson64/common/reset.c
183 +@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
184 + {
185 + #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
186 + mach_prepare_shutdown();
187 +- unreachable();
188 ++
189 ++ /*
190 ++ * It needs a wait loop here, but mips/kernel/reset.c already calls
191 ++ * a generic delay loop, machine_hang(), so simply return.
192 ++ */
193 ++ return;
194 + #else
195 + void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
196 +
197 +diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
198 +index 5017d5843c5a..fc29b85cfa92 100644
199 +--- a/arch/mips/pci/pci-octeon.c
200 ++++ b/arch/mips/pci/pci-octeon.c
201 +@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
202 + if (octeon_has_feature(OCTEON_FEATURE_PCIE))
203 + return 0;
204 +
205 ++ if (!octeon_is_pci_host()) {
206 ++ pr_notice("Not in host mode, PCI Controller not initialized\n");
207 ++ return 0;
208 ++ }
209 ++
210 + /* Point pcibios_map_irq() to the PCI version of it */
211 + octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
212 +
213 +@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
214 + else
215 + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
216 +
217 +- if (!octeon_is_pci_host()) {
218 +- pr_notice("Not in host mode, PCI Controller not initialized\n");
219 +- return 0;
220 +- }
221 +-
222 + /* PCI I/O and PCI MEM values */
223 + set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
224 + ioport_resource.start = 0;
225 +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
226 +index 58a0315ad743..67e44466d5a4 100644
227 +--- a/arch/mips/vdso/Makefile
228 ++++ b/arch/mips/vdso/Makefile
229 +@@ -8,6 +8,7 @@ ccflags-vdso := \
230 + $(filter -E%,$(KBUILD_CFLAGS)) \
231 + $(filter -mmicromips,$(KBUILD_CFLAGS)) \
232 + $(filter -march=%,$(KBUILD_CFLAGS)) \
233 ++ $(filter -m%-float,$(KBUILD_CFLAGS)) \
234 + -D__VDSO__
235 +
236 + ifdef CONFIG_CC_IS_CLANG
237 +@@ -128,7 +129,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
238 + $(call cmd,force_checksrc)
239 + $(call if_changed_rule,cc_o_c)
240 +
241 +-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
242 ++$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
243 + $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
244 + $(call if_changed_dep,cpp_lds_S)
245 +
246 +@@ -168,7 +169,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
247 + $(call cmd,force_checksrc)
248 + $(call if_changed_rule,cc_o_c)
249 +
250 +-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
251 ++$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
252 + $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
253 + $(call if_changed_dep,cpp_lds_S)
254 +
255 +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
256 +index 6c99e846a8c9..db706ffc4ca9 100644
257 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
258 ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
259 +@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
260 +
261 + #define pmd_move_must_withdraw pmd_move_must_withdraw
262 + struct spinlock;
263 +-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
264 +- struct spinlock *old_pmd_ptl,
265 +- struct vm_area_struct *vma)
266 +-{
267 +- if (radix_enabled())
268 +- return false;
269 +- /*
270 +- * Archs like ppc64 use pgtable to store per pmd
271 +- * specific information. So when we switch the pmd,
272 +- * we should also withdraw and deposit the pgtable
273 +- */
274 +- return true;
275 +-}
276 +-
277 +-
278 ++extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
279 ++ struct spinlock *old_pmd_ptl,
280 ++ struct vm_area_struct *vma);
281 ++/*
282 ++ * Hash translation mode use the deposited table to store hash pte
283 ++ * slot information.
284 ++ */
285 + #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
286 + static inline bool arch_needs_pgtable_deposit(void)
287 + {
288 +diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
289 +index 9f93c9f985c5..30d89a37fe62 100644
290 +--- a/arch/powerpc/mm/pgtable-book3s64.c
291 ++++ b/arch/powerpc/mm/pgtable-book3s64.c
292 +@@ -482,3 +482,25 @@ void arch_report_meminfo(struct seq_file *m)
293 + atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
294 + }
295 + #endif /* CONFIG_PROC_FS */
296 ++
297 ++/*
298 ++ * For hash translation mode, we use the deposited table to store hash slot
299 ++ * information and they are stored at PTRS_PER_PMD offset from related pmd
300 ++ * location. Hence a pmd move requires deposit and withdraw.
301 ++ *
302 ++ * For radix translation with split pmd ptl, we store the deposited table in the
303 ++ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
304 ++ * move.
305 ++ *
306 ++ * With hash we use deposited table always irrespective of anon or not.
307 ++ * With radix we use deposited table only for anonymous mapping.
308 ++ */
309 ++int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
310 ++ struct spinlock *old_pmd_ptl,
311 ++ struct vm_area_struct *vma)
312 ++{
313 ++ if (radix_enabled())
314 ++ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
315 ++
316 ++ return true;
317 ++}
318 +diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
319 +index 7d6457ab5d34..bba281b1fe1b 100644
320 +--- a/arch/powerpc/platforms/pseries/papr_scm.c
321 ++++ b/arch/powerpc/platforms/pseries/papr_scm.c
322 +@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
323 + {
324 + unsigned long ret[PLPAR_HCALL_BUFSIZE];
325 + uint64_t rc, token;
326 ++ uint64_t saved = 0;
327 +
328 + /*
329 + * When the hypervisor cannot map all the requested memory in a single
330 +@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
331 + rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
332 + p->blocks, BIND_ANY_ADDR, token);
333 + token = ret[0];
334 ++ if (!saved)
335 ++ saved = ret[1];
336 + cond_resched();
337 + } while (rc == H_BUSY);
338 +
339 +@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
340 + return -ENXIO;
341 + }
342 +
343 +- p->bound_addr = ret[1];
344 ++ p->bound_addr = saved;
345 +
346 + dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
347 +
348 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
349 +index b8c3f9e6af89..adf28788cab5 100644
350 +--- a/drivers/ata/libata-core.c
351 ++++ b/drivers/ata/libata-core.c
352 +@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
353 + { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
354 + { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
355 + { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
356 ++ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
357 +
358 + /* devices that don't properly handle queued TRIM commands */
359 + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
360 +diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
361 +index 472c88ae1c0f..92f843eaf1e0 100644
362 +--- a/drivers/firmware/arm_scmi/bus.c
363 ++++ b/drivers/firmware/arm_scmi/bus.c
364 +@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
365 + }
366 + EXPORT_SYMBOL_GPL(scmi_driver_unregister);
367 +
368 ++static void scmi_device_release(struct device *dev)
369 ++{
370 ++ kfree(to_scmi_dev(dev));
371 ++}
372 ++
373 + struct scmi_device *
374 + scmi_device_create(struct device_node *np, struct device *parent, int protocol)
375 + {
376 +@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
377 + scmi_dev->dev.parent = parent;
378 + scmi_dev->dev.of_node = np;
379 + scmi_dev->dev.bus = &scmi_bus_type;
380 ++ scmi_dev->dev.release = scmi_device_release;
381 + dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
382 +
383 + retval = device_register(&scmi_dev->dev);
384 +@@ -156,9 +162,8 @@ free_mem:
385 + void scmi_device_destroy(struct scmi_device *scmi_dev)
386 + {
387 + scmi_handle_put(scmi_dev->handle);
388 +- device_unregister(&scmi_dev->dev);
389 + ida_simple_remove(&scmi_bus_id, scmi_dev->id);
390 +- kfree(scmi_dev);
391 ++ device_unregister(&scmi_dev->dev);
392 + }
393 +
394 + void scmi_set_handle(struct scmi_device *scmi_dev)
395 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
396 +index dd18cb710391..0b945d0fd732 100644
397 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
398 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
399 +@@ -1005,6 +1005,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
400 + break;
401 + case amd_pp_dpp_clock:
402 + pclk_vol_table = pinfo->vdd_dep_on_dppclk;
403 ++ break;
404 + default:
405 + return -EINVAL;
406 + }
407 +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
408 +index 02db9ac82d7a..a3104d79b48f 100644
409 +--- a/drivers/gpu/drm/drm_modes.c
410 ++++ b/drivers/gpu/drm/drm_modes.c
411 +@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
412 + if (mode->hsync)
413 + return mode->hsync;
414 +
415 +- if (mode->htotal < 0)
416 ++ if (mode->htotal <= 0)
417 + return 0;
418 +
419 + calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
420 +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
421 +index 5186cd7075f9..372f30d286e3 100644
422 +--- a/drivers/gpu/drm/i915/intel_ddi.c
423 ++++ b/drivers/gpu/drm/i915/intel_ddi.c
424 +@@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
425 + return DDI_CLK_SEL_TBT_810;
426 + default:
427 + MISSING_CASE(clock);
428 +- break;
429 ++ return DDI_CLK_SEL_NONE;
430 + }
431 + case DPLL_ID_ICL_MGPLL1:
432 + case DPLL_ID_ICL_MGPLL2:
433 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
434 +index c9878dd1f7cd..a8293a7bab8f 100644
435 +--- a/drivers/gpu/drm/i915/intel_display.c
436 ++++ b/drivers/gpu/drm/i915/intel_display.c
437 +@@ -15684,15 +15684,44 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
438 + }
439 + }
440 +
441 ++static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
442 ++{
443 ++ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
444 ++
445 ++ /*
446 ++ * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
447 ++ * the hardware when a high res displays plugged in. DPLL P
448 ++ * divider is zero, and the pipe timings are bonkers. We'll
449 ++ * try to disable everything in that case.
450 ++ *
451 ++ * FIXME would be nice to be able to sanitize this state
452 ++ * without several WARNs, but for now let's take the easy
453 ++ * road.
454 ++ */
455 ++ return IS_GEN6(dev_priv) &&
456 ++ crtc_state->base.active &&
457 ++ crtc_state->shared_dpll &&
458 ++ crtc_state->port_clock == 0;
459 ++}
460 ++
461 + static void intel_sanitize_encoder(struct intel_encoder *encoder)
462 + {
463 + struct intel_connector *connector;
464 ++ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
465 ++ struct intel_crtc_state *crtc_state = crtc ?
466 ++ to_intel_crtc_state(crtc->base.state) : NULL;
467 +
468 + /* We need to check both for a crtc link (meaning that the
469 + * encoder is active and trying to read from a pipe) and the
470 + * pipe itself being active. */
471 +- bool has_active_crtc = encoder->base.crtc &&
472 +- to_intel_crtc(encoder->base.crtc)->active;
473 ++ bool has_active_crtc = crtc_state &&
474 ++ crtc_state->base.active;
475 ++
476 ++ if (crtc_state && has_bogus_dpll_config(crtc_state)) {
477 ++ DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
478 ++ pipe_name(crtc->pipe));
479 ++ has_active_crtc = false;
480 ++ }
481 +
482 + connector = intel_encoder_find_connector(encoder);
483 + if (connector && !has_active_crtc) {
484 +@@ -15703,15 +15732,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
485 + /* Connector is active, but has no active pipe. This is
486 + * fallout from our resume register restoring. Disable
487 + * the encoder manually again. */
488 +- if (encoder->base.crtc) {
489 +- struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
490 ++ if (crtc_state) {
491 ++ struct drm_encoder *best_encoder;
492 +
493 + DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
494 + encoder->base.base.id,
495 + encoder->base.name);
496 +- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
497 ++
498 ++ /* avoid oopsing in case the hooks consult best_encoder */
499 ++ best_encoder = connector->base.state->best_encoder;
500 ++ connector->base.state->best_encoder = &encoder->base;
501 ++
502 ++ if (encoder->disable)
503 ++ encoder->disable(encoder, crtc_state,
504 ++ connector->base.state);
505 + if (encoder->post_disable)
506 +- encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
507 ++ encoder->post_disable(encoder, crtc_state,
508 ++ connector->base.state);
509 ++
510 ++ connector->base.state->best_encoder = best_encoder;
511 + }
512 + encoder->base.crtc = NULL;
513 +
514 +diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
515 +index 37f93022a106..c0351abf83a3 100644
516 +--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
517 ++++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
518 +@@ -1,17 +1,8 @@
519 +-//SPDX-License-Identifier: GPL-2.0+
520 ++// SPDX-License-Identifier: GPL-2.0
521 + /*
522 + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
523 + * Author:
524 + * Sandy Huang <hjc@××××××××××.com>
525 +- *
526 +- * This software is licensed under the terms of the GNU General Public
527 +- * License version 2, as published by the Free Software Foundation, and
528 +- * may be copied, distributed, and modified under those terms.
529 +- *
530 +- * This program is distributed in the hope that it will be useful,
531 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
532 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
533 +- * GNU General Public License for more details.
534 + */
535 +
536 + #include <drm/drmP.h>
537 +diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
538 +index 38b52e63b2b0..27b9635124bc 100644
539 +--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
540 ++++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
541 +@@ -1,17 +1,8 @@
542 +-//SPDX-License-Identifier: GPL-2.0+
543 ++/* SPDX-License-Identifier: GPL-2.0 */
544 + /*
545 + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
546 + * Author:
547 + * Sandy Huang <hjc@××××××××××.com>
548 +- *
549 +- * This software is licensed under the terms of the GNU General Public
550 +- * License version 2, as published by the Free Software Foundation, and
551 +- * may be copied, distributed, and modified under those terms.
552 +- *
553 +- * This program is distributed in the hope that it will be useful,
554 +- * but WITHOUT ANY WARRANTY; without even the implied warranty of
555 +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
556 +- * GNU General Public License for more details.
557 + */
558 +
559 + #ifdef CONFIG_ROCKCHIP_RGB
560 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
561 +index d7a2dfb8ee9b..ddf80935c4b9 100644
562 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
563 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
564 +@@ -629,13 +629,16 @@ out_fixup:
565 + static int vmw_dma_masks(struct vmw_private *dev_priv)
566 + {
567 + struct drm_device *dev = dev_priv->dev;
568 ++ int ret = 0;
569 +
570 +- if (intel_iommu_enabled &&
571 ++ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
572 ++ if (dev_priv->map_mode != vmw_dma_phys &&
573 + (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
574 + DRM_INFO("Restricting DMA addresses to 44 bits.\n");
575 +- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
576 ++ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
577 + }
578 +- return 0;
579 ++
580 ++ return ret;
581 + }
582 + #else
583 + static int vmw_dma_masks(struct vmw_private *dev_priv)
584 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
585 +index f2d13a72c05d..88b8178d4687 100644
586 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
587 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
588 +@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
589 + *p_fence = NULL;
590 + }
591 +
592 +- return 0;
593 ++ return ret;
594 + }
595 +
596 + /**
597 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
598 +index dca04d4246ea..d59125c55dc2 100644
599 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
600 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
601 +@@ -2592,8 +2592,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
602 + user_fence_rep)
603 + {
604 + struct vmw_fence_obj *fence = NULL;
605 +- uint32_t handle;
606 +- int ret;
607 ++ uint32_t handle = 0;
608 ++ int ret = 0;
609 +
610 + if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
611 + out_fence)
612 +diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
613 +index 031d568b4972..4e339cfd0c54 100644
614 +--- a/drivers/iio/adc/axp288_adc.c
615 ++++ b/drivers/iio/adc/axp288_adc.c
616 +@@ -27,9 +27,18 @@
617 + #include <linux/iio/machine.h>
618 + #include <linux/iio/driver.h>
619 +
620 +-#define AXP288_ADC_EN_MASK 0xF1
621 +-#define AXP288_ADC_TS_PIN_GPADC 0xF2
622 +-#define AXP288_ADC_TS_PIN_ON 0xF3
623 ++/*
624 ++ * This mask enables all ADCs except for the battery temp-sensor (TS), that is
625 ++ * left as-is to avoid breaking charging on devices without a temp-sensor.
626 ++ */
627 ++#define AXP288_ADC_EN_MASK 0xF0
628 ++#define AXP288_ADC_TS_ENABLE 0x01
629 ++
630 ++#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
631 ++#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
632 ++#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
633 ++#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
634 ++#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
635 +
636 + enum axp288_adc_id {
637 + AXP288_ADC_TS,
638 +@@ -44,6 +53,7 @@ enum axp288_adc_id {
639 + struct axp288_adc_info {
640 + int irq;
641 + struct regmap *regmap;
642 ++ bool ts_enabled;
643 + };
644 +
645 + static const struct iio_chan_spec axp288_adc_channels[] = {
646 +@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
647 + return IIO_VAL_INT;
648 + }
649 +
650 +-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
651 +- unsigned long address)
652 ++/*
653 ++ * The current-source used for the battery temp-sensor (TS) is shared
654 ++ * with the GPADC. For proper fuel-gauge and charger operation the TS
655 ++ * current-source needs to be permanently on. But to read the GPADC we
656 ++ * need to temporary switch the TS current-source to ondemand, so that
657 ++ * the GPADC can use it, otherwise we will always read an all 0 value.
658 ++ */
659 ++static int axp288_adc_set_ts(struct axp288_adc_info *info,
660 ++ unsigned int mode, unsigned long address)
661 + {
662 + int ret;
663 +
664 +- /* channels other than GPADC do not need to switch TS pin */
665 ++ /* No need to switch the current-source if the TS pin is disabled */
666 ++ if (!info->ts_enabled)
667 ++ return 0;
668 ++
669 ++ /* Channels other than GPADC do not need the current source */
670 + if (address != AXP288_GP_ADC_H)
671 + return 0;
672 +
673 +- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
674 ++ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
675 ++ AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
676 + if (ret)
677 + return ret;
678 +
679 + /* When switching to the GPADC pin give things some time to settle */
680 +- if (mode == AXP288_ADC_TS_PIN_GPADC)
681 ++ if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
682 + usleep_range(6000, 10000);
683 +
684 + return 0;
685 +@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
686 + mutex_lock(&indio_dev->mlock);
687 + switch (mask) {
688 + case IIO_CHAN_INFO_RAW:
689 +- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
690 ++ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
691 + chan->address)) {
692 + dev_err(&indio_dev->dev, "GPADC mode\n");
693 + ret = -EINVAL;
694 + break;
695 + }
696 + ret = axp288_adc_read_channel(val, chan->address, info->regmap);
697 +- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
698 ++ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
699 + chan->address))
700 + dev_err(&indio_dev->dev, "TS pin restore\n");
701 + break;
702 +@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
703 + return ret;
704 + }
705 +
706 +-static int axp288_adc_set_state(struct regmap *regmap)
707 ++static int axp288_adc_initialize(struct axp288_adc_info *info)
708 + {
709 +- /* ADC should be always enabled for internal FG to function */
710 +- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
711 +- return -EIO;
712 ++ int ret, adc_enable_val;
713 ++
714 ++ /*
715 ++ * Determine if the TS pin is enabled and set the TS current-source
716 ++ * accordingly.
717 ++ */
718 ++ ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
719 ++ if (ret)
720 ++ return ret;
721 ++
722 ++ if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
723 ++ info->ts_enabled = true;
724 ++ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
725 ++ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
726 ++ AXP288_ADC_TS_CURRENT_ON);
727 ++ } else {
728 ++ info->ts_enabled = false;
729 ++ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
730 ++ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
731 ++ AXP288_ADC_TS_CURRENT_OFF);
732 ++ }
733 ++ if (ret)
734 ++ return ret;
735 +
736 +- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
737 ++ /* Turn on the ADC for all channels except TS, leave TS as is */
738 ++ return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
739 ++ AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
740 + }
741 +
742 + static const struct iio_info axp288_adc_iio_info = {
743 +@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
744 + * Set ADC to enabled state at all time, including system suspend.
745 + * otherwise internal fuel gauge functionality may be affected.
746 + */
747 +- ret = axp288_adc_set_state(axp20x->regmap);
748 ++ ret = axp288_adc_initialize(info);
749 + if (ret) {
750 + dev_err(&pdev->dev, "unable to enable ADC device\n");
751 + return ret;
752 +diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
753 +index 184d686ebd99..8b4568edd5cb 100644
754 +--- a/drivers/iio/adc/ti-ads8688.c
755 ++++ b/drivers/iio/adc/ti-ads8688.c
756 +@@ -41,6 +41,7 @@
757 +
758 + #define ADS8688_VREF_MV 4096
759 + #define ADS8688_REALBITS 16
760 ++#define ADS8688_MAX_CHANNELS 8
761 +
762 + /*
763 + * enum ads8688_range - ADS8688 reference voltage range
764 +@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
765 + {
766 + struct iio_poll_func *pf = p;
767 + struct iio_dev *indio_dev = pf->indio_dev;
768 +- u16 buffer[8];
769 ++ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
770 + int i, j = 0;
771 +
772 + for (i = 0; i < indio_dev->masklength; i++) {
773 +diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
774 +index a406ad31b096..3a20cb5d9bff 100644
775 +--- a/drivers/iio/chemical/atlas-ph-sensor.c
776 ++++ b/drivers/iio/chemical/atlas-ph-sensor.c
777 +@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
778 + case IIO_CHAN_INFO_SCALE:
779 + switch (chan->type) {
780 + case IIO_TEMP:
781 +- *val = 1; /* 0.01 */
782 +- *val2 = 100;
783 +- break;
784 ++ *val = 10;
785 ++ return IIO_VAL_INT;
786 + case IIO_PH:
787 + *val = 1; /* 0.001 */
788 + *val2 = 1000;
789 +@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
790 + int val, int val2, long mask)
791 + {
792 + struct atlas_data *data = iio_priv(indio_dev);
793 +- __be32 reg = cpu_to_be32(val);
794 ++ __be32 reg = cpu_to_be32(val / 10);
795 +
796 + if (val2 != 0 || val < 0 || val > 20000)
797 + return -EINVAL;
798 +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
799 +index 23739a60517f..bb1ee9834a02 100644
800 +--- a/drivers/misc/mei/hw-me-regs.h
801 ++++ b/drivers/misc/mei/hw-me-regs.h
802 +@@ -139,6 +139,8 @@
803 + #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
804 + #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
805 +
806 ++#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
807 ++
808 + /*
809 + * MEI HW Section
810 + */
811 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
812 +index c8e21c894a5f..4299658d48d6 100644
813 +--- a/drivers/misc/mei/pci-me.c
814 ++++ b/drivers/misc/mei/pci-me.c
815 +@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
816 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
817 + {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
818 +
819 ++ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
820 ++
821 + /* required last entry */
822 + {0, }
823 + };
824 +diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
825 +index 02a9aba85368..17b6398cf66c 100644
826 +--- a/drivers/misc/mic/vop/vop_main.c
827 ++++ b/drivers/misc/mic/vop/vop_main.c
828 +@@ -568,6 +568,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
829 + int ret = -1;
830 +
831 + if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
832 ++ struct device *dev = get_device(&vdev->vdev.dev);
833 ++
834 + dev_dbg(&vpdev->dev,
835 + "%s %d config_change %d type %d vdev %p\n",
836 + __func__, __LINE__,
837 +@@ -579,7 +581,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
838 + iowrite8(-1, &dc->h2c_vdev_db);
839 + if (status & VIRTIO_CONFIG_S_DRIVER_OK)
840 + wait_for_completion(&vdev->reset_done);
841 +- put_device(&vdev->vdev.dev);
842 ++ put_device(dev);
843 + iowrite8(1, &dc->guest_ack);
844 + dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
845 + __func__, __LINE__, ioread8(&dc->guest_ack));
846 +diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
847 +index 6c3591cdf855..a3c6c773d9dc 100644
848 +--- a/drivers/misc/vexpress-syscfg.c
849 ++++ b/drivers/misc/vexpress-syscfg.c
850 +@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
851 + int tries;
852 + long timeout;
853 +
854 +- if (WARN_ON(index > func->num_templates))
855 ++ if (WARN_ON(index >= func->num_templates))
856 + return -EINVAL;
857 +
858 + command = readl(syscfg->base + SYS_CFGCTRL);
859 +diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
860 +index 99c460facd5e..0bbb23b014f1 100644
861 +--- a/drivers/mtd/mtdpart.c
862 ++++ b/drivers/mtd/mtdpart.c
863 +@@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
864 + /* let's register it anyway to preserve ordering */
865 + slave->offset = 0;
866 + slave->mtd.size = 0;
867 ++
868 ++ /* Initialize ->erasesize to make add_mtd_device() happy. */
869 ++ slave->mtd.erasesize = parent->erasesize;
870 ++
871 + printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
872 + part->name);
873 + goto out_register;
874 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
875 +index bd4cfac6b5aa..a4768df5083f 100644
876 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
877 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
878 +@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
879 +
880 + /*
881 + * Reset BCH here, too. We got failures otherwise :(
882 +- * See later BCH reset for explanation of MX23 handling
883 ++ * See later BCH reset for explanation of MX23 and MX28 handling
884 + */
885 +- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
886 ++ ret = gpmi_reset_block(r->bch_regs,
887 ++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
888 + if (ret)
889 + goto err_out;
890 +
891 +@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
892 + /*
893 + * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
894 + * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
895 +- * On the other hand, the MX28 needs the reset, because one case has been
896 +- * seen where the BCH produced ECC errors constantly after 10000
897 +- * consecutive reboots. The latter case has not been seen on the MX23
898 +- * yet, still we don't know if it could happen there as well.
899 ++ * and MX28.
900 + */
901 +- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
902 ++ ret = gpmi_reset_block(r->bch_regs,
903 ++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
904 + if (ret)
905 + goto err_out;
906 +
907 +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
908 +index 30f83649c481..8c7bf91ce4e1 100644
909 +--- a/drivers/mtd/nand/spi/core.c
910 ++++ b/drivers/mtd/nand/spi/core.c
911 +@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
912 + struct nand_device *nand = spinand_to_nand(spinand);
913 + struct mtd_info *mtd = nanddev_to_mtd(nand);
914 + struct nand_page_io_req adjreq = *req;
915 +- unsigned int nbytes = 0;
916 +- void *buf = NULL;
917 ++ void *buf = spinand->databuf;
918 ++ unsigned int nbytes;
919 + u16 column = 0;
920 + int ret;
921 +
922 +- memset(spinand->databuf, 0xff,
923 +- nanddev_page_size(nand) +
924 +- nanddev_per_page_oobsize(nand));
925 ++ /*
926 ++ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
927 ++ * the cache content to 0xFF (depends on vendor implementation), so we
928 ++ * must fill the page cache entirely even if we only want to program
929 ++ * the data portion of the page, otherwise we might corrupt the BBM or
930 ++ * user data previously programmed in OOB area.
931 ++ */
932 ++ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
933 ++ memset(spinand->databuf, 0xff, nbytes);
934 ++ adjreq.dataoffs = 0;
935 ++ adjreq.datalen = nanddev_page_size(nand);
936 ++ adjreq.databuf.out = spinand->databuf;
937 ++ adjreq.ooblen = nanddev_per_page_oobsize(nand);
938 ++ adjreq.ooboffs = 0;
939 ++ adjreq.oobbuf.out = spinand->oobbuf;
940 +
941 +- if (req->datalen) {
942 ++ if (req->datalen)
943 + memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
944 + req->datalen);
945 +- adjreq.dataoffs = 0;
946 +- adjreq.datalen = nanddev_page_size(nand);
947 +- adjreq.databuf.out = spinand->databuf;
948 +- nbytes = adjreq.datalen;
949 +- buf = spinand->databuf;
950 +- }
951 +
952 + if (req->ooblen) {
953 + if (req->mode == MTD_OPS_AUTO_OOB)
954 +@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
955 + else
956 + memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
957 + req->ooblen);
958 +-
959 +- adjreq.ooblen = nanddev_per_page_oobsize(nand);
960 +- adjreq.ooboffs = 0;
961 +- nbytes += nanddev_per_page_oobsize(nand);
962 +- if (!buf) {
963 +- buf = spinand->oobbuf;
964 +- column = nanddev_page_size(nand);
965 +- }
966 + }
967 +
968 + spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
969 +@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
970 +
971 + /*
972 + * We need to use the RANDOM LOAD CACHE operation if there's
973 +- * more than one iteration, because the LOAD operation resets
974 +- * the cache to 0xff.
975 ++ * more than one iteration, because the LOAD operation might
976 ++ * reset the cache to 0xff.
977 + */
978 + if (nbytes) {
979 + column = op.addr.val;
980 +@@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand)
981 + for (i = 0; i < nand->memorg.ntargets; i++) {
982 + ret = spinand_select_target(spinand, i);
983 + if (ret)
984 +- goto err_free_bufs;
985 ++ goto err_manuf_cleanup;
986 +
987 + ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
988 + if (ret)
989 +- goto err_free_bufs;
990 ++ goto err_manuf_cleanup;
991 + }
992 +
993 + ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
994 +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
995 +index 9b0f4b9ef482..8efe8ea45602 100644
996 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
997 ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
998 +@@ -1507,7 +1507,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
999 + .matches = {
1000 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1001 + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
1002 +- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
1003 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1004 + },
1005 + },
1006 + {
1007 +@@ -1515,7 +1515,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1008 + .matches = {
1009 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1010 + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1011 +- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
1012 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1013 + },
1014 + },
1015 + {
1016 +@@ -1523,7 +1523,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1017 + .matches = {
1018 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1019 + DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
1020 +- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
1021 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1022 + },
1023 + },
1024 + {
1025 +@@ -1531,7 +1531,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1026 + .matches = {
1027 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1028 + DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
1029 +- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
1030 ++ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1031 + },
1032 + },
1033 + {}
1034 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
1035 +index aa8b58125568..ef4268cc6227 100644
1036 +--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
1037 ++++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
1038 +@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
1039 + static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
1040 + .pins = h6_pins,
1041 + .npins = ARRAY_SIZE(h6_pins),
1042 +- .irq_banks = 3,
1043 ++ .irq_banks = 4,
1044 + .irq_bank_map = h6_irq_bank_map,
1045 + .irq_read_needs_mux = true,
1046 + };
1047 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
1048 +index 13b01351dd1c..41ef452c1fcf 100644
1049 +--- a/fs/debugfs/inode.c
1050 ++++ b/fs/debugfs/inode.c
1051 +@@ -787,6 +787,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
1052 + struct dentry *dentry = NULL, *trap;
1053 + struct name_snapshot old_name;
1054 +
1055 ++ if (IS_ERR(old_dir))
1056 ++ return old_dir;
1057 ++ if (IS_ERR(new_dir))
1058 ++ return new_dir;
1059 ++ if (IS_ERR_OR_NULL(old_dentry))
1060 ++ return old_dentry;
1061 ++
1062 + trap = lock_rename(new_dir, old_dir);
1063 + /* Source or destination directories don't exist? */
1064 + if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
1065 +diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
1066 +index 712f00995390..5508baa11bb6 100644
1067 +--- a/fs/ext4/fsync.c
1068 ++++ b/fs/ext4/fsync.c
1069 +@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1070 + goto out;
1071 + }
1072 +
1073 +- ret = file_write_and_wait_range(file, start, end);
1074 +- if (ret)
1075 +- return ret;
1076 +-
1077 + if (!journal) {
1078 +- struct writeback_control wbc = {
1079 +- .sync_mode = WB_SYNC_ALL
1080 +- };
1081 +-
1082 +- ret = ext4_write_inode(inode, &wbc);
1083 ++ ret = __generic_file_fsync(file, start, end, datasync);
1084 + if (!ret)
1085 + ret = ext4_sync_parent(inode);
1086 + if (test_opt(inode->i_sb, BARRIER))
1087 +@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1088 + goto out;
1089 + }
1090 +
1091 ++ ret = file_write_and_wait_range(file, start, end);
1092 ++ if (ret)
1093 ++ return ret;
1094 + /*
1095 + * data=writeback,ordered:
1096 + * The caller's filemap_fdatawrite()/wait will sync the data.
1097 +diff --git a/kernel/signal.c b/kernel/signal.c
1098 +index 9a32bc2088c9..cf4cf68c3ea8 100644
1099 +--- a/kernel/signal.c
1100 ++++ b/kernel/signal.c
1101 +@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
1102 + }
1103 + EXPORT_SYMBOL_GPL(dequeue_signal);
1104 +
1105 ++static int dequeue_synchronous_signal(kernel_siginfo_t *info)
1106 ++{
1107 ++ struct task_struct *tsk = current;
1108 ++ struct sigpending *pending = &tsk->pending;
1109 ++ struct sigqueue *q, *sync = NULL;
1110 ++
1111 ++ /*
1112 ++ * Might a synchronous signal be in the queue?
1113 ++ */
1114 ++ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
1115 ++ return 0;
1116 ++
1117 ++ /*
1118 ++ * Return the first synchronous signal in the queue.
1119 ++ */
1120 ++ list_for_each_entry(q, &pending->list, list) {
1121 ++ /* Synchronous signals have a postive si_code */
1122 ++ if ((q->info.si_code > SI_USER) &&
1123 ++ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
1124 ++ sync = q;
1125 ++ goto next;
1126 ++ }
1127 ++ }
1128 ++ return 0;
1129 ++next:
1130 ++ /*
1131 ++ * Check if there is another siginfo for the same signal.
1132 ++ */
1133 ++ list_for_each_entry_continue(q, &pending->list, list) {
1134 ++ if (q->info.si_signo == sync->info.si_signo)
1135 ++ goto still_pending;
1136 ++ }
1137 ++
1138 ++ sigdelset(&pending->signal, sync->info.si_signo);
1139 ++ recalc_sigpending();
1140 ++still_pending:
1141 ++ list_del_init(&sync->list);
1142 ++ copy_siginfo(info, &sync->info);
1143 ++ __sigqueue_free(sync);
1144 ++ return info->si_signo;
1145 ++}
1146 ++
1147 + /*
1148 + * Tell a process that it has a new active signal..
1149 + *
1150 +@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
1151 +
1152 + result = TRACE_SIGNAL_DELIVERED;
1153 + /*
1154 +- * Skip useless siginfo allocation for SIGKILL SIGSTOP,
1155 +- * and kernel threads.
1156 ++ * Skip useless siginfo allocation for SIGKILL and kernel threads.
1157 + */
1158 +- if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
1159 ++ if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1160 + goto out_set;
1161 +
1162 + /*
1163 +@@ -2394,6 +2435,11 @@ relock:
1164 + goto relock;
1165 + }
1166 +
1167 ++ /* Has this task already been marked for death? */
1168 ++ ksig->info.si_signo = signr = SIGKILL;
1169 ++ if (signal_group_exit(signal))
1170 ++ goto fatal;
1171 ++
1172 + for (;;) {
1173 + struct k_sigaction *ka;
1174 +
1175 +@@ -2407,7 +2453,15 @@ relock:
1176 + goto relock;
1177 + }
1178 +
1179 +- signr = dequeue_signal(current, &current->blocked, &ksig->info);
1180 ++ /*
1181 ++ * Signals generated by the execution of an instruction
1182 ++ * need to be delivered before any other pending signals
1183 ++ * so that the instruction pointer in the signal stack
1184 ++ * frame points to the faulting instruction.
1185 ++ */
1186 ++ signr = dequeue_synchronous_signal(&ksig->info);
1187 ++ if (!signr)
1188 ++ signr = dequeue_signal(current, &current->blocked, &ksig->info);
1189 +
1190 + if (!signr)
1191 + break; /* will return 0 */
1192 +@@ -2489,6 +2543,7 @@ relock:
1193 + continue;
1194 + }
1195 +
1196 ++ fatal:
1197 + spin_unlock_irq(&sighand->siglock);
1198 +
1199 + /*
1200 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1201 +index 31ea48eceda1..ec8332c5056a 100644
1202 +--- a/kernel/trace/trace_uprobe.c
1203 ++++ b/kernel/trace/trace_uprobe.c
1204 +@@ -5,7 +5,7 @@
1205 + * Copyright (C) IBM Corporation, 2010-2012
1206 + * Author: Srikar Dronamraju <srikar@××××××××××××××.com>
1207 + */
1208 +-#define pr_fmt(fmt) "trace_kprobe: " fmt
1209 ++#define pr_fmt(fmt) "trace_uprobe: " fmt
1210 +
1211 + #include <linux/module.h>
1212 + #include <linux/uaccess.h>
1213 +@@ -127,6 +127,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
1214 + if (ret >= 0) {
1215 + if (ret == maxlen)
1216 + dst[ret - 1] = '\0';
1217 ++ else
1218 ++ /*
1219 ++ * Include the terminating null byte. In this case it
1220 ++ * was copied by strncpy_from_user but not accounted
1221 ++ * for in ret.
1222 ++ */
1223 ++ ret++;
1224 + *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
1225 + }
1226 +
1227 +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
1228 +index 781c5b6e6e8e..41be60d54001 100644
1229 +--- a/net/batman-adv/hard-interface.c
1230 ++++ b/net/batman-adv/hard-interface.c
1231 +@@ -20,7 +20,6 @@
1232 + #include "main.h"
1233 +
1234 + #include <linux/atomic.h>
1235 +-#include <linux/bug.h>
1236 + #include <linux/byteorder/generic.h>
1237 + #include <linux/errno.h>
1238 + #include <linux/gfp.h>
1239 +@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
1240 + parent_dev = __dev_get_by_index((struct net *)parent_net,
1241 + dev_get_iflink(net_dev));
1242 + /* if we got a NULL parent_dev there is something broken.. */
1243 +- if (WARN(!parent_dev, "Cannot find parent device"))
1244 ++ if (!parent_dev) {
1245 ++ pr_err("Cannot find parent device\n");
1246 + return false;
1247 ++ }
1248 +
1249 + if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
1250 + return false;
1251 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
1252 +index 5db5a0a4c959..b85ca809e509 100644
1253 +--- a/net/batman-adv/soft-interface.c
1254 ++++ b/net/batman-adv/soft-interface.c
1255 +@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
1256 +
1257 + netif_trans_update(soft_iface);
1258 + vid = batadv_get_vid(skb, 0);
1259 ++
1260 ++ skb_reset_mac_header(skb);
1261 + ethhdr = eth_hdr(skb);
1262 +
1263 + switch (ntohs(ethhdr->h_proto)) {
1264 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
1265 +index 2f126eff275d..664f886f464d 100644
1266 +--- a/net/ceph/messenger.c
1267 ++++ b/net/ceph/messenger.c
1268 +@@ -3219,9 +3219,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
1269 + dout("con_keepalive %p\n", con);
1270 + mutex_lock(&con->mutex);
1271 + clear_standby(con);
1272 ++ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
1273 + mutex_unlock(&con->mutex);
1274 +- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
1275 +- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1276 ++
1277 ++ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
1278 + queue_con(con);
1279 + }
1280 + EXPORT_SYMBOL(ceph_con_keepalive);
1281 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
1282 +index 1f536ba573b4..65e511756e64 100644
1283 +--- a/net/mac80211/tx.c
1284 ++++ b/net/mac80211/tx.c
1285 +@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1286 + int head_need, bool may_encrypt)
1287 + {
1288 + struct ieee80211_local *local = sdata->local;
1289 ++ struct ieee80211_hdr *hdr;
1290 ++ bool enc_tailroom;
1291 + int tail_need = 0;
1292 +
1293 +- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
1294 ++ hdr = (struct ieee80211_hdr *) skb->data;
1295 ++ enc_tailroom = may_encrypt &&
1296 ++ (sdata->crypto_tx_tailroom_needed_cnt ||
1297 ++ ieee80211_is_mgmt(hdr->frame_control));
1298 ++
1299 ++ if (enc_tailroom) {
1300 + tail_need = IEEE80211_ENCRYPT_TAILROOM;
1301 + tail_need -= skb_tailroom(skb);
1302 + tail_need = max_t(int, tail_need, 0);
1303 +@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1304 +
1305 + if (skb_cloned(skb) &&
1306 + (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
1307 +- !skb_clone_writable(skb, ETH_HLEN) ||
1308 +- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
1309 ++ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
1310 + I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
1311 + else if (head_need || tail_need)
1312 + I802_DEBUG_INC(local->tx_expand_skb_head);
1313 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1314 +index 8602a5f1b515..e8ad7ddf347a 100644
1315 +--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1316 ++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
1317 +@@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
1318 + DMA_TO_DEVICE);
1319 + }
1320 +
1321 ++/* If the xdr_buf has more elements than the device can
1322 ++ * transmit in a single RDMA Send, then the reply will
1323 ++ * have to be copied into a bounce buffer.
1324 ++ */
1325 ++static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
1326 ++ struct xdr_buf *xdr,
1327 ++ __be32 *wr_lst)
1328 ++{
1329 ++ int elements;
1330 ++
1331 ++ /* xdr->head */
1332 ++ elements = 1;
1333 ++
1334 ++ /* xdr->pages */
1335 ++ if (!wr_lst) {
1336 ++ unsigned int remaining;
1337 ++ unsigned long pageoff;
1338 ++
1339 ++ pageoff = xdr->page_base & ~PAGE_MASK;
1340 ++ remaining = xdr->page_len;
1341 ++ while (remaining) {
1342 ++ ++elements;
1343 ++ remaining -= min_t(u32, PAGE_SIZE - pageoff,
1344 ++ remaining);
1345 ++ pageoff = 0;
1346 ++ }
1347 ++ }
1348 ++
1349 ++ /* xdr->tail */
1350 ++ if (xdr->tail[0].iov_len)
1351 ++ ++elements;
1352 ++
1353 ++ /* assume 1 SGE is needed for the transport header */
1354 ++ return elements >= rdma->sc_max_send_sges;
1355 ++}
1356 ++
1357 ++/* The device is not capable of sending the reply directly.
1358 ++ * Assemble the elements of @xdr into the transport header
1359 ++ * buffer.
1360 ++ */
1361 ++static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
1362 ++ struct svc_rdma_send_ctxt *ctxt,
1363 ++ struct xdr_buf *xdr, __be32 *wr_lst)
1364 ++{
1365 ++ unsigned char *dst, *tailbase;
1366 ++ unsigned int taillen;
1367 ++
1368 ++ dst = ctxt->sc_xprt_buf;
1369 ++ dst += ctxt->sc_sges[0].length;
1370 ++
1371 ++ memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
1372 ++ dst += xdr->head[0].iov_len;
1373 ++
1374 ++ tailbase = xdr->tail[0].iov_base;
1375 ++ taillen = xdr->tail[0].iov_len;
1376 ++ if (wr_lst) {
1377 ++ u32 xdrpad;
1378 ++
1379 ++ xdrpad = xdr_padsize(xdr->page_len);
1380 ++ if (taillen && xdrpad) {
1381 ++ tailbase += xdrpad;
1382 ++ taillen -= xdrpad;
1383 ++ }
1384 ++ } else {
1385 ++ unsigned int len, remaining;
1386 ++ unsigned long pageoff;
1387 ++ struct page **ppages;
1388 ++
1389 ++ ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
1390 ++ pageoff = xdr->page_base & ~PAGE_MASK;
1391 ++ remaining = xdr->page_len;
1392 ++ while (remaining) {
1393 ++ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
1394 ++
1395 ++ memcpy(dst, page_address(*ppages), len);
1396 ++ remaining -= len;
1397 ++ dst += len;
1398 ++ pageoff = 0;
1399 ++ }
1400 ++ }
1401 ++
1402 ++ if (taillen)
1403 ++ memcpy(dst, tailbase, taillen);
1404 ++
1405 ++ ctxt->sc_sges[0].length += xdr->len;
1406 ++ ib_dma_sync_single_for_device(rdma->sc_pd->device,
1407 ++ ctxt->sc_sges[0].addr,
1408 ++ ctxt->sc_sges[0].length,
1409 ++ DMA_TO_DEVICE);
1410 ++
1411 ++ return 0;
1412 ++}
1413 ++
1414 + /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
1415 + * @rdma: controlling transport
1416 + * @ctxt: send_ctxt for the Send WR
1417 +@@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1418 + u32 xdr_pad;
1419 + int ret;
1420 +
1421 +- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1422 +- return -EIO;
1423 ++ if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
1424 ++ return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
1425 ++
1426 ++ ++ctxt->sc_cur_sge_no;
1427 + ret = svc_rdma_dma_map_buf(rdma, ctxt,
1428 + xdr->head[0].iov_base,
1429 + xdr->head[0].iov_len);
1430 +@@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1431 + while (remaining) {
1432 + len = min_t(u32, PAGE_SIZE - page_off, remaining);
1433 +
1434 +- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1435 +- return -EIO;
1436 ++ ++ctxt->sc_cur_sge_no;
1437 + ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
1438 + page_off, len);
1439 + if (ret < 0)
1440 +@@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
1441 + len = xdr->tail[0].iov_len;
1442 + tail:
1443 + if (len) {
1444 +- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
1445 +- return -EIO;
1446 ++ ++ctxt->sc_cur_sge_no;
1447 + ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
1448 + if (ret < 0)
1449 + return ret;
1450 +diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1451 +index 2f7ec8912f49..ce5c610b49c7 100644
1452 +--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
1453 ++++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
1454 +@@ -478,12 +478,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
1455 + /* Transport header, head iovec, tail iovec */
1456 + newxprt->sc_max_send_sges = 3;
1457 + /* Add one SGE per page list entry */
1458 +- newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
1459 +- if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
1460 +- pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
1461 +- newxprt->sc_max_send_sges);
1462 +- goto errout;
1463 +- }
1464 ++ newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
1465 ++ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
1466 ++ newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
1467 + newxprt->sc_max_req_size = svcrdma_max_req_size;
1468 + newxprt->sc_max_requests = svcrdma_max_requests;
1469 + newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
1470 +diff --git a/net/wireless/ap.c b/net/wireless/ap.c
1471 +index 882d97bdc6bf..550ac9d827fe 100644
1472 +--- a/net/wireless/ap.c
1473 ++++ b/net/wireless/ap.c
1474 +@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
1475 + cfg80211_sched_dfs_chan_update(rdev);
1476 + }
1477 +
1478 ++ schedule_work(&cfg80211_disconnect_work);
1479 ++
1480 + return err;
1481 + }
1482 +
1483 +diff --git a/net/wireless/core.h b/net/wireless/core.h
1484 +index c61dbba8bf47..7f4d5f2f9112 100644
1485 +--- a/net/wireless/core.h
1486 ++++ b/net/wireless/core.h
1487 +@@ -444,6 +444,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
1488 + bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
1489 + u32 center_freq_khz, u32 bw_khz);
1490 +
1491 ++extern struct work_struct cfg80211_disconnect_work;
1492 ++
1493 + /**
1494 + * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
1495 + * @wiphy: the wiphy to validate against
1496 +diff --git a/net/wireless/sme.c b/net/wireless/sme.c
1497 +index f741d8376a46..7d34cb884840 100644
1498 +--- a/net/wireless/sme.c
1499 ++++ b/net/wireless/sme.c
1500 +@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
1501 + rtnl_unlock();
1502 + }
1503 +
1504 +-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
1505 ++DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
1506 +
1507 +
1508 + /*
1509 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1510 +index 119a427d9b2b..6ea8036fcdbe 100644
1511 +--- a/net/xfrm/xfrm_policy.c
1512 ++++ b/net/xfrm/xfrm_policy.c
1513 +@@ -1628,7 +1628,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1514 + dst_copy_metrics(dst1, dst);
1515 +
1516 + if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1517 +- __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1518 ++ __u32 mark = 0;
1519 ++
1520 ++ if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
1521 ++ mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1522 +
1523 + family = xfrm[i]->props.family;
1524 + dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1525 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
1526 +index 277c1c46fe94..c6d26afcf89d 100644
1527 +--- a/net/xfrm/xfrm_user.c
1528 ++++ b/net/xfrm/xfrm_user.c
1529 +@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1530 + if (!ut[i].family)
1531 + ut[i].family = family;
1532 +
1533 +- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
1534 +- (ut[i].family != prev_family))
1535 +- return -EINVAL;
1536 +-
1537 ++ switch (ut[i].mode) {
1538 ++ case XFRM_MODE_TUNNEL:
1539 ++ case XFRM_MODE_BEET:
1540 ++ break;
1541 ++ default:
1542 ++ if (ut[i].family != prev_family)
1543 ++ return -EINVAL;
1544 ++ break;
1545 ++ }
1546 + if (ut[i].mode >= XFRM_MODE_MAX)
1547 + return -EINVAL;
1548 +
1549 +diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
1550 +index 33e67bd1dc34..32234481ad7d 100644
1551 +--- a/samples/mei/mei-amt-version.c
1552 ++++ b/samples/mei/mei-amt-version.c
1553 +@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
1554 +
1555 + me->verbose = verbose;
1556 +
1557 +- me->fd = open("/dev/mei", O_RDWR);
1558 ++ me->fd = open("/dev/mei0", O_RDWR);
1559 + if (me->fd == -1) {
1560 + mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
1561 + goto err;
1562 +diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
1563 +index 3040830d7797..84545666a09c 100644
1564 +--- a/tools/iio/iio_generic_buffer.c
1565 ++++ b/tools/iio/iio_generic_buffer.c
1566 +@@ -330,7 +330,7 @@ static const struct option longopts[] = {
1567 +
1568 + int main(int argc, char **argv)
1569 + {
1570 +- unsigned long long num_loops = 2;
1571 ++ long long num_loops = 2;
1572 + unsigned long timedelay = 1000000;
1573 + unsigned long buf_len = 128;
1574 +
1575
1576 diff --git a/1009_linux-4.20.10.patch b/1009_linux-4.20.10.patch
1577 new file mode 100644
1578 index 0000000..fd23d1c
1579 --- /dev/null
1580 +++ b/1009_linux-4.20.10.patch
1581 @@ -0,0 +1,35 @@
1582 +diff --git a/Makefile b/Makefile
1583 +index c9b831f5e873..6f7a8172de44 100644
1584 +--- a/Makefile
1585 ++++ b/Makefile
1586 +@@ -1,7 +1,7 @@
1587 + # SPDX-License-Identifier: GPL-2.0
1588 + VERSION = 4
1589 + PATCHLEVEL = 20
1590 +-SUBLEVEL = 9
1591 ++SUBLEVEL = 10
1592 + EXTRAVERSION =
1593 + NAME = Shy Crocodile
1594 +
1595 +diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
1596 +index d0078cbb718b..7cde3f46ad26 100644
1597 +--- a/fs/binfmt_script.c
1598 ++++ b/fs/binfmt_script.c
1599 +@@ -42,14 +42,10 @@ static int load_script(struct linux_binprm *bprm)
1600 + fput(bprm->file);
1601 + bprm->file = NULL;
1602 +
1603 +- for (cp = bprm->buf+2;; cp++) {
1604 +- if (cp >= bprm->buf + BINPRM_BUF_SIZE)
1605 +- return -ENOEXEC;
1606 +- if (!*cp || (*cp == '\n'))
1607 +- break;
1608 +- }
1609 ++ bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
1610 ++ if ((cp = strchr(bprm->buf, '\n')) == NULL)
1611 ++ cp = bprm->buf+BINPRM_BUF_SIZE-1;
1612 + *cp = '\0';
1613 +-
1614 + while (cp > bprm->buf) {
1615 + cp--;
1616 + if ((*cp == ' ') || (*cp == '\t'))