From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Sat, 15 Sep 2018 10:10:31
Message-Id: 1537006219.6f513a9977bd1b9caccfe2fe767e0f50b1978803.mpagano@gentoo
1 commit: 6f513a9977bd1b9caccfe2fe767e0f50b1978803
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Sep 15 10:10:19 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Sat Sep 15 10:10:19 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6f513a99
7
8 Linux patch 4.9.127
9
10 0000_README | 4 +
11 1126_linux-4.9.127.patch | 1973 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 1977 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index eaa2495..ad1fe4a 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -547,6 +547,10 @@ Patch: 1125_linux-4.9.126.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.126
21
22 +Patch: 1126_linux-4.9.127.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.127
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1126_linux-4.9.127.patch b/1126_linux-4.9.127.patch
31 new file mode 100644
32 index 0000000..0cd0eae
33 --- /dev/null
34 +++ b/1126_linux-4.9.127.patch
35 @@ -0,0 +1,1973 @@
36 +diff --git a/Makefile b/Makefile
37 +index b26481fef3f0..4e37716ae395 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 126
44 ++SUBLEVEL = 127
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
49 +index 6b7d4f535984..8ec4dbbb50b0 100644
50 +--- a/arch/arm/configs/imx_v6_v7_defconfig
51 ++++ b/arch/arm/configs/imx_v6_v7_defconfig
52 +@@ -271,7 +271,6 @@ CONFIG_USB_STORAGE=y
53 + CONFIG_USB_CHIPIDEA=y
54 + CONFIG_USB_CHIPIDEA_UDC=y
55 + CONFIG_USB_CHIPIDEA_HOST=y
56 +-CONFIG_USB_CHIPIDEA_ULPI=y
57 + CONFIG_USB_SERIAL=m
58 + CONFIG_USB_SERIAL_GENERIC=y
59 + CONFIG_USB_SERIAL_FTDI_SIO=m
60 +@@ -308,7 +307,6 @@ CONFIG_USB_GADGETFS=m
61 + CONFIG_USB_FUNCTIONFS=m
62 + CONFIG_USB_MASS_STORAGE=m
63 + CONFIG_USB_G_SERIAL=m
64 +-CONFIG_USB_ULPI_BUS=y
65 + CONFIG_MMC=y
66 + CONFIG_MMC_SDHCI=y
67 + CONFIG_MMC_SDHCI_PLTFM=y
68 +diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
69 +index 9ad84cd01ba0..5ed8fa5a9825 100644
70 +--- a/arch/arm/mach-rockchip/Kconfig
71 ++++ b/arch/arm/mach-rockchip/Kconfig
72 +@@ -16,6 +16,7 @@ config ARCH_ROCKCHIP
73 + select ROCKCHIP_TIMER
74 + select ARM_GLOBAL_TIMER
75 + select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
76 ++ select PM
77 + help
78 + Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
79 + containing the RK2928, RK30xx and RK31xx series.
80 +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
81 +index 08a4497f70a6..3428a4ba2ccd 100644
82 +--- a/arch/arm64/Kconfig.platforms
83 ++++ b/arch/arm64/Kconfig.platforms
84 +@@ -125,6 +125,7 @@ config ARCH_ROCKCHIP
85 + select GPIOLIB
86 + select PINCTRL
87 + select PINCTRL_ROCKCHIP
88 ++ select PM
89 + select ROCKCHIP_TIMER
90 + help
91 + This enables support for the ARMv8 based Rockchip chipsets,
92 +diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
93 +index f5588692f1d4..877d4789dcb3 100644
94 +--- a/arch/arm64/include/asm/cachetype.h
95 ++++ b/arch/arm64/include/asm/cachetype.h
96 +@@ -22,6 +22,11 @@
97 + #define CTR_L1IP_MASK 3
98 + #define CTR_CWG_SHIFT 24
99 + #define CTR_CWG_MASK 15
100 ++#define CTR_DMINLINE_SHIFT 16
101 ++#define CTR_IMINLINE_SHIFT 0
102 ++
103 ++#define CTR_CACHE_MINLINE_MASK \
104 ++ ((0xf << CTR_DMINLINE_SHIFT) | (0xf << CTR_IMINLINE_SHIFT))
105 +
106 + #define ICACHE_POLICY_RESERVED 0
107 + #define ICACHE_POLICY_AIVIVT 1
108 +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
109 +index 7010779a1429..8c7c4b23a8b1 100644
110 +--- a/arch/arm64/include/asm/cpucaps.h
111 ++++ b/arch/arm64/include/asm/cpucaps.h
112 +@@ -37,7 +37,8 @@
113 + #define ARM64_UNMAP_KERNEL_AT_EL0 16
114 + #define ARM64_HARDEN_BRANCH_PREDICTOR 17
115 + #define ARM64_SSBD 18
116 ++#define ARM64_MISMATCHED_CACHE_TYPE 19
117 +
118 +-#define ARM64_NCAPS 19
119 ++#define ARM64_NCAPS 20
120 +
121 + #endif /* __ASM_CPUCAPS_H */
122 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
123 +index 1db97ad7b58b..930e74d9fcbd 100644
124 +--- a/arch/arm64/kernel/cpu_errata.c
125 ++++ b/arch/arm64/kernel/cpu_errata.c
126 +@@ -17,6 +17,7 @@
127 + */
128 +
129 + #include <linux/types.h>
130 ++#include <asm/cachetype.h>
131 + #include <asm/cpu.h>
132 + #include <asm/cputype.h>
133 + #include <asm/cpufeature.h>
134 +@@ -31,12 +32,18 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
135 + }
136 +
137 + static bool
138 +-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
139 +- int scope)
140 ++has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
141 ++ int scope)
142 + {
143 ++ u64 mask = CTR_CACHE_MINLINE_MASK;
144 ++
145 ++ /* Skip matching the min line sizes for cache type check */
146 ++ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
147 ++ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
148 ++
149 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
150 +- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
151 +- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
152 ++ return (read_cpuid_cachetype() & mask) !=
153 ++ (arm64_ftr_reg_ctrel0.sys_val & mask);
154 + }
155 +
156 + static int cpu_enable_trap_ctr_access(void *__unused)
157 +@@ -446,7 +453,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
158 + {
159 + .desc = "Mismatched cache line size",
160 + .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
161 +- .matches = has_mismatched_cache_line_size,
162 ++ .matches = has_mismatched_cache_type,
163 ++ .def_scope = SCOPE_LOCAL_CPU,
164 ++ .enable = cpu_enable_trap_ctr_access,
165 ++ },
166 ++ {
167 ++ .desc = "Mismatched cache type",
168 ++ .capability = ARM64_MISMATCHED_CACHE_TYPE,
169 ++ .matches = has_mismatched_cache_type,
170 + .def_scope = SCOPE_LOCAL_CPU,
171 + .enable = cpu_enable_trap_ctr_access,
172 + },
173 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
174 +index ab15747a49d4..a3ab7dfad50a 100644
175 +--- a/arch/arm64/kernel/cpufeature.c
176 ++++ b/arch/arm64/kernel/cpufeature.c
177 +@@ -152,7 +152,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
178 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
179 + ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
180 + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
181 +- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
182 ++ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
183 + /*
184 + * Linux can handle differing I-cache policies. Userspace JITs will
185 + * make use of *minLine.
186 +@@ -160,7 +160,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
187 + */
188 + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */
189 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */
190 +- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
191 ++ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
192 + ARM64_FTR_END,
193 + };
194 +
195 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
196 +index 34989ce43147..8799d8a83d56 100644
197 +--- a/arch/powerpc/platforms/pseries/ras.c
198 ++++ b/arch/powerpc/platforms/pseries/ras.c
199 +@@ -357,7 +357,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
200 + int len, error_log_length;
201 +
202 + error_log_length = 8 + rtas_error_extended_log_length(h);
203 +- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
204 ++ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
205 + memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
206 + memcpy(global_mce_data_buf, h, len);
207 + errhdr = (struct rtas_error_log *)global_mce_data_buf;
208 +diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
209 +index db2286be5d9a..47fb336741d4 100644
210 +--- a/arch/powerpc/sysdev/mpic_msgr.c
211 ++++ b/arch/powerpc/sysdev/mpic_msgr.c
212 +@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
213 +
214 + /* IO map the message register block. */
215 + of_address_to_resource(np, 0, &rsrc);
216 +- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
217 ++ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
218 + if (!msgr_block_addr) {
219 + dev_err(&dev->dev, "Failed to iomap MPIC message registers");
220 + return -EFAULT;
221 +diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
222 +index 598254461fb7..167135294ca5 100644
223 +--- a/arch/s390/kernel/crash_dump.c
224 ++++ b/arch/s390/kernel/crash_dump.c
225 +@@ -401,11 +401,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
226 + if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
227 + sizeof(nt_name) - 1))
228 + return NULL;
229 +- if (strcmp(nt_name, "VMCOREINFO") != 0)
230 ++ if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
231 + return NULL;
232 + vmcoreinfo = kzalloc_panic(note.n_descsz);
233 +- if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
234 ++ if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
235 ++ kfree(vmcoreinfo);
236 + return NULL;
237 ++ }
238 + *size = note.n_descsz;
239 + return vmcoreinfo;
240 + }
241 +@@ -415,15 +417,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
242 + */
243 + static void *nt_vmcoreinfo(void *ptr)
244 + {
245 ++ const char *name = VMCOREINFO_NOTE_NAME;
246 + unsigned long size;
247 + void *vmcoreinfo;
248 +
249 + vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
250 +- if (!vmcoreinfo)
251 +- vmcoreinfo = get_vmcoreinfo_old(&size);
252 ++ if (vmcoreinfo)
253 ++ return nt_init_name(ptr, 0, vmcoreinfo, size, name);
254 ++
255 ++ vmcoreinfo = get_vmcoreinfo_old(&size);
256 + if (!vmcoreinfo)
257 + return ptr;
258 +- return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
259 ++ ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
260 ++ kfree(vmcoreinfo);
261 ++ return ptr;
262 + }
263 +
264 + /*
265 +diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
266 +index e7672edc284a..5ff0520784f2 100644
267 +--- a/arch/s390/lib/mem.S
268 ++++ b/arch/s390/lib/mem.S
269 +@@ -27,7 +27,7 @@
270 + */
271 + ENTRY(memset)
272 + ltgr %r4,%r4
273 +- bzr %r14
274 ++ jz .Lmemset_exit
275 + ltgr %r3,%r3
276 + jnz .Lmemset_fill
277 + aghi %r4,-1
278 +@@ -42,12 +42,13 @@ ENTRY(memset)
279 + .Lmemset_clear_rest:
280 + larl %r3,.Lmemset_xc
281 + ex %r4,0(%r3)
282 ++.Lmemset_exit:
283 + BR_EX %r14
284 + .Lmemset_fill:
285 + stc %r3,0(%r2)
286 + cghi %r4,1
287 + lgr %r1,%r2
288 +- ber %r14
289 ++ je .Lmemset_fill_exit
290 + aghi %r4,-2
291 + srlg %r3,%r4,8
292 + ltgr %r3,%r3
293 +@@ -59,6 +60,7 @@ ENTRY(memset)
294 + .Lmemset_fill_rest:
295 + larl %r3,.Lmemset_mvc
296 + ex %r4,0(%r3)
297 ++.Lmemset_fill_exit:
298 + BR_EX %r14
299 + .Lmemset_xc:
300 + xc 0(1,%r1),0(%r1)
301 +@@ -73,7 +75,7 @@ EXPORT_SYMBOL(memset)
302 + */
303 + ENTRY(memcpy)
304 + ltgr %r4,%r4
305 +- bzr %r14
306 ++ jz .Lmemcpy_exit
307 + aghi %r4,-1
308 + srlg %r5,%r4,8
309 + ltgr %r5,%r5
310 +@@ -82,6 +84,7 @@ ENTRY(memcpy)
311 + .Lmemcpy_rest:
312 + larl %r5,.Lmemcpy_mvc
313 + ex %r4,0(%r5)
314 ++.Lmemcpy_exit:
315 + BR_EX %r14
316 + .Lmemcpy_loop:
317 + mvc 0(256,%r1),0(%r3)
318 +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
319 +index 5c686382d84b..095dbc25122a 100644
320 +--- a/arch/x86/include/asm/pgtable-3level.h
321 ++++ b/arch/x86/include/asm/pgtable-3level.h
322 +@@ -1,6 +1,8 @@
323 + #ifndef _ASM_X86_PGTABLE_3LEVEL_H
324 + #define _ASM_X86_PGTABLE_3LEVEL_H
325 +
326 ++#include <asm/atomic64_32.h>
327 ++
328 + /*
329 + * Intel Physical Address Extension (PAE) Mode - three-level page
330 + * tables on PPro+ CPUs.
331 +@@ -142,10 +144,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
332 + {
333 + pte_t res;
334 +
335 +- /* xchg acts as a barrier before the setting of the high bits */
336 +- res.pte_low = xchg(&ptep->pte_low, 0);
337 +- res.pte_high = ptep->pte_high;
338 +- ptep->pte_high = 0;
339 ++ res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
340 +
341 + return res;
342 + }
343 +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
344 +index c535012bdb56..5736306bdaab 100644
345 +--- a/arch/x86/include/asm/pgtable.h
346 ++++ b/arch/x86/include/asm/pgtable.h
347 +@@ -420,7 +420,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
348 +
349 + static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
350 + {
351 +- phys_addr_t pfn = page_nr << PAGE_SHIFT;
352 ++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
353 + pfn ^= protnone_mask(pgprot_val(pgprot));
354 + pfn &= PHYSICAL_PUD_PAGE_MASK;
355 + return __pud(pfn | massage_pgprot(pgprot));
356 +diff --git a/block/bio.c b/block/bio.c
357 +index 4f93345c6a82..68972e3d3f5c 100644
358 +--- a/block/bio.c
359 ++++ b/block/bio.c
360 +@@ -155,7 +155,7 @@ out:
361 +
362 + unsigned int bvec_nr_vecs(unsigned short idx)
363 + {
364 +- return bvec_slabs[idx].nr_vecs;
365 ++ return bvec_slabs[--idx].nr_vecs;
366 + }
367 +
368 + void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
369 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
370 +index 145dcf293c6f..0792ec5a9efc 100644
371 +--- a/drivers/acpi/scan.c
372 ++++ b/drivers/acpi/scan.c
373 +@@ -1453,7 +1453,8 @@ static int acpi_add_single_object(struct acpi_device **child,
374 + * Note this must be done before the get power-/wakeup_dev-flags calls.
375 + */
376 + if (type == ACPI_BUS_TYPE_DEVICE)
377 +- acpi_bus_get_status(device);
378 ++ if (acpi_bus_get_status(device) < 0)
379 ++ acpi_set_device_status(device, 0);
380 +
381 + acpi_bus_get_power_flags(device);
382 + acpi_bus_get_wakeup_device_flags(device);
383 +@@ -1531,7 +1532,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
384 + * acpi_add_single_object updates this once we've an acpi_device
385 + * so that acpi_bus_get_status' quirk handling can be used.
386 + */
387 +- *sta = 0;
388 ++ *sta = ACPI_STA_DEFAULT;
389 + break;
390 + case ACPI_TYPE_PROCESSOR:
391 + *type = ACPI_BUS_TYPE_PROCESSOR;
392 +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
393 +index 05671c03efe2..410998800af5 100644
394 +--- a/drivers/clk/rockchip/clk-rk3399.c
395 ++++ b/drivers/clk/rockchip/clk-rk3399.c
396 +@@ -1521,6 +1521,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
397 + "pclk_pmu_src",
398 + "fclk_cm0s_src_pmu",
399 + "clk_timer_src_pmu",
400 ++ "pclk_rkpwm_pmu",
401 + };
402 +
403 + static void __init rk3399_clk_init(struct device_node *np)
404 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
405 +index 6b31e0474271..37ba5f51378e 100644
406 +--- a/drivers/gpu/drm/drm_edid.c
407 ++++ b/drivers/gpu/drm/drm_edid.c
408 +@@ -110,6 +110,9 @@ static const struct edid_quirk {
409 + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
410 + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
411 +
412 ++ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
413 ++ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
414 ++
415 + /* Belinea 10 15 55 */
416 + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
417 + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
418 +diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
419 +index 05db7d59812a..da61ce82c3d9 100644
420 +--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
421 ++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
422 +@@ -35,7 +35,7 @@
423 +
424 + static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
425 + {
426 +- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
427 ++ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
428 + }
429 +
430 + static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
431 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
432 +index e86dd8d06777..33cf1035030b 100644
433 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
434 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
435 +@@ -114,7 +114,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
436 + {
437 + struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
438 +
439 +- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
440 ++ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
441 ++ base) ?
442 ++ -ENOMEM :
443 ++ 0;
444 + }
445 +
446 + enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
447 +diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
448 +index c2662a1bfdd3..6e24facebb46 100644
449 +--- a/drivers/irqchip/irq-bcm7038-l1.c
450 ++++ b/drivers/irqchip/irq-bcm7038-l1.c
451 +@@ -215,6 +215,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
452 + return 0;
453 + }
454 +
455 ++#ifdef CONFIG_SMP
456 + static void bcm7038_l1_cpu_offline(struct irq_data *d)
457 + {
458 + struct cpumask *mask = irq_data_get_affinity_mask(d);
459 +@@ -239,6 +240,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
460 + }
461 + irq_set_affinity_locked(d, &new_affinity, false);
462 + }
463 ++#endif
464 +
465 + static int __init bcm7038_l1_init_one(struct device_node *dn,
466 + unsigned int idx,
467 +@@ -291,7 +293,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
468 + .irq_mask = bcm7038_l1_mask,
469 + .irq_unmask = bcm7038_l1_unmask,
470 + .irq_set_affinity = bcm7038_l1_set_affinity,
471 ++#ifdef CONFIG_SMP
472 + .irq_cpu_offline = bcm7038_l1_cpu_offline,
473 ++#endif
474 + };
475 +
476 + static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
477 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
478 +index 9e9d04cb7d51..56fcccc30554 100644
479 +--- a/drivers/md/dm-kcopyd.c
480 ++++ b/drivers/md/dm-kcopyd.c
481 +@@ -454,6 +454,8 @@ static int run_complete_job(struct kcopyd_job *job)
482 + if (atomic_dec_and_test(&kc->nr_jobs))
483 + wake_up(&kc->destroyq);
484 +
485 ++ cond_resched();
486 ++
487 + return 0;
488 + }
489 +
490 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
491 +index 40534352e574..3270b8dbc949 100644
492 +--- a/drivers/mfd/sm501.c
493 ++++ b/drivers/mfd/sm501.c
494 +@@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
495 + smdev->pdev.name = name;
496 + smdev->pdev.id = sm->pdev_id;
497 + smdev->pdev.dev.parent = sm->dev;
498 ++ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
499 +
500 + if (res_count) {
501 + smdev->pdev.resource = (struct resource *)(smdev+1);
502 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
503 +index f9c6ec4b98ab..013a7b3fe92d 100644
504 +--- a/drivers/misc/mei/pci-me.c
505 ++++ b/drivers/misc/mei/pci-me.c
506 +@@ -229,8 +229,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
507 + if (!pci_dev_run_wake(pdev))
508 + mei_me_set_pm_domain(dev);
509 +
510 +- if (mei_pg_is_enabled(dev))
511 ++ if (mei_pg_is_enabled(dev)) {
512 + pm_runtime_put_noidle(&pdev->dev);
513 ++ if (hw->d0i3_supported)
514 ++ pm_runtime_allow(&pdev->dev);
515 ++ }
516 +
517 + dev_dbg(&pdev->dev, "initialization successful.\n");
518 +
519 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
520 +index db7f289d65ae..3f8858db12eb 100644
521 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
522 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
523 +@@ -185,6 +185,9 @@ struct bcmgenet_mib_counters {
524 + #define UMAC_MAC1 0x010
525 + #define UMAC_MAX_FRAME_LEN 0x014
526 +
527 ++#define UMAC_MODE 0x44
528 ++#define MODE_LINK_STATUS (1 << 5)
529 ++
530 + #define UMAC_EEE_CTRL 0x064
531 + #define EN_LPI_RX_PAUSE (1 << 0)
532 + #define EN_LPI_TX_PFC (1 << 1)
533 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
534 +index 2f9281936f0e..3b9e1a5dce82 100644
535 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
536 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
537 +@@ -167,8 +167,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
538 + static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
539 + struct fixed_phy_status *status)
540 + {
541 +- if (dev && dev->phydev && status)
542 +- status->link = dev->phydev->link;
543 ++ struct bcmgenet_priv *priv;
544 ++ u32 reg;
545 ++
546 ++ if (dev && dev->phydev && status) {
547 ++ priv = netdev_priv(dev);
548 ++ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
549 ++ status->link = !!(reg & MODE_LINK_STATUS);
550 ++ }
551 +
552 + return 0;
553 + }
554 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
555 +index f7e7b79c6050..f314be07ec58 100644
556 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
557 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
558 +@@ -2681,7 +2681,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
559 + */
560 +
561 + enic->port_mtu = enic->config.mtu;
562 +- (void)enic_change_mtu(netdev, enic->port_mtu);
563 +
564 + err = enic_set_mac_addr(netdev, enic->mac_addr);
565 + if (err) {
566 +@@ -2731,6 +2730,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
567 + netdev->features |= NETIF_F_HIGHDMA;
568 +
569 + netdev->priv_flags |= IFF_UNICAST_FLT;
570 ++ netdev->mtu = enic->port_mtu;
571 +
572 + err = register_netdev(netdev);
573 + if (err) {
574 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
575 +index fd4a8e473f11..6a507544682f 100644
576 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
577 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
578 +@@ -2387,26 +2387,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
579 + return status;
580 + }
581 +
582 +-static netdev_features_t qlge_fix_features(struct net_device *ndev,
583 +- netdev_features_t features)
584 +-{
585 +- int err;
586 +-
587 +- /* Update the behavior of vlan accel in the adapter */
588 +- err = qlge_update_hw_vlan_features(ndev, features);
589 +- if (err)
590 +- return err;
591 +-
592 +- return features;
593 +-}
594 +-
595 + static int qlge_set_features(struct net_device *ndev,
596 + netdev_features_t features)
597 + {
598 + netdev_features_t changed = ndev->features ^ features;
599 ++ int err;
600 ++
601 ++ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
602 ++ /* Update the behavior of vlan accel in the adapter */
603 ++ err = qlge_update_hw_vlan_features(ndev, features);
604 ++ if (err)
605 ++ return err;
606 +
607 +- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
608 + qlge_vlan_mode(ndev, features);
609 ++ }
610 +
611 + return 0;
612 + }
613 +@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
614 + .ndo_set_mac_address = qlge_set_mac_address,
615 + .ndo_validate_addr = eth_validate_addr,
616 + .ndo_tx_timeout = qlge_tx_timeout,
617 +- .ndo_fix_features = qlge_fix_features,
618 + .ndo_set_features = qlge_set_features,
619 + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
620 + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
621 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
622 +index 59b932db0d42..f65e8cd6d144 100644
623 +--- a/drivers/net/ethernet/realtek/r8169.c
624 ++++ b/drivers/net/ethernet/realtek/r8169.c
625 +@@ -329,6 +329,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
626 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
627 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
628 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
629 ++ { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
630 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
631 + { PCI_VENDOR_ID_DLINK, 0x4300,
632 + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
633 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
634 +index 36a04e182af1..53602fdf5b47 100644
635 +--- a/drivers/net/hyperv/netvsc_drv.c
636 ++++ b/drivers/net/hyperv/netvsc_drv.c
637 +@@ -29,6 +29,7 @@
638 + #include <linux/netdevice.h>
639 + #include <linux/inetdevice.h>
640 + #include <linux/etherdevice.h>
641 ++#include <linux/pci.h>
642 + #include <linux/skbuff.h>
643 + #include <linux/if_vlan.h>
644 + #include <linux/in.h>
645 +@@ -1228,11 +1229,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
646 + {
647 + struct net_device *ndev;
648 + struct net_device_context *net_device_ctx;
649 ++ struct device *pdev = vf_netdev->dev.parent;
650 + struct netvsc_device *netvsc_dev;
651 +
652 + if (vf_netdev->addr_len != ETH_ALEN)
653 + return NOTIFY_DONE;
654 +
655 ++ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
656 ++ return NOTIFY_DONE;
657 ++
658 + /*
659 + * We will use the MAC address to locate the synthetic interface to
660 + * associate with the VF interface. If we don't find a matching
661 +diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
662 +index 90e0b6f134ad..23d7f73cc347 100644
663 +--- a/drivers/pci/host/pci-mvebu.c
664 ++++ b/drivers/pci/host/pci-mvebu.c
665 +@@ -1236,7 +1236,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
666 + pcie->realio.start = PCIBIOS_MIN_IO;
667 + pcie->realio.end = min_t(resource_size_t,
668 + IO_SPACE_LIMIT,
669 +- resource_size(&pcie->io));
670 ++ resource_size(&pcie->io) - 1);
671 + } else
672 + pcie->realio = pcie->io;
673 +
674 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
675 +index 687cc5b922ee..c857d2d7bbec 100644
676 +--- a/drivers/platform/x86/asus-nb-wmi.c
677 ++++ b/drivers/platform/x86/asus-nb-wmi.c
678 +@@ -531,6 +531,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
679 + { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
680 + { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
681 + { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
682 ++ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
683 + { KE_END, 0},
684 + };
685 +
686 +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
687 +index b5b890127479..b7dfe06261f1 100644
688 +--- a/drivers/platform/x86/intel_punit_ipc.c
689 ++++ b/drivers/platform/x86/intel_punit_ipc.c
690 +@@ -17,6 +17,7 @@
691 + #include <linux/bitops.h>
692 + #include <linux/device.h>
693 + #include <linux/interrupt.h>
694 ++#include <linux/io.h>
695 + #include <linux/platform_device.h>
696 + #include <asm/intel_punit_ipc.h>
697 +
698 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
699 +index 0f5bc2f8382b..be17de9807b6 100644
700 +--- a/drivers/s390/block/dasd_eckd.c
701 ++++ b/drivers/s390/block/dasd_eckd.c
702 +@@ -1834,6 +1834,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
703 + struct dasd_eckd_private *private = device->private;
704 + int i;
705 +
706 ++ if (!private)
707 ++ return;
708 ++
709 + dasd_alias_disconnect_device_from_lcu(device);
710 + private->ned = NULL;
711 + private->sneq = NULL;
712 +@@ -2085,8 +2088,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
713 +
714 + static int dasd_eckd_online_to_ready(struct dasd_device *device)
715 + {
716 +- cancel_work_sync(&device->reload_device);
717 +- cancel_work_sync(&device->kick_validate);
718 ++ if (cancel_work_sync(&device->reload_device))
719 ++ dasd_put_device(device);
720 ++ if (cancel_work_sync(&device->kick_validate))
721 ++ dasd_put_device(device);
722 ++
723 + return 0;
724 + };
725 +
726 +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
727 +index 662b2321d1b0..913ebb6d0d29 100644
728 +--- a/drivers/scsi/aic94xx/aic94xx_init.c
729 ++++ b/drivers/scsi/aic94xx/aic94xx_init.c
730 +@@ -1031,8 +1031,10 @@ static int __init aic94xx_init(void)
731 +
732 + aic94xx_transport_template =
733 + sas_domain_attach_transport(&aic94xx_transport_functions);
734 +- if (!aic94xx_transport_template)
735 ++ if (!aic94xx_transport_template) {
736 ++ err = -ENOMEM;
737 + goto out_destroy_caches;
738 ++ }
739 +
740 + err = pci_register_driver(&aic94xx_pci_driver);
741 + if (err)
742 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
743 +index 18c5312f7886..0fa85d55c82f 100644
744 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
745 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
746 +@@ -5407,11 +5407,11 @@ static int ni_E_init(struct comedi_device *dev,
747 + /* Digital I/O (PFI) subdevice */
748 + s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
749 + s->type = COMEDI_SUBD_DIO;
750 +- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
751 + s->maxdata = 1;
752 + if (devpriv->is_m_series) {
753 + s->n_chan = 16;
754 + s->insn_bits = ni_pfi_insn_bits;
755 ++ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
756 +
757 + ni_writew(dev, s->state, NI_M_PFI_DO_REG);
758 + for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
759 +@@ -5420,6 +5420,7 @@ static int ni_E_init(struct comedi_device *dev,
760 + }
761 + } else {
762 + s->n_chan = 10;
763 ++ s->subdev_flags = SDF_INTERNAL;
764 + }
765 + s->insn_config = ni_pfi_insn_config;
766 +
767 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
768 +index 8b6489ae74eb..c569b6454a9d 100644
769 +--- a/drivers/vhost/vhost.c
770 ++++ b/drivers/vhost/vhost.c
771 +@@ -905,7 +905,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
772 + list_for_each_entry_safe(node, n, &d->pending_list, node) {
773 + struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
774 + if (msg->iova <= vq_msg->iova &&
775 +- msg->iova + msg->size - 1 > vq_msg->iova &&
776 ++ msg->iova + msg->size - 1 >= vq_msg->iova &&
777 + vq_msg->type == VHOST_IOTLB_MISS) {
778 + vhost_poll_queue(&node->vq->poll);
779 + list_del(&node->node);
780 +diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
781 +index 6d9e5173d5fa..fbc4761987e8 100644
782 +--- a/drivers/virtio/virtio_pci_legacy.c
783 ++++ b/drivers/virtio/virtio_pci_legacy.c
784 +@@ -121,6 +121,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
785 + struct virtqueue *vq;
786 + u16 num;
787 + int err;
788 ++ u64 q_pfn;
789 +
790 + /* Select the queue we're interested in */
791 + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
792 +@@ -139,9 +140,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
793 + if (!vq)
794 + return ERR_PTR(-ENOMEM);
795 +
796 ++ q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
797 ++ if (q_pfn >> 32) {
798 ++ dev_err(&vp_dev->pci_dev->dev,
799 ++ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
800 ++ 0x1ULL << (32 + PAGE_SHIFT - 30));
801 ++ err = -E2BIG;
802 ++ goto out_del_vq;
803 ++ }
804 ++
805 + /* activate the queue */
806 +- iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
807 +- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
808 ++ iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
809 +
810 + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
811 +
812 +@@ -158,6 +167,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
813 +
814 + out_deactivate:
815 + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
816 ++out_del_vq:
817 + vring_del_virtqueue(vq);
818 + return ERR_PTR(err);
819 + }
820 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
821 +index 05169ef30596..b450adf65236 100644
822 +--- a/fs/btrfs/dev-replace.c
823 ++++ b/fs/btrfs/dev-replace.c
824 +@@ -585,6 +585,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
825 +
826 + btrfs_rm_dev_replace_unblocked(fs_info);
827 +
828 ++ /*
829 ++ * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
830 ++ * update on-disk dev stats value during commit transaction
831 ++ */
832 ++ atomic_inc(&tgt_device->dev_stats_ccnt);
833 ++
834 + /*
835 + * this is again a consistent state where no dev_replace procedure
836 + * is running, the target device is part of the filesystem, the
837 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
838 +index 92f3b231d5a2..18d05323ca53 100644
839 +--- a/fs/btrfs/disk-io.c
840 ++++ b/fs/btrfs/disk-io.c
841 +@@ -1096,8 +1096,9 @@ static int btree_writepages(struct address_space *mapping,
842 +
843 + fs_info = BTRFS_I(mapping->host)->root->fs_info;
844 + /* this is a bit racy, but that's ok */
845 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
846 +- BTRFS_DIRTY_METADATA_THRESH);
847 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
848 ++ BTRFS_DIRTY_METADATA_THRESH,
849 ++ fs_info->dirty_metadata_batch);
850 + if (ret < 0)
851 + return 0;
852 + }
853 +@@ -4107,8 +4108,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
854 + if (flush_delayed)
855 + btrfs_balance_delayed_items(root);
856 +
857 +- ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
858 +- BTRFS_DIRTY_METADATA_THRESH);
859 ++ ret = __percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
860 ++ BTRFS_DIRTY_METADATA_THRESH,
861 ++ root->fs_info->dirty_metadata_batch);
862 + if (ret > 0) {
863 + balance_dirty_pages_ratelimited(
864 + root->fs_info->btree_inode->i_mapping);
865 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
866 +index 44a43851404a..6661116c47d9 100644
867 +--- a/fs/btrfs/extent-tree.c
868 ++++ b/fs/btrfs/extent-tree.c
869 +@@ -10853,7 +10853,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
870 + /* Don't want to race with allocators so take the groups_sem */
871 + down_write(&space_info->groups_sem);
872 + spin_lock(&block_group->lock);
873 +- if (block_group->reserved ||
874 ++ if (block_group->reserved || block_group->pinned ||
875 + btrfs_block_group_used(&block_group->item) ||
876 + block_group->ro ||
877 + list_is_singular(&block_group->list)) {
878 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
879 +index 04c61bcf62e5..9140aede5869 100644
880 +--- a/fs/btrfs/relocation.c
881 ++++ b/fs/btrfs/relocation.c
882 +@@ -1325,18 +1325,19 @@ static void __del_reloc_root(struct btrfs_root *root)
883 + struct mapping_node *node = NULL;
884 + struct reloc_control *rc = root->fs_info->reloc_ctl;
885 +
886 +- spin_lock(&rc->reloc_root_tree.lock);
887 +- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
888 +- root->node->start);
889 +- if (rb_node) {
890 +- node = rb_entry(rb_node, struct mapping_node, rb_node);
891 +- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
892 ++ if (rc) {
893 ++ spin_lock(&rc->reloc_root_tree.lock);
894 ++ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
895 ++ root->node->start);
896 ++ if (rb_node) {
897 ++ node = rb_entry(rb_node, struct mapping_node, rb_node);
898 ++ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
899 ++ }
900 ++ spin_unlock(&rc->reloc_root_tree.lock);
901 ++ if (!node)
902 ++ return;
903 ++ BUG_ON((struct btrfs_root *)node->data != root);
904 + }
905 +- spin_unlock(&rc->reloc_root_tree.lock);
906 +-
907 +- if (!node)
908 +- return;
909 +- BUG_ON((struct btrfs_root *)node->data != root);
910 +
911 + spin_lock(&root->fs_info->trans_lock);
912 + list_del_init(&root->root_list);
913 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
914 +index ad8bd96093f7..e06468f8e041 100644
915 +--- a/fs/cifs/cifs_debug.c
916 ++++ b/fs/cifs/cifs_debug.c
917 +@@ -284,6 +284,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
918 + atomic_set(&totBufAllocCount, 0);
919 + atomic_set(&totSmBufAllocCount, 0);
920 + #endif /* CONFIG_CIFS_STATS2 */
921 ++ spin_lock(&GlobalMid_Lock);
922 ++ GlobalMaxActiveXid = 0;
923 ++ GlobalCurrentXid = 0;
924 ++ spin_unlock(&GlobalMid_Lock);
925 + spin_lock(&cifs_tcp_ses_lock);
926 + list_for_each(tmp1, &cifs_tcp_ses_list) {
927 + server = list_entry(tmp1, struct TCP_Server_Info,
928 +@@ -296,6 +300,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
929 + struct cifs_tcon,
930 + tcon_list);
931 + atomic_set(&tcon->num_smbs_sent, 0);
932 ++ spin_lock(&tcon->stat_lock);
933 ++ tcon->bytes_read = 0;
934 ++ tcon->bytes_written = 0;
935 ++ spin_unlock(&tcon->stat_lock);
936 + if (server->ops->clear_stats)
937 + server->ops->clear_stats(tcon);
938 + }
939 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
940 +index 967dfe656ced..e96a74da756f 100644
941 +--- a/fs/cifs/smb2misc.c
942 ++++ b/fs/cifs/smb2misc.c
943 +@@ -208,6 +208,13 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr)
944 + if (clc_len == 4 + len + 1)
945 + return 0;
946 +
947 ++ /*
948 ++ * Some windows servers (win2016) will pad also the final
949 ++ * PDU in a compound to 8 bytes.
950 ++ */
951 ++ if (((clc_len + 7) & ~7) == len)
952 ++ return 0;
953 ++
954 + /*
955 + * MacOS server pads after SMB2.1 write response with 3 bytes
956 + * of junk. Other servers match RFC1001 len to actual
957 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
958 +index 4ded64b8b43b..383cf8148fe7 100644
959 +--- a/fs/cifs/smb2pdu.c
960 ++++ b/fs/cifs/smb2pdu.c
961 +@@ -320,7 +320,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
962 + smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
963 +
964 + if (tcon != NULL) {
965 +-#ifdef CONFIG_CIFS_STATS2
966 ++#ifdef CONFIG_CIFS_STATS
967 + uint16_t com_code = le16_to_cpu(smb2_command);
968 + cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
969 + #endif
970 +diff --git a/fs/dcache.c b/fs/dcache.c
971 +index 461ff8f234e3..f903b86b06e5 100644
972 +--- a/fs/dcache.c
973 ++++ b/fs/dcache.c
974 +@@ -286,7 +286,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
975 + spin_unlock(&dentry->d_lock);
976 + name->name = p->name;
977 + } else {
978 +- memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
979 ++ memcpy(name->inline_name, dentry->d_iname,
980 ++ dentry->d_name.len + 1);
981 + spin_unlock(&dentry->d_lock);
982 + name->name = name->inline_name;
983 + }
984 +diff --git a/fs/fat/cache.c b/fs/fat/cache.c
985 +index 5d384921524d..f04b189fd90d 100644
986 +--- a/fs/fat/cache.c
987 ++++ b/fs/fat/cache.c
988 +@@ -224,7 +224,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
989 + int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
990 + {
991 + struct super_block *sb = inode->i_sb;
992 +- const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
993 ++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
994 ++ const int limit = sb->s_maxbytes >> sbi->cluster_bits;
995 + struct fat_entry fatent;
996 + struct fat_cache_id cid;
997 + int nr;
998 +@@ -233,6 +234,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
999 +
1000 + *fclus = 0;
1001 + *dclus = MSDOS_I(inode)->i_start;
1002 ++ if (!fat_valid_entry(sbi, *dclus)) {
1003 ++ fat_fs_error_ratelimit(sb,
1004 ++ "%s: invalid start cluster (i_pos %lld, start %08x)",
1005 ++ __func__, MSDOS_I(inode)->i_pos, *dclus);
1006 ++ return -EIO;
1007 ++ }
1008 + if (cluster == 0)
1009 + return 0;
1010 +
1011 +@@ -249,9 +256,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
1012 + /* prevent the infinite loop of cluster chain */
1013 + if (*fclus > limit) {
1014 + fat_fs_error_ratelimit(sb,
1015 +- "%s: detected the cluster chain loop"
1016 +- " (i_pos %lld)", __func__,
1017 +- MSDOS_I(inode)->i_pos);
1018 ++ "%s: detected the cluster chain loop (i_pos %lld)",
1019 ++ __func__, MSDOS_I(inode)->i_pos);
1020 + nr = -EIO;
1021 + goto out;
1022 + }
1023 +@@ -261,9 +267,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
1024 + goto out;
1025 + else if (nr == FAT_ENT_FREE) {
1026 + fat_fs_error_ratelimit(sb,
1027 +- "%s: invalid cluster chain (i_pos %lld)",
1028 +- __func__,
1029 +- MSDOS_I(inode)->i_pos);
1030 ++ "%s: invalid cluster chain (i_pos %lld)",
1031 ++ __func__, MSDOS_I(inode)->i_pos);
1032 + nr = -EIO;
1033 + goto out;
1034 + } else if (nr == FAT_ENT_EOF) {
1035 +diff --git a/fs/fat/fat.h b/fs/fat/fat.h
1036 +index e6b764a17a9c..437affe987c5 100644
1037 +--- a/fs/fat/fat.h
1038 ++++ b/fs/fat/fat.h
1039 +@@ -347,6 +347,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
1040 + fatent->fat_inode = NULL;
1041 + }
1042 +
1043 ++static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
1044 ++{
1045 ++ return FAT_START_ENT <= entry && entry < sbi->max_cluster;
1046 ++}
1047 ++
1048 + extern void fat_ent_access_init(struct super_block *sb);
1049 + extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
1050 + int entry);
1051 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
1052 +index 1d9a8c4e9de0..3b7644e43796 100644
1053 +--- a/fs/fat/fatent.c
1054 ++++ b/fs/fat/fatent.c
1055 +@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
1056 + {
1057 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
1058 + int bytes = entry + (entry >> 1);
1059 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
1060 ++ WARN_ON(!fat_valid_entry(sbi, entry));
1061 + *offset = bytes & (sb->s_blocksize - 1);
1062 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
1063 + }
1064 +@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
1065 + {
1066 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
1067 + int bytes = (entry << sbi->fatent_shift);
1068 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
1069 ++ WARN_ON(!fat_valid_entry(sbi, entry));
1070 + *offset = bytes & (sb->s_blocksize - 1);
1071 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
1072 + }
1073 +@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
1074 + int err, offset;
1075 + sector_t blocknr;
1076 +
1077 +- if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
1078 ++ if (!fat_valid_entry(sbi, entry)) {
1079 + fatent_brelse(fatent);
1080 + fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
1081 + return -EIO;
1082 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
1083 +index 6fc766df0461..2a6f3c67cb3f 100644
1084 +--- a/fs/hfs/brec.c
1085 ++++ b/fs/hfs/brec.c
1086 +@@ -74,9 +74,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
1087 + if (!fd->bnode) {
1088 + if (!tree->root)
1089 + hfs_btree_inc_height(tree);
1090 +- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
1091 +- if (IS_ERR(fd->bnode))
1092 +- return PTR_ERR(fd->bnode);
1093 ++ node = hfs_bnode_find(tree, tree->leaf_head);
1094 ++ if (IS_ERR(node))
1095 ++ return PTR_ERR(node);
1096 ++ fd->bnode = node;
1097 + fd->record = -1;
1098 + }
1099 + new_node = NULL;
1100 +diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
1101 +index 31d5e3f1fe17..193d5411210a 100644
1102 +--- a/fs/hfsplus/dir.c
1103 ++++ b/fs/hfsplus/dir.c
1104 +@@ -77,13 +77,13 @@ again:
1105 + cpu_to_be32(HFSP_HARDLINK_TYPE) &&
1106 + entry.file.user_info.fdCreator ==
1107 + cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
1108 ++ HFSPLUS_SB(sb)->hidden_dir &&
1109 + (entry.file.create_date ==
1110 + HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
1111 + create_date ||
1112 + entry.file.create_date ==
1113 + HFSPLUS_I(d_inode(sb->s_root))->
1114 +- create_date) &&
1115 +- HFSPLUS_SB(sb)->hidden_dir) {
1116 ++ create_date)) {
1117 + struct qstr str;
1118 + char name[32];
1119 +
1120 +diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
1121 +index b9563cdcfe28..7fb976e0aa07 100644
1122 +--- a/fs/hfsplus/super.c
1123 ++++ b/fs/hfsplus/super.c
1124 +@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
1125 + goto out_put_root;
1126 + if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
1127 + hfs_find_exit(&fd);
1128 +- if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
1129 ++ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
1130 ++ err = -EINVAL;
1131 + goto out_put_root;
1132 ++ }
1133 + inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
1134 + if (IS_ERR(inode)) {
1135 + err = PTR_ERR(inode);
1136 +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
1137 +index 6ca00471afbf..d920a646b578 100644
1138 +--- a/fs/reiserfs/reiserfs.h
1139 ++++ b/fs/reiserfs/reiserfs.h
1140 +@@ -270,7 +270,7 @@ struct reiserfs_journal_list {
1141 +
1142 + struct mutex j_commit_mutex;
1143 + unsigned int j_trans_id;
1144 +- time_t j_timestamp;
1145 ++ time64_t j_timestamp; /* write-only but useful for crash dump analysis */
1146 + struct reiserfs_list_bitmap *j_list_bitmap;
1147 + struct buffer_head *j_commit_bh; /* commit buffer head */
1148 + struct reiserfs_journal_cnode *j_realblock;
1149 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
1150 +index 43082049baf2..bba5604f3a03 100644
1151 +--- a/include/linux/pci_ids.h
1152 ++++ b/include/linux/pci_ids.h
1153 +@@ -3054,4 +3054,6 @@
1154 +
1155 + #define PCI_VENDOR_ID_OCZ 0x1b85
1156 +
1157 ++#define PCI_VENDOR_ID_NCUBE 0x10ff
1158 ++
1159 + #endif /* _LINUX_PCI_IDS_H */
1160 +diff --git a/kernel/fork.c b/kernel/fork.c
1161 +index 2c98b987808d..5d0e2f366766 100644
1162 +--- a/kernel/fork.c
1163 ++++ b/kernel/fork.c
1164 +@@ -1304,7 +1304,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1165 + return -ENOMEM;
1166 +
1167 + atomic_set(&sig->count, 1);
1168 ++ spin_lock_irq(&current->sighand->siglock);
1169 + memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1170 ++ spin_unlock_irq(&current->sighand->siglock);
1171 + return 0;
1172 + }
1173 +
1174 +diff --git a/lib/debugobjects.c b/lib/debugobjects.c
1175 +index 056052dc8e91..88580e8ee39e 100644
1176 +--- a/lib/debugobjects.c
1177 ++++ b/lib/debugobjects.c
1178 +@@ -294,9 +294,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
1179 +
1180 + limit++;
1181 + if (is_on_stack)
1182 +- pr_warn("object is on stack, but not annotated\n");
1183 ++ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
1184 ++ task_stack_page(current));
1185 + else
1186 +- pr_warn("object is not on stack, but annotated\n");
1187 ++ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
1188 ++ task_stack_page(current));
1189 ++
1190 + WARN_ON(1);
1191 + }
1192 +
1193 +diff --git a/mm/fadvise.c b/mm/fadvise.c
1194 +index 27fc9ad267ac..eb3269e59002 100644
1195 +--- a/mm/fadvise.c
1196 ++++ b/mm/fadvise.c
1197 +@@ -68,8 +68,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
1198 + goto out;
1199 + }
1200 +
1201 +- /* Careful about overflows. Len == 0 means "as much as possible" */
1202 +- endbyte = offset + len;
1203 ++ /*
1204 ++ * Careful about overflows. Len == 0 means "as much as possible". Use
1205 ++ * unsigned math because signed overflows are undefined and UBSan
1206 ++ * complains.
1207 ++ */
1208 ++ endbyte = (u64)offset + (u64)len;
1209 + if (!len || endbyte < len)
1210 + endbyte = -1;
1211 + else
1212 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1213 +index 9efe88ef9702..e4c6c3edaf6a 100644
1214 +--- a/mm/huge_memory.c
1215 ++++ b/mm/huge_memory.c
1216 +@@ -1259,12 +1259,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
1217 +
1218 + /* Migration could have started since the pmd_trans_migrating check */
1219 + if (!page_locked) {
1220 ++ page_nid = -1;
1221 + if (!get_page_unless_zero(page))
1222 + goto out_unlock;
1223 + spin_unlock(fe->ptl);
1224 + wait_on_page_locked(page);
1225 + put_page(page);
1226 +- page_nid = -1;
1227 + goto out;
1228 + }
1229 +
1230 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1231 +index 2b543532e2f1..aa4586672cee 100644
1232 +--- a/net/9p/trans_fd.c
1233 ++++ b/net/9p/trans_fd.c
1234 +@@ -195,15 +195,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
1235 + static void p9_conn_cancel(struct p9_conn *m, int err)
1236 + {
1237 + struct p9_req_t *req, *rtmp;
1238 +- unsigned long flags;
1239 + LIST_HEAD(cancel_list);
1240 +
1241 + p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
1242 +
1243 +- spin_lock_irqsave(&m->client->lock, flags);
1244 ++ spin_lock(&m->client->lock);
1245 +
1246 + if (m->err) {
1247 +- spin_unlock_irqrestore(&m->client->lock, flags);
1248 ++ spin_unlock(&m->client->lock);
1249 + return;
1250 + }
1251 +
1252 +@@ -215,7 +214,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
1253 + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
1254 + list_move(&req->req_list, &cancel_list);
1255 + }
1256 +- spin_unlock_irqrestore(&m->client->lock, flags);
1257 +
1258 + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
1259 + p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
1260 +@@ -224,6 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
1261 + req->t_err = err;
1262 + p9_client_cb(m->client, req, REQ_STATUS_ERROR);
1263 + }
1264 ++ spin_unlock(&m->client->lock);
1265 + }
1266 +
1267 + static int
1268 +@@ -379,8 +378,9 @@ static void p9_read_work(struct work_struct *work)
1269 + if (m->req->status != REQ_STATUS_ERROR)
1270 + status = REQ_STATUS_RCVD;
1271 + list_del(&m->req->req_list);
1272 +- spin_unlock(&m->client->lock);
1273 ++ /* update req->status while holding client->lock */
1274 + p9_client_cb(m->client, m->req, status);
1275 ++ spin_unlock(&m->client->lock);
1276 + m->rc.sdata = NULL;
1277 + m->rc.offset = 0;
1278 + m->rc.capacity = 0;
1279 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
1280 +index da0d3b257459..e73fd647065a 100644
1281 +--- a/net/9p/trans_virtio.c
1282 ++++ b/net/9p/trans_virtio.c
1283 +@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
1284 + chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
1285 + if (IS_ERR(chan->vq)) {
1286 + err = PTR_ERR(chan->vq);
1287 +- goto out_free_vq;
1288 ++ goto out_free_chan;
1289 + }
1290 + chan->vq->vdev->priv = chan;
1291 + spin_lock_init(&chan->lock);
1292 +@@ -624,6 +624,7 @@ out_free_tag:
1293 + kfree(tag);
1294 + out_free_vq:
1295 + vdev->config->del_vqs(vdev);
1296 ++out_free_chan:
1297 + kfree(chan);
1298 + fail:
1299 + return err;
1300 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1301 +index 504cdae41013..16dea67792e0 100644
1302 +--- a/net/ipv4/tcp_ipv4.c
1303 ++++ b/net/ipv4/tcp_ipv4.c
1304 +@@ -2440,6 +2440,12 @@ static int __net_init tcp_sk_init(struct net *net)
1305 + if (res)
1306 + goto fail;
1307 + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1308 ++
1309 ++ /* Please enforce IP_DF and IPID==0 for RST and
1310 ++ * ACK sent in SYN-RECV and TIME-WAIT state.
1311 ++ */
1312 ++ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
1313 ++
1314 + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
1315 + }
1316 +
1317 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
1318 +index 830a5645d8c1..a501b45d0334 100644
1319 +--- a/net/ipv4/tcp_minisocks.c
1320 ++++ b/net/ipv4/tcp_minisocks.c
1321 +@@ -194,8 +194,9 @@ kill:
1322 + inet_twsk_deschedule_put(tw);
1323 + return TCP_TW_SUCCESS;
1324 + }
1325 ++ } else {
1326 ++ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
1327 + }
1328 +- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
1329 +
1330 + if (tmp_opt.saw_tstamp) {
1331 + tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
1332 +diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
1333 +index 3d063eb37848..f6c50af24a64 100644
1334 +--- a/net/ipv4/tcp_probe.c
1335 ++++ b/net/ipv4/tcp_probe.c
1336 +@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
1337 + (fwmark > 0 && skb->mark == fwmark)) &&
1338 + (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
1339 +
1340 +- spin_lock_bh(&tcp_probe.lock);
1341 ++ spin_lock(&tcp_probe.lock);
1342 + /* If log fills, just silently drop */
1343 + if (tcp_probe_avail() > 1) {
1344 + struct tcp_log *p = tcp_probe.log + tcp_probe.head;
1345 +@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
1346 + tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
1347 + }
1348 + tcp_probe.lastcwnd = tp->snd_cwnd;
1349 +- spin_unlock_bh(&tcp_probe.lock);
1350 ++ spin_unlock(&tcp_probe.lock);
1351 +
1352 + wake_up(&tcp_probe.wait);
1353 + }
1354 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
1355 +index a5aeeb613fac..3213921cdfee 100644
1356 +--- a/net/ipv6/ip6_vti.c
1357 ++++ b/net/ipv6/ip6_vti.c
1358 +@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
1359 + }
1360 +
1361 + mtu = dst_mtu(dst);
1362 +- if (!skb->ignore_df && skb->len > mtu) {
1363 ++ if (skb->len > mtu) {
1364 + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
1365 +
1366 + if (skb->protocol == htons(ETH_P_IPV6)) {
1367 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1368 +index 101ed6c42808..0a78f17006a4 100644
1369 +--- a/net/irda/af_irda.c
1370 ++++ b/net/irda/af_irda.c
1371 +@@ -774,6 +774,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1372 + return -EINVAL;
1373 +
1374 + lock_sock(sk);
1375 ++
1376 ++ /* Ensure that the socket is not already bound */
1377 ++ if (self->ias_obj) {
1378 ++ err = -EINVAL;
1379 ++ goto out;
1380 ++ }
1381 ++
1382 + #ifdef CONFIG_IRDA_ULTRA
1383 + /* Special care for Ultra sockets */
1384 + if ((sk->sk_type == SOCK_DGRAM) &&
1385 +@@ -2016,7 +2023,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
1386 + err = -EINVAL;
1387 + goto out;
1388 + }
1389 +- irias_insert_object(ias_obj);
1390 ++
1391 ++ /* Only insert newly allocated objects */
1392 ++ if (free_ias)
1393 ++ irias_insert_object(ias_obj);
1394 ++
1395 + kfree(ias_opt);
1396 + break;
1397 + case IRLMP_IAS_DEL:
1398 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
1399 +index e34d3f60fccd..fd186b011a99 100644
1400 +--- a/net/netfilter/ipvs/ip_vs_core.c
1401 ++++ b/net/netfilter/ipvs/ip_vs_core.c
1402 +@@ -1968,13 +1968,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1403 + if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1404 + /* the destination server is not available */
1405 +
1406 +- if (sysctl_expire_nodest_conn(ipvs)) {
1407 ++ __u32 flags = cp->flags;
1408 ++
1409 ++ /* when timer already started, silently drop the packet.*/
1410 ++ if (timer_pending(&cp->timer))
1411 ++ __ip_vs_conn_put(cp);
1412 ++ else
1413 ++ ip_vs_conn_put(cp);
1414 ++
1415 ++ if (sysctl_expire_nodest_conn(ipvs) &&
1416 ++ !(flags & IP_VS_CONN_F_ONE_PACKET)) {
1417 + /* try to expire the connection immediately */
1418 + ip_vs_conn_expire_now(cp);
1419 + }
1420 +- /* don't restart its timer, and silently
1421 +- drop the packet. */
1422 +- __ip_vs_conn_put(cp);
1423 ++
1424 + return NF_DROP;
1425 + }
1426 +
1427 +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
1428 +index 66b3d6228a15..3d9c4c6397c3 100644
1429 +--- a/net/rds/ib_frmr.c
1430 ++++ b/net/rds/ib_frmr.c
1431 +@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
1432 + pool->fmr_attr.max_pages);
1433 + if (IS_ERR(frmr->mr)) {
1434 + pr_warn("RDS/IB: %s failed to allocate MR", __func__);
1435 ++ err = PTR_ERR(frmr->mr);
1436 + goto out_no_cigar;
1437 + }
1438 +
1439 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1440 +index 235db2c9bbbb..d2932dc4c83d 100644
1441 +--- a/net/sched/act_ife.c
1442 ++++ b/net/sched/act_ife.c
1443 +@@ -267,10 +267,8 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
1444 + }
1445 +
1446 + /* called when adding new meta information
1447 +- * under ife->tcf_lock for existing action
1448 + */
1449 +-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1450 +- void *val, int len, bool exists)
1451 ++static int load_metaops_and_vet(u32 metaid, void *val, int len)
1452 + {
1453 + struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1454 + int ret = 0;
1455 +@@ -278,13 +276,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1456 + if (!ops) {
1457 + ret = -ENOENT;
1458 + #ifdef CONFIG_MODULES
1459 +- if (exists)
1460 +- spin_unlock_bh(&ife->tcf_lock);
1461 + rtnl_unlock();
1462 + request_module("ifemeta%u", metaid);
1463 + rtnl_lock();
1464 +- if (exists)
1465 +- spin_lock_bh(&ife->tcf_lock);
1466 + ops = find_ife_oplist(metaid);
1467 + #endif
1468 + }
1469 +@@ -301,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1470 + }
1471 +
1472 + /* called when adding new meta information
1473 +- * under ife->tcf_lock for existing action
1474 + */
1475 +-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1476 +- int len, bool atomic)
1477 ++static int __add_metainfo(const struct tcf_meta_ops *ops,
1478 ++ struct tcf_ife_info *ife, u32 metaid, void *metaval,
1479 ++ int len, bool atomic, bool exists)
1480 + {
1481 + struct tcf_meta_info *mi = NULL;
1482 +- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1483 + int ret = 0;
1484 +
1485 +- if (!ops)
1486 +- return -ENOENT;
1487 +-
1488 + mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
1489 +- if (!mi) {
1490 +- /*put back what find_ife_oplist took */
1491 +- module_put(ops->owner);
1492 ++ if (!mi)
1493 + return -ENOMEM;
1494 +- }
1495 +
1496 + mi->metaid = metaid;
1497 + mi->ops = ops;
1498 +@@ -326,17 +313,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1499 + ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
1500 + if (ret != 0) {
1501 + kfree(mi);
1502 +- module_put(ops->owner);
1503 + return ret;
1504 + }
1505 + }
1506 +
1507 ++ if (exists)
1508 ++ spin_lock_bh(&ife->tcf_lock);
1509 + list_add_tail(&mi->metalist, &ife->metalist);
1510 ++ if (exists)
1511 ++ spin_unlock_bh(&ife->tcf_lock);
1512 ++
1513 ++ return ret;
1514 ++}
1515 ++
1516 ++static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
1517 ++ struct tcf_ife_info *ife, u32 metaid,
1518 ++ bool exists)
1519 ++{
1520 ++ int ret;
1521 ++
1522 ++ if (!try_module_get(ops->owner))
1523 ++ return -ENOENT;
1524 ++ ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
1525 ++ if (ret)
1526 ++ module_put(ops->owner);
1527 ++ return ret;
1528 ++}
1529 ++
1530 ++static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1531 ++ int len, bool exists)
1532 ++{
1533 ++ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1534 ++ int ret;
1535 +
1536 ++ if (!ops)
1537 ++ return -ENOENT;
1538 ++ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
1539 ++ if (ret)
1540 ++ /*put back what find_ife_oplist took */
1541 ++ module_put(ops->owner);
1542 + return ret;
1543 + }
1544 +
1545 +-static int use_all_metadata(struct tcf_ife_info *ife)
1546 ++static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
1547 + {
1548 + struct tcf_meta_ops *o;
1549 + int rc = 0;
1550 +@@ -344,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
1551 +
1552 + read_lock(&ife_mod_lock);
1553 + list_for_each_entry(o, &ifeoplist, list) {
1554 +- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
1555 ++ rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
1556 + if (rc == 0)
1557 + installed += 1;
1558 + }
1559 +@@ -395,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
1560 + struct tcf_meta_info *e, *n;
1561 +
1562 + list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
1563 +- module_put(e->ops->owner);
1564 + list_del(&e->metalist);
1565 + if (e->metaval) {
1566 + if (e->ops->release)
1567 +@@ -403,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
1568 + else
1569 + kfree(e->metaval);
1570 + }
1571 ++ module_put(e->ops->owner);
1572 + kfree(e);
1573 + }
1574 + }
1575 +@@ -416,7 +435,6 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
1576 + spin_unlock_bh(&ife->tcf_lock);
1577 + }
1578 +
1579 +-/* under ife->tcf_lock for existing action */
1580 + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
1581 + bool exists)
1582 + {
1583 +@@ -430,7 +448,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
1584 + val = nla_data(tb[i]);
1585 + len = nla_len(tb[i]);
1586 +
1587 +- rc = load_metaops_and_vet(ife, i, val, len, exists);
1588 ++ rc = load_metaops_and_vet(i, val, len);
1589 + if (rc != 0)
1590 + return rc;
1591 +
1592 +@@ -510,6 +528,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1593 + if (exists)
1594 + spin_lock_bh(&ife->tcf_lock);
1595 + ife->tcf_action = parm->action;
1596 ++ if (exists)
1597 ++ spin_unlock_bh(&ife->tcf_lock);
1598 +
1599 + if (parm->flags & IFE_ENCODE) {
1600 + if (daddr)
1601 +@@ -537,9 +557,6 @@ metadata_parse_err:
1602 + tcf_hash_release(*a, bind);
1603 + if (ret == ACT_P_CREATED)
1604 + _tcf_ife_cleanup(*a, bind);
1605 +-
1606 +- if (exists)
1607 +- spin_unlock_bh(&ife->tcf_lock);
1608 + return err;
1609 + }
1610 +
1611 +@@ -553,20 +570,14 @@ metadata_parse_err:
1612 + * as we can. You better have at least one else we are
1613 + * going to bail out
1614 + */
1615 +- err = use_all_metadata(ife);
1616 ++ err = use_all_metadata(ife, exists);
1617 + if (err) {
1618 + if (ret == ACT_P_CREATED)
1619 + _tcf_ife_cleanup(*a, bind);
1620 +-
1621 +- if (exists)
1622 +- spin_unlock_bh(&ife->tcf_lock);
1623 + return err;
1624 + }
1625 + }
1626 +
1627 +- if (exists)
1628 +- spin_unlock_bh(&ife->tcf_lock);
1629 +-
1630 + if (ret == ACT_P_CREATED)
1631 + tcf_hash_insert(tn, *a);
1632 +
1633 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1634 +index da574a16e7b3..e377dd5b06a6 100644
1635 +--- a/net/sched/cls_u32.c
1636 ++++ b/net/sched/cls_u32.c
1637 +@@ -851,6 +851,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1638 + struct nlattr *opt = tca[TCA_OPTIONS];
1639 + struct nlattr *tb[TCA_U32_MAX + 1];
1640 + u32 htid, flags = 0;
1641 ++ size_t sel_size;
1642 + int err;
1643 + #ifdef CONFIG_CLS_U32_PERF
1644 + size_t size;
1645 +@@ -967,8 +968,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1646 + return -EINVAL;
1647 +
1648 + s = nla_data(tb[TCA_U32_SEL]);
1649 ++ sel_size = sizeof(*s) + sizeof(*s->keys) * s->nkeys;
1650 ++ if (nla_len(tb[TCA_U32_SEL]) < sel_size)
1651 ++ return -EINVAL;
1652 +
1653 +- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
1654 ++ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
1655 + if (n == NULL)
1656 + return -ENOBUFS;
1657 +
1658 +@@ -981,7 +985,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1659 + }
1660 + #endif
1661 +
1662 +- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
1663 ++ memcpy(&n->sel, s, sel_size);
1664 + RCU_INIT_POINTER(n->ht_up, ht);
1665 + n->handle = handle;
1666 + n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
1667 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
1668 +index 2fae8b5f1b80..f4b2d69973c3 100644
1669 +--- a/net/sched/sch_hhf.c
1670 ++++ b/net/sched/sch_hhf.c
1671 +@@ -492,6 +492,9 @@ static void hhf_destroy(struct Qdisc *sch)
1672 + hhf_free(q->hhf_valid_bits[i]);
1673 + }
1674 +
1675 ++ if (!q->hh_flows)
1676 ++ return;
1677 ++
1678 + for (i = 0; i < HH_FLOWS_CNT; i++) {
1679 + struct hh_flow_state *flow, *next;
1680 + struct list_head *head = &q->hh_flows[i];
1681 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
1682 +index c798d0de8a9d..95fe75d441eb 100644
1683 +--- a/net/sched/sch_htb.c
1684 ++++ b/net/sched/sch_htb.c
1685 +@@ -1013,6 +1013,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1686 + int err;
1687 + int i;
1688 +
1689 ++ qdisc_watchdog_init(&q->watchdog, sch);
1690 ++ INIT_WORK(&q->work, htb_work_func);
1691 ++
1692 + if (!opt)
1693 + return -EINVAL;
1694 +
1695 +@@ -1033,8 +1036,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1696 + for (i = 0; i < TC_HTB_NUMPRIO; i++)
1697 + INIT_LIST_HEAD(q->drops + i);
1698 +
1699 +- qdisc_watchdog_init(&q->watchdog, sch);
1700 +- INIT_WORK(&q->work, htb_work_func);
1701 + qdisc_skb_head_init(&q->direct_queue);
1702 +
1703 + if (tb[TCA_HTB_DIRECT_QLEN])
1704 +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
1705 +index 9ffbb025b37e..66b6e807b4ec 100644
1706 +--- a/net/sched/sch_multiq.c
1707 ++++ b/net/sched/sch_multiq.c
1708 +@@ -234,7 +234,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
1709 + static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
1710 + {
1711 + struct multiq_sched_data *q = qdisc_priv(sch);
1712 +- int i, err;
1713 ++ int i;
1714 +
1715 + q->queues = NULL;
1716 +
1717 +@@ -249,12 +249,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
1718 + for (i = 0; i < q->max_bands; i++)
1719 + q->queues[i] = &noop_qdisc;
1720 +
1721 +- err = multiq_tune(sch, opt);
1722 +-
1723 +- if (err)
1724 +- kfree(q->queues);
1725 +-
1726 +- return err;
1727 ++ return multiq_tune(sch, opt);
1728 + }
1729 +
1730 + static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
1731 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
1732 +index e899d9eb76cb..3f87ddb1777d 100644
1733 +--- a/net/sched/sch_netem.c
1734 ++++ b/net/sched/sch_netem.c
1735 +@@ -937,11 +937,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
1736 + struct netem_sched_data *q = qdisc_priv(sch);
1737 + int ret;
1738 +
1739 ++ qdisc_watchdog_init(&q->watchdog, sch);
1740 ++
1741 + if (!opt)
1742 + return -EINVAL;
1743 +
1744 +- qdisc_watchdog_init(&q->watchdog, sch);
1745 +-
1746 + q->loss_model = CLG_RANDOM;
1747 + ret = netem_change(sch, opt);
1748 + if (ret)
1749 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
1750 +index 303355c449ab..b3f7980b0f27 100644
1751 +--- a/net/sched/sch_tbf.c
1752 ++++ b/net/sched/sch_tbf.c
1753 +@@ -423,12 +423,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
1754 + {
1755 + struct tbf_sched_data *q = qdisc_priv(sch);
1756 +
1757 ++ qdisc_watchdog_init(&q->watchdog, sch);
1758 ++ q->qdisc = &noop_qdisc;
1759 ++
1760 + if (opt == NULL)
1761 + return -EINVAL;
1762 +
1763 + q->t_c = ktime_get_ns();
1764 +- qdisc_watchdog_init(&q->watchdog, sch);
1765 +- q->qdisc = &noop_qdisc;
1766 +
1767 + return tbf_change(sch, opt);
1768 + }
1769 +diff --git a/net/sctp/proc.c b/net/sctp/proc.c
1770 +index 206377fe91ec..fd7f23566ed6 100644
1771 +--- a/net/sctp/proc.c
1772 ++++ b/net/sctp/proc.c
1773 +@@ -337,8 +337,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
1774 + }
1775 +
1776 + transport = (struct sctp_transport *)v;
1777 +- if (!sctp_transport_hold(transport))
1778 +- return 0;
1779 + assoc = transport->asoc;
1780 + epb = &assoc->base;
1781 + sk = epb->sk;
1782 +@@ -428,8 +426,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
1783 + }
1784 +
1785 + transport = (struct sctp_transport *)v;
1786 +- if (!sctp_transport_hold(transport))
1787 +- return 0;
1788 + assoc = transport->asoc;
1789 +
1790 + list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
1791 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1792 +index 78f38056fca6..64d2d9ea2f8c 100644
1793 +--- a/net/sctp/socket.c
1794 ++++ b/net/sctp/socket.c
1795 +@@ -4476,9 +4476,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
1796 + break;
1797 + }
1798 +
1799 ++ if (!sctp_transport_hold(t))
1800 ++ continue;
1801 ++
1802 + if (net_eq(sock_net(t->asoc->base.sk), net) &&
1803 + t->asoc->peer.primary_path == t)
1804 + break;
1805 ++
1806 ++ sctp_transport_put(t);
1807 + }
1808 +
1809 + return t;
1810 +@@ -4488,13 +4493,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
1811 + struct rhashtable_iter *iter,
1812 + int pos)
1813 + {
1814 +- void *obj = SEQ_START_TOKEN;
1815 ++ struct sctp_transport *t;
1816 +
1817 +- while (pos && (obj = sctp_transport_get_next(net, iter)) &&
1818 +- !IS_ERR(obj))
1819 +- pos--;
1820 ++ if (!pos)
1821 ++ return SEQ_START_TOKEN;
1822 +
1823 +- return obj;
1824 ++ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
1825 ++ if (!--pos)
1826 ++ break;
1827 ++ sctp_transport_put(t);
1828 ++ }
1829 ++
1830 ++ return t;
1831 + }
1832 +
1833 + int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
1834 +@@ -4556,8 +4566,6 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
1835 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
1836 + struct sctp_transport *transport = obj;
1837 +
1838 +- if (!sctp_transport_hold(transport))
1839 +- continue;
1840 + err = cb(transport, p);
1841 + sctp_transport_put(transport);
1842 + if (err)
1843 +diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
1844 +index 4afd4149a632..bad69e91fea3 100644
1845 +--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
1846 ++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
1847 +@@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
1848 + struct scatterlist sg[1];
1849 + int err = -1;
1850 + u8 *checksumdata;
1851 +- u8 rc4salt[4];
1852 ++ u8 *rc4salt;
1853 + struct crypto_ahash *md5;
1854 + struct crypto_ahash *hmac_md5;
1855 + struct ahash_request *req;
1856 +@@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
1857 + return GSS_S_FAILURE;
1858 + }
1859 +
1860 ++ rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
1861 ++ if (!rc4salt)
1862 ++ return GSS_S_FAILURE;
1863 ++
1864 + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
1865 + dprintk("%s: invalid usage value %u\n", __func__, usage);
1866 +- return GSS_S_FAILURE;
1867 ++ goto out_free_rc4salt;
1868 + }
1869 +
1870 + checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
1871 + if (!checksumdata)
1872 +- return GSS_S_FAILURE;
1873 ++ goto out_free_rc4salt;
1874 +
1875 + md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
1876 + if (IS_ERR(md5))
1877 +@@ -258,6 +262,8 @@ out_free_md5:
1878 + crypto_free_ahash(md5);
1879 + out_free_cksum:
1880 + kfree(checksumdata);
1881 ++out_free_rc4salt:
1882 ++ kfree(rc4salt);
1883 + return err ? GSS_S_FAILURE : 0;
1884 + }
1885 +
1886 +diff --git a/scripts/depmod.sh b/scripts/depmod.sh
1887 +index ea1e96921e3b..baedaef53ca0 100755
1888 +--- a/scripts/depmod.sh
1889 ++++ b/scripts/depmod.sh
1890 +@@ -15,9 +15,9 @@ if ! test -r System.map ; then
1891 + fi
1892 +
1893 + if [ -z $(command -v $DEPMOD) ]; then
1894 +- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
1895 ++ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
1896 + echo "This is probably in the kmod package." >&2
1897 +- exit 1
1898 ++ exit 0
1899 + fi
1900 +
1901 + # older versions of depmod don't support -P <symbol-prefix>
1902 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
1903 +index 238db4ffd30c..88b3dc19bbae 100644
1904 +--- a/scripts/mod/modpost.c
1905 ++++ b/scripts/mod/modpost.c
1906 +@@ -649,7 +649,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
1907 + if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
1908 + break;
1909 + if (symname[0] == '.') {
1910 +- char *munged = strdup(symname);
1911 ++ char *munged = NOFAIL(strdup(symname));
1912 + munged[0] = '_';
1913 + munged[1] = toupper(munged[1]);
1914 + symname = munged;
1915 +@@ -1312,7 +1312,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
1916 + static char *sec2annotation(const char *s)
1917 + {
1918 + if (match(s, init_exit_sections)) {
1919 +- char *p = malloc(20);
1920 ++ char *p = NOFAIL(malloc(20));
1921 + char *r = p;
1922 +
1923 + *p++ = '_';
1924 +@@ -1332,7 +1332,7 @@ static char *sec2annotation(const char *s)
1925 + strcat(p, " ");
1926 + return r;
1927 + } else {
1928 +- return strdup("");
1929 ++ return NOFAIL(strdup(""));
1930 + }
1931 + }
1932 +
1933 +@@ -2033,7 +2033,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
1934 + {
1935 + if (buf->size - buf->pos < len) {
1936 + buf->size += len + SZ;
1937 +- buf->p = realloc(buf->p, buf->size);
1938 ++ buf->p = NOFAIL(realloc(buf->p, buf->size));
1939 + }
1940 + strncpy(buf->p + buf->pos, s, len);
1941 + buf->pos += len;
1942 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
1943 +index 3896523b71e9..f289762cd676 100644
1944 +--- a/sound/soc/codecs/wm8994.c
1945 ++++ b/sound/soc/codecs/wm8994.c
1946 +@@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
1947 + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
1948 + WM8994_OPCLK_ENA, 0);
1949 + }
1950 ++ break;
1951 +
1952 + default:
1953 + return -EINVAL;
1954 +diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
1955 +index 1030a6e504bb..de477a3dc968 100644
1956 +--- a/tools/perf/arch/powerpc/util/sym-handling.c
1957 ++++ b/tools/perf/arch/powerpc/util/sym-handling.c
1958 +@@ -115,8 +115,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
1959 + for (i = 0; i < ntevs; i++) {
1960 + tev = &pev->tevs[i];
1961 + map__for_each_symbol(map, sym, tmp) {
1962 +- if (map->unmap_ip(map, sym->start) == tev->point.address)
1963 ++ if (map->unmap_ip(map, sym->start) == tev->point.address) {
1964 + arch__fix_tev_from_maps(pev, tev, map, sym);
1965 ++ break;
1966 ++ }
1967 + }
1968 + }
1969 + }
1970 +diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
1971 +index 66d31de60b9a..9d7166dfad1e 100644
1972 +--- a/tools/testing/selftests/powerpc/harness.c
1973 ++++ b/tools/testing/selftests/powerpc/harness.c
1974 +@@ -85,13 +85,13 @@ wait:
1975 + return status;
1976 + }
1977 +
1978 +-static void alarm_handler(int signum)
1979 ++static void sig_handler(int signum)
1980 + {
1981 +- /* Jut wake us up from waitpid */
1982 ++ /* Just wake us up from waitpid */
1983 + }
1984 +
1985 +-static struct sigaction alarm_action = {
1986 +- .sa_handler = alarm_handler,
1987 ++static struct sigaction sig_action = {
1988 ++ .sa_handler = sig_handler,
1989 + };
1990 +
1991 + void test_harness_set_timeout(uint64_t time)
1992 +@@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
1993 + test_start(name);
1994 + test_set_git_version(GIT_VERSION);
1995 +
1996 +- if (sigaction(SIGALRM, &alarm_action, NULL)) {
1997 +- perror("sigaction");
1998 ++ if (sigaction(SIGINT, &sig_action, NULL)) {
1999 ++ perror("sigaction (sigint)");
2000 ++ test_error(name);
2001 ++ return 1;
2002 ++ }
2003 ++
2004 ++ if (sigaction(SIGALRM, &sig_action, NULL)) {
2005 ++ perror("sigaction (sigalrm)");
2006 + test_error(name);
2007 + return 1;
2008 + }