Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 14 Nov 2018 14:37:55
Message-Id: 1542206219.448adb6e2a2b7b3d07f3dc00a3cd61158482584b.mpagano@gentoo
1 commit: 448adb6e2a2b7b3d07f3dc00a3cd61158482584b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Sat Sep 15 10:10:19 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 14 14:36:59 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=448adb6e
7
8 Linux patch 4.9.127
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1126_linux-4.9.127.patch | 1973 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1977 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index eaa2495..ad1fe4a 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -547,6 +547,10 @@ Patch: 1125_linux-4.9.126.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.126
23
24 +Patch: 1126_linux-4.9.127.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.127
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1126_linux-4.9.127.patch b/1126_linux-4.9.127.patch
33 new file mode 100644
34 index 0000000..0cd0eae
35 --- /dev/null
36 +++ b/1126_linux-4.9.127.patch
37 @@ -0,0 +1,1973 @@
38 +diff --git a/Makefile b/Makefile
39 +index b26481fef3f0..4e37716ae395 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 9
45 +-SUBLEVEL = 126
46 ++SUBLEVEL = 127
47 + EXTRAVERSION =
48 + NAME = Roaring Lionus
49 +
50 +diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
51 +index 6b7d4f535984..8ec4dbbb50b0 100644
52 +--- a/arch/arm/configs/imx_v6_v7_defconfig
53 ++++ b/arch/arm/configs/imx_v6_v7_defconfig
54 +@@ -271,7 +271,6 @@ CONFIG_USB_STORAGE=y
55 + CONFIG_USB_CHIPIDEA=y
56 + CONFIG_USB_CHIPIDEA_UDC=y
57 + CONFIG_USB_CHIPIDEA_HOST=y
58 +-CONFIG_USB_CHIPIDEA_ULPI=y
59 + CONFIG_USB_SERIAL=m
60 + CONFIG_USB_SERIAL_GENERIC=y
61 + CONFIG_USB_SERIAL_FTDI_SIO=m
62 +@@ -308,7 +307,6 @@ CONFIG_USB_GADGETFS=m
63 + CONFIG_USB_FUNCTIONFS=m
64 + CONFIG_USB_MASS_STORAGE=m
65 + CONFIG_USB_G_SERIAL=m
66 +-CONFIG_USB_ULPI_BUS=y
67 + CONFIG_MMC=y
68 + CONFIG_MMC_SDHCI=y
69 + CONFIG_MMC_SDHCI_PLTFM=y
70 +diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
71 +index 9ad84cd01ba0..5ed8fa5a9825 100644
72 +--- a/arch/arm/mach-rockchip/Kconfig
73 ++++ b/arch/arm/mach-rockchip/Kconfig
74 +@@ -16,6 +16,7 @@ config ARCH_ROCKCHIP
75 + select ROCKCHIP_TIMER
76 + select ARM_GLOBAL_TIMER
77 + select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
78 ++ select PM
79 + help
80 + Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
81 + containing the RK2928, RK30xx and RK31xx series.
82 +diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
83 +index 08a4497f70a6..3428a4ba2ccd 100644
84 +--- a/arch/arm64/Kconfig.platforms
85 ++++ b/arch/arm64/Kconfig.platforms
86 +@@ -125,6 +125,7 @@ config ARCH_ROCKCHIP
87 + select GPIOLIB
88 + select PINCTRL
89 + select PINCTRL_ROCKCHIP
90 ++ select PM
91 + select ROCKCHIP_TIMER
92 + help
93 + This enables support for the ARMv8 based Rockchip chipsets,
94 +diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
95 +index f5588692f1d4..877d4789dcb3 100644
96 +--- a/arch/arm64/include/asm/cachetype.h
97 ++++ b/arch/arm64/include/asm/cachetype.h
98 +@@ -22,6 +22,11 @@
99 + #define CTR_L1IP_MASK 3
100 + #define CTR_CWG_SHIFT 24
101 + #define CTR_CWG_MASK 15
102 ++#define CTR_DMINLINE_SHIFT 16
103 ++#define CTR_IMINLINE_SHIFT 0
104 ++
105 ++#define CTR_CACHE_MINLINE_MASK \
106 ++ ((0xf << CTR_DMINLINE_SHIFT) | (0xf << CTR_IMINLINE_SHIFT))
107 +
108 + #define ICACHE_POLICY_RESERVED 0
109 + #define ICACHE_POLICY_AIVIVT 1
110 +diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
111 +index 7010779a1429..8c7c4b23a8b1 100644
112 +--- a/arch/arm64/include/asm/cpucaps.h
113 ++++ b/arch/arm64/include/asm/cpucaps.h
114 +@@ -37,7 +37,8 @@
115 + #define ARM64_UNMAP_KERNEL_AT_EL0 16
116 + #define ARM64_HARDEN_BRANCH_PREDICTOR 17
117 + #define ARM64_SSBD 18
118 ++#define ARM64_MISMATCHED_CACHE_TYPE 19
119 +
120 +-#define ARM64_NCAPS 19
121 ++#define ARM64_NCAPS 20
122 +
123 + #endif /* __ASM_CPUCAPS_H */
124 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
125 +index 1db97ad7b58b..930e74d9fcbd 100644
126 +--- a/arch/arm64/kernel/cpu_errata.c
127 ++++ b/arch/arm64/kernel/cpu_errata.c
128 +@@ -17,6 +17,7 @@
129 + */
130 +
131 + #include <linux/types.h>
132 ++#include <asm/cachetype.h>
133 + #include <asm/cpu.h>
134 + #include <asm/cputype.h>
135 + #include <asm/cpufeature.h>
136 +@@ -31,12 +32,18 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
137 + }
138 +
139 + static bool
140 +-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
141 +- int scope)
142 ++has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
143 ++ int scope)
144 + {
145 ++ u64 mask = CTR_CACHE_MINLINE_MASK;
146 ++
147 ++ /* Skip matching the min line sizes for cache type check */
148 ++ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
149 ++ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
150 ++
151 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
152 +- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
153 +- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
154 ++ return (read_cpuid_cachetype() & mask) !=
155 ++ (arm64_ftr_reg_ctrel0.sys_val & mask);
156 + }
157 +
158 + static int cpu_enable_trap_ctr_access(void *__unused)
159 +@@ -446,7 +453,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
160 + {
161 + .desc = "Mismatched cache line size",
162 + .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
163 +- .matches = has_mismatched_cache_line_size,
164 ++ .matches = has_mismatched_cache_type,
165 ++ .def_scope = SCOPE_LOCAL_CPU,
166 ++ .enable = cpu_enable_trap_ctr_access,
167 ++ },
168 ++ {
169 ++ .desc = "Mismatched cache type",
170 ++ .capability = ARM64_MISMATCHED_CACHE_TYPE,
171 ++ .matches = has_mismatched_cache_type,
172 + .def_scope = SCOPE_LOCAL_CPU,
173 + .enable = cpu_enable_trap_ctr_access,
174 + },
175 +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
176 +index ab15747a49d4..a3ab7dfad50a 100644
177 +--- a/arch/arm64/kernel/cpufeature.c
178 ++++ b/arch/arm64/kernel/cpufeature.c
179 +@@ -152,7 +152,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
180 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
181 + ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
182 + ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
183 +- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
184 ++ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
185 + /*
186 + * Linux can handle differing I-cache policies. Userspace JITs will
187 + * make use of *minLine.
188 +@@ -160,7 +160,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
189 + */
190 + ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */
191 + ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */
192 +- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
193 ++ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
194 + ARM64_FTR_END,
195 + };
196 +
197 +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
198 +index 34989ce43147..8799d8a83d56 100644
199 +--- a/arch/powerpc/platforms/pseries/ras.c
200 ++++ b/arch/powerpc/platforms/pseries/ras.c
201 +@@ -357,7 +357,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
202 + int len, error_log_length;
203 +
204 + error_log_length = 8 + rtas_error_extended_log_length(h);
205 +- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
206 ++ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
207 + memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
208 + memcpy(global_mce_data_buf, h, len);
209 + errhdr = (struct rtas_error_log *)global_mce_data_buf;
210 +diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
211 +index db2286be5d9a..47fb336741d4 100644
212 +--- a/arch/powerpc/sysdev/mpic_msgr.c
213 ++++ b/arch/powerpc/sysdev/mpic_msgr.c
214 +@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
215 +
216 + /* IO map the message register block. */
217 + of_address_to_resource(np, 0, &rsrc);
218 +- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
219 ++ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
220 + if (!msgr_block_addr) {
221 + dev_err(&dev->dev, "Failed to iomap MPIC message registers");
222 + return -EFAULT;
223 +diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
224 +index 598254461fb7..167135294ca5 100644
225 +--- a/arch/s390/kernel/crash_dump.c
226 ++++ b/arch/s390/kernel/crash_dump.c
227 +@@ -401,11 +401,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
228 + if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
229 + sizeof(nt_name) - 1))
230 + return NULL;
231 +- if (strcmp(nt_name, "VMCOREINFO") != 0)
232 ++ if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
233 + return NULL;
234 + vmcoreinfo = kzalloc_panic(note.n_descsz);
235 +- if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
236 ++ if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
237 ++ kfree(vmcoreinfo);
238 + return NULL;
239 ++ }
240 + *size = note.n_descsz;
241 + return vmcoreinfo;
242 + }
243 +@@ -415,15 +417,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
244 + */
245 + static void *nt_vmcoreinfo(void *ptr)
246 + {
247 ++ const char *name = VMCOREINFO_NOTE_NAME;
248 + unsigned long size;
249 + void *vmcoreinfo;
250 +
251 + vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
252 +- if (!vmcoreinfo)
253 +- vmcoreinfo = get_vmcoreinfo_old(&size);
254 ++ if (vmcoreinfo)
255 ++ return nt_init_name(ptr, 0, vmcoreinfo, size, name);
256 ++
257 ++ vmcoreinfo = get_vmcoreinfo_old(&size);
258 + if (!vmcoreinfo)
259 + return ptr;
260 +- return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
261 ++ ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
262 ++ kfree(vmcoreinfo);
263 ++ return ptr;
264 + }
265 +
266 + /*
267 +diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
268 +index e7672edc284a..5ff0520784f2 100644
269 +--- a/arch/s390/lib/mem.S
270 ++++ b/arch/s390/lib/mem.S
271 +@@ -27,7 +27,7 @@
272 + */
273 + ENTRY(memset)
274 + ltgr %r4,%r4
275 +- bzr %r14
276 ++ jz .Lmemset_exit
277 + ltgr %r3,%r3
278 + jnz .Lmemset_fill
279 + aghi %r4,-1
280 +@@ -42,12 +42,13 @@ ENTRY(memset)
281 + .Lmemset_clear_rest:
282 + larl %r3,.Lmemset_xc
283 + ex %r4,0(%r3)
284 ++.Lmemset_exit:
285 + BR_EX %r14
286 + .Lmemset_fill:
287 + stc %r3,0(%r2)
288 + cghi %r4,1
289 + lgr %r1,%r2
290 +- ber %r14
291 ++ je .Lmemset_fill_exit
292 + aghi %r4,-2
293 + srlg %r3,%r4,8
294 + ltgr %r3,%r3
295 +@@ -59,6 +60,7 @@ ENTRY(memset)
296 + .Lmemset_fill_rest:
297 + larl %r3,.Lmemset_mvc
298 + ex %r4,0(%r3)
299 ++.Lmemset_fill_exit:
300 + BR_EX %r14
301 + .Lmemset_xc:
302 + xc 0(1,%r1),0(%r1)
303 +@@ -73,7 +75,7 @@ EXPORT_SYMBOL(memset)
304 + */
305 + ENTRY(memcpy)
306 + ltgr %r4,%r4
307 +- bzr %r14
308 ++ jz .Lmemcpy_exit
309 + aghi %r4,-1
310 + srlg %r5,%r4,8
311 + ltgr %r5,%r5
312 +@@ -82,6 +84,7 @@ ENTRY(memcpy)
313 + .Lmemcpy_rest:
314 + larl %r5,.Lmemcpy_mvc
315 + ex %r4,0(%r5)
316 ++.Lmemcpy_exit:
317 + BR_EX %r14
318 + .Lmemcpy_loop:
319 + mvc 0(256,%r1),0(%r3)
320 +diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
321 +index 5c686382d84b..095dbc25122a 100644
322 +--- a/arch/x86/include/asm/pgtable-3level.h
323 ++++ b/arch/x86/include/asm/pgtable-3level.h
324 +@@ -1,6 +1,8 @@
325 + #ifndef _ASM_X86_PGTABLE_3LEVEL_H
326 + #define _ASM_X86_PGTABLE_3LEVEL_H
327 +
328 ++#include <asm/atomic64_32.h>
329 ++
330 + /*
331 + * Intel Physical Address Extension (PAE) Mode - three-level page
332 + * tables on PPro+ CPUs.
333 +@@ -142,10 +144,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
334 + {
335 + pte_t res;
336 +
337 +- /* xchg acts as a barrier before the setting of the high bits */
338 +- res.pte_low = xchg(&ptep->pte_low, 0);
339 +- res.pte_high = ptep->pte_high;
340 +- ptep->pte_high = 0;
341 ++ res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
342 +
343 + return res;
344 + }
345 +diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
346 +index c535012bdb56..5736306bdaab 100644
347 +--- a/arch/x86/include/asm/pgtable.h
348 ++++ b/arch/x86/include/asm/pgtable.h
349 +@@ -420,7 +420,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
350 +
351 + static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
352 + {
353 +- phys_addr_t pfn = page_nr << PAGE_SHIFT;
354 ++ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
355 + pfn ^= protnone_mask(pgprot_val(pgprot));
356 + pfn &= PHYSICAL_PUD_PAGE_MASK;
357 + return __pud(pfn | massage_pgprot(pgprot));
358 +diff --git a/block/bio.c b/block/bio.c
359 +index 4f93345c6a82..68972e3d3f5c 100644
360 +--- a/block/bio.c
361 ++++ b/block/bio.c
362 +@@ -155,7 +155,7 @@ out:
363 +
364 + unsigned int bvec_nr_vecs(unsigned short idx)
365 + {
366 +- return bvec_slabs[idx].nr_vecs;
367 ++ return bvec_slabs[--idx].nr_vecs;
368 + }
369 +
370 + void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
371 +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
372 +index 145dcf293c6f..0792ec5a9efc 100644
373 +--- a/drivers/acpi/scan.c
374 ++++ b/drivers/acpi/scan.c
375 +@@ -1453,7 +1453,8 @@ static int acpi_add_single_object(struct acpi_device **child,
376 + * Note this must be done before the get power-/wakeup_dev-flags calls.
377 + */
378 + if (type == ACPI_BUS_TYPE_DEVICE)
379 +- acpi_bus_get_status(device);
380 ++ if (acpi_bus_get_status(device) < 0)
381 ++ acpi_set_device_status(device, 0);
382 +
383 + acpi_bus_get_power_flags(device);
384 + acpi_bus_get_wakeup_device_flags(device);
385 +@@ -1531,7 +1532,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
386 + * acpi_add_single_object updates this once we've an acpi_device
387 + * so that acpi_bus_get_status' quirk handling can be used.
388 + */
389 +- *sta = 0;
390 ++ *sta = ACPI_STA_DEFAULT;
391 + break;
392 + case ACPI_TYPE_PROCESSOR:
393 + *type = ACPI_BUS_TYPE_PROCESSOR;
394 +diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
395 +index 05671c03efe2..410998800af5 100644
396 +--- a/drivers/clk/rockchip/clk-rk3399.c
397 ++++ b/drivers/clk/rockchip/clk-rk3399.c
398 +@@ -1521,6 +1521,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
399 + "pclk_pmu_src",
400 + "fclk_cm0s_src_pmu",
401 + "clk_timer_src_pmu",
402 ++ "pclk_rkpwm_pmu",
403 + };
404 +
405 + static void __init rk3399_clk_init(struct device_node *np)
406 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
407 +index 6b31e0474271..37ba5f51378e 100644
408 +--- a/drivers/gpu/drm/drm_edid.c
409 ++++ b/drivers/gpu/drm/drm_edid.c
410 +@@ -110,6 +110,9 @@ static const struct edid_quirk {
411 + /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
412 + { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
413 +
414 ++ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
415 ++ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
416 ++
417 + /* Belinea 10 15 55 */
418 + { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
419 + { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
420 +diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
421 +index 05db7d59812a..da61ce82c3d9 100644
422 +--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
423 ++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
424 +@@ -35,7 +35,7 @@
425 +
426 + static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
427 + {
428 +- return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
429 ++ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
430 + }
431 +
432 + static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
433 +diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
434 +index e86dd8d06777..33cf1035030b 100644
435 +--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
436 ++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
437 +@@ -114,7 +114,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
438 + {
439 + struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
440 +
441 +- return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
442 ++ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
443 ++ base) ?
444 ++ -ENOMEM :
445 ++ 0;
446 + }
447 +
448 + enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
449 +diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
450 +index c2662a1bfdd3..6e24facebb46 100644
451 +--- a/drivers/irqchip/irq-bcm7038-l1.c
452 ++++ b/drivers/irqchip/irq-bcm7038-l1.c
453 +@@ -215,6 +215,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
454 + return 0;
455 + }
456 +
457 ++#ifdef CONFIG_SMP
458 + static void bcm7038_l1_cpu_offline(struct irq_data *d)
459 + {
460 + struct cpumask *mask = irq_data_get_affinity_mask(d);
461 +@@ -239,6 +240,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
462 + }
463 + irq_set_affinity_locked(d, &new_affinity, false);
464 + }
465 ++#endif
466 +
467 + static int __init bcm7038_l1_init_one(struct device_node *dn,
468 + unsigned int idx,
469 +@@ -291,7 +293,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
470 + .irq_mask = bcm7038_l1_mask,
471 + .irq_unmask = bcm7038_l1_unmask,
472 + .irq_set_affinity = bcm7038_l1_set_affinity,
473 ++#ifdef CONFIG_SMP
474 + .irq_cpu_offline = bcm7038_l1_cpu_offline,
475 ++#endif
476 + };
477 +
478 + static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
479 +diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
480 +index 9e9d04cb7d51..56fcccc30554 100644
481 +--- a/drivers/md/dm-kcopyd.c
482 ++++ b/drivers/md/dm-kcopyd.c
483 +@@ -454,6 +454,8 @@ static int run_complete_job(struct kcopyd_job *job)
484 + if (atomic_dec_and_test(&kc->nr_jobs))
485 + wake_up(&kc->destroyq);
486 +
487 ++ cond_resched();
488 ++
489 + return 0;
490 + }
491 +
492 +diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
493 +index 40534352e574..3270b8dbc949 100644
494 +--- a/drivers/mfd/sm501.c
495 ++++ b/drivers/mfd/sm501.c
496 +@@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
497 + smdev->pdev.name = name;
498 + smdev->pdev.id = sm->pdev_id;
499 + smdev->pdev.dev.parent = sm->dev;
500 ++ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
501 +
502 + if (res_count) {
503 + smdev->pdev.resource = (struct resource *)(smdev+1);
504 +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
505 +index f9c6ec4b98ab..013a7b3fe92d 100644
506 +--- a/drivers/misc/mei/pci-me.c
507 ++++ b/drivers/misc/mei/pci-me.c
508 +@@ -229,8 +229,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
509 + if (!pci_dev_run_wake(pdev))
510 + mei_me_set_pm_domain(dev);
511 +
512 +- if (mei_pg_is_enabled(dev))
513 ++ if (mei_pg_is_enabled(dev)) {
514 + pm_runtime_put_noidle(&pdev->dev);
515 ++ if (hw->d0i3_supported)
516 ++ pm_runtime_allow(&pdev->dev);
517 ++ }
518 +
519 + dev_dbg(&pdev->dev, "initialization successful.\n");
520 +
521 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
522 +index db7f289d65ae..3f8858db12eb 100644
523 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
524 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
525 +@@ -185,6 +185,9 @@ struct bcmgenet_mib_counters {
526 + #define UMAC_MAC1 0x010
527 + #define UMAC_MAX_FRAME_LEN 0x014
528 +
529 ++#define UMAC_MODE 0x44
530 ++#define MODE_LINK_STATUS (1 << 5)
531 ++
532 + #define UMAC_EEE_CTRL 0x064
533 + #define EN_LPI_RX_PAUSE (1 << 0)
534 + #define EN_LPI_TX_PFC (1 << 1)
535 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
536 +index 2f9281936f0e..3b9e1a5dce82 100644
537 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
538 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
539 +@@ -167,8 +167,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
540 + static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
541 + struct fixed_phy_status *status)
542 + {
543 +- if (dev && dev->phydev && status)
544 +- status->link = dev->phydev->link;
545 ++ struct bcmgenet_priv *priv;
546 ++ u32 reg;
547 ++
548 ++ if (dev && dev->phydev && status) {
549 ++ priv = netdev_priv(dev);
550 ++ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
551 ++ status->link = !!(reg & MODE_LINK_STATUS);
552 ++ }
553 +
554 + return 0;
555 + }
556 +diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
557 +index f7e7b79c6050..f314be07ec58 100644
558 +--- a/drivers/net/ethernet/cisco/enic/enic_main.c
559 ++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
560 +@@ -2681,7 +2681,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
561 + */
562 +
563 + enic->port_mtu = enic->config.mtu;
564 +- (void)enic_change_mtu(netdev, enic->port_mtu);
565 +
566 + err = enic_set_mac_addr(netdev, enic->mac_addr);
567 + if (err) {
568 +@@ -2731,6 +2730,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
569 + netdev->features |= NETIF_F_HIGHDMA;
570 +
571 + netdev->priv_flags |= IFF_UNICAST_FLT;
572 ++ netdev->mtu = enic->port_mtu;
573 +
574 + err = register_netdev(netdev);
575 + if (err) {
576 +diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
577 +index fd4a8e473f11..6a507544682f 100644
578 +--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
579 ++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
580 +@@ -2387,26 +2387,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
581 + return status;
582 + }
583 +
584 +-static netdev_features_t qlge_fix_features(struct net_device *ndev,
585 +- netdev_features_t features)
586 +-{
587 +- int err;
588 +-
589 +- /* Update the behavior of vlan accel in the adapter */
590 +- err = qlge_update_hw_vlan_features(ndev, features);
591 +- if (err)
592 +- return err;
593 +-
594 +- return features;
595 +-}
596 +-
597 + static int qlge_set_features(struct net_device *ndev,
598 + netdev_features_t features)
599 + {
600 + netdev_features_t changed = ndev->features ^ features;
601 ++ int err;
602 ++
603 ++ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
604 ++ /* Update the behavior of vlan accel in the adapter */
605 ++ err = qlge_update_hw_vlan_features(ndev, features);
606 ++ if (err)
607 ++ return err;
608 +
609 +- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
610 + qlge_vlan_mode(ndev, features);
611 ++ }
612 +
613 + return 0;
614 + }
615 +@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
616 + .ndo_set_mac_address = qlge_set_mac_address,
617 + .ndo_validate_addr = eth_validate_addr,
618 + .ndo_tx_timeout = qlge_tx_timeout,
619 +- .ndo_fix_features = qlge_fix_features,
620 + .ndo_set_features = qlge_set_features,
621 + .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
622 + .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
623 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
624 +index 59b932db0d42..f65e8cd6d144 100644
625 +--- a/drivers/net/ethernet/realtek/r8169.c
626 ++++ b/drivers/net/ethernet/realtek/r8169.c
627 +@@ -329,6 +329,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
628 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
629 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
630 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
631 ++ { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
632 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
633 + { PCI_VENDOR_ID_DLINK, 0x4300,
634 + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
635 +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
636 +index 36a04e182af1..53602fdf5b47 100644
637 +--- a/drivers/net/hyperv/netvsc_drv.c
638 ++++ b/drivers/net/hyperv/netvsc_drv.c
639 +@@ -29,6 +29,7 @@
640 + #include <linux/netdevice.h>
641 + #include <linux/inetdevice.h>
642 + #include <linux/etherdevice.h>
643 ++#include <linux/pci.h>
644 + #include <linux/skbuff.h>
645 + #include <linux/if_vlan.h>
646 + #include <linux/in.h>
647 +@@ -1228,11 +1229,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
648 + {
649 + struct net_device *ndev;
650 + struct net_device_context *net_device_ctx;
651 ++ struct device *pdev = vf_netdev->dev.parent;
652 + struct netvsc_device *netvsc_dev;
653 +
654 + if (vf_netdev->addr_len != ETH_ALEN)
655 + return NOTIFY_DONE;
656 +
657 ++ if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
658 ++ return NOTIFY_DONE;
659 ++
660 + /*
661 + * We will use the MAC address to locate the synthetic interface to
662 + * associate with the VF interface. If we don't find a matching
663 +diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
664 +index 90e0b6f134ad..23d7f73cc347 100644
665 +--- a/drivers/pci/host/pci-mvebu.c
666 ++++ b/drivers/pci/host/pci-mvebu.c
667 +@@ -1236,7 +1236,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
668 + pcie->realio.start = PCIBIOS_MIN_IO;
669 + pcie->realio.end = min_t(resource_size_t,
670 + IO_SPACE_LIMIT,
671 +- resource_size(&pcie->io));
672 ++ resource_size(&pcie->io) - 1);
673 + } else
674 + pcie->realio = pcie->io;
675 +
676 +diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
677 +index 687cc5b922ee..c857d2d7bbec 100644
678 +--- a/drivers/platform/x86/asus-nb-wmi.c
679 ++++ b/drivers/platform/x86/asus-nb-wmi.c
680 +@@ -531,6 +531,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
681 + { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
682 + { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
683 + { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
684 ++ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
685 + { KE_END, 0},
686 + };
687 +
688 +diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
689 +index b5b890127479..b7dfe06261f1 100644
690 +--- a/drivers/platform/x86/intel_punit_ipc.c
691 ++++ b/drivers/platform/x86/intel_punit_ipc.c
692 +@@ -17,6 +17,7 @@
693 + #include <linux/bitops.h>
694 + #include <linux/device.h>
695 + #include <linux/interrupt.h>
696 ++#include <linux/io.h>
697 + #include <linux/platform_device.h>
698 + #include <asm/intel_punit_ipc.h>
699 +
700 +diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
701 +index 0f5bc2f8382b..be17de9807b6 100644
702 +--- a/drivers/s390/block/dasd_eckd.c
703 ++++ b/drivers/s390/block/dasd_eckd.c
704 +@@ -1834,6 +1834,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
705 + struct dasd_eckd_private *private = device->private;
706 + int i;
707 +
708 ++ if (!private)
709 ++ return;
710 ++
711 + dasd_alias_disconnect_device_from_lcu(device);
712 + private->ned = NULL;
713 + private->sneq = NULL;
714 +@@ -2085,8 +2088,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
715 +
716 + static int dasd_eckd_online_to_ready(struct dasd_device *device)
717 + {
718 +- cancel_work_sync(&device->reload_device);
719 +- cancel_work_sync(&device->kick_validate);
720 ++ if (cancel_work_sync(&device->reload_device))
721 ++ dasd_put_device(device);
722 ++ if (cancel_work_sync(&device->kick_validate))
723 ++ dasd_put_device(device);
724 ++
725 + return 0;
726 + };
727 +
728 +diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
729 +index 662b2321d1b0..913ebb6d0d29 100644
730 +--- a/drivers/scsi/aic94xx/aic94xx_init.c
731 ++++ b/drivers/scsi/aic94xx/aic94xx_init.c
732 +@@ -1031,8 +1031,10 @@ static int __init aic94xx_init(void)
733 +
734 + aic94xx_transport_template =
735 + sas_domain_attach_transport(&aic94xx_transport_functions);
736 +- if (!aic94xx_transport_template)
737 ++ if (!aic94xx_transport_template) {
738 ++ err = -ENOMEM;
739 + goto out_destroy_caches;
740 ++ }
741 +
742 + err = pci_register_driver(&aic94xx_pci_driver);
743 + if (err)
744 +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
745 +index 18c5312f7886..0fa85d55c82f 100644
746 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c
747 ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
748 +@@ -5407,11 +5407,11 @@ static int ni_E_init(struct comedi_device *dev,
749 + /* Digital I/O (PFI) subdevice */
750 + s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
751 + s->type = COMEDI_SUBD_DIO;
752 +- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
753 + s->maxdata = 1;
754 + if (devpriv->is_m_series) {
755 + s->n_chan = 16;
756 + s->insn_bits = ni_pfi_insn_bits;
757 ++ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
758 +
759 + ni_writew(dev, s->state, NI_M_PFI_DO_REG);
760 + for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
761 +@@ -5420,6 +5420,7 @@ static int ni_E_init(struct comedi_device *dev,
762 + }
763 + } else {
764 + s->n_chan = 10;
765 ++ s->subdev_flags = SDF_INTERNAL;
766 + }
767 + s->insn_config = ni_pfi_insn_config;
768 +
769 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
770 +index 8b6489ae74eb..c569b6454a9d 100644
771 +--- a/drivers/vhost/vhost.c
772 ++++ b/drivers/vhost/vhost.c
773 +@@ -905,7 +905,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
774 + list_for_each_entry_safe(node, n, &d->pending_list, node) {
775 + struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
776 + if (msg->iova <= vq_msg->iova &&
777 +- msg->iova + msg->size - 1 > vq_msg->iova &&
778 ++ msg->iova + msg->size - 1 >= vq_msg->iova &&
779 + vq_msg->type == VHOST_IOTLB_MISS) {
780 + vhost_poll_queue(&node->vq->poll);
781 + list_del(&node->node);
782 +diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
783 +index 6d9e5173d5fa..fbc4761987e8 100644
784 +--- a/drivers/virtio/virtio_pci_legacy.c
785 ++++ b/drivers/virtio/virtio_pci_legacy.c
786 +@@ -121,6 +121,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
787 + struct virtqueue *vq;
788 + u16 num;
789 + int err;
790 ++ u64 q_pfn;
791 +
792 + /* Select the queue we're interested in */
793 + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
794 +@@ -139,9 +140,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
795 + if (!vq)
796 + return ERR_PTR(-ENOMEM);
797 +
798 ++ q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
799 ++ if (q_pfn >> 32) {
800 ++ dev_err(&vp_dev->pci_dev->dev,
801 ++ "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
802 ++ 0x1ULL << (32 + PAGE_SHIFT - 30));
803 ++ err = -E2BIG;
804 ++ goto out_del_vq;
805 ++ }
806 ++
807 + /* activate the queue */
808 +- iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
809 +- vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
810 ++ iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
811 +
812 + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
813 +
814 +@@ -158,6 +167,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
815 +
816 + out_deactivate:
817 + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
818 ++out_del_vq:
819 + vring_del_virtqueue(vq);
820 + return ERR_PTR(err);
821 + }
822 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
823 +index 05169ef30596..b450adf65236 100644
824 +--- a/fs/btrfs/dev-replace.c
825 ++++ b/fs/btrfs/dev-replace.c
826 +@@ -585,6 +585,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
827 +
828 + btrfs_rm_dev_replace_unblocked(fs_info);
829 +
830 ++ /*
831 ++ * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
832 ++ * update on-disk dev stats value during commit transaction
833 ++ */
834 ++ atomic_inc(&tgt_device->dev_stats_ccnt);
835 ++
836 + /*
837 + * this is again a consistent state where no dev_replace procedure
838 + * is running, the target device is part of the filesystem, the
839 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
840 +index 92f3b231d5a2..18d05323ca53 100644
841 +--- a/fs/btrfs/disk-io.c
842 ++++ b/fs/btrfs/disk-io.c
843 +@@ -1096,8 +1096,9 @@ static int btree_writepages(struct address_space *mapping,
844 +
845 + fs_info = BTRFS_I(mapping->host)->root->fs_info;
846 + /* this is a bit racy, but that's ok */
847 +- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
848 +- BTRFS_DIRTY_METADATA_THRESH);
849 ++ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
850 ++ BTRFS_DIRTY_METADATA_THRESH,
851 ++ fs_info->dirty_metadata_batch);
852 + if (ret < 0)
853 + return 0;
854 + }
855 +@@ -4107,8 +4108,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
856 + if (flush_delayed)
857 + btrfs_balance_delayed_items(root);
858 +
859 +- ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
860 +- BTRFS_DIRTY_METADATA_THRESH);
861 ++ ret = __percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
862 ++ BTRFS_DIRTY_METADATA_THRESH,
863 ++ root->fs_info->dirty_metadata_batch);
864 + if (ret > 0) {
865 + balance_dirty_pages_ratelimited(
866 + root->fs_info->btree_inode->i_mapping);
867 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
868 +index 44a43851404a..6661116c47d9 100644
869 +--- a/fs/btrfs/extent-tree.c
870 ++++ b/fs/btrfs/extent-tree.c
871 +@@ -10853,7 +10853,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
872 + /* Don't want to race with allocators so take the groups_sem */
873 + down_write(&space_info->groups_sem);
874 + spin_lock(&block_group->lock);
875 +- if (block_group->reserved ||
876 ++ if (block_group->reserved || block_group->pinned ||
877 + btrfs_block_group_used(&block_group->item) ||
878 + block_group->ro ||
879 + list_is_singular(&block_group->list)) {
880 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
881 +index 04c61bcf62e5..9140aede5869 100644
882 +--- a/fs/btrfs/relocation.c
883 ++++ b/fs/btrfs/relocation.c
884 +@@ -1325,18 +1325,19 @@ static void __del_reloc_root(struct btrfs_root *root)
885 + struct mapping_node *node = NULL;
886 + struct reloc_control *rc = root->fs_info->reloc_ctl;
887 +
888 +- spin_lock(&rc->reloc_root_tree.lock);
889 +- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
890 +- root->node->start);
891 +- if (rb_node) {
892 +- node = rb_entry(rb_node, struct mapping_node, rb_node);
893 +- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
894 ++ if (rc) {
895 ++ spin_lock(&rc->reloc_root_tree.lock);
896 ++ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
897 ++ root->node->start);
898 ++ if (rb_node) {
899 ++ node = rb_entry(rb_node, struct mapping_node, rb_node);
900 ++ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
901 ++ }
902 ++ spin_unlock(&rc->reloc_root_tree.lock);
903 ++ if (!node)
904 ++ return;
905 ++ BUG_ON((struct btrfs_root *)node->data != root);
906 + }
907 +- spin_unlock(&rc->reloc_root_tree.lock);
908 +-
909 +- if (!node)
910 +- return;
911 +- BUG_ON((struct btrfs_root *)node->data != root);
912 +
913 + spin_lock(&root->fs_info->trans_lock);
914 + list_del_init(&root->root_list);
915 +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
916 +index ad8bd96093f7..e06468f8e041 100644
917 +--- a/fs/cifs/cifs_debug.c
918 ++++ b/fs/cifs/cifs_debug.c
919 +@@ -284,6 +284,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
920 + atomic_set(&totBufAllocCount, 0);
921 + atomic_set(&totSmBufAllocCount, 0);
922 + #endif /* CONFIG_CIFS_STATS2 */
923 ++ spin_lock(&GlobalMid_Lock);
924 ++ GlobalMaxActiveXid = 0;
925 ++ GlobalCurrentXid = 0;
926 ++ spin_unlock(&GlobalMid_Lock);
927 + spin_lock(&cifs_tcp_ses_lock);
928 + list_for_each(tmp1, &cifs_tcp_ses_list) {
929 + server = list_entry(tmp1, struct TCP_Server_Info,
930 +@@ -296,6 +300,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
931 + struct cifs_tcon,
932 + tcon_list);
933 + atomic_set(&tcon->num_smbs_sent, 0);
934 ++ spin_lock(&tcon->stat_lock);
935 ++ tcon->bytes_read = 0;
936 ++ tcon->bytes_written = 0;
937 ++ spin_unlock(&tcon->stat_lock);
938 + if (server->ops->clear_stats)
939 + server->ops->clear_stats(tcon);
940 + }
941 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
942 +index 967dfe656ced..e96a74da756f 100644
943 +--- a/fs/cifs/smb2misc.c
944 ++++ b/fs/cifs/smb2misc.c
945 +@@ -208,6 +208,13 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr)
946 + if (clc_len == 4 + len + 1)
947 + return 0;
948 +
949 ++ /*
950 ++ * Some windows servers (win2016) will pad also the final
951 ++ * PDU in a compound to 8 bytes.
952 ++ */
953 ++ if (((clc_len + 7) & ~7) == len)
954 ++ return 0;
955 ++
956 + /*
957 + * MacOS server pads after SMB2.1 write response with 3 bytes
958 + * of junk. Other servers match RFC1001 len to actual
959 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
960 +index 4ded64b8b43b..383cf8148fe7 100644
961 +--- a/fs/cifs/smb2pdu.c
962 ++++ b/fs/cifs/smb2pdu.c
963 +@@ -320,7 +320,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
964 + smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
965 +
966 + if (tcon != NULL) {
967 +-#ifdef CONFIG_CIFS_STATS2
968 ++#ifdef CONFIG_CIFS_STATS
969 + uint16_t com_code = le16_to_cpu(smb2_command);
970 + cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
971 + #endif
972 +diff --git a/fs/dcache.c b/fs/dcache.c
973 +index 461ff8f234e3..f903b86b06e5 100644
974 +--- a/fs/dcache.c
975 ++++ b/fs/dcache.c
976 +@@ -286,7 +286,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
977 + spin_unlock(&dentry->d_lock);
978 + name->name = p->name;
979 + } else {
980 +- memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
981 ++ memcpy(name->inline_name, dentry->d_iname,
982 ++ dentry->d_name.len + 1);
983 + spin_unlock(&dentry->d_lock);
984 + name->name = name->inline_name;
985 + }
986 +diff --git a/fs/fat/cache.c b/fs/fat/cache.c
987 +index 5d384921524d..f04b189fd90d 100644
988 +--- a/fs/fat/cache.c
989 ++++ b/fs/fat/cache.c
990 +@@ -224,7 +224,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
991 + int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
992 + {
993 + struct super_block *sb = inode->i_sb;
994 +- const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
995 ++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
996 ++ const int limit = sb->s_maxbytes >> sbi->cluster_bits;
997 + struct fat_entry fatent;
998 + struct fat_cache_id cid;
999 + int nr;
1000 +@@ -233,6 +234,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
1001 +
1002 + *fclus = 0;
1003 + *dclus = MSDOS_I(inode)->i_start;
1004 ++ if (!fat_valid_entry(sbi, *dclus)) {
1005 ++ fat_fs_error_ratelimit(sb,
1006 ++ "%s: invalid start cluster (i_pos %lld, start %08x)",
1007 ++ __func__, MSDOS_I(inode)->i_pos, *dclus);
1008 ++ return -EIO;
1009 ++ }
1010 + if (cluster == 0)
1011 + return 0;
1012 +
1013 +@@ -249,9 +256,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
1014 + /* prevent the infinite loop of cluster chain */
1015 + if (*fclus > limit) {
1016 + fat_fs_error_ratelimit(sb,
1017 +- "%s: detected the cluster chain loop"
1018 +- " (i_pos %lld)", __func__,
1019 +- MSDOS_I(inode)->i_pos);
1020 ++ "%s: detected the cluster chain loop (i_pos %lld)",
1021 ++ __func__, MSDOS_I(inode)->i_pos);
1022 + nr = -EIO;
1023 + goto out;
1024 + }
1025 +@@ -261,9 +267,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
1026 + goto out;
1027 + else if (nr == FAT_ENT_FREE) {
1028 + fat_fs_error_ratelimit(sb,
1029 +- "%s: invalid cluster chain (i_pos %lld)",
1030 +- __func__,
1031 +- MSDOS_I(inode)->i_pos);
1032 ++ "%s: invalid cluster chain (i_pos %lld)",
1033 ++ __func__, MSDOS_I(inode)->i_pos);
1034 + nr = -EIO;
1035 + goto out;
1036 + } else if (nr == FAT_ENT_EOF) {
1037 +diff --git a/fs/fat/fat.h b/fs/fat/fat.h
1038 +index e6b764a17a9c..437affe987c5 100644
1039 +--- a/fs/fat/fat.h
1040 ++++ b/fs/fat/fat.h
1041 +@@ -347,6 +347,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
1042 + fatent->fat_inode = NULL;
1043 + }
1044 +
1045 ++static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
1046 ++{
1047 ++ return FAT_START_ENT <= entry && entry < sbi->max_cluster;
1048 ++}
1049 ++
1050 + extern void fat_ent_access_init(struct super_block *sb);
1051 + extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
1052 + int entry);
1053 +diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
1054 +index 1d9a8c4e9de0..3b7644e43796 100644
1055 +--- a/fs/fat/fatent.c
1056 ++++ b/fs/fat/fatent.c
1057 +@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
1058 + {
1059 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
1060 + int bytes = entry + (entry >> 1);
1061 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
1062 ++ WARN_ON(!fat_valid_entry(sbi, entry));
1063 + *offset = bytes & (sb->s_blocksize - 1);
1064 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
1065 + }
1066 +@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
1067 + {
1068 + struct msdos_sb_info *sbi = MSDOS_SB(sb);
1069 + int bytes = (entry << sbi->fatent_shift);
1070 +- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
1071 ++ WARN_ON(!fat_valid_entry(sbi, entry));
1072 + *offset = bytes & (sb->s_blocksize - 1);
1073 + *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
1074 + }
1075 +@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
1076 + int err, offset;
1077 + sector_t blocknr;
1078 +
1079 +- if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
1080 ++ if (!fat_valid_entry(sbi, entry)) {
1081 + fatent_brelse(fatent);
1082 + fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
1083 + return -EIO;
1084 +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
1085 +index 6fc766df0461..2a6f3c67cb3f 100644
1086 +--- a/fs/hfs/brec.c
1087 ++++ b/fs/hfs/brec.c
1088 +@@ -74,9 +74,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
1089 + if (!fd->bnode) {
1090 + if (!tree->root)
1091 + hfs_btree_inc_height(tree);
1092 +- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
1093 +- if (IS_ERR(fd->bnode))
1094 +- return PTR_ERR(fd->bnode);
1095 ++ node = hfs_bnode_find(tree, tree->leaf_head);
1096 ++ if (IS_ERR(node))
1097 ++ return PTR_ERR(node);
1098 ++ fd->bnode = node;
1099 + fd->record = -1;
1100 + }
1101 + new_node = NULL;
1102 +diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
1103 +index 31d5e3f1fe17..193d5411210a 100644
1104 +--- a/fs/hfsplus/dir.c
1105 ++++ b/fs/hfsplus/dir.c
1106 +@@ -77,13 +77,13 @@ again:
1107 + cpu_to_be32(HFSP_HARDLINK_TYPE) &&
1108 + entry.file.user_info.fdCreator ==
1109 + cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
1110 ++ HFSPLUS_SB(sb)->hidden_dir &&
1111 + (entry.file.create_date ==
1112 + HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
1113 + create_date ||
1114 + entry.file.create_date ==
1115 + HFSPLUS_I(d_inode(sb->s_root))->
1116 +- create_date) &&
1117 +- HFSPLUS_SB(sb)->hidden_dir) {
1118 ++ create_date)) {
1119 + struct qstr str;
1120 + char name[32];
1121 +
1122 +diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
1123 +index b9563cdcfe28..7fb976e0aa07 100644
1124 +--- a/fs/hfsplus/super.c
1125 ++++ b/fs/hfsplus/super.c
1126 +@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
1127 + goto out_put_root;
1128 + if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
1129 + hfs_find_exit(&fd);
1130 +- if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
1131 ++ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
1132 ++ err = -EINVAL;
1133 + goto out_put_root;
1134 ++ }
1135 + inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
1136 + if (IS_ERR(inode)) {
1137 + err = PTR_ERR(inode);
1138 +diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
1139 +index 6ca00471afbf..d920a646b578 100644
1140 +--- a/fs/reiserfs/reiserfs.h
1141 ++++ b/fs/reiserfs/reiserfs.h
1142 +@@ -270,7 +270,7 @@ struct reiserfs_journal_list {
1143 +
1144 + struct mutex j_commit_mutex;
1145 + unsigned int j_trans_id;
1146 +- time_t j_timestamp;
1147 ++ time64_t j_timestamp; /* write-only but useful for crash dump analysis */
1148 + struct reiserfs_list_bitmap *j_list_bitmap;
1149 + struct buffer_head *j_commit_bh; /* commit buffer head */
1150 + struct reiserfs_journal_cnode *j_realblock;
1151 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
1152 +index 43082049baf2..bba5604f3a03 100644
1153 +--- a/include/linux/pci_ids.h
1154 ++++ b/include/linux/pci_ids.h
1155 +@@ -3054,4 +3054,6 @@
1156 +
1157 + #define PCI_VENDOR_ID_OCZ 0x1b85
1158 +
1159 ++#define PCI_VENDOR_ID_NCUBE 0x10ff
1160 ++
1161 + #endif /* _LINUX_PCI_IDS_H */
1162 +diff --git a/kernel/fork.c b/kernel/fork.c
1163 +index 2c98b987808d..5d0e2f366766 100644
1164 +--- a/kernel/fork.c
1165 ++++ b/kernel/fork.c
1166 +@@ -1304,7 +1304,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1167 + return -ENOMEM;
1168 +
1169 + atomic_set(&sig->count, 1);
1170 ++ spin_lock_irq(&current->sighand->siglock);
1171 + memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1172 ++ spin_unlock_irq(&current->sighand->siglock);
1173 + return 0;
1174 + }
1175 +
1176 +diff --git a/lib/debugobjects.c b/lib/debugobjects.c
1177 +index 056052dc8e91..88580e8ee39e 100644
1178 +--- a/lib/debugobjects.c
1179 ++++ b/lib/debugobjects.c
1180 +@@ -294,9 +294,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
1181 +
1182 + limit++;
1183 + if (is_on_stack)
1184 +- pr_warn("object is on stack, but not annotated\n");
1185 ++ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
1186 ++ task_stack_page(current));
1187 + else
1188 +- pr_warn("object is not on stack, but annotated\n");
1189 ++ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
1190 ++ task_stack_page(current));
1191 ++
1192 + WARN_ON(1);
1193 + }
1194 +
1195 +diff --git a/mm/fadvise.c b/mm/fadvise.c
1196 +index 27fc9ad267ac..eb3269e59002 100644
1197 +--- a/mm/fadvise.c
1198 ++++ b/mm/fadvise.c
1199 +@@ -68,8 +68,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
1200 + goto out;
1201 + }
1202 +
1203 +- /* Careful about overflows. Len == 0 means "as much as possible" */
1204 +- endbyte = offset + len;
1205 ++ /*
1206 ++ * Careful about overflows. Len == 0 means "as much as possible". Use
1207 ++ * unsigned math because signed overflows are undefined and UBSan
1208 ++ * complains.
1209 ++ */
1210 ++ endbyte = (u64)offset + (u64)len;
1211 + if (!len || endbyte < len)
1212 + endbyte = -1;
1213 + else
1214 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1215 +index 9efe88ef9702..e4c6c3edaf6a 100644
1216 +--- a/mm/huge_memory.c
1217 ++++ b/mm/huge_memory.c
1218 +@@ -1259,12 +1259,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
1219 +
1220 + /* Migration could have started since the pmd_trans_migrating check */
1221 + if (!page_locked) {
1222 ++ page_nid = -1;
1223 + if (!get_page_unless_zero(page))
1224 + goto out_unlock;
1225 + spin_unlock(fe->ptl);
1226 + wait_on_page_locked(page);
1227 + put_page(page);
1228 +- page_nid = -1;
1229 + goto out;
1230 + }
1231 +
1232 +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1233 +index 2b543532e2f1..aa4586672cee 100644
1234 +--- a/net/9p/trans_fd.c
1235 ++++ b/net/9p/trans_fd.c
1236 +@@ -195,15 +195,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
1237 + static void p9_conn_cancel(struct p9_conn *m, int err)
1238 + {
1239 + struct p9_req_t *req, *rtmp;
1240 +- unsigned long flags;
1241 + LIST_HEAD(cancel_list);
1242 +
1243 + p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
1244 +
1245 +- spin_lock_irqsave(&m->client->lock, flags);
1246 ++ spin_lock(&m->client->lock);
1247 +
1248 + if (m->err) {
1249 +- spin_unlock_irqrestore(&m->client->lock, flags);
1250 ++ spin_unlock(&m->client->lock);
1251 + return;
1252 + }
1253 +
1254 +@@ -215,7 +214,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
1255 + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
1256 + list_move(&req->req_list, &cancel_list);
1257 + }
1258 +- spin_unlock_irqrestore(&m->client->lock, flags);
1259 +
1260 + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
1261 + p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
1262 +@@ -224,6 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
1263 + req->t_err = err;
1264 + p9_client_cb(m->client, req, REQ_STATUS_ERROR);
1265 + }
1266 ++ spin_unlock(&m->client->lock);
1267 + }
1268 +
1269 + static int
1270 +@@ -379,8 +378,9 @@ static void p9_read_work(struct work_struct *work)
1271 + if (m->req->status != REQ_STATUS_ERROR)
1272 + status = REQ_STATUS_RCVD;
1273 + list_del(&m->req->req_list);
1274 +- spin_unlock(&m->client->lock);
1275 ++ /* update req->status while holding client->lock */
1276 + p9_client_cb(m->client, m->req, status);
1277 ++ spin_unlock(&m->client->lock);
1278 + m->rc.sdata = NULL;
1279 + m->rc.offset = 0;
1280 + m->rc.capacity = 0;
1281 +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
1282 +index da0d3b257459..e73fd647065a 100644
1283 +--- a/net/9p/trans_virtio.c
1284 ++++ b/net/9p/trans_virtio.c
1285 +@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
1286 + chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
1287 + if (IS_ERR(chan->vq)) {
1288 + err = PTR_ERR(chan->vq);
1289 +- goto out_free_vq;
1290 ++ goto out_free_chan;
1291 + }
1292 + chan->vq->vdev->priv = chan;
1293 + spin_lock_init(&chan->lock);
1294 +@@ -624,6 +624,7 @@ out_free_tag:
1295 + kfree(tag);
1296 + out_free_vq:
1297 + vdev->config->del_vqs(vdev);
1298 ++out_free_chan:
1299 + kfree(chan);
1300 + fail:
1301 + return err;
1302 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1303 +index 504cdae41013..16dea67792e0 100644
1304 +--- a/net/ipv4/tcp_ipv4.c
1305 ++++ b/net/ipv4/tcp_ipv4.c
1306 +@@ -2440,6 +2440,12 @@ static int __net_init tcp_sk_init(struct net *net)
1307 + if (res)
1308 + goto fail;
1309 + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1310 ++
1311 ++ /* Please enforce IP_DF and IPID==0 for RST and
1312 ++ * ACK sent in SYN-RECV and TIME-WAIT state.
1313 ++ */
1314 ++ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
1315 ++
1316 + *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
1317 + }
1318 +
1319 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
1320 +index 830a5645d8c1..a501b45d0334 100644
1321 +--- a/net/ipv4/tcp_minisocks.c
1322 ++++ b/net/ipv4/tcp_minisocks.c
1323 +@@ -194,8 +194,9 @@ kill:
1324 + inet_twsk_deschedule_put(tw);
1325 + return TCP_TW_SUCCESS;
1326 + }
1327 ++ } else {
1328 ++ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
1329 + }
1330 +- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
1331 +
1332 + if (tmp_opt.saw_tstamp) {
1333 + tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
1334 +diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
1335 +index 3d063eb37848..f6c50af24a64 100644
1336 +--- a/net/ipv4/tcp_probe.c
1337 ++++ b/net/ipv4/tcp_probe.c
1338 +@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
1339 + (fwmark > 0 && skb->mark == fwmark)) &&
1340 + (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
1341 +
1342 +- spin_lock_bh(&tcp_probe.lock);
1343 ++ spin_lock(&tcp_probe.lock);
1344 + /* If log fills, just silently drop */
1345 + if (tcp_probe_avail() > 1) {
1346 + struct tcp_log *p = tcp_probe.log + tcp_probe.head;
1347 +@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
1348 + tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
1349 + }
1350 + tcp_probe.lastcwnd = tp->snd_cwnd;
1351 +- spin_unlock_bh(&tcp_probe.lock);
1352 ++ spin_unlock(&tcp_probe.lock);
1353 +
1354 + wake_up(&tcp_probe.wait);
1355 + }
1356 +diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
1357 +index a5aeeb613fac..3213921cdfee 100644
1358 +--- a/net/ipv6/ip6_vti.c
1359 ++++ b/net/ipv6/ip6_vti.c
1360 +@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
1361 + }
1362 +
1363 + mtu = dst_mtu(dst);
1364 +- if (!skb->ignore_df && skb->len > mtu) {
1365 ++ if (skb->len > mtu) {
1366 + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
1367 +
1368 + if (skb->protocol == htons(ETH_P_IPV6)) {
1369 +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1370 +index 101ed6c42808..0a78f17006a4 100644
1371 +--- a/net/irda/af_irda.c
1372 ++++ b/net/irda/af_irda.c
1373 +@@ -774,6 +774,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1374 + return -EINVAL;
1375 +
1376 + lock_sock(sk);
1377 ++
1378 ++ /* Ensure that the socket is not already bound */
1379 ++ if (self->ias_obj) {
1380 ++ err = -EINVAL;
1381 ++ goto out;
1382 ++ }
1383 ++
1384 + #ifdef CONFIG_IRDA_ULTRA
1385 + /* Special care for Ultra sockets */
1386 + if ((sk->sk_type == SOCK_DGRAM) &&
1387 +@@ -2016,7 +2023,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
1388 + err = -EINVAL;
1389 + goto out;
1390 + }
1391 +- irias_insert_object(ias_obj);
1392 ++
1393 ++ /* Only insert newly allocated objects */
1394 ++ if (free_ias)
1395 ++ irias_insert_object(ias_obj);
1396 ++
1397 + kfree(ias_opt);
1398 + break;
1399 + case IRLMP_IAS_DEL:
1400 +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
1401 +index e34d3f60fccd..fd186b011a99 100644
1402 +--- a/net/netfilter/ipvs/ip_vs_core.c
1403 ++++ b/net/netfilter/ipvs/ip_vs_core.c
1404 +@@ -1968,13 +1968,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
1405 + if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
1406 + /* the destination server is not available */
1407 +
1408 +- if (sysctl_expire_nodest_conn(ipvs)) {
1409 ++ __u32 flags = cp->flags;
1410 ++
1411 ++ /* when timer already started, silently drop the packet.*/
1412 ++ if (timer_pending(&cp->timer))
1413 ++ __ip_vs_conn_put(cp);
1414 ++ else
1415 ++ ip_vs_conn_put(cp);
1416 ++
1417 ++ if (sysctl_expire_nodest_conn(ipvs) &&
1418 ++ !(flags & IP_VS_CONN_F_ONE_PACKET)) {
1419 + /* try to expire the connection immediately */
1420 + ip_vs_conn_expire_now(cp);
1421 + }
1422 +- /* don't restart its timer, and silently
1423 +- drop the packet. */
1424 +- __ip_vs_conn_put(cp);
1425 ++
1426 + return NF_DROP;
1427 + }
1428 +
1429 +diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
1430 +index 66b3d6228a15..3d9c4c6397c3 100644
1431 +--- a/net/rds/ib_frmr.c
1432 ++++ b/net/rds/ib_frmr.c
1433 +@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
1434 + pool->fmr_attr.max_pages);
1435 + if (IS_ERR(frmr->mr)) {
1436 + pr_warn("RDS/IB: %s failed to allocate MR", __func__);
1437 ++ err = PTR_ERR(frmr->mr);
1438 + goto out_no_cigar;
1439 + }
1440 +
1441 +diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
1442 +index 235db2c9bbbb..d2932dc4c83d 100644
1443 +--- a/net/sched/act_ife.c
1444 ++++ b/net/sched/act_ife.c
1445 +@@ -267,10 +267,8 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
1446 + }
1447 +
1448 + /* called when adding new meta information
1449 +- * under ife->tcf_lock for existing action
1450 + */
1451 +-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1452 +- void *val, int len, bool exists)
1453 ++static int load_metaops_and_vet(u32 metaid, void *val, int len)
1454 + {
1455 + struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1456 + int ret = 0;
1457 +@@ -278,13 +276,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1458 + if (!ops) {
1459 + ret = -ENOENT;
1460 + #ifdef CONFIG_MODULES
1461 +- if (exists)
1462 +- spin_unlock_bh(&ife->tcf_lock);
1463 + rtnl_unlock();
1464 + request_module("ifemeta%u", metaid);
1465 + rtnl_lock();
1466 +- if (exists)
1467 +- spin_lock_bh(&ife->tcf_lock);
1468 + ops = find_ife_oplist(metaid);
1469 + #endif
1470 + }
1471 +@@ -301,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
1472 + }
1473 +
1474 + /* called when adding new meta information
1475 +- * under ife->tcf_lock for existing action
1476 + */
1477 +-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1478 +- int len, bool atomic)
1479 ++static int __add_metainfo(const struct tcf_meta_ops *ops,
1480 ++ struct tcf_ife_info *ife, u32 metaid, void *metaval,
1481 ++ int len, bool atomic, bool exists)
1482 + {
1483 + struct tcf_meta_info *mi = NULL;
1484 +- struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1485 + int ret = 0;
1486 +
1487 +- if (!ops)
1488 +- return -ENOENT;
1489 +-
1490 + mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
1491 +- if (!mi) {
1492 +- /*put back what find_ife_oplist took */
1493 +- module_put(ops->owner);
1494 ++ if (!mi)
1495 + return -ENOMEM;
1496 +- }
1497 +
1498 + mi->metaid = metaid;
1499 + mi->ops = ops;
1500 +@@ -326,17 +313,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1501 + ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
1502 + if (ret != 0) {
1503 + kfree(mi);
1504 +- module_put(ops->owner);
1505 + return ret;
1506 + }
1507 + }
1508 +
1509 ++ if (exists)
1510 ++ spin_lock_bh(&ife->tcf_lock);
1511 + list_add_tail(&mi->metalist, &ife->metalist);
1512 ++ if (exists)
1513 ++ spin_unlock_bh(&ife->tcf_lock);
1514 ++
1515 ++ return ret;
1516 ++}
1517 ++
1518 ++static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
1519 ++ struct tcf_ife_info *ife, u32 metaid,
1520 ++ bool exists)
1521 ++{
1522 ++ int ret;
1523 ++
1524 ++ if (!try_module_get(ops->owner))
1525 ++ return -ENOENT;
1526 ++ ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
1527 ++ if (ret)
1528 ++ module_put(ops->owner);
1529 ++ return ret;
1530 ++}
1531 ++
1532 ++static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
1533 ++ int len, bool exists)
1534 ++{
1535 ++ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
1536 ++ int ret;
1537 +
1538 ++ if (!ops)
1539 ++ return -ENOENT;
1540 ++ ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
1541 ++ if (ret)
1542 ++ /*put back what find_ife_oplist took */
1543 ++ module_put(ops->owner);
1544 + return ret;
1545 + }
1546 +
1547 +-static int use_all_metadata(struct tcf_ife_info *ife)
1548 ++static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
1549 + {
1550 + struct tcf_meta_ops *o;
1551 + int rc = 0;
1552 +@@ -344,7 +363,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
1553 +
1554 + read_lock(&ife_mod_lock);
1555 + list_for_each_entry(o, &ifeoplist, list) {
1556 +- rc = add_metainfo(ife, o->metaid, NULL, 0, true);
1557 ++ rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
1558 + if (rc == 0)
1559 + installed += 1;
1560 + }
1561 +@@ -395,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
1562 + struct tcf_meta_info *e, *n;
1563 +
1564 + list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
1565 +- module_put(e->ops->owner);
1566 + list_del(&e->metalist);
1567 + if (e->metaval) {
1568 + if (e->ops->release)
1569 +@@ -403,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
1570 + else
1571 + kfree(e->metaval);
1572 + }
1573 ++ module_put(e->ops->owner);
1574 + kfree(e);
1575 + }
1576 + }
1577 +@@ -416,7 +435,6 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
1578 + spin_unlock_bh(&ife->tcf_lock);
1579 + }
1580 +
1581 +-/* under ife->tcf_lock for existing action */
1582 + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
1583 + bool exists)
1584 + {
1585 +@@ -430,7 +448,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
1586 + val = nla_data(tb[i]);
1587 + len = nla_len(tb[i]);
1588 +
1589 +- rc = load_metaops_and_vet(ife, i, val, len, exists);
1590 ++ rc = load_metaops_and_vet(i, val, len);
1591 + if (rc != 0)
1592 + return rc;
1593 +
1594 +@@ -510,6 +528,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
1595 + if (exists)
1596 + spin_lock_bh(&ife->tcf_lock);
1597 + ife->tcf_action = parm->action;
1598 ++ if (exists)
1599 ++ spin_unlock_bh(&ife->tcf_lock);
1600 +
1601 + if (parm->flags & IFE_ENCODE) {
1602 + if (daddr)
1603 +@@ -537,9 +557,6 @@ metadata_parse_err:
1604 + tcf_hash_release(*a, bind);
1605 + if (ret == ACT_P_CREATED)
1606 + _tcf_ife_cleanup(*a, bind);
1607 +-
1608 +- if (exists)
1609 +- spin_unlock_bh(&ife->tcf_lock);
1610 + return err;
1611 + }
1612 +
1613 +@@ -553,20 +570,14 @@ metadata_parse_err:
1614 + * as we can. You better have at least one else we are
1615 + * going to bail out
1616 + */
1617 +- err = use_all_metadata(ife);
1618 ++ err = use_all_metadata(ife, exists);
1619 + if (err) {
1620 + if (ret == ACT_P_CREATED)
1621 + _tcf_ife_cleanup(*a, bind);
1622 +-
1623 +- if (exists)
1624 +- spin_unlock_bh(&ife->tcf_lock);
1625 + return err;
1626 + }
1627 + }
1628 +
1629 +- if (exists)
1630 +- spin_unlock_bh(&ife->tcf_lock);
1631 +-
1632 + if (ret == ACT_P_CREATED)
1633 + tcf_hash_insert(tn, *a);
1634 +
1635 +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1636 +index da574a16e7b3..e377dd5b06a6 100644
1637 +--- a/net/sched/cls_u32.c
1638 ++++ b/net/sched/cls_u32.c
1639 +@@ -851,6 +851,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1640 + struct nlattr *opt = tca[TCA_OPTIONS];
1641 + struct nlattr *tb[TCA_U32_MAX + 1];
1642 + u32 htid, flags = 0;
1643 ++ size_t sel_size;
1644 + int err;
1645 + #ifdef CONFIG_CLS_U32_PERF
1646 + size_t size;
1647 +@@ -967,8 +968,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1648 + return -EINVAL;
1649 +
1650 + s = nla_data(tb[TCA_U32_SEL]);
1651 ++ sel_size = sizeof(*s) + sizeof(*s->keys) * s->nkeys;
1652 ++ if (nla_len(tb[TCA_U32_SEL]) < sel_size)
1653 ++ return -EINVAL;
1654 +
1655 +- n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
1656 ++ n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
1657 + if (n == NULL)
1658 + return -ENOBUFS;
1659 +
1660 +@@ -981,7 +985,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1661 + }
1662 + #endif
1663 +
1664 +- memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
1665 ++ memcpy(&n->sel, s, sel_size);
1666 + RCU_INIT_POINTER(n->ht_up, ht);
1667 + n->handle = handle;
1668 + n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
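
The u32_change() hunk above computes the expected size of the variable-length selector (header plus nkeys trailing keys) and rejects the netlink attribute if it is shorter than that, before the allocation and memcpy that previously trusted s->nkeys. A user-space sketch of validating a self-describing, variable-sized record against the buffer that carries it; the struct and field names are made up for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct key { uint32_t mask, val; };
    struct sel { uint32_t flags; uint32_t nkeys; struct key keys[]; };

    /* Reject the record unless the buffer really contains the header
     * plus the number of trailing keys the header claims to have. */
    static int parse_sel(const void *attr, size_t attr_len,
                         struct sel *out, struct key *out_keys, size_t max_keys)
    {
        const struct sel *s = attr;
        size_t sel_size;

        if (attr_len < sizeof(*s))
            return -EINVAL;

        sel_size = sizeof(*s) + sizeof(s->keys[0]) * s->nkeys;
        if (attr_len < sel_size || s->nkeys > max_keys)
            return -EINVAL;

        *out = *s;
        memcpy(out_keys, s->keys, sizeof(s->keys[0]) * s->nkeys);
        return 0;
    }

    int main(void)
    {
        struct sel in = { .flags = 0, .nkeys = 8 };  /* claims 8 keys, carries none */
        struct sel hdr;
        struct key keys[4];

        printf("parse_sel: %d\n", parse_sel(&in, sizeof(in), &hdr, keys, 4));  /* -EINVAL */
        return 0;
    }
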
1669 +diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
1670 +index 2fae8b5f1b80..f4b2d69973c3 100644
1671 +--- a/net/sched/sch_hhf.c
1672 ++++ b/net/sched/sch_hhf.c
1673 +@@ -492,6 +492,9 @@ static void hhf_destroy(struct Qdisc *sch)
1674 + hhf_free(q->hhf_valid_bits[i]);
1675 + }
1676 +
1677 ++ if (!q->hh_flows)
1678 ++ return;
1679 ++
1680 + for (i = 0; i < HH_FLOWS_CNT; i++) {
1681 + struct hh_flow_state *flow, *next;
1682 + struct list_head *head = &q->hh_flows[i];
1683 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
1684 +index c798d0de8a9d..95fe75d441eb 100644
1685 +--- a/net/sched/sch_htb.c
1686 ++++ b/net/sched/sch_htb.c
1687 +@@ -1013,6 +1013,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1688 + int err;
1689 + int i;
1690 +
1691 ++ qdisc_watchdog_init(&q->watchdog, sch);
1692 ++ INIT_WORK(&q->work, htb_work_func);
1693 ++
1694 + if (!opt)
1695 + return -EINVAL;
1696 +
1697 +@@ -1033,8 +1036,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1698 + for (i = 0; i < TC_HTB_NUMPRIO; i++)
1699 + INIT_LIST_HEAD(q->drops + i);
1700 +
1701 +- qdisc_watchdog_init(&q->watchdog, sch);
1702 +- INIT_WORK(&q->work, htb_work_func);
1703 + qdisc_skb_head_init(&q->direct_queue);
1704 +
1705 + if (tb[TCA_HTB_DIRECT_QLEN])
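
The htb_init() hunk above (and the matching netem and tbf hunks further down) moves qdisc_watchdog_init() and the work-struct setup ahead of the first early return, so the teardown path finds those fields in a defined state even when init fails before reaching them. A compact user-space sketch of the same "initialise everything the destructor touches before any early return" rule, with invented names:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct engine {
        FILE *log;      /* optional, may legitimately stay NULL */
        int  *table;    /* optional, may legitimately stay NULL */
    };

    /* The destructor must be safe no matter how far init got. */
    static void engine_destroy(struct engine *e)
    {
        free(e->table);
        if (e->log)
            fclose(e->log);
    }

    static int engine_init(struct engine *e, const char *cfg)
    {
        /* Put the fields the destructor looks at into a known state first,
         * before any early return can leave them holding stack garbage. */
        e->log = NULL;
        e->table = NULL;

        if (!cfg)
            return -EINVAL;          /* engine_destroy() is already safe here */

        e->table = calloc(16, sizeof(*e->table));
        if (!e->table)
            return -ENOMEM;

        return 0;
    }

    int main(void)
    {
        struct engine e;
        int err = engine_init(&e, NULL);   /* early failure */

        printf("engine_init: %d\n", err);
        engine_destroy(&e);                /* safe even after the early return */
        return 0;
    }
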
1706 +diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
1707 +index 9ffbb025b37e..66b6e807b4ec 100644
1708 +--- a/net/sched/sch_multiq.c
1709 ++++ b/net/sched/sch_multiq.c
1710 +@@ -234,7 +234,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
1711 + static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
1712 + {
1713 + struct multiq_sched_data *q = qdisc_priv(sch);
1714 +- int i, err;
1715 ++ int i;
1716 +
1717 + q->queues = NULL;
1718 +
1719 +@@ -249,12 +249,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
1720 + for (i = 0; i < q->max_bands; i++)
1721 + q->queues[i] = &noop_qdisc;
1722 +
1723 +- err = multiq_tune(sch, opt);
1724 +-
1725 +- if (err)
1726 +- kfree(q->queues);
1727 +-
1728 +- return err;
1729 ++ return multiq_tune(sch, opt);
1730 + }
1731 +
1732 + static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
1733 +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
1734 +index e899d9eb76cb..3f87ddb1777d 100644
1735 +--- a/net/sched/sch_netem.c
1736 ++++ b/net/sched/sch_netem.c
1737 +@@ -937,11 +937,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
1738 + struct netem_sched_data *q = qdisc_priv(sch);
1739 + int ret;
1740 +
1741 ++ qdisc_watchdog_init(&q->watchdog, sch);
1742 ++
1743 + if (!opt)
1744 + return -EINVAL;
1745 +
1746 +- qdisc_watchdog_init(&q->watchdog, sch);
1747 +-
1748 + q->loss_model = CLG_RANDOM;
1749 + ret = netem_change(sch, opt);
1750 + if (ret)
1751 +diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
1752 +index 303355c449ab..b3f7980b0f27 100644
1753 +--- a/net/sched/sch_tbf.c
1754 ++++ b/net/sched/sch_tbf.c
1755 +@@ -423,12 +423,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
1756 + {
1757 + struct tbf_sched_data *q = qdisc_priv(sch);
1758 +
1759 ++ qdisc_watchdog_init(&q->watchdog, sch);
1760 ++ q->qdisc = &noop_qdisc;
1761 ++
1762 + if (opt == NULL)
1763 + return -EINVAL;
1764 +
1765 + q->t_c = ktime_get_ns();
1766 +- qdisc_watchdog_init(&q->watchdog, sch);
1767 +- q->qdisc = &noop_qdisc;
1768 +
1769 + return tbf_change(sch, opt);
1770 + }
1771 +diff --git a/net/sctp/proc.c b/net/sctp/proc.c
1772 +index 206377fe91ec..fd7f23566ed6 100644
1773 +--- a/net/sctp/proc.c
1774 ++++ b/net/sctp/proc.c
1775 +@@ -337,8 +337,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
1776 + }
1777 +
1778 + transport = (struct sctp_transport *)v;
1779 +- if (!sctp_transport_hold(transport))
1780 +- return 0;
1781 + assoc = transport->asoc;
1782 + epb = &assoc->base;
1783 + sk = epb->sk;
1784 +@@ -428,8 +426,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
1785 + }
1786 +
1787 + transport = (struct sctp_transport *)v;
1788 +- if (!sctp_transport_hold(transport))
1789 +- return 0;
1790 + assoc = transport->asoc;
1791 +
1792 + list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
1793 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1794 +index 78f38056fca6..64d2d9ea2f8c 100644
1795 +--- a/net/sctp/socket.c
1796 ++++ b/net/sctp/socket.c
1797 +@@ -4476,9 +4476,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
1798 + break;
1799 + }
1800 +
1801 ++ if (!sctp_transport_hold(t))
1802 ++ continue;
1803 ++
1804 + if (net_eq(sock_net(t->asoc->base.sk), net) &&
1805 + t->asoc->peer.primary_path == t)
1806 + break;
1807 ++
1808 ++ sctp_transport_put(t);
1809 + }
1810 +
1811 + return t;
1812 +@@ -4488,13 +4493,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
1813 + struct rhashtable_iter *iter,
1814 + int pos)
1815 + {
1816 +- void *obj = SEQ_START_TOKEN;
1817 ++ struct sctp_transport *t;
1818 +
1819 +- while (pos && (obj = sctp_transport_get_next(net, iter)) &&
1820 +- !IS_ERR(obj))
1821 +- pos--;
1822 ++ if (!pos)
1823 ++ return SEQ_START_TOKEN;
1824 +
1825 +- return obj;
1826 ++ while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
1827 ++ if (!--pos)
1828 ++ break;
1829 ++ sctp_transport_put(t);
1830 ++ }
1831 ++
1832 ++ return t;
1833 + }
1834 +
1835 + int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
1836 +@@ -4556,8 +4566,6 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
1837 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
1838 + struct sctp_transport *transport = obj;
1839 +
1840 +- if (!sctp_transport_hold(transport))
1841 +- continue;
1842 + err = cb(transport, p);
1843 + sctp_transport_put(transport);
1844 + if (err)
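
The sctp hunks above move the reference acquisition into sctp_transport_get_next(): the iterator only returns a transport it successfully took a hold on, and each consumer (sctp_transport_get_idx(), the /proc seq handlers, sctp_for_each_transport()) simply drops the single reference it was handed. A tiny user-space sketch of an iterator that hands out counted references; the refcount is a plain int for brevity, where a real implementation would use atomics, and all names are illustrative:

    #include <stdio.h>

    struct node { int refs; int dead; int value; struct node *next; };

    static struct node *node_get(struct node *n)
    {
        if (n->dead)            /* object already on its way out: skip it */
            return NULL;
        n->refs++;
        return n;
    }

    static void node_put(struct node *n)
    {
        n->refs--;
    }

    /* Return the next live node with a reference already taken, so the
     * caller owns exactly one put per successful call. */
    static struct node *iter_next(struct node **pos)
    {
        struct node *n;

        for (n = *pos; n; n = n->next) {
            if (node_get(n)) {
                *pos = n->next;
                return n;
            }
        }
        *pos = NULL;
        return NULL;
    }

    int main(void)
    {
        struct node c = { .value = 3 };
        struct node b = { .dead = 1, .value = 2, .next = &c };
        struct node a = { .value = 1, .next = &b };
        struct node *pos = &a, *n;

        while ((n = iter_next(&pos))) {
            printf("visit %d (refs now %d)\n", n->value, n->refs);
            node_put(n);        /* caller drops the hold it was given */
        }
        return 0;
    }
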
1845 +diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
1846 +index 4afd4149a632..bad69e91fea3 100644
1847 +--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
1848 ++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
1849 +@@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
1850 + struct scatterlist sg[1];
1851 + int err = -1;
1852 + u8 *checksumdata;
1853 +- u8 rc4salt[4];
1854 ++ u8 *rc4salt;
1855 + struct crypto_ahash *md5;
1856 + struct crypto_ahash *hmac_md5;
1857 + struct ahash_request *req;
1858 +@@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
1859 + return GSS_S_FAILURE;
1860 + }
1861 +
1862 ++ rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
1863 ++ if (!rc4salt)
1864 ++ return GSS_S_FAILURE;
1865 ++
1866 + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
1867 + dprintk("%s: invalid usage value %u\n", __func__, usage);
1868 +- return GSS_S_FAILURE;
1869 ++ goto out_free_rc4salt;
1870 + }
1871 +
1872 + checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
1873 + if (!checksumdata)
1874 +- return GSS_S_FAILURE;
1875 ++ goto out_free_rc4salt;
1876 +
1877 + md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
1878 + if (IS_ERR(md5))
1879 +@@ -258,6 +262,8 @@ out_free_md5:
1880 + crypto_free_ahash(md5);
1881 + out_free_cksum:
1882 + kfree(checksumdata);
1883 ++out_free_rc4salt:
1884 ++ kfree(rc4salt);
1885 + return err ? GSS_S_FAILURE : 0;
1886 + }
1887 +
1888 +diff --git a/scripts/depmod.sh b/scripts/depmod.sh
1889 +index ea1e96921e3b..baedaef53ca0 100755
1890 +--- a/scripts/depmod.sh
1891 ++++ b/scripts/depmod.sh
1892 +@@ -15,9 +15,9 @@ if ! test -r System.map ; then
1893 + fi
1894 +
1895 + if [ -z $(command -v $DEPMOD) ]; then
1896 +- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
1897 ++ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
1898 + echo "This is probably in the kmod package." >&2
1899 +- exit 1
1900 ++ exit 0
1901 + fi
1902 +
1903 + # older versions of depmod don't support -P <symbol-prefix>
1904 +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
1905 +index 238db4ffd30c..88b3dc19bbae 100644
1906 +--- a/scripts/mod/modpost.c
1907 ++++ b/scripts/mod/modpost.c
1908 +@@ -649,7 +649,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
1909 + if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
1910 + break;
1911 + if (symname[0] == '.') {
1912 +- char *munged = strdup(symname);
1913 ++ char *munged = NOFAIL(strdup(symname));
1914 + munged[0] = '_';
1915 + munged[1] = toupper(munged[1]);
1916 + symname = munged;
1917 +@@ -1312,7 +1312,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
1918 + static char *sec2annotation(const char *s)
1919 + {
1920 + if (match(s, init_exit_sections)) {
1921 +- char *p = malloc(20);
1922 ++ char *p = NOFAIL(malloc(20));
1923 + char *r = p;
1924 +
1925 + *p++ = '_';
1926 +@@ -1332,7 +1332,7 @@ static char *sec2annotation(const char *s)
1927 + strcat(p, " ");
1928 + return r;
1929 + } else {
1930 +- return strdup("");
1931 ++ return NOFAIL(strdup(""));
1932 + }
1933 + }
1934 +
1935 +@@ -2033,7 +2033,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
1936 + {
1937 + if (buf->size - buf->pos < len) {
1938 + buf->size += len + SZ;
1939 +- buf->p = realloc(buf->p, buf->size);
1940 ++ buf->p = NOFAIL(realloc(buf->p, buf->size));
1941 + }
1942 + strncpy(buf->p + buf->pos, s, len);
1943 + buf->pos += len;
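
The modpost.c hunks above wrap strdup(), malloc() and realloc() in NOFAIL(), modpost's existing helper that bails out on a failed allocation instead of letting a NULL pointer flow into code that never checks for it. A user-space sketch of the same style of fail-fast wrapper; the xmalloc/xstrdup names are just a common convention used for illustration, not modpost functions:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Abort loudly on allocation failure instead of returning NULL. */
    static void *xmalloc(size_t len)
    {
        void *p = malloc(len);

        if (!p) {
            fprintf(stderr, "fatal: out of memory allocating %zu bytes\n", len);
            exit(1);
        }
        return p;
    }

    static char *xstrdup(const char *s)
    {
        size_t len = strlen(s) + 1;
        char *p = xmalloc(len);

        memcpy(p, s, len);
        return p;
    }

    int main(void)
    {
        char *copy = xstrdup(".init.text");

        copy[0] = '_';          /* safe: xstrdup never returns NULL */
        puts(copy);
        free(copy);
        return 0;
    }
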
1944 +diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
1945 +index 3896523b71e9..f289762cd676 100644
1946 +--- a/sound/soc/codecs/wm8994.c
1947 ++++ b/sound/soc/codecs/wm8994.c
1948 +@@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
1949 + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
1950 + WM8994_OPCLK_ENA, 0);
1951 + }
1952 ++ break;
1953 +
1954 + default:
1955 + return -EINVAL;
1956 +diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
1957 +index 1030a6e504bb..de477a3dc968 100644
1958 +--- a/tools/perf/arch/powerpc/util/sym-handling.c
1959 ++++ b/tools/perf/arch/powerpc/util/sym-handling.c
1960 +@@ -115,8 +115,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
1961 + for (i = 0; i < ntevs; i++) {
1962 + tev = &pev->tevs[i];
1963 + map__for_each_symbol(map, sym, tmp) {
1964 +- if (map->unmap_ip(map, sym->start) == tev->point.address)
1965 ++ if (map->unmap_ip(map, sym->start) == tev->point.address) {
1966 + arch__fix_tev_from_maps(pev, tev, map, sym);
1967 ++ break;
1968 ++ }
1969 + }
1970 + }
1971 + }
1972 +diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
1973 +index 66d31de60b9a..9d7166dfad1e 100644
1974 +--- a/tools/testing/selftests/powerpc/harness.c
1975 ++++ b/tools/testing/selftests/powerpc/harness.c
1976 +@@ -85,13 +85,13 @@ wait:
1977 + return status;
1978 + }
1979 +
1980 +-static void alarm_handler(int signum)
1981 ++static void sig_handler(int signum)
1982 + {
1983 +- /* Jut wake us up from waitpid */
1984 ++ /* Just wake us up from waitpid */
1985 + }
1986 +
1987 +-static struct sigaction alarm_action = {
1988 +- .sa_handler = alarm_handler,
1989 ++static struct sigaction sig_action = {
1990 ++ .sa_handler = sig_handler,
1991 + };
1992 +
1993 + void test_harness_set_timeout(uint64_t time)
1994 +@@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
1995 + test_start(name);
1996 + test_set_git_version(GIT_VERSION);
1997 +
1998 +- if (sigaction(SIGALRM, &alarm_action, NULL)) {
1999 +- perror("sigaction");
2000 ++ if (sigaction(SIGINT, &sig_action, NULL)) {
2001 ++ perror("sigaction (sigint)");
2002 ++ test_error(name);
2003 ++ return 1;
2004 ++ }
2005 ++
2006 ++ if (sigaction(SIGALRM, &sig_action, NULL)) {
2007 ++ perror("sigaction (sigalrm)");
2008 + test_error(name);
2009 + return 1;
2010 + }