Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 4.8.15/
Date: Sun, 01 Jan 2017 19:58:18
Message-Id: 1483300466.1b413d517df92fadb5758bba4463c8bd6f73fb25.blueness@gentoo
1 commit: 1b413d517df92fadb5758bba4463c8bd6f73fb25
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Jan 1 19:54:26 2017 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Jan 1 19:54:26 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=1b413d51
7
8 grsecurity-3.1-4.8.15-201612301949
9
10 4.8.15/0000_README | 18 +-
11 4.8.15/1012_linux-4.8.13.patch | 1063 ------------
12 4.8.15/1013_linux-4.8.14.patch | 1725 --------------------
13 4.8.15/1014_linux-4.8.15.patch | 1042 ------------
14 ... 4420_grsecurity-3.1-4.8.15-201612301949.patch} | 14 +-
15 4.8.15/4426_default_XATTR_PAX_FLAGS.patch | 36 +
16 6 files changed, 50 insertions(+), 3848 deletions(-)
17
18 diff --git a/4.8.15/0000_README b/4.8.15/0000_README
19 index cd91d08..97a03a1 100644
20 --- a/4.8.15/0000_README
21 +++ b/4.8.15/0000_README
22 @@ -2,19 +2,7 @@ README
23 -----------------------------------------------------------------------------
24 Individual Patch Descriptions:
25 -----------------------------------------------------------------------------
26 -Patch: 1012_linux-4.8.13.patch
27 -From: http://www.kernel.org
28 -Desc: Linux 4.8.13
29 -
30 -Patch: 1013_linux-4.8.14.patch
31 -From: http://www.kernel.org
32 -Desc: Linux 4.8.14
33 -
34 -Patch: 1014_linux-4.8.15.patch
35 -From: http://www.kernel.org
36 -Desc: Linux 4.8.15
37 -
38 -Patch: 4420_grsecurity-3.1-4.8.15-201612151923.patch
39 +Patch: 4420_grsecurity-3.1-4.8.15-201612301949.patch
40 From: http://www.grsecurity.net
41 Desc: hardened-sources base patch from upstream grsecurity
42
43 @@ -22,6 +10,10 @@ Patch: 4425_grsec_remove_EI_PAX.patch
44 From: Anthony G. Basile <blueness@g.o>
45 Desc: Remove EI_PAX option and force off
46
47 +Patch: 4426_default_XATTR_PAX_FLAGS.patch
48 +From: Anthony G. Basile <blueness@g.o>
49 +Desc: Default PT_PAX_FLAGS off and XATTR_PAX_FLAGS on
50 +
51 Patch: 4427_force_XATTR_PAX_tmpfs.patch
52 From: Anthony G. Basile <blueness@g.o>
53 Desc: Force XATTR_PAX on tmpfs
54
55 diff --git a/4.8.15/1012_linux-4.8.13.patch b/4.8.15/1012_linux-4.8.13.patch
56 deleted file mode 100644
57 index c742393..0000000
58 --- a/4.8.15/1012_linux-4.8.13.patch
59 +++ /dev/null
60 @@ -1,1063 +0,0 @@
61 -diff --git a/Makefile b/Makefile
62 -index 7b0c92f..b38abe9 100644
63 ---- a/Makefile
64 -+++ b/Makefile
65 -@@ -1,6 +1,6 @@
66 - VERSION = 4
67 - PATCHLEVEL = 8
68 --SUBLEVEL = 12
69 -+SUBLEVEL = 13
70 - EXTRAVERSION =
71 - NAME = Psychotic Stoned Sheep
72 -
73 -diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
74 -index 08e7e2a..a36e860 100644
75 ---- a/arch/arc/include/asm/delay.h
76 -+++ b/arch/arc/include/asm/delay.h
77 -@@ -22,10 +22,11 @@
78 - static inline void __delay(unsigned long loops)
79 - {
80 - __asm__ __volatile__(
81 -- " lp 1f \n"
82 -- " nop \n"
83 -- "1: \n"
84 -- : "+l"(loops));
85 -+ " mov lp_count, %0 \n"
86 -+ " lp 1f \n"
87 -+ " nop \n"
88 -+ "1: \n"
89 -+ : : "r"(loops));
90 - }
91 -
92 - extern void __bad_udelay(void);
93 -diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
94 -index 89eeb37..e94ca72 100644
95 ---- a/arch/arc/include/asm/pgtable.h
96 -+++ b/arch/arc/include/asm/pgtable.h
97 -@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
98 -
99 - #define pte_page(pte) pfn_to_page(pte_pfn(pte))
100 - #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
101 --#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
102 -+#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
103 -
104 - /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
105 - #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
106 -diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
107 -index 123a58b..f0b857d 100644
108 ---- a/arch/arm64/boot/dts/arm/juno-r1.dts
109 -+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
110 -@@ -76,7 +76,7 @@
111 - compatible = "arm,idle-state";
112 - arm,psci-suspend-param = <0x1010000>;
113 - local-timer-stop;
114 -- entry-latency-us = <300>;
115 -+ entry-latency-us = <400>;
116 - exit-latency-us = <1200>;
117 - min-residency-us = <2500>;
118 - };
119 -diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
120 -index 007be82..26aaa6a 100644
121 ---- a/arch/arm64/boot/dts/arm/juno-r2.dts
122 -+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
123 -@@ -76,7 +76,7 @@
124 - compatible = "arm,idle-state";
125 - arm,psci-suspend-param = <0x1010000>;
126 - local-timer-stop;
127 -- entry-latency-us = <300>;
128 -+ entry-latency-us = <400>;
129 - exit-latency-us = <1200>;
130 - min-residency-us = <2500>;
131 - };
132 -diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
133 -index a7270ef..6e154d9 100644
134 ---- a/arch/arm64/boot/dts/arm/juno.dts
135 -+++ b/arch/arm64/boot/dts/arm/juno.dts
136 -@@ -76,7 +76,7 @@
137 - compatible = "arm,idle-state";
138 - arm,psci-suspend-param = <0x1010000>;
139 - local-timer-stop;
140 -- entry-latency-us = <300>;
141 -+ entry-latency-us = <400>;
142 - exit-latency-us = <1200>;
143 - min-residency-us = <2500>;
144 - };
145 -diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
146 -index 7099f26..b96346b 100644
147 ---- a/arch/arm64/include/asm/cpufeature.h
148 -+++ b/arch/arm64/include/asm/cpufeature.h
149 -@@ -90,7 +90,7 @@ struct arm64_cpu_capabilities {
150 - u16 capability;
151 - int def_scope; /* default scope */
152 - bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
153 -- void (*enable)(void *); /* Called on all active CPUs */
154 -+ int (*enable)(void *); /* Called on all active CPUs */
155 - union {
156 - struct { /* To be used for erratum handling only */
157 - u32 midr_model;
158 -diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
159 -index db0563c..f7865dd 100644
160 ---- a/arch/arm64/include/asm/exec.h
161 -+++ b/arch/arm64/include/asm/exec.h
162 -@@ -18,6 +18,9 @@
163 - #ifndef __ASM_EXEC_H
164 - #define __ASM_EXEC_H
165 -
166 -+#include <linux/sched.h>
167 -+
168 - extern unsigned long arch_align_stack(unsigned long sp);
169 -+void uao_thread_switch(struct task_struct *next);
170 -
171 - #endif /* __ASM_EXEC_H */
172 -diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
173 -index ace0a96..3be0ab0 100644
174 ---- a/arch/arm64/include/asm/processor.h
175 -+++ b/arch/arm64/include/asm/processor.h
176 -@@ -190,8 +190,8 @@ static inline void spin_lock_prefetch(const void *ptr)
177 -
178 - #endif
179 -
180 --void cpu_enable_pan(void *__unused);
181 --void cpu_enable_uao(void *__unused);
182 --void cpu_enable_cache_maint_trap(void *__unused);
183 -+int cpu_enable_pan(void *__unused);
184 -+int cpu_enable_uao(void *__unused);
185 -+int cpu_enable_cache_maint_trap(void *__unused);
186 -
187 - #endif /* __ASM_PROCESSOR_H */
188 -diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
189 -index 62272ea..94a0330 100644
190 ---- a/arch/arm64/kernel/cpufeature.c
191 -+++ b/arch/arm64/kernel/cpufeature.c
192 -@@ -19,7 +19,9 @@
193 - #define pr_fmt(fmt) "CPU features: " fmt
194 -
195 - #include <linux/bsearch.h>
196 -+#include <linux/cpumask.h>
197 - #include <linux/sort.h>
198 -+#include <linux/stop_machine.h>
199 - #include <linux/types.h>
200 - #include <asm/cpu.h>
201 - #include <asm/cpufeature.h>
202 -@@ -936,7 +938,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
203 - {
204 - for (; caps->matches; caps++)
205 - if (caps->enable && cpus_have_cap(caps->capability))
206 -- on_each_cpu(caps->enable, NULL, true);
207 -+ /*
208 -+ * Use stop_machine() as it schedules the work allowing
209 -+ * us to modify PSTATE, instead of on_each_cpu() which
210 -+ * uses an IPI, giving us a PSTATE that disappears when
211 -+ * we return.
212 -+ */
213 -+ stop_machine(caps->enable, NULL, cpu_online_mask);
214 - }
215 -
216 - /*
217 -diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
218 -index 6cd2612..9cc8667 100644
219 ---- a/arch/arm64/kernel/process.c
220 -+++ b/arch/arm64/kernel/process.c
221 -@@ -49,6 +49,7 @@
222 - #include <asm/alternative.h>
223 - #include <asm/compat.h>
224 - #include <asm/cacheflush.h>
225 -+#include <asm/exec.h>
226 - #include <asm/fpsimd.h>
227 - #include <asm/mmu_context.h>
228 - #include <asm/processor.h>
229 -@@ -303,7 +304,7 @@ static void tls_thread_switch(struct task_struct *next)
230 - }
231 -
232 - /* Restore the UAO state depending on next's addr_limit */
233 --static void uao_thread_switch(struct task_struct *next)
234 -+void uao_thread_switch(struct task_struct *next)
235 - {
236 - if (IS_ENABLED(CONFIG_ARM64_UAO)) {
237 - if (task_thread_info(next)->addr_limit == KERNEL_DS)
238 -diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
239 -index b616e365..23ddf55 100644
240 ---- a/arch/arm64/kernel/suspend.c
241 -+++ b/arch/arm64/kernel/suspend.c
242 -@@ -1,8 +1,11 @@
243 - #include <linux/ftrace.h>
244 - #include <linux/percpu.h>
245 - #include <linux/slab.h>
246 -+#include <asm/alternative.h>
247 - #include <asm/cacheflush.h>
248 -+#include <asm/cpufeature.h>
249 - #include <asm/debug-monitors.h>
250 -+#include <asm/exec.h>
251 - #include <asm/pgtable.h>
252 - #include <asm/memory.h>
253 - #include <asm/mmu_context.h>
254 -@@ -48,6 +51,14 @@ void notrace __cpu_suspend_exit(void)
255 - set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
256 -
257 - /*
258 -+ * PSTATE was not saved over suspend/resume, re-enable any detected
259 -+ * features that might not have been set correctly.
260 -+ */
261 -+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
262 -+ CONFIG_ARM64_PAN));
263 -+ uao_thread_switch(current);
264 -+
265 -+ /*
266 - * Restore HW breakpoint registers to sane values
267 - * before debug exceptions are possibly reenabled
268 - * through local_dbg_restore.
269 -diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
270 -index 771a01a7f..9595d3d 100644
271 ---- a/arch/arm64/kernel/traps.c
272 -+++ b/arch/arm64/kernel/traps.c
273 -@@ -428,9 +428,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
274 - force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
275 - }
276 -
277 --void cpu_enable_cache_maint_trap(void *__unused)
278 -+int cpu_enable_cache_maint_trap(void *__unused)
279 - {
280 - config_sctlr_el1(SCTLR_EL1_UCI, 0);
281 -+ return 0;
282 - }
283 -
284 - #define __user_cache_maint(insn, address, res) \
285 -diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
286 -index 05d2bd7..67506c3 100644
287 ---- a/arch/arm64/mm/fault.c
288 -+++ b/arch/arm64/mm/fault.c
289 -@@ -29,7 +29,9 @@
290 - #include <linux/sched.h>
291 - #include <linux/highmem.h>
292 - #include <linux/perf_event.h>
293 -+#include <linux/preempt.h>
294 -
295 -+#include <asm/bug.h>
296 - #include <asm/cpufeature.h>
297 - #include <asm/exception.h>
298 - #include <asm/debug-monitors.h>
299 -@@ -671,9 +673,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
300 - NOKPROBE_SYMBOL(do_debug_exception);
301 -
302 - #ifdef CONFIG_ARM64_PAN
303 --void cpu_enable_pan(void *__unused)
304 -+int cpu_enable_pan(void *__unused)
305 - {
306 -+ /*
307 -+ * We modify PSTATE. This won't work from irq context as the PSTATE
308 -+ * is discarded once we return from the exception.
309 -+ */
310 -+ WARN_ON_ONCE(in_interrupt());
311 -+
312 - config_sctlr_el1(SCTLR_EL1_SPAN, 0);
313 -+ asm(SET_PSTATE_PAN(1));
314 -+ return 0;
315 - }
316 - #endif /* CONFIG_ARM64_PAN */
317 -
318 -@@ -684,8 +694,9 @@ void cpu_enable_pan(void *__unused)
319 - * We need to enable the feature at runtime (instead of adding it to
320 - * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
321 - */
322 --void cpu_enable_uao(void *__unused)
323 -+int cpu_enable_uao(void *__unused)
324 - {
325 - asm(SET_PSTATE_UAO(1));
326 -+ return 0;
327 - }
328 - #endif /* CONFIG_ARM64_UAO */
329 -diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
330 -index d0efb5c..a4e070a 100644
331 ---- a/arch/x86/events/core.c
332 -+++ b/arch/x86/events/core.c
333 -@@ -2344,7 +2344,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
334 - frame.next_frame = 0;
335 - frame.return_address = 0;
336 -
337 -- if (!access_ok(VERIFY_READ, fp, 8))
338 -+ if (!valid_user_frame(fp, sizeof(frame)))
339 - break;
340 -
341 - bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
342 -@@ -2354,9 +2354,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
343 - if (bytes != 0)
344 - break;
345 -
346 -- if (!valid_user_frame(fp, sizeof(frame)))
347 -- break;
348 --
349 - perf_callchain_store(entry, cs_base + frame.return_address);
350 - fp = compat_ptr(ss_base + frame.next_frame);
351 - }
352 -@@ -2405,7 +2402,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
353 - frame.next_frame = NULL;
354 - frame.return_address = 0;
355 -
356 -- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
357 -+ if (!valid_user_frame(fp, sizeof(frame)))
358 - break;
359 -
360 - bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
361 -@@ -2415,9 +2412,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
362 - if (bytes != 0)
363 - break;
364 -
365 -- if (!valid_user_frame(fp, sizeof(frame)))
366 -- break;
367 --
368 - perf_callchain_store(entry, frame.return_address);
369 - fp = (void __user *)frame.next_frame;
370 - }
371 -diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
372 -index e207b33..1e007a9 100644
373 ---- a/drivers/ata/libata-scsi.c
374 -+++ b/drivers/ata/libata-scsi.c
375 -@@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
376 - desc[1] = tf->command; /* status */
377 - desc[2] = tf->device;
378 - desc[3] = tf->nsect;
379 -- desc[0] = 0;
380 -+ desc[7] = 0;
381 - if (tf->flags & ATA_TFLAG_LBA48) {
382 - desc[8] |= 0x80;
383 - if (tf->hob_nsect)
384 -diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
385 -index 04365b1..5163c8f 100644
386 ---- a/drivers/block/zram/zram_drv.c
387 -+++ b/drivers/block/zram/zram_drv.c
388 -@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
389 - zram = idr_find(&zram_index_idr, dev_id);
390 - if (zram) {
391 - ret = zram_remove(zram);
392 -- idr_remove(&zram_index_idr, dev_id);
393 -+ if (!ret)
394 -+ idr_remove(&zram_index_idr, dev_id);
395 - } else {
396 - ret = -ENODEV;
397 - }
398 -diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
399 -index 838b22a..f2c9274 100644
400 ---- a/drivers/clk/sunxi/clk-sunxi.c
401 -+++ b/drivers/clk/sunxi/clk-sunxi.c
402 -@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
403 - else
404 - calcp = 3;
405 -
406 -- calcm = (req->parent_rate >> calcp) - 1;
407 -+ calcm = (div >> calcp) - 1;
408 -
409 - req->rate = (req->parent_rate >> calcp) / (calcm + 1);
410 - req->m = calcm;
411 -diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
412 -index 10b5ddf..1ed085f 100644
413 ---- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
414 -+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
415 -@@ -33,6 +33,7 @@ struct amdgpu_atpx {
416 -
417 - static struct amdgpu_atpx_priv {
418 - bool atpx_detected;
419 -+ bool bridge_pm_usable;
420 - /* handle for device - and atpx */
421 - acpi_handle dhandle;
422 - acpi_handle other_handle;
423 -@@ -200,7 +201,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
424 - atpx->is_hybrid = false;
425 - if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
426 - printk("ATPX Hybrid Graphics\n");
427 -- atpx->functions.power_cntl = false;
428 -+ /*
429 -+ * Disable legacy PM methods only when pcie port PM is usable,
430 -+ * otherwise the device might fail to power off or power on.
431 -+ */
432 -+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
433 - atpx->is_hybrid = true;
434 - }
435 -
436 -@@ -546,17 +551,25 @@ static bool amdgpu_atpx_detect(void)
437 - struct pci_dev *pdev = NULL;
438 - bool has_atpx = false;
439 - int vga_count = 0;
440 -+ bool d3_supported = false;
441 -+ struct pci_dev *parent_pdev;
442 -
443 - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
444 - vga_count++;
445 -
446 - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
447 -+
448 -+ parent_pdev = pci_upstream_bridge(pdev);
449 -+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
450 - }
451 -
452 - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
453 - vga_count++;
454 -
455 - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
456 -+
457 -+ parent_pdev = pci_upstream_bridge(pdev);
458 -+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
459 - }
460 -
461 - if (has_atpx && vga_count == 2) {
462 -@@ -564,6 +577,7 @@ static bool amdgpu_atpx_detect(void)
463 - printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
464 - acpi_method_name);
465 - amdgpu_atpx_priv.atpx_detected = true;
466 -+ amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
467 - amdgpu_atpx_init();
468 - return true;
469 - }
470 -diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
471 -index a77ce99..b8e3854 100644
472 ---- a/drivers/gpu/drm/i915/i915_gem.c
473 -+++ b/drivers/gpu/drm/i915/i915_gem.c
474 -@@ -2540,7 +2540,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
475 - page = shmem_read_mapping_page(mapping, i);
476 - if (IS_ERR(page)) {
477 - ret = PTR_ERR(page);
478 -- goto err_pages;
479 -+ goto err_sg;
480 - }
481 - }
482 - #ifdef CONFIG_SWIOTLB
483 -@@ -2583,8 +2583,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
484 -
485 - return 0;
486 -
487 --err_pages:
488 -+err_sg:
489 - sg_mark_end(sg);
490 -+err_pages:
491 - for_each_sgt_page(page, sgt_iter, st)
492 - put_page(page);
493 - sg_free_table(st);
494 -diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
495 -index e26f889..35d385d 100644
496 ---- a/drivers/gpu/drm/i915/intel_display.c
497 -+++ b/drivers/gpu/drm/i915/intel_display.c
498 -@@ -11791,7 +11791,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
499 - intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
500 - if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
501 - ret = -EIO;
502 -- goto cleanup;
503 -+ goto unlock;
504 - }
505 -
506 - atomic_inc(&intel_crtc->unpin_work_count);
507 -@@ -11877,6 +11877,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
508 - if (!IS_ERR_OR_NULL(request))
509 - i915_add_request_no_flush(request);
510 - atomic_dec(&intel_crtc->unpin_work_count);
511 -+unlock:
512 - mutex_unlock(&dev->struct_mutex);
513 - cleanup:
514 - crtc->primary->fb = old_fb;
515 -diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
516 -index 8f62671f..54acfcc 100644
517 ---- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
518 -+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
519 -@@ -249,13 +249,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
520 - if (irq < 0)
521 - return irq;
522 -
523 -- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
524 -- IRQF_TRIGGER_NONE, dev_name(dev), priv);
525 -- if (ret < 0) {
526 -- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
527 -- return ret;
528 -- }
529 --
530 - comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
531 - if (comp_id < 0) {
532 - dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
533 -@@ -271,6 +264,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
534 -
535 - platform_set_drvdata(pdev, priv);
536 -
537 -+ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
538 -+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
539 -+ if (ret < 0) {
540 -+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
541 -+ return ret;
542 -+ }
543 -+
544 - ret = component_add(dev, &mtk_disp_ovl_component_ops);
545 - if (ret)
546 - dev_err(dev, "Failed to add component: %d\n", ret);
547 -diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
548 -index ddef0d4..34b4ace 100644
549 ---- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
550 -+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
551 -@@ -33,6 +33,7 @@ struct radeon_atpx {
552 -
553 - static struct radeon_atpx_priv {
554 - bool atpx_detected;
555 -+ bool bridge_pm_usable;
556 - /* handle for device - and atpx */
557 - acpi_handle dhandle;
558 - struct radeon_atpx atpx;
559 -@@ -198,7 +199,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
560 - atpx->is_hybrid = false;
561 - if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
562 - printk("ATPX Hybrid Graphics\n");
563 -- atpx->functions.power_cntl = false;
564 -+ /*
565 -+ * Disable legacy PM methods only when pcie port PM is usable,
566 -+ * otherwise the device might fail to power off or power on.
567 -+ */
568 -+ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
569 - atpx->is_hybrid = true;
570 - }
571 -
572 -@@ -543,11 +548,16 @@ static bool radeon_atpx_detect(void)
573 - struct pci_dev *pdev = NULL;
574 - bool has_atpx = false;
575 - int vga_count = 0;
576 -+ bool d3_supported = false;
577 -+ struct pci_dev *parent_pdev;
578 -
579 - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
580 - vga_count++;
581 -
582 - has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
583 -+
584 -+ parent_pdev = pci_upstream_bridge(pdev);
585 -+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
586 - }
587 -
588 - /* some newer PX laptops mark the dGPU as a non-VGA display device */
589 -@@ -555,6 +565,9 @@ static bool radeon_atpx_detect(void)
590 - vga_count++;
591 -
592 - has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
593 -+
594 -+ parent_pdev = pci_upstream_bridge(pdev);
595 -+ d3_supported |= parent_pdev && parent_pdev->bridge_d3;
596 - }
597 -
598 - if (has_atpx && vga_count == 2) {
599 -@@ -562,6 +575,7 @@ static bool radeon_atpx_detect(void)
600 - printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
601 - acpi_method_name);
602 - radeon_atpx_priv.atpx_detected = true;
603 -+ radeon_atpx_priv.bridge_pm_usable = d3_supported;
604 - radeon_atpx_init();
605 - return true;
606 - }
607 -diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
608 -index 5784e20..9f6203c 100644
609 ---- a/drivers/input/mouse/psmouse-base.c
610 -+++ b/drivers/input/mouse/psmouse-base.c
611 -@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
612 - if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
613 - &max_proto, set_properties, true))
614 - return PSMOUSE_TOUCHKIT_PS2;
615 --
616 -- if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
617 -- &max_proto, set_properties, true))
618 -- return PSMOUSE_BYD;
619 - }
620 -
621 - /*
622 -diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
623 -index a8ff969..cbc7dfa 100644
624 ---- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
625 -+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
626 -@@ -2203,8 +2203,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
627 - is_scanning_required = 1;
628 - } else {
629 - mwifiex_dbg(priv->adapter, MSG,
630 -- "info: trying to associate to '%s' bssid %pM\n",
631 -- (char *)req_ssid.ssid, bss->bssid);
632 -+ "info: trying to associate to '%.*s' bssid %pM\n",
633 -+ req_ssid.ssid_len, (char *)req_ssid.ssid,
634 -+ bss->bssid);
635 - memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
636 - break;
637 - }
638 -@@ -2264,8 +2265,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
639 - }
640 -
641 - mwifiex_dbg(adapter, INFO,
642 -- "info: Trying to associate to %s and bssid %pM\n",
643 -- (char *)sme->ssid, sme->bssid);
644 -+ "info: Trying to associate to %.*s and bssid %pM\n",
645 -+ (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
646 -
647 - if (!mwifiex_stop_bg_scan(priv))
648 - cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
649 -@@ -2398,8 +2399,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
650 - }
651 -
652 - mwifiex_dbg(priv->adapter, MSG,
653 -- "info: trying to join to %s and bssid %pM\n",
654 -- (char *)params->ssid, params->bssid);
655 -+ "info: trying to join to %.*s and bssid %pM\n",
656 -+ params->ssid_len, (char *)params->ssid, params->bssid);
657 -
658 - mwifiex_set_ibss_params(priv, params);
659 -
660 -diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
661 -index db553dc..2b6a592 100644
662 ---- a/drivers/pci/pcie/aer/aer_inject.c
663 -+++ b/drivers/pci/pcie/aer/aer_inject.c
664 -@@ -307,20 +307,6 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
665 - return 0;
666 - }
667 -
668 --static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
669 --{
670 -- while (1) {
671 -- if (!pci_is_pcie(dev))
672 -- break;
673 -- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
674 -- return dev;
675 -- if (!dev->bus->self)
676 -- break;
677 -- dev = dev->bus->self;
678 -- }
679 -- return NULL;
680 --}
681 --
682 - static int find_aer_device_iter(struct device *device, void *data)
683 - {
684 - struct pcie_device **result = data;
685 -diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
686 -index 93f280d..f6eff4a 100644
687 ---- a/drivers/pci/probe.c
688 -+++ b/drivers/pci/probe.c
689 -@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
690 - dev_warn(&dev->dev, "PCI-X settings not supported\n");
691 - }
692 -
693 -+static bool pcie_root_rcb_set(struct pci_dev *dev)
694 -+{
695 -+ struct pci_dev *rp = pcie_find_root_port(dev);
696 -+ u16 lnkctl;
697 -+
698 -+ if (!rp)
699 -+ return false;
700 -+
701 -+ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
702 -+ if (lnkctl & PCI_EXP_LNKCTL_RCB)
703 -+ return true;
704 -+
705 -+ return false;
706 -+}
707 -+
708 - static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
709 - {
710 - int pos;
711 -@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
712 - ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
713 -
714 - /* Initialize Link Control Register */
715 -- if (pcie_cap_has_lnkctl(dev))
716 -+ if (pcie_cap_has_lnkctl(dev)) {
717 -+
718 -+ /*
719 -+ * If the Root Port supports Read Completion Boundary of
720 -+ * 128, set RCB to 128. Otherwise, clear it.
721 -+ */
722 -+ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
723 -+ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
724 -+ if (pcie_root_rcb_set(dev))
725 -+ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
726 -+
727 - pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
728 - ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
729 -+ }
730 -
731 - /* Find Advanced Error Reporting Enhanced Capability */
732 - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
733 -diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
734 -index 0296d81..a813239 100644
735 ---- a/drivers/pwm/sysfs.c
736 -+++ b/drivers/pwm/sysfs.c
737 -@@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
738 - if (test_bit(PWMF_EXPORTED, &pwm->flags))
739 - pwm_unexport_child(parent, pwm);
740 - }
741 -+
742 -+ put_device(parent);
743 - }
744 -
745 - static int __init pwm_sysfs_init(void)
746 -diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
747 -index 030d002..5138a84 100644
748 ---- a/drivers/scsi/hpsa.c
749 -+++ b/drivers/scsi/hpsa.c
750 -@@ -2007,7 +2007,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
751 -
752 - static int hpsa_slave_alloc(struct scsi_device *sdev)
753 - {
754 -- struct hpsa_scsi_dev_t *sd;
755 -+ struct hpsa_scsi_dev_t *sd = NULL;
756 - unsigned long flags;
757 - struct ctlr_info *h;
758 -
759 -@@ -2024,7 +2024,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
760 - sd->target = sdev_id(sdev);
761 - sd->lun = sdev->lun;
762 - }
763 -- } else
764 -+ }
765 -+ if (!sd)
766 - sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
767 - sdev_id(sdev), sdev->lun);
768 -
769 -@@ -3805,6 +3806,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
770 - sizeof(this_device->vendor));
771 - memcpy(this_device->model, &inq_buff[16],
772 - sizeof(this_device->model));
773 -+ this_device->rev = inq_buff[2];
774 - memset(this_device->device_id, 0,
775 - sizeof(this_device->device_id));
776 - hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
777 -@@ -3887,10 +3889,14 @@ static void figure_bus_target_lun(struct ctlr_info *h,
778 -
779 - if (!is_logical_dev_addr_mode(lunaddrbytes)) {
780 - /* physical device, target and lun filled in later */
781 -- if (is_hba_lunid(lunaddrbytes))
782 -+ if (is_hba_lunid(lunaddrbytes)) {
783 -+ int bus = HPSA_HBA_BUS;
784 -+
785 -+ if (!device->rev)
786 -+ bus = HPSA_LEGACY_HBA_BUS;
787 - hpsa_set_bus_target_lun(device,
788 -- HPSA_HBA_BUS, 0, lunid & 0x3fff);
789 -- else
790 -+ bus, 0, lunid & 0x3fff);
791 -+ } else
792 - /* defer target, lun assignment for physical devices */
793 - hpsa_set_bus_target_lun(device,
794 - HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
795 -diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
796 -index a1487e6..9d45dde 100644
797 ---- a/drivers/scsi/hpsa.h
798 -+++ b/drivers/scsi/hpsa.h
799 -@@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t {
800 - u64 sas_address;
801 - unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
802 - unsigned char model[16]; /* bytes 16-31 of inquiry data */
803 -+ unsigned char rev; /* byte 2 of inquiry data */
804 - unsigned char raid_level; /* from inquiry page 0xC1 */
805 - unsigned char volume_offline; /* discovered via TUR or VPD */
806 - u16 queue_depth; /* max queue_depth for this device */
807 -@@ -403,6 +404,7 @@ struct offline_device_entry {
808 - #define HPSA_RAID_VOLUME_BUS 1
809 - #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
810 - #define HPSA_HBA_BUS 0
811 -+#define HPSA_LEGACY_HBA_BUS 3
812 -
813 - /*
814 - Send the command to the hardware
815 -diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
816 -index 04ce7cf..50c7167 100644
817 ---- a/drivers/scsi/libfc/fc_lport.c
818 -+++ b/drivers/scsi/libfc/fc_lport.c
819 -@@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
820 - fc_stats = &lport->host_stats;
821 - memset(fc_stats, 0, sizeof(struct fc_host_statistics));
822 -
823 -- fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
824 -+ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
825 -
826 - for_each_possible_cpu(cpu) {
827 - struct fc_stats *stats;
828 -diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
829 -index a78415d..78be4ae 100644
830 ---- a/fs/overlayfs/super.c
831 -+++ b/fs/overlayfs/super.c
832 -@@ -329,11 +329,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
833 - if (!real)
834 - goto bug;
835 -
836 -+ /* Handle recursion */
837 -+ real = d_real(real, inode, open_flags);
838 -+
839 - if (!inode || inode == d_inode(real))
840 - return real;
841 --
842 -- /* Handle recursion */
843 -- return d_real(real, inode, open_flags);
844 - bug:
845 - WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
846 - inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
847 -diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
848 -index 573c5a1..0a0b2d5 100644
849 ---- a/include/linux/compiler-gcc.h
850 -+++ b/include/linux/compiler-gcc.h
851 -@@ -256,7 +256,9 @@
852 - #endif
853 - #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
854 -
855 --#if GCC_VERSION >= 50000
856 -+#if GCC_VERSION >= 70000
857 -+#define KASAN_ABI_VERSION 5
858 -+#elif GCC_VERSION >= 50000
859 - #define KASAN_ABI_VERSION 4
860 - #elif GCC_VERSION >= 40902
861 - #define KASAN_ABI_VERSION 3
862 -diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
863 -index 01e8443..d47cc4a 100644
864 ---- a/include/linux/pagemap.h
865 -+++ b/include/linux/pagemap.h
866 -@@ -364,16 +364,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
867 - }
868 -
869 - /*
870 -- * Get the offset in PAGE_SIZE.
871 -- * (TODO: hugepage should have ->index in PAGE_SIZE)
872 -+ * Get index of the page with in radix-tree
873 -+ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
874 - */
875 --static inline pgoff_t page_to_pgoff(struct page *page)
876 -+static inline pgoff_t page_to_index(struct page *page)
877 - {
878 - pgoff_t pgoff;
879 -
880 -- if (unlikely(PageHeadHuge(page)))
881 -- return page->index << compound_order(page);
882 --
883 - if (likely(!PageTransTail(page)))
884 - return page->index;
885 -
886 -@@ -387,6 +384,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
887 - }
888 -
889 - /*
890 -+ * Get the offset in PAGE_SIZE.
891 -+ * (TODO: hugepage should have ->index in PAGE_SIZE)
892 -+ */
893 -+static inline pgoff_t page_to_pgoff(struct page *page)
894 -+{
895 -+ if (unlikely(PageHeadHuge(page)))
896 -+ return page->index << compound_order(page);
897 -+
898 -+ return page_to_index(page);
899 -+}
900 -+
901 -+/*
902 - * Return byte-offset into filesystem object for page.
903 - */
904 - static inline loff_t page_offset(struct page *page)
905 -diff --git a/include/linux/pci.h b/include/linux/pci.h
906 -index 0ab8359..03f3df0 100644
907 ---- a/include/linux/pci.h
908 -+++ b/include/linux/pci.h
909 -@@ -1896,6 +1896,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
910 - return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
911 - }
912 -
913 -+static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
914 -+{
915 -+ while (1) {
916 -+ if (!pci_is_pcie(dev))
917 -+ break;
918 -+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
919 -+ return dev;
920 -+ if (!dev->bus->self)
921 -+ break;
922 -+ dev = dev->bus->self;
923 -+ }
924 -+ return NULL;
925 -+}
926 -+
927 - void pci_request_acs(void);
928 - bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
929 - bool pci_acs_path_enabled(struct pci_dev *start,
930 -diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
931 -index d6d071f..3af60ee 100644
932 ---- a/include/uapi/linux/input-event-codes.h
933 -+++ b/include/uapi/linux/input-event-codes.h
934 -@@ -640,7 +640,7 @@
935 - * Control a data application associated with the currently viewed channel,
936 - * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
937 - */
938 --#define KEY_DATA 0x275
939 -+#define KEY_DATA 0x277
940 -
941 - #define BTN_TRIGGER_HAPPY 0x2c0
942 - #define BTN_TRIGGER_HAPPY1 0x2c0
943 -diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
944 -index 0082fce..85c5a88 100644
945 ---- a/kernel/rcu/tree_plugin.h
946 -+++ b/kernel/rcu/tree_plugin.h
947 -@@ -2173,6 +2173,7 @@ static int rcu_nocb_kthread(void *arg)
948 - cl++;
949 - c++;
950 - local_bh_enable();
951 -+ cond_resched_rcu_qs();
952 - list = next;
953 - }
954 - trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
955 -diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
956 -index e5c2181f..03f4545 100644
957 ---- a/mm/kasan/kasan.h
958 -+++ b/mm/kasan/kasan.h
959 -@@ -53,6 +53,9 @@ struct kasan_global {
960 - #if KASAN_ABI_VERSION >= 4
961 - struct kasan_source_location *location;
962 - #endif
963 -+#if KASAN_ABI_VERSION >= 5
964 -+ char *odr_indicator;
965 -+#endif
966 - };
967 -
968 - /**
969 -diff --git a/mm/khugepaged.c b/mm/khugepaged.c
970 -index 728d779..87e1a7ca 100644
971 ---- a/mm/khugepaged.c
972 -+++ b/mm/khugepaged.c
973 -@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
974 - .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
975 - };
976 -
977 -+#ifdef CONFIG_SYSFS
978 - static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
979 - struct kobj_attribute *attr,
980 - char *buf)
981 -@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
982 - .attrs = khugepaged_attr,
983 - .name = "khugepaged",
984 - };
985 -+#endif /* CONFIG_SYSFS */
986 -
987 - #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
988 -
989 -diff --git a/mm/mlock.c b/mm/mlock.c
990 -index 14645be..9c91acc 100644
991 ---- a/mm/mlock.c
992 -+++ b/mm/mlock.c
993 -@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
994 - */
995 - spin_lock_irq(zone_lru_lock(zone));
996 -
997 -- nr_pages = hpage_nr_pages(page);
998 -- if (!TestClearPageMlocked(page))
999 -+ if (!TestClearPageMlocked(page)) {
1000 -+ /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
1001 -+ nr_pages = 1;
1002 - goto unlock_out;
1003 -+ }
1004 -
1005 -+ nr_pages = hpage_nr_pages(page);
1006 - __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
1007 -
1008 - if (__munlock_isolate_lru_page(page, true)) {
1009 -diff --git a/mm/truncate.c b/mm/truncate.c
1010 -index a01cce4..8d8c62d 100644
1011 ---- a/mm/truncate.c
1012 -+++ b/mm/truncate.c
1013 -@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
1014 -
1015 - if (!trylock_page(page))
1016 - continue;
1017 -- WARN_ON(page_to_pgoff(page) != index);
1018 -+ WARN_ON(page_to_index(page) != index);
1019 - if (PageWriteback(page)) {
1020 - unlock_page(page);
1021 - continue;
1022 -@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
1023 - }
1024 -
1025 - lock_page(page);
1026 -- WARN_ON(page_to_pgoff(page) != index);
1027 -+ WARN_ON(page_to_index(page) != index);
1028 - wait_on_page_writeback(page);
1029 - truncate_inode_page(mapping, page);
1030 - unlock_page(page);
1031 -@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
1032 - if (!trylock_page(page))
1033 - continue;
1034 -
1035 -- WARN_ON(page_to_pgoff(page) != index);
1036 -+ WARN_ON(page_to_index(page) != index);
1037 -
1038 - /* Middle of THP: skip */
1039 - if (PageTransTail(page)) {
1040 -@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
1041 - }
1042 -
1043 - lock_page(page);
1044 -- WARN_ON(page_to_pgoff(page) != index);
1045 -+ WARN_ON(page_to_index(page) != index);
1046 - if (page->mapping != mapping) {
1047 - unlock_page(page);
1048 - continue;
1049 -diff --git a/mm/workingset.c b/mm/workingset.c
1050 -index 617475f..fb1f918 100644
1051 ---- a/mm/workingset.c
1052 -+++ b/mm/workingset.c
1053 -@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
1054 - shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
1055 - local_irq_enable();
1056 -
1057 -- if (memcg_kmem_enabled()) {
1058 -+ if (sc->memcg) {
1059 - pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
1060 - LRU_ALL_FILE);
1061 - } else {
1062 -diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
1063 -index 2333777..8af1611 100644
1064 ---- a/net/batman-adv/tp_meter.c
1065 -+++ b/net/batman-adv/tp_meter.c
1066 -@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
1067 - primary_if = batadv_primary_if_get_selected(bat_priv);
1068 - if (unlikely(!primary_if)) {
1069 - err = BATADV_TP_REASON_DST_UNREACHABLE;
1070 -+ tp_vars->reason = err;
1071 - goto out;
1072 - }
1073 -
1074 -diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
1075 -index 0bf6709..6fb4314 100644
1076 ---- a/virt/kvm/arm/vgic/vgic-v2.c
1077 -+++ b/virt/kvm/arm/vgic/vgic-v2.c
1078 -@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
1079 -
1080 - WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
1081 -
1082 -- kvm_notify_acked_irq(vcpu->kvm, 0,
1083 -- intid - VGIC_NR_PRIVATE_IRQS);
1084 -+ /* Only SPIs require notification */
1085 -+ if (vgic_valid_spi(vcpu->kvm, intid))
1086 -+ kvm_notify_acked_irq(vcpu->kvm, 0,
1087 -+ intid - VGIC_NR_PRIVATE_IRQS);
1088 - }
1089 - }
1090 -
1091 -diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
1092 -index 9f0dae3..5c9f974 100644
1093 ---- a/virt/kvm/arm/vgic/vgic-v3.c
1094 -+++ b/virt/kvm/arm/vgic/vgic-v3.c
1095 -@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
1096 -
1097 - WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
1098 -
1099 -- kvm_notify_acked_irq(vcpu->kvm, 0,
1100 -- intid - VGIC_NR_PRIVATE_IRQS);
1101 -+ /* Only SPIs require notification */
1102 -+ if (vgic_valid_spi(vcpu->kvm, intid))
1103 -+ kvm_notify_acked_irq(vcpu->kvm, 0,
1104 -+ intid - VGIC_NR_PRIVATE_IRQS);
1105 - }
1106 -
1107 - /*
1108 -diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1109 -index 1950782..690d15e 100644
1110 ---- a/virt/kvm/kvm_main.c
1111 -+++ b/virt/kvm/kvm_main.c
1112 -@@ -2852,10 +2852,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
1113 -
1114 - ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
1115 - if (ret < 0) {
1116 -- ops->destroy(dev);
1117 - mutex_lock(&kvm->lock);
1118 - list_del(&dev->vm_node);
1119 - mutex_unlock(&kvm->lock);
1120 -+ ops->destroy(dev);
1121 - return ret;
1122 - }
1123 -
1124
1125 diff --git a/4.8.15/1013_linux-4.8.14.patch b/4.8.15/1013_linux-4.8.14.patch
1126 deleted file mode 100644
1127 index 63d837b..0000000
1128 --- a/4.8.15/1013_linux-4.8.14.patch
1129 +++ /dev/null
1130 @@ -1,1725 +0,0 @@
1131 -diff --git a/Makefile b/Makefile
1132 -index b38abe9..6a74924 100644
1133 ---- a/Makefile
1134 -+++ b/Makefile
1135 -@@ -1,6 +1,6 @@
1136 - VERSION = 4
1137 - PATCHLEVEL = 8
1138 --SUBLEVEL = 13
1139 -+SUBLEVEL = 14
1140 - EXTRAVERSION =
1141 - NAME = Psychotic Stoned Sheep
1142 -
1143 -diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
1144 -index c3c12ef..9c0c8fd 100644
1145 ---- a/arch/sparc/kernel/signal_32.c
1146 -+++ b/arch/sparc/kernel/signal_32.c
1147 -@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
1148 - sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
1149 -
1150 - /* 1. Make sure we are not getting garbage from the user */
1151 -- if (!invalid_frame_pointer(sf, sizeof(*sf)))
1152 -+ if (invalid_frame_pointer(sf, sizeof(*sf)))
1153 - goto segv_and_exit;
1154 -
1155 - if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
1156 -@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
1157 -
1158 - synchronize_user_stack();
1159 - sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
1160 -- if (!invalid_frame_pointer(sf, sizeof(*sf)))
1161 -+ if (invalid_frame_pointer(sf, sizeof(*sf)))
1162 - goto segv;
1163 -
1164 - if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
1165 -diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1166 -index 7ac6b62..05c7708 100644
1167 ---- a/arch/sparc/mm/init_64.c
1168 -+++ b/arch/sparc/mm/init_64.c
1169 -@@ -802,8 +802,10 @@ struct mdesc_mblock {
1170 - };
1171 - static struct mdesc_mblock *mblocks;
1172 - static int num_mblocks;
1173 -+static int find_numa_node_for_addr(unsigned long pa,
1174 -+ struct node_mem_mask *pnode_mask);
1175 -
1176 --static unsigned long ra_to_pa(unsigned long addr)
1177 -+static unsigned long __init ra_to_pa(unsigned long addr)
1178 - {
1179 - int i;
1180 -
1181 -@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
1182 - return addr;
1183 - }
1184 -
1185 --static int find_node(unsigned long addr)
1186 -+static int __init find_node(unsigned long addr)
1187 - {
1188 -+ static bool search_mdesc = true;
1189 -+ static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
1190 -+ static int last_index;
1191 - int i;
1192 -
1193 - addr = ra_to_pa(addr);
1194 -@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
1195 - if ((addr & p->mask) == p->val)
1196 - return i;
1197 - }
1198 -- /* The following condition has been observed on LDOM guests.*/
1199 -- WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
1200 -- " rule. Some physical memory will be owned by node 0.");
1201 -- return 0;
1202 -+ /* The following condition has been observed on LDOM guests because
1203 -+ * node_masks only contains the best latency mask and value.
1204 -+ * LDOM guest's mdesc can contain a single latency group to
1205 -+ * cover multiple address range. Print warning message only if the
1206 -+ * address cannot be found in node_masks nor mdesc.
1207 -+ */
1208 -+ if ((search_mdesc) &&
1209 -+ ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
1210 -+ /* find the available node in the mdesc */
1211 -+ last_index = find_numa_node_for_addr(addr, &last_mem_mask);
1212 -+ numadbg("find_node: latency group for address 0x%lx is %d\n",
1213 -+ addr, last_index);
1214 -+ if ((last_index < 0) || (last_index >= num_node_masks)) {
1215 -+ /* WARN_ONCE() and use default group 0 */
1216 -+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
1217 -+ search_mdesc = false;
1218 -+ last_index = 0;
1219 -+ }
1220 -+ }
1221 -+
1222 -+ return last_index;
1223 - }
1224 -
1225 --static u64 memblock_nid_range(u64 start, u64 end, int *nid)
1226 -+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
1227 - {
1228 - *nid = find_node(start);
1229 - start += PAGE_SIZE;
1230 -@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
1231 - return numa_latency[from][to];
1232 - }
1233 -
1234 -+static int find_numa_node_for_addr(unsigned long pa,
1235 -+ struct node_mem_mask *pnode_mask)
1236 -+{
1237 -+ struct mdesc_handle *md = mdesc_grab();
1238 -+ u64 node, arc;
1239 -+ int i = 0;
1240 -+
1241 -+ node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1242 -+ if (node == MDESC_NODE_NULL)
1243 -+ goto out;
1244 -+
1245 -+ mdesc_for_each_node_by_name(md, node, "group") {
1246 -+ mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1247 -+ u64 target = mdesc_arc_target(md, arc);
1248 -+ struct mdesc_mlgroup *m = find_mlgroup(target);
1249 -+
1250 -+ if (!m)
1251 -+ continue;
1252 -+ if ((pa & m->mask) == m->match) {
1253 -+ if (pnode_mask) {
1254 -+ pnode_mask->mask = m->mask;
1255 -+ pnode_mask->val = m->match;
1256 -+ }
1257 -+ mdesc_release(md);
1258 -+ return i;
1259 -+ }
1260 -+ }
1261 -+ i++;
1262 -+ }
1263 -+
1264 -+out:
1265 -+ mdesc_release(md);
1266 -+ return -1;
1267 -+}
1268 -+
1269 - static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1270 - {
1271 - int i;
1272 -diff --git a/block/blk-map.c b/block/blk-map.c
1273 -index b8657fa..27fd8d92 100644
1274 ---- a/block/blk-map.c
1275 -+++ b/block/blk-map.c
1276 -@@ -118,6 +118,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
1277 - struct iov_iter i;
1278 - int ret;
1279 -
1280 -+ if (!iter_is_iovec(iter))
1281 -+ goto fail;
1282 -+
1283 - if (map_data)
1284 - copy = true;
1285 - else if (iov_iter_alignment(iter) & align)
1286 -@@ -140,6 +143,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
1287 -
1288 - unmap_rq:
1289 - __blk_rq_unmap_user(bio);
1290 -+fail:
1291 - rq->bio = NULL;
1292 - return -EINVAL;
1293 - }
1294 -diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1295 -index bda37d3..b081929 100644
1296 ---- a/drivers/net/dsa/b53/b53_common.c
1297 -+++ b/drivers/net/dsa/b53/b53_common.c
1298 -@@ -904,9 +904,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
1299 -
1300 - vl->members |= BIT(port) | BIT(cpu_port);
1301 - if (untagged)
1302 -- vl->untag |= BIT(port) | BIT(cpu_port);
1303 -+ vl->untag |= BIT(port);
1304 - else
1305 -- vl->untag &= ~(BIT(port) | BIT(cpu_port));
1306 -+ vl->untag &= ~BIT(port);
1307 -+ vl->untag &= ~BIT(cpu_port);
1308 -
1309 - b53_set_vlan_entry(dev, vid, vl);
1310 - b53_fast_age_vlan(dev, vid);
1311 -@@ -915,8 +916,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
1312 - if (pvid) {
1313 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
1314 - vlan->vid_end);
1315 -- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
1316 -- vlan->vid_end);
1317 - b53_fast_age_vlan(dev, vid);
1318 - }
1319 - }
1320 -@@ -926,7 +925,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
1321 - {
1322 - struct b53_device *dev = ds_to_priv(ds);
1323 - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1324 -- unsigned int cpu_port = dev->cpu_port;
1325 - struct b53_vlan *vl;
1326 - u16 vid;
1327 - u16 pvid;
1328 -@@ -939,8 +937,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
1329 - b53_get_vlan_entry(dev, vid, vl);
1330 -
1331 - vl->members &= ~BIT(port);
1332 -- if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1333 -- vl->members = 0;
1334 -
1335 - if (pvid == vid) {
1336 - if (is5325(dev) || is5365(dev))
1337 -@@ -949,18 +945,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
1338 - pvid = 0;
1339 - }
1340 -
1341 -- if (untagged) {
1342 -+ if (untagged)
1343 - vl->untag &= ~(BIT(port));
1344 -- if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
1345 -- vl->untag = 0;
1346 -- }
1347 -
1348 - b53_set_vlan_entry(dev, vid, vl);
1349 - b53_fast_age_vlan(dev, vid);
1350 - }
1351 -
1352 - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1353 -- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
1354 - b53_fast_age_vlan(dev, pvid);
1355 -
1356 - return 0;
1357 -diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
1358 -index b2b8387..4036865 100644
1359 ---- a/drivers/net/dsa/bcm_sf2.c
1360 -+++ b/drivers/net/dsa/bcm_sf2.c
1361 -@@ -1167,6 +1167,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
1362 - struct phy_device *phydev)
1363 - {
1364 - struct bcm_sf2_priv *priv = ds_to_priv(ds);
1365 -+ struct ethtool_eee *p = &priv->port_sts[port].eee;
1366 - u32 id_mode_dis = 0, port_mode;
1367 - const char *str = NULL;
1368 - u32 reg;
1369 -@@ -1241,6 +1242,9 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
1370 - reg |= DUPLX_MODE;
1371 -
1372 - core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1373 -+
1374 -+ if (!phydev->is_pseudo_fixed_link)
1375 -+ p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
1376 - }
1377 -
1378 - static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
1379 -diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1380 -index 5414563..842d8b9 100644
1381 ---- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1382 -+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1383 -@@ -1172,6 +1172,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1384 - struct bcmgenet_tx_ring *ring)
1385 - {
1386 - struct bcmgenet_priv *priv = netdev_priv(dev);
1387 -+ struct device *kdev = &priv->pdev->dev;
1388 - struct enet_cb *tx_cb_ptr;
1389 - struct netdev_queue *txq;
1390 - unsigned int pkts_compl = 0;
1391 -@@ -1199,13 +1200,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1392 - if (tx_cb_ptr->skb) {
1393 - pkts_compl++;
1394 - bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
1395 -- dma_unmap_single(&dev->dev,
1396 -+ dma_unmap_single(kdev,
1397 - dma_unmap_addr(tx_cb_ptr, dma_addr),
1398 - dma_unmap_len(tx_cb_ptr, dma_len),
1399 - DMA_TO_DEVICE);
1400 - bcmgenet_free_cb(tx_cb_ptr);
1401 - } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1402 -- dma_unmap_page(&dev->dev,
1403 -+ dma_unmap_page(kdev,
1404 - dma_unmap_addr(tx_cb_ptr, dma_addr),
1405 - dma_unmap_len(tx_cb_ptr, dma_len),
1406 - DMA_TO_DEVICE);
1407 -@@ -1775,6 +1776,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1408 -
1409 - static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1410 - {
1411 -+ struct device *kdev = &priv->pdev->dev;
1412 - struct enet_cb *cb;
1413 - int i;
1414 -
1415 -@@ -1782,7 +1784,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1416 - cb = &priv->rx_cbs[i];
1417 -
1418 - if (dma_unmap_addr(cb, dma_addr)) {
1419 -- dma_unmap_single(&priv->dev->dev,
1420 -+ dma_unmap_single(kdev,
1421 - dma_unmap_addr(cb, dma_addr),
1422 - priv->rx_buf_len, DMA_FROM_DEVICE);
1423 - dma_unmap_addr_set(cb, dma_addr, 0);
1424 -diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
1425 -index d954a97..ef0dbcf 100644
1426 ---- a/drivers/net/ethernet/cadence/macb.c
1427 -+++ b/drivers/net/ethernet/cadence/macb.c
1428 -@@ -959,6 +959,7 @@ static inline void macb_init_rx_ring(struct macb *bp)
1429 - addr += bp->rx_buffer_size;
1430 - }
1431 - bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
1432 -+ bp->rx_tail = 0;
1433 - }
1434 -
1435 - static int macb_rx(struct macb *bp, int budget)
1436 -@@ -1597,8 +1598,6 @@ static void macb_init_rings(struct macb *bp)
1437 - bp->queues[0].tx_head = 0;
1438 - bp->queues[0].tx_tail = 0;
1439 - bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1440 --
1441 -- bp->rx_tail = 0;
1442 - }
1443 -
1444 - static void macb_reset_hw(struct macb *bp)
1445 -diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
1446 -index 467138b..d747e17 100644
1447 ---- a/drivers/net/ethernet/marvell/sky2.c
1448 -+++ b/drivers/net/ethernet/marvell/sky2.c
1449 -@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
1450 -
1451 - static void sky2_shutdown(struct pci_dev *pdev)
1452 - {
1453 -+ struct sky2_hw *hw = pci_get_drvdata(pdev);
1454 -+ int port;
1455 -+
1456 -+ for (port = 0; port < hw->ports; port++) {
1457 -+ struct net_device *ndev = hw->dev[port];
1458 -+
1459 -+ rtnl_lock();
1460 -+ if (netif_running(ndev)) {
1461 -+ dev_close(ndev);
1462 -+ netif_device_detach(ndev);
1463 -+ }
1464 -+ rtnl_unlock();
1465 -+ }
1466 - sky2_suspend(&pdev->dev);
1467 - pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
1468 - pci_set_power_state(pdev, PCI_D3hot);
1469 -diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1470 -index 054e795..92c9a95 100644
1471 ---- a/drivers/net/ethernet/renesas/sh_eth.c
1472 -+++ b/drivers/net/ethernet/renesas/sh_eth.c
1473 -@@ -518,7 +518,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
1474 -
1475 - .ecsr_value = ECSR_ICD,
1476 - .ecsipr_value = ECSIPR_ICDIP,
1477 -- .eesipr_value = 0xff7f009f,
1478 -+ .eesipr_value = 0xe77f009f,
1479 -
1480 - .tx_check = EESR_TC1 | EESR_FTC,
1481 - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
1482 -diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
1483 -index 16af1ce..5ad706b 100644
1484 ---- a/drivers/net/geneve.c
1485 -+++ b/drivers/net/geneve.c
1486 -@@ -844,7 +844,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1487 - struct geneve_dev *geneve = netdev_priv(dev);
1488 - struct geneve_sock *gs4 = geneve->sock4;
1489 - struct rtable *rt = NULL;
1490 -- const struct iphdr *iip; /* interior IP header */
1491 - int err = -EINVAL;
1492 - struct flowi4 fl4;
1493 - __u8 tos, ttl;
1494 -@@ -871,8 +870,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1495 - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1496 - skb_reset_mac_header(skb);
1497 -
1498 -- iip = ip_hdr(skb);
1499 --
1500 - if (info) {
1501 - const struct ip_tunnel_key *key = &info->key;
1502 - u8 *opts = NULL;
1503 -@@ -892,7 +889,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1504 - if (unlikely(err))
1505 - goto tx_error;
1506 -
1507 -- tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
1508 -+ tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1509 - ttl = key->ttl;
1510 - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
1511 - } else {
1512 -@@ -901,7 +898,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1513 - if (unlikely(err))
1514 - goto tx_error;
1515 -
1516 -- tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
1517 -+ tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
1518 - ttl = geneve->ttl;
1519 - if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
1520 - ttl = 1;
1521 -@@ -934,7 +931,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1522 - struct geneve_dev *geneve = netdev_priv(dev);
1523 - struct geneve_sock *gs6 = geneve->sock6;
1524 - struct dst_entry *dst = NULL;
1525 -- const struct iphdr *iip; /* interior IP header */
1526 - int err = -EINVAL;
1527 - struct flowi6 fl6;
1528 - __u8 prio, ttl;
1529 -@@ -959,8 +955,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1530 - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
1531 - skb_reset_mac_header(skb);
1532 -
1533 -- iip = ip_hdr(skb);
1534 --
1535 - if (info) {
1536 - const struct ip_tunnel_key *key = &info->key;
1537 - u8 *opts = NULL;
1538 -@@ -981,7 +975,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1539 - if (unlikely(err))
1540 - goto tx_error;
1541 -
1542 -- prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
1543 -+ prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1544 - ttl = key->ttl;
1545 - label = info->key.label;
1546 - } else {
1547 -@@ -991,7 +985,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1548 - goto tx_error;
1549 -
1550 - prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
1551 -- iip, skb);
1552 -+ ip_hdr(skb), skb);
1553 - ttl = geneve->ttl;
1554 - if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
1555 - ttl = 1;
1556 -diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1557 -index c47ec0a..dd623f6 100644
1558 ---- a/drivers/net/usb/cdc_ether.c
1559 -+++ b/drivers/net/usb/cdc_ether.c
1560 -@@ -388,12 +388,6 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
1561 - case USB_CDC_NOTIFY_NETWORK_CONNECTION:
1562 - netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
1563 - event->wValue ? "on" : "off");
1564 --
1565 -- /* Work-around for devices with broken off-notifications */
1566 -- if (event->wValue &&
1567 -- !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state))
1568 -- usbnet_link_change(dev, 0, 0);
1569 --
1570 - usbnet_link_change(dev, !!event->wValue, 0);
1571 - break;
1572 - case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
1573 -@@ -466,6 +460,36 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1574 - return 1;
1575 - }
1576 -
1577 -+/* Ensure correct link state
1578 -+ *
1579 -+ * Some devices (ZTE MF823/831/910) export two carrier on notifications when
1580 -+ * connected. This causes the link state to be incorrect. Work around this by
1581 -+ * always setting the state to off, then on.
1582 -+ */
1583 -+void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
1584 -+{
1585 -+ struct usb_cdc_notification *event;
1586 -+
1587 -+ if (urb->actual_length < sizeof(*event))
1588 -+ return;
1589 -+
1590 -+ event = urb->transfer_buffer;
1591 -+
1592 -+ if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) {
1593 -+ usbnet_cdc_status(dev, urb);
1594 -+ return;
1595 -+ }
1596 -+
1597 -+ netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
1598 -+ event->wValue ? "on" : "off");
1599 -+
1600 -+ if (event->wValue &&
1601 -+ netif_carrier_ok(dev->net))
1602 -+ netif_carrier_off(dev->net);
1603 -+
1604 -+ usbnet_link_change(dev, !!event->wValue, 0);
1605 -+}
1606 -+
1607 - static const struct driver_info cdc_info = {
1608 - .description = "CDC Ethernet Device",
1609 - .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
1610 -@@ -481,7 +505,7 @@ static const struct driver_info zte_cdc_info = {
1611 - .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
1612 - .bind = usbnet_cdc_zte_bind,
1613 - .unbind = usbnet_cdc_unbind,
1614 -- .status = usbnet_cdc_status,
1615 -+ .status = usbnet_cdc_zte_status,
1616 - .set_rx_mode = usbnet_cdc_update_filter,
1617 - .manage_power = usbnet_manage_power,
1618 - .rx_fixup = usbnet_cdc_zte_rx_fixup,
1619 -diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1620 -index bf3fd34..d807209 100644
1621 ---- a/drivers/net/virtio_net.c
1622 -+++ b/drivers/net/virtio_net.c
1623 -@@ -1468,6 +1468,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1624 - netif_napi_del(&vi->rq[i].napi);
1625 - }
1626 -
1627 -+ /* We called napi_hash_del() before netif_napi_del(),
1628 -+ * we need to respect an RCU grace period before freeing vi->rq
1629 -+ */
1630 -+ synchronize_net();
1631 -+
1632 - kfree(vi->rq);
1633 - kfree(vi->sq);
1634 - }
1635 -diff --git a/include/linux/uio.h b/include/linux/uio.h
1636 -index 75b4aaf..944e7ba 100644
1637 ---- a/include/linux/uio.h
1638 -+++ b/include/linux/uio.h
1639 -@@ -102,12 +102,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages);
1640 -
1641 - const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
1642 -
1643 --static inline size_t iov_iter_count(struct iov_iter *i)
1644 -+static inline size_t iov_iter_count(const struct iov_iter *i)
1645 - {
1646 - return i->count;
1647 - }
1648 -
1649 --static inline bool iter_is_iovec(struct iov_iter *i)
1650 -+static inline bool iter_is_iovec(const struct iov_iter *i)
1651 - {
1652 - return !(i->type & (ITER_BVEC | ITER_KVEC));
1653 - }
1654 -diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
1655 -index d15214d..2a1abbf 100644
1656 ---- a/include/net/gro_cells.h
1657 -+++ b/include/net/gro_cells.h
1658 -@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
1659 - struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
1660 -
1661 - __skb_queue_head_init(&cell->napi_skbs);
1662 -+
1663 -+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
1664 -+
1665 - netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
1666 - napi_enable(&cell->napi);
1667 - }
1668 -diff --git a/net/core/flow.c b/net/core/flow.c
1669 -index 3937b1b..18e8893 100644
1670 ---- a/net/core/flow.c
1671 -+++ b/net/core/flow.c
1672 -@@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work)
1673 - list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
1674 - flow_entry_kill(fce, xfrm);
1675 - atomic_dec(&xfrm->flow_cache_gc_count);
1676 -- WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
1677 - }
1678 - }
1679 -
1680 -@@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
1681 - if (fcp->hash_count > fc->high_watermark)
1682 - flow_cache_shrink(fc, fcp);
1683 -
1684 -- if (fcp->hash_count > 2 * fc->high_watermark ||
1685 -- atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
1686 -- atomic_inc(&net->xfrm.flow_cache_genid);
1687 -+ if (atomic_read(&net->xfrm.flow_cache_gc_count) >
1688 -+ 2 * num_online_cpus() * fc->high_watermark) {
1689 - flo = ERR_PTR(-ENOBUFS);
1690 - goto ret_object;
1691 - }
1692 -diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
1693 -index 2c2eb1b..2e9a1c2 100644
1694 ---- a/net/core/net_namespace.c
1695 -+++ b/net/core/net_namespace.c
1696 -@@ -217,6 +217,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
1697 - bool alloc;
1698 - int id;
1699 -
1700 -+ if (atomic_read(&net->count) == 0)
1701 -+ return NETNSA_NSID_NOT_ASSIGNED;
1702 - spin_lock_irqsave(&net->nsid_lock, flags);
1703 - alloc = atomic_read(&peer->count) == 0 ? false : true;
1704 - id = __peernet2id_alloc(net, peer, &alloc);
1705 -diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1706 -index 189cc78..08c3702 100644
1707 ---- a/net/core/rtnetlink.c
1708 -+++ b/net/core/rtnetlink.c
1709 -@@ -1578,7 +1578,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1710 - head = &net->dev_index_head[h];
1711 - hlist_for_each_entry(dev, head, index_hlist) {
1712 - if (link_dump_filtered(dev, master_idx, kind_ops))
1713 -- continue;
1714 -+ goto cont;
1715 - if (idx < s_idx)
1716 - goto cont;
1717 - err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
1718 -@@ -2791,7 +2791,10 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
1719 -
1720 - static inline size_t rtnl_fdb_nlmsg_size(void)
1721 - {
1722 -- return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
1723 -+ return NLMSG_ALIGN(sizeof(struct ndmsg)) +
1724 -+ nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
1725 -+ nla_total_size(sizeof(u16)) + /* NDA_VLAN */
1726 -+ 0;
1727 - }
1728 -
1729 - static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
1730 -diff --git a/net/core/sock.c b/net/core/sock.c
1731 -index 10acacc..ba27920 100644
1732 ---- a/net/core/sock.c
1733 -+++ b/net/core/sock.c
1734 -@@ -715,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
1735 - val = min_t(u32, val, sysctl_wmem_max);
1736 - set_sndbuf:
1737 - sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1738 -- sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
1739 -+ sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
1740 - /* Wake up sending tasks if we upped the value. */
1741 - sk->sk_write_space(sk);
1742 - break;
1743 -@@ -751,7 +751,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
1744 - * returning the value we actually used in getsockopt
1745 - * is the most desirable behavior.
1746 - */
1747 -- sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
1748 -+ sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
1749 - break;
1750 -
1751 - case SO_RCVBUFFORCE:
1752 -diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
1753 -index b567c87..edbe59d 100644
1754 ---- a/net/dccp/ipv4.c
1755 -+++ b/net/dccp/ipv4.c
1756 -@@ -700,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
1757 - {
1758 - const struct dccp_hdr *dh;
1759 - unsigned int cscov;
1760 -+ u8 dccph_doff;
1761 -
1762 - if (skb->pkt_type != PACKET_HOST)
1763 - return 1;
1764 -@@ -721,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
1765 - /*
1766 - * If P.Data Offset is too small for packet type, drop packet and return
1767 - */
1768 -- if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
1769 -- DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
1770 -+ dccph_doff = dh->dccph_doff;
1771 -+ if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
1772 -+ DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
1773 - return 1;
1774 - }
1775 - /*
1776 - * If P.Data Offset is too too large for packet, drop packet and return
1777 - */
1778 -- if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
1779 -- DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
1780 -+ if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
1781 -+ DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
1782 - return 1;
1783 - }
1784 --
1785 -+ dh = dccp_hdr(skb);
1786 - /*
1787 - * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
1788 - * has short sequence numbers), drop packet and return
1789 -diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
1790 -index f30bad9..3bdecd2 100644
1791 ---- a/net/dsa/dsa2.c
1792 -+++ b/net/dsa/dsa2.c
1793 -@@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree)
1794 - struct dsa_switch_tree *dst;
1795 -
1796 - list_for_each_entry(dst, &dsa_switch_trees, list)
1797 -- if (dst->tree == tree)
1798 -+ if (dst->tree == tree) {
1799 -+ kref_get(&dst->refcount);
1800 - return dst;
1801 -+ }
1802 - return NULL;
1803 - }
1804 -
1805 -diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
1806 -index eebbc0f..ed22af6 100644
1807 ---- a/net/ipv4/af_inet.c
1808 -+++ b/net/ipv4/af_inet.c
1809 -@@ -1237,7 +1237,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1810 - fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
1811 -
1812 - /* fixed ID is invalid if DF bit is not set */
1813 -- if (fixedid && !(iph->frag_off & htons(IP_DF)))
1814 -+ if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
1815 - goto out;
1816 - }
1817 -
1818 -diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
1819 -index d95631d..20fb25e 100644
1820 ---- a/net/ipv4/esp4.c
1821 -+++ b/net/ipv4/esp4.c
1822 -@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
1823 - esph = (void *)skb_push(skb, 4);
1824 - *seqhi = esph->spi;
1825 - esph->spi = esph->seq_no;
1826 -- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
1827 -+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
1828 - aead_request_set_callback(req, 0, esp_input_done_esn, skb);
1829 - }
1830 -
1831 -diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1832 -index 1b25daf..9301308 100644
1833 ---- a/net/ipv4/fib_frontend.c
1834 -+++ b/net/ipv4/fib_frontend.c
1835 -@@ -157,7 +157,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
1836 -
1837 - int fib_unmerge(struct net *net)
1838 - {
1839 -- struct fib_table *old, *new;
1840 -+ struct fib_table *old, *new, *main_table;
1841 -
1842 - /* attempt to fetch local table if it has been allocated */
1843 - old = fib_get_table(net, RT_TABLE_LOCAL);
1844 -@@ -168,11 +168,21 @@ int fib_unmerge(struct net *net)
1845 - if (!new)
1846 - return -ENOMEM;
1847 -
1848 -+ /* table is already unmerged */
1849 -+ if (new == old)
1850 -+ return 0;
1851 -+
1852 - /* replace merged table with clean table */
1853 -- if (new != old) {
1854 -- fib_replace_table(net, old, new);
1855 -- fib_free_table(old);
1856 -- }
1857 -+ fib_replace_table(net, old, new);
1858 -+ fib_free_table(old);
1859 -+
1860 -+ /* attempt to fetch main table if it has been allocated */
1861 -+ main_table = fib_get_table(net, RT_TABLE_MAIN);
1862 -+ if (!main_table)
1863 -+ return 0;
1864 -+
1865 -+ /* flush local entries from main table */
1866 -+ fib_table_flush_external(main_table);
1867 -
1868 - return 0;
1869 - }
1870 -diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
1871 -index 7ef7031..84fd7272 100644
1872 ---- a/net/ipv4/fib_trie.c
1873 -+++ b/net/ipv4/fib_trie.c
1874 -@@ -681,6 +681,13 @@ static unsigned char update_suffix(struct key_vector *tn)
1875 - {
1876 - unsigned char slen = tn->pos;
1877 - unsigned long stride, i;
1878 -+ unsigned char slen_max;
1879 -+
1880 -+ /* only vector 0 can have a suffix length greater than or equal to
1881 -+ * tn->pos + tn->bits, the second highest node will have a suffix
1882 -+ * length at most of tn->pos + tn->bits - 1
1883 -+ */
1884 -+ slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
1885 -
1886 - /* search though the list of children looking for nodes that might
1887 - * have a suffix greater than the one we currently have. This is
1888 -@@ -698,12 +705,8 @@ static unsigned char update_suffix(struct key_vector *tn)
1889 - slen = n->slen;
1890 - i &= ~(stride - 1);
1891 -
1892 -- /* if slen covers all but the last bit we can stop here
1893 -- * there will be nothing longer than that since only node
1894 -- * 0 and 1 << (bits - 1) could have that as their suffix
1895 -- * length.
1896 -- */
1897 -- if ((slen + 1) >= (tn->pos + tn->bits))
1898 -+ /* stop searching if we have hit the maximum possible value */
1899 -+ if (slen >= slen_max)
1900 - break;
1901 - }
1902 -
1903 -@@ -875,39 +878,27 @@ static struct key_vector *resize(struct trie *t, struct key_vector *tn)
1904 - return collapse(t, tn);
1905 -
1906 - /* update parent in case halve failed */
1907 -- tp = node_parent(tn);
1908 --
1909 -- /* Return if at least one deflate was run */
1910 -- if (max_work != MAX_WORK)
1911 -- return tp;
1912 --
1913 -- /* push the suffix length to the parent node */
1914 -- if (tn->slen > tn->pos) {
1915 -- unsigned char slen = update_suffix(tn);
1916 --
1917 -- if (slen > tp->slen)
1918 -- tp->slen = slen;
1919 -- }
1920 --
1921 -- return tp;
1922 -+ return node_parent(tn);
1923 - }
1924 -
1925 --static void leaf_pull_suffix(struct key_vector *tp, struct key_vector *l)
1926 -+static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
1927 - {
1928 -- while ((tp->slen > tp->pos) && (tp->slen > l->slen)) {
1929 -- if (update_suffix(tp) > l->slen)
1930 -+ unsigned char node_slen = tn->slen;
1931 -+
1932 -+ while ((node_slen > tn->pos) && (node_slen > slen)) {
1933 -+ slen = update_suffix(tn);
1934 -+ if (node_slen == slen)
1935 - break;
1936 -- tp = node_parent(tp);
1937 -+
1938 -+ tn = node_parent(tn);
1939 -+ node_slen = tn->slen;
1940 - }
1941 - }
1942 -
1943 --static void leaf_push_suffix(struct key_vector *tn, struct key_vector *l)
1944 -+static void node_push_suffix(struct key_vector *tn, unsigned char slen)
1945 - {
1946 -- /* if this is a new leaf then tn will be NULL and we can sort
1947 -- * out parent suffix lengths as a part of trie_rebalance
1948 -- */
1949 -- while (tn->slen < l->slen) {
1950 -- tn->slen = l->slen;
1951 -+ while (tn->slen < slen) {
1952 -+ tn->slen = slen;
1953 - tn = node_parent(tn);
1954 - }
1955 - }
1956 -@@ -1028,6 +1019,7 @@ static int fib_insert_node(struct trie *t, struct key_vector *tp,
1957 - }
1958 -
1959 - /* Case 3: n is NULL, and will just insert a new leaf */
1960 -+ node_push_suffix(tp, new->fa_slen);
1961 - NODE_INIT_PARENT(l, tp);
1962 - put_child_root(tp, key, l);
1963 - trie_rebalance(t, tp);
1964 -@@ -1069,7 +1061,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
1965 - /* if we added to the tail node then we need to update slen */
1966 - if (l->slen < new->fa_slen) {
1967 - l->slen = new->fa_slen;
1968 -- leaf_push_suffix(tp, l);
1969 -+ node_push_suffix(tp, new->fa_slen);
1970 - }
1971 -
1972 - return 0;
1973 -@@ -1470,6 +1462,8 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
1974 - * out parent suffix lengths as a part of trie_rebalance
1975 - */
1976 - if (hlist_empty(&l->leaf)) {
1977 -+ if (tp->slen == l->slen)
1978 -+ node_pull_suffix(tp, tp->pos);
1979 - put_child_root(tp, l->key, NULL);
1980 - node_free(l);
1981 - trie_rebalance(t, tp);
1982 -@@ -1482,7 +1476,7 @@ static void fib_remove_alias(struct trie *t, struct key_vector *tp,
1983 -
1984 - /* update the trie with the latest suffix length */
1985 - l->slen = fa->fa_slen;
1986 -- leaf_pull_suffix(tp, l);
1987 -+ node_pull_suffix(tp, fa->fa_slen);
1988 - }
1989 -
1990 - /* Caller must hold RTNL. */
1991 -@@ -1713,8 +1707,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
1992 - local_l = fib_find_node(lt, &local_tp, l->key);
1993 -
1994 - if (fib_insert_alias(lt, local_tp, local_l, new_fa,
1995 -- NULL, l->key))
1996 -+ NULL, l->key)) {
1997 -+ kmem_cache_free(fn_alias_kmem, new_fa);
1998 - goto out;
1999 -+ }
2000 - }
2001 -
2002 - /* stop loop if key wrapped back to 0 */
2003 -@@ -1751,6 +1747,10 @@ void fib_table_flush_external(struct fib_table *tb)
2004 - if (IS_TRIE(pn))
2005 - break;
2006 -
2007 -+ /* update the suffix to address pulled leaves */
2008 -+ if (pn->slen > pn->pos)
2009 -+ update_suffix(pn);
2010 -+
2011 - /* resize completed node */
2012 - pn = resize(t, pn);
2013 - cindex = get_index(pkey, pn);
2014 -@@ -1826,6 +1826,10 @@ int fib_table_flush(struct fib_table *tb)
2015 - if (IS_TRIE(pn))
2016 - break;
2017 -
2018 -+ /* update the suffix to address pulled leaves */
2019 -+ if (pn->slen > pn->pos)
2020 -+ update_suffix(pn);
2021 -+
2022 - /* resize completed node */
2023 - pn = resize(t, pn);
2024 - cindex = get_index(pkey, pn);
2025 -diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2026 -index 307daed..f4790c3 100644
2027 ---- a/net/ipv4/ip_output.c
2028 -+++ b/net/ipv4/ip_output.c
2029 -@@ -98,6 +98,9 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2030 -
2031 - iph->tot_len = htons(skb->len);
2032 - ip_send_check(iph);
2033 -+
2034 -+ skb->protocol = htons(ETH_P_IP);
2035 -+
2036 - return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
2037 - net, sk, skb, NULL, skb_dst(skb)->dev,
2038 - dst_output);
2039 -diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2040 -index 66ddcb6..dcdd5ae 100644
2041 ---- a/net/ipv4/ping.c
2042 -+++ b/net/ipv4/ping.c
2043 -@@ -662,6 +662,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
2044 - if (len > 0xFFFF)
2045 - return -EMSGSIZE;
2046 -
2047 -+ /* Must have at least a full ICMP header. */
2048 -+ if (len < icmph_len)
2049 -+ return -EINVAL;
2050 -+
2051 - /*
2052 - * Check the flags.
2053 - */
2054 -diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2055 -index c0d71e7..a2d54f5 100644
2056 ---- a/net/ipv4/udp.c
2057 -+++ b/net/ipv4/udp.c
2058 -@@ -1451,7 +1451,7 @@ static void udp_v4_rehash(struct sock *sk)
2059 - udp_lib_rehash(sk, new_hash);
2060 - }
2061 -
2062 --static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2063 -+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2064 - {
2065 - int rc;
2066 -
2067 -diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
2068 -index 7e0fe4b..feb50a1 100644
2069 ---- a/net/ipv4/udp_impl.h
2070 -+++ b/net/ipv4/udp_impl.h
2071 -@@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
2072 - int flags, int *addr_len);
2073 - int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
2074 - int flags);
2075 --int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2076 -+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2077 - void udp_destroy_sock(struct sock *sk);
2078 -
2079 - #ifdef CONFIG_PROC_FS
2080 -diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
2081 -index 2eea073..705d9fb 100644
2082 ---- a/net/ipv4/udplite.c
2083 -+++ b/net/ipv4/udplite.c
2084 -@@ -50,7 +50,7 @@ struct proto udplite_prot = {
2085 - .sendmsg = udp_sendmsg,
2086 - .recvmsg = udp_recvmsg,
2087 - .sendpage = udp_sendpage,
2088 -- .backlog_rcv = udp_queue_rcv_skb,
2089 -+ .backlog_rcv = __udp_queue_rcv_skb,
2090 - .hash = udp_lib_hash,
2091 - .unhash = udp_lib_unhash,
2092 - .get_port = udp_v4_get_port,
2093 -diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2094 -index f5432d6..8f2e36f 100644
2095 ---- a/net/ipv6/addrconf.c
2096 -+++ b/net/ipv6/addrconf.c
2097 -@@ -163,7 +163,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2098 -
2099 - static void addrconf_dad_start(struct inet6_ifaddr *ifp);
2100 - static void addrconf_dad_work(struct work_struct *w);
2101 --static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
2102 -+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
2103 - static void addrconf_dad_run(struct inet6_dev *idev);
2104 - static void addrconf_rs_timer(unsigned long data);
2105 - static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
2106 -@@ -2893,6 +2893,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2107 - spin_lock_bh(&ifp->lock);
2108 - ifp->flags &= ~IFA_F_TENTATIVE;
2109 - spin_unlock_bh(&ifp->lock);
2110 -+ rt_genid_bump_ipv6(dev_net(idev->dev));
2111 - ipv6_ifa_notify(RTM_NEWADDR, ifp);
2112 - in6_ifa_put(ifp);
2113 - }
2114 -@@ -3736,7 +3737,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
2115 - {
2116 - struct inet6_dev *idev = ifp->idev;
2117 - struct net_device *dev = idev->dev;
2118 -- bool notify = false;
2119 -+ bool bump_id, notify = false;
2120 -
2121 - addrconf_join_solict(dev, &ifp->addr);
2122 -
2123 -@@ -3751,11 +3752,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
2124 - idev->cnf.accept_dad < 1 ||
2125 - !(ifp->flags&IFA_F_TENTATIVE) ||
2126 - ifp->flags & IFA_F_NODAD) {
2127 -+ bump_id = ifp->flags & IFA_F_TENTATIVE;
2128 - ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2129 - spin_unlock(&ifp->lock);
2130 - read_unlock_bh(&idev->lock);
2131 -
2132 -- addrconf_dad_completed(ifp);
2133 -+ addrconf_dad_completed(ifp, bump_id);
2134 - return;
2135 - }
2136 -
2137 -@@ -3815,8 +3817,8 @@ static void addrconf_dad_work(struct work_struct *w)
2138 - struct inet6_ifaddr,
2139 - dad_work);
2140 - struct inet6_dev *idev = ifp->idev;
2141 -+ bool bump_id, disable_ipv6 = false;
2142 - struct in6_addr mcaddr;
2143 -- bool disable_ipv6 = false;
2144 -
2145 - enum {
2146 - DAD_PROCESS,
2147 -@@ -3886,11 +3888,12 @@ static void addrconf_dad_work(struct work_struct *w)
2148 - * DAD was successful
2149 - */
2150 -
2151 -+ bump_id = ifp->flags & IFA_F_TENTATIVE;
2152 - ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
2153 - spin_unlock(&ifp->lock);
2154 - write_unlock_bh(&idev->lock);
2155 -
2156 -- addrconf_dad_completed(ifp);
2157 -+ addrconf_dad_completed(ifp, bump_id);
2158 -
2159 - goto out;
2160 - }
2161 -@@ -3927,7 +3930,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
2162 - return true;
2163 - }
2164 -
2165 --static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2166 -+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
2167 - {
2168 - struct net_device *dev = ifp->idev->dev;
2169 - struct in6_addr lladdr;
2170 -@@ -3978,6 +3981,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2171 - spin_unlock(&ifp->lock);
2172 - write_unlock_bh(&ifp->idev->lock);
2173 - }
2174 -+
2175 -+ if (bump_id)
2176 -+ rt_genid_bump_ipv6(dev_net(dev));
2177 - }
2178 -
2179 - static void addrconf_dad_run(struct inet6_dev *idev)
2180 -diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
2181 -index 060a60b..111ba55 100644
2182 ---- a/net/ipv6/esp6.c
2183 -+++ b/net/ipv6/esp6.c
2184 -@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
2185 - esph = (void *)skb_push(skb, 4);
2186 - *seqhi = esph->spi;
2187 - esph->spi = esph->seq_no;
2188 -- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
2189 -+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
2190 - aead_request_set_callback(req, 0, esp_input_done_esn, skb);
2191 - }
2192 -
2193 -diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
2194 -index a09418b..93294cf 100644
2195 ---- a/net/ipv6/ip6_offload.c
2196 -+++ b/net/ipv6/ip6_offload.c
2197 -@@ -98,7 +98,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
2198 - segs = ops->callbacks.gso_segment(skb, features);
2199 - }
2200 -
2201 -- if (IS_ERR(segs))
2202 -+ if (IS_ERR_OR_NULL(segs))
2203 - goto out;
2204 -
2205 - for (skb = segs; skb; skb = skb->next) {
2206 -diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2207 -index 41489f3..da4e7b3 100644
2208 ---- a/net/ipv6/ip6_tunnel.c
2209 -+++ b/net/ipv6/ip6_tunnel.c
2210 -@@ -1014,6 +1014,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
2211 - int mtu;
2212 - unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
2213 - unsigned int max_headroom = psh_hlen;
2214 -+ bool use_cache = false;
2215 - int err = -1;
2216 -
2217 - /* NBMA tunnel */
2218 -@@ -1038,7 +1039,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
2219 -
2220 - memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
2221 - neigh_release(neigh);
2222 -- } else if (!fl6->flowi6_mark)
2223 -+ } else if (!(t->parms.flags &
2224 -+ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
2225 -+ /* enable the cache only only if the routing decision does
2226 -+ * not depend on the current inner header value
2227 -+ */
2228 -+ use_cache = true;
2229 -+ }
2230 -+
2231 -+ if (use_cache)
2232 - dst = dst_cache_get(&t->dst_cache);
2233 -
2234 - if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
2235 -@@ -1113,7 +1122,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
2236 - skb = new_skb;
2237 - }
2238 -
2239 -- if (!fl6->flowi6_mark && ndst)
2240 -+ if (use_cache && ndst)
2241 - dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
2242 - skb_dst_set(skb, dst);
2243 -
2244 -@@ -1134,7 +1143,6 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
2245 - if (err)
2246 - return err;
2247 -
2248 -- skb->protocol = htons(ETH_P_IPV6);
2249 - skb_push(skb, sizeof(struct ipv6hdr));
2250 - skb_reset_network_header(skb);
2251 - ipv6h = ipv6_hdr(skb);
2252 -diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
2253 -index 462f2a76b..1d18432 100644
2254 ---- a/net/ipv6/output_core.c
2255 -+++ b/net/ipv6/output_core.c
2256 -@@ -148,6 +148,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
2257 - ipv6_hdr(skb)->payload_len = htons(len);
2258 - IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
2259 -
2260 -+ skb->protocol = htons(ETH_P_IPV6);
2261 -+
2262 - return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2263 - net, sk, skb, NULL, skb_dst(skb)->dev,
2264 - dst_output);
2265 -diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2266 -index c2a8656..fa39ab8 100644
2267 ---- a/net/ipv6/udp.c
2268 -+++ b/net/ipv6/udp.c
2269 -@@ -514,7 +514,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2270 - return;
2271 - }
2272 -
2273 --static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2274 -+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2275 - {
2276 - int rc;
2277 -
2278 -diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
2279 -index 0682c03..3c1dbc9 100644
2280 ---- a/net/ipv6/udp_impl.h
2281 -+++ b/net/ipv6/udp_impl.h
2282 -@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
2283 - int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
2284 - int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
2285 - int flags, int *addr_len);
2286 --int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2287 -+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2288 - void udpv6_destroy_sock(struct sock *sk);
2289 -
2290 - void udp_v6_clear_sk(struct sock *sk, int size);
2291 -diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
2292 -index fd6ef41..af2895c 100644
2293 ---- a/net/ipv6/udplite.c
2294 -+++ b/net/ipv6/udplite.c
2295 -@@ -45,7 +45,7 @@ struct proto udplitev6_prot = {
2296 - .getsockopt = udpv6_getsockopt,
2297 - .sendmsg = udpv6_sendmsg,
2298 - .recvmsg = udpv6_recvmsg,
2299 -- .backlog_rcv = udpv6_queue_rcv_skb,
2300 -+ .backlog_rcv = __udpv6_queue_rcv_skb,
2301 - .hash = udp_lib_hash,
2302 - .unhash = udp_lib_unhash,
2303 - .get_port = udp_v6_get_port,
2304 -diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2305 -index 42de4cc..d0e906d 100644
2306 ---- a/net/l2tp/l2tp_ip.c
2307 -+++ b/net/l2tp/l2tp_ip.c
2308 -@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2309 - int ret;
2310 - int chk_addr_ret;
2311 -
2312 -- if (!sock_flag(sk, SOCK_ZAPPED))
2313 -- return -EINVAL;
2314 - if (addr_len < sizeof(struct sockaddr_l2tpip))
2315 - return -EINVAL;
2316 - if (addr->l2tp_family != AF_INET)
2317 -@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2318 - read_unlock_bh(&l2tp_ip_lock);
2319 -
2320 - lock_sock(sk);
2321 -+ if (!sock_flag(sk, SOCK_ZAPPED))
2322 -+ goto out;
2323 -+
2324 - if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
2325 - goto out;
2326 -
2327 -diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
2328 -index ea2ae66..b9c6a41 100644
2329 ---- a/net/l2tp/l2tp_ip6.c
2330 -+++ b/net/l2tp/l2tp_ip6.c
2331 -@@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2332 - int addr_type;
2333 - int err;
2334 -
2335 -- if (!sock_flag(sk, SOCK_ZAPPED))
2336 -- return -EINVAL;
2337 - if (addr->l2tp_family != AF_INET6)
2338 - return -EINVAL;
2339 - if (addr_len < sizeof(*addr))
2340 -@@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2341 - lock_sock(sk);
2342 -
2343 - err = -EINVAL;
2344 -+ if (!sock_flag(sk, SOCK_ZAPPED))
2345 -+ goto out_unlock;
2346 -+
2347 - if (sk->sk_state != TCP_CLOSE)
2348 - goto out_unlock;
2349 -
2350 -diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2351 -index 62bea45..246f29d 100644
2352 ---- a/net/netlink/af_netlink.c
2353 -+++ b/net/netlink/af_netlink.c
2354 -@@ -329,7 +329,6 @@ static void netlink_sock_destruct(struct sock *sk)
2355 - if (nlk->cb_running) {
2356 - if (nlk->cb.done)
2357 - nlk->cb.done(&nlk->cb);
2358 --
2359 - module_put(nlk->cb.module);
2360 - kfree_skb(nlk->cb.skb);
2361 - }
2362 -@@ -346,6 +345,14 @@ static void netlink_sock_destruct(struct sock *sk)
2363 - WARN_ON(nlk_sk(sk)->groups);
2364 - }
2365 -
2366 -+static void netlink_sock_destruct_work(struct work_struct *work)
2367 -+{
2368 -+ struct netlink_sock *nlk = container_of(work, struct netlink_sock,
2369 -+ work);
2370 -+
2371 -+ sk_free(&nlk->sk);
2372 -+}
2373 -+
2374 - /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
2375 - * SMP. Look, when several writers sleep and reader wakes them up, all but one
2376 - * immediately hit write lock and grab all the cpus. Exclusive sleep solves
2377 -@@ -648,8 +655,18 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
2378 - static void deferred_put_nlk_sk(struct rcu_head *head)
2379 - {
2380 - struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
2381 -+ struct sock *sk = &nlk->sk;
2382 -+
2383 -+ if (!atomic_dec_and_test(&sk->sk_refcnt))
2384 -+ return;
2385 -+
2386 -+ if (nlk->cb_running && nlk->cb.done) {
2387 -+ INIT_WORK(&nlk->work, netlink_sock_destruct_work);
2388 -+ schedule_work(&nlk->work);
2389 -+ return;
2390 -+ }
2391 -
2392 -- sock_put(&nlk->sk);
2393 -+ sk_free(sk);
2394 - }
2395 -
2396 - static int netlink_release(struct socket *sock)
2397 -diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
2398 -index 3cfd6cc..4fdb383 100644
2399 ---- a/net/netlink/af_netlink.h
2400 -+++ b/net/netlink/af_netlink.h
2401 -@@ -3,6 +3,7 @@
2402 -
2403 - #include <linux/rhashtable.h>
2404 - #include <linux/atomic.h>
2405 -+#include <linux/workqueue.h>
2406 - #include <net/sock.h>
2407 -
2408 - #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
2409 -@@ -33,6 +34,7 @@ struct netlink_sock {
2410 -
2411 - struct rhash_head node;
2412 - struct rcu_head rcu;
2413 -+ struct work_struct work;
2414 - };
2415 -
2416 - static inline struct netlink_sock *nlk_sk(struct sock *sk)
2417 -diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2418 -index d2238b2..dd23323 100644
2419 ---- a/net/packet/af_packet.c
2420 -+++ b/net/packet/af_packet.c
2421 -@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2422 -
2423 - if (optlen != sizeof(val))
2424 - return -EINVAL;
2425 -- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2426 -- return -EBUSY;
2427 - if (copy_from_user(&val, optval, sizeof(val)))
2428 - return -EFAULT;
2429 - switch (val) {
2430 - case TPACKET_V1:
2431 - case TPACKET_V2:
2432 - case TPACKET_V3:
2433 -- po->tp_version = val;
2434 -- return 0;
2435 -+ break;
2436 - default:
2437 - return -EINVAL;
2438 - }
2439 -+ lock_sock(sk);
2440 -+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
2441 -+ ret = -EBUSY;
2442 -+ } else {
2443 -+ po->tp_version = val;
2444 -+ ret = 0;
2445 -+ }
2446 -+ release_sock(sk);
2447 -+ return ret;
2448 - }
2449 - case PACKET_RESERVE:
2450 - {
2451 -@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2452 - /* Added to avoid minimal code churn */
2453 - struct tpacket_req *req = &req_u->req;
2454 -
2455 -+ lock_sock(sk);
2456 - /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
2457 - if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
2458 - net_warn_ratelimited("Tx-ring is not supported.\n");
2459 -@@ -4245,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2460 - goto out;
2461 - }
2462 -
2463 -- lock_sock(sk);
2464 -
2465 - /* Detach socket from network */
2466 - spin_lock(&po->bind_lock);
2467 -@@ -4294,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2468 - if (!tx_ring)
2469 - prb_shutdown_retire_blk_timer(po, rb_queue);
2470 - }
2471 -- release_sock(sk);
2472 -
2473 - if (pg_vec)
2474 - free_pg_vec(pg_vec, order, req->tp_block_nr);
2475 - out:
2476 -+ release_sock(sk);
2477 - return err;
2478 - }
2479 -
2480 -diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
2481 -index b54d56d..cf9b2fe 100644
2482 ---- a/net/sched/act_pedit.c
2483 -+++ b/net/sched/act_pedit.c
2484 -@@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
2485 - kfree(keys);
2486 - }
2487 -
2488 -+static bool offset_valid(struct sk_buff *skb, int offset)
2489 -+{
2490 -+ if (offset > 0 && offset > skb->len)
2491 -+ return false;
2492 -+
2493 -+ if (offset < 0 && -offset > skb_headroom(skb))
2494 -+ return false;
2495 -+
2496 -+ return true;
2497 -+}
2498 -+
2499 - static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
2500 - struct tcf_result *res)
2501 - {
2502 -@@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
2503 - if (tkey->offmask) {
2504 - char *d, _d;
2505 -
2506 -+ if (!offset_valid(skb, off + tkey->at)) {
2507 -+ pr_info("tc filter pedit 'at' offset %d out of bounds\n",
2508 -+ off + tkey->at);
2509 -+ goto bad;
2510 -+ }
2511 - d = skb_header_pointer(skb, off + tkey->at, 1,
2512 - &_d);
2513 - if (!d)
2514 -@@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
2515 - " offset must be on 32 bit boundaries\n");
2516 - goto bad;
2517 - }
2518 -- if (offset > 0 && offset > skb->len) {
2519 -- pr_info("tc filter pedit"
2520 -- " offset %d can't exceed pkt length %d\n",
2521 -- offset, skb->len);
2522 -+
2523 -+ if (!offset_valid(skb, off + offset)) {
2524 -+ pr_info("tc filter pedit offset %d out of bounds\n",
2525 -+ offset);
2526 - goto bad;
2527 - }
2528 -
2529 -diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
2530 -index 0b8c3ac..1bf1f451 100644
2531 ---- a/net/sched/cls_basic.c
2532 -+++ b/net/sched/cls_basic.c
2533 -@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
2534 - struct basic_head *head = rtnl_dereference(tp->root);
2535 - struct basic_filter *f;
2536 -
2537 -- if (head == NULL)
2538 -- return 0UL;
2539 --
2540 - list_for_each_entry(f, &head->flist, link) {
2541 - if (f->handle == handle) {
2542 - l = (unsigned long) f;
2543 -@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
2544 - tcf_unbind_filter(tp, &f->res);
2545 - call_rcu(&f->rcu, basic_delete_filter);
2546 - }
2547 -- RCU_INIT_POINTER(tp->root, NULL);
2548 - kfree_rcu(head, rcu);
2549 - return true;
2550 - }
2551 -diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
2552 -index c3002c2..dbec458 100644
2553 ---- a/net/sched/cls_bpf.c
2554 -+++ b/net/sched/cls_bpf.c
2555 -@@ -200,7 +200,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
2556 - call_rcu(&prog->rcu, __cls_bpf_delete_prog);
2557 - }
2558 -
2559 -- RCU_INIT_POINTER(tp->root, NULL);
2560 - kfree_rcu(head, rcu);
2561 - return true;
2562 - }
2563 -@@ -211,9 +210,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
2564 - struct cls_bpf_prog *prog;
2565 - unsigned long ret = 0UL;
2566 -
2567 -- if (head == NULL)
2568 -- return 0UL;
2569 --
2570 - list_for_each_entry(prog, &head->plist, link) {
2571 - if (prog->handle == handle) {
2572 - ret = (unsigned long) prog;
2573 -diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
2574 -index 4c85bd3..c104c20 100644
2575 ---- a/net/sched/cls_cgroup.c
2576 -+++ b/net/sched/cls_cgroup.c
2577 -@@ -130,11 +130,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
2578 -
2579 - if (!force)
2580 - return false;
2581 --
2582 -- if (head) {
2583 -- RCU_INIT_POINTER(tp->root, NULL);
2584 -+ /* Head can still be NULL due to cls_cgroup_init(). */
2585 -+ if (head)
2586 - call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
2587 -- }
2588 -+
2589 - return true;
2590 - }
2591 -
2592 -diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
2593 -index fbfec6a..d7ba2b4 100644
2594 ---- a/net/sched/cls_flow.c
2595 -+++ b/net/sched/cls_flow.c
2596 -@@ -583,7 +583,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
2597 - list_del_rcu(&f->list);
2598 - call_rcu(&f->rcu, flow_destroy_filter);
2599 - }
2600 -- RCU_INIT_POINTER(tp->root, NULL);
2601 - kfree_rcu(head, rcu);
2602 - return true;
2603 - }
2604 -diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
2605 -index 5060801..a411571 100644
2606 ---- a/net/sched/cls_flower.c
2607 -+++ b/net/sched/cls_flower.c
2608 -@@ -13,6 +13,7 @@
2609 - #include <linux/init.h>
2610 - #include <linux/module.h>
2611 - #include <linux/rhashtable.h>
2612 -+#include <linux/workqueue.h>
2613 -
2614 - #include <linux/if_ether.h>
2615 - #include <linux/in6.h>
2616 -@@ -55,7 +56,10 @@ struct cls_fl_head {
2617 - bool mask_assigned;
2618 - struct list_head filters;
2619 - struct rhashtable_params ht_params;
2620 -- struct rcu_head rcu;
2621 -+ union {
2622 -+ struct work_struct work;
2623 -+ struct rcu_head rcu;
2624 -+ };
2625 - };
2626 -
2627 - struct cls_fl_filter {
2628 -@@ -239,6 +243,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
2629 - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
2630 - }
2631 -
2632 -+static void fl_destroy_sleepable(struct work_struct *work)
2633 -+{
2634 -+ struct cls_fl_head *head = container_of(work, struct cls_fl_head,
2635 -+ work);
2636 -+ if (head->mask_assigned)
2637 -+ rhashtable_destroy(&head->ht);
2638 -+ kfree(head);
2639 -+ module_put(THIS_MODULE);
2640 -+}
2641 -+
2642 -+static void fl_destroy_rcu(struct rcu_head *rcu)
2643 -+{
2644 -+ struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
2645 -+
2646 -+ INIT_WORK(&head->work, fl_destroy_sleepable);
2647 -+ schedule_work(&head->work);
2648 -+}
2649 -+
2650 - static bool fl_destroy(struct tcf_proto *tp, bool force)
2651 - {
2652 - struct cls_fl_head *head = rtnl_dereference(tp->root);
2653 -@@ -252,10 +274,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
2654 - list_del_rcu(&f->list);
2655 - call_rcu(&f->rcu, fl_destroy_filter);
2656 - }
2657 -- RCU_INIT_POINTER(tp->root, NULL);
2658 -- if (head->mask_assigned)
2659 -- rhashtable_destroy(&head->ht);
2660 -- kfree_rcu(head, rcu);
2661 -+
2662 -+ __module_get(THIS_MODULE);
2663 -+ call_rcu(&head->rcu, fl_destroy_rcu);
2664 - return true;
2665 - }
2666 -
2667 -diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
2668 -index 25927b6..f935429 100644
2669 ---- a/net/sched/cls_matchall.c
2670 -+++ b/net/sched/cls_matchall.c
2671 -@@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
2672 -
2673 - call_rcu(&f->rcu, mall_destroy_filter);
2674 - }
2675 -- RCU_INIT_POINTER(tp->root, NULL);
2676 - kfree_rcu(head, rcu);
2677 - return true;
2678 - }
2679 -diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
2680 -index f9c9fc0..9992dfa 100644
2681 ---- a/net/sched/cls_rsvp.h
2682 -+++ b/net/sched/cls_rsvp.h
2683 -@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
2684 - return -1;
2685 - nhptr = ip_hdr(skb);
2686 - #endif
2687 --
2688 -+ if (unlikely(!head))
2689 -+ return -1;
2690 - restart:
2691 -
2692 - #if RSVP_DST_LEN == 4
2693 -diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
2694 -index 944c8ff..403746b 100644
2695 ---- a/net/sched/cls_tcindex.c
2696 -+++ b/net/sched/cls_tcindex.c
2697 -@@ -503,7 +503,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
2698 - walker.fn = tcindex_destroy_element;
2699 - tcindex_walk(tp, &walker);
2700 -
2701 -- RCU_INIT_POINTER(tp->root, NULL);
2702 - call_rcu(&p->rcu, __tcindex_destroy);
2703 - return true;
2704 - }
2705 -diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
2706 -index 65b1bbf..6167699 100644
2707 ---- a/net/tipc/bearer.c
2708 -+++ b/net/tipc/bearer.c
2709 -@@ -402,6 +402,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
2710 - dev = dev_get_by_name(net, driver_name);
2711 - if (!dev)
2712 - return -ENODEV;
2713 -+ if (tipc_mtu_bad(dev, 0)) {
2714 -+ dev_put(dev);
2715 -+ return -EINVAL;
2716 -+ }
2717 -
2718 - /* Associate TIPC bearer with L2 bearer */
2719 - rcu_assign_pointer(b->media_ptr, dev);
2720 -@@ -606,8 +610,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
2721 - if (!b)
2722 - return NOTIFY_DONE;
2723 -
2724 -- b->mtu = dev->mtu;
2725 --
2726 - switch (evt) {
2727 - case NETDEV_CHANGE:
2728 - if (netif_carrier_ok(dev))
2729 -@@ -621,6 +623,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
2730 - tipc_reset_bearer(net, b);
2731 - break;
2732 - case NETDEV_CHANGEMTU:
2733 -+ if (tipc_mtu_bad(dev, 0)) {
2734 -+ bearer_disable(net, b);
2735 -+ break;
2736 -+ }
2737 -+ b->mtu = dev->mtu;
2738 - tipc_reset_bearer(net, b);
2739 - break;
2740 - case NETDEV_CHANGEADDR:
2741 -diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
2742 -index 43757f1..d93f1f1 100644
2743 ---- a/net/tipc/bearer.h
2744 -+++ b/net/tipc/bearer.h
2745 -@@ -39,6 +39,7 @@
2746 -
2747 - #include "netlink.h"
2748 - #include "core.h"
2749 -+#include "msg.h"
2750 - #include <net/genetlink.h>
2751 -
2752 - #define MAX_MEDIA 3
2753 -@@ -59,6 +60,9 @@
2754 - #define TIPC_MEDIA_TYPE_IB 2
2755 - #define TIPC_MEDIA_TYPE_UDP 3
2756 -
2757 -+/* minimum bearer MTU */
2758 -+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
2759 -+
2760 - /**
2761 - * struct tipc_media_addr - destination address used by TIPC bearers
2762 - * @value: address info (format defined by media)
2763 -@@ -213,4 +217,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
2764 - void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
2765 - struct sk_buff_head *xmitq);
2766 -
2767 -+/* check if device MTU is too low for tipc headers */
2768 -+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
2769 -+{
2770 -+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
2771 -+ return false;
2772 -+ netdev_warn(dev, "MTU too low for tipc bearer\n");
2773 -+ return true;
2774 -+}
2775 -+
2776 - #endif /* _TIPC_BEARER_H */
2777 -diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
2778 -index ae7e14c..f60f346 100644
2779 ---- a/net/tipc/udp_media.c
2780 -+++ b/net/tipc/udp_media.c
2781 -@@ -372,6 +372,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
2782 - udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
2783 - udp_conf.use_udp_checksums = false;
2784 - ub->ifindex = dev->ifindex;
2785 -+ if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
2786 -+ sizeof(struct udphdr))) {
2787 -+ err = -EINVAL;
2788 -+ goto err;
2789 -+ }
2790 - b->mtu = dev->mtu - sizeof(struct iphdr)
2791 - - sizeof(struct udphdr);
2792 - #if IS_ENABLED(CONFIG_IPV6)
2793 -diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2794 -index 8309687..568f307 100644
2795 ---- a/net/unix/af_unix.c
2796 -+++ b/net/unix/af_unix.c
2797 -@@ -2199,7 +2199,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2798 - * Sleep until more data has arrived. But check for races..
2799 - */
2800 - static long unix_stream_data_wait(struct sock *sk, long timeo,
2801 -- struct sk_buff *last, unsigned int last_len)
2802 -+ struct sk_buff *last, unsigned int last_len,
2803 -+ bool freezable)
2804 - {
2805 - struct sk_buff *tail;
2806 - DEFINE_WAIT(wait);
2807 -@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
2808 -
2809 - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2810 - unix_state_unlock(sk);
2811 -- timeo = freezable_schedule_timeout(timeo);
2812 -+ if (freezable)
2813 -+ timeo = freezable_schedule_timeout(timeo);
2814 -+ else
2815 -+ timeo = schedule_timeout(timeo);
2816 - unix_state_lock(sk);
2817 -
2818 - if (sock_flag(sk, SOCK_DEAD))
2819 -@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
2820 - unsigned int splice_flags;
2821 - };
2822 -
2823 --static int unix_stream_read_generic(struct unix_stream_read_state *state)
2824 -+static int unix_stream_read_generic(struct unix_stream_read_state *state,
2825 -+ bool freezable)
2826 - {
2827 - struct scm_cookie scm;
2828 - struct socket *sock = state->socket;
2829 -@@ -2330,7 +2335,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2830 - mutex_unlock(&u->iolock);
2831 -
2832 - timeo = unix_stream_data_wait(sk, timeo, last,
2833 -- last_len);
2834 -+ last_len, freezable);
2835 -
2836 - if (signal_pending(current)) {
2837 - err = sock_intr_errno(timeo);
2838 -@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2839 - .flags = flags
2840 - };
2841 -
2842 -- return unix_stream_read_generic(&state);
2843 -+ return unix_stream_read_generic(&state, true);
2844 - }
2845 -
2846 - static ssize_t skb_unix_socket_splice(struct sock *sk,
2847 -@@ -2518,7 +2523,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2848 - flags & SPLICE_F_NONBLOCK)
2849 - state.flags = MSG_DONTWAIT;
2850 -
2851 -- return unix_stream_read_generic(&state);
2852 -+ return unix_stream_read_generic(&state, false);
2853 - }
2854 -
2855 - static int unix_shutdown(struct socket *sock, int mode)
2856
2857 diff --git a/4.8.15/1014_linux-4.8.15.patch b/4.8.15/1014_linux-4.8.15.patch
2858 deleted file mode 100644
2859 index 9b7b2f4..0000000
2860 --- a/4.8.15/1014_linux-4.8.15.patch
2861 +++ /dev/null
2862 @@ -1,1042 +0,0 @@
2863 -diff --git a/Makefile b/Makefile
2864 -index 6a74924..c7f0e79 100644
2865 ---- a/Makefile
2866 -+++ b/Makefile
2867 -@@ -1,6 +1,6 @@
2868 - VERSION = 4
2869 - PATCHLEVEL = 8
2870 --SUBLEVEL = 14
2871 -+SUBLEVEL = 15
2872 - EXTRAVERSION =
2873 - NAME = Psychotic Stoned Sheep
2874 -
2875 -diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
2876 -index 1e90bdb..fb307de 100644
2877 ---- a/arch/arm/boot/dts/imx7s.dtsi
2878 -+++ b/arch/arm/boot/dts/imx7s.dtsi
2879 -@@ -640,9 +640,8 @@
2880 - reg = <0x30730000 0x10000>;
2881 - interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
2882 - clocks = <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>,
2883 -- <&clks IMX7D_CLK_DUMMY>,
2884 -- <&clks IMX7D_CLK_DUMMY>;
2885 -- clock-names = "pix", "axi", "disp_axi";
2886 -+ <&clks IMX7D_LCDIF_PIXEL_ROOT_CLK>;
2887 -+ clock-names = "pix", "axi";
2888 - status = "disabled";
2889 - };
2890 - };
2891 -diff --git a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
2892 -index 1cf644b..51dc734 100644
2893 ---- a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
2894 -+++ b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
2895 -@@ -82,6 +82,10 @@
2896 - gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
2897 - };
2898 -
2899 -+&sata {
2900 -+ nr-ports = <2>;
2901 -+};
2902 -+
2903 - &ehci1 {
2904 - status = "okay";
2905 - };
2906 -diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
2907 -index d28fa8f..c598d84 100644
2908 ---- a/arch/m68k/include/asm/delay.h
2909 -+++ b/arch/m68k/include/asm/delay.h
2910 -@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
2911 - */
2912 - #define HZSCALE (268435456 / (1000000 / HZ))
2913 -
2914 --#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
2915 -+#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
2916 -
2917 - #endif /* defined(_M68K_DELAY_H) */
2918 -diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2919 -index c2c43f7..3a4ed9f 100644
2920 ---- a/arch/parisc/include/asm/pgtable.h
2921 -+++ b/arch/parisc/include/asm/pgtable.h
2922 -@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
2923 - unsigned long flags; \
2924 - spin_lock_irqsave(&pa_tlb_lock, flags); \
2925 - old_pte = *ptep; \
2926 -- set_pte(ptep, pteval); \
2927 - if (pte_inserted(old_pte)) \
2928 - purge_tlb_entries(mm, addr); \
2929 -+ set_pte(ptep, pteval); \
2930 - spin_unlock_irqrestore(&pa_tlb_lock, flags); \
2931 - } while (0)
2932 -
2933 -@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
2934 - spin_unlock_irqrestore(&pa_tlb_lock, flags);
2935 - return 0;
2936 - }
2937 -- set_pte(ptep, pte_mkold(pte));
2938 - purge_tlb_entries(vma->vm_mm, addr);
2939 -+ set_pte(ptep, pte_mkold(pte));
2940 - spin_unlock_irqrestore(&pa_tlb_lock, flags);
2941 - return 1;
2942 - }
2943 -@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
2944 -
2945 - spin_lock_irqsave(&pa_tlb_lock, flags);
2946 - old_pte = *ptep;
2947 -- set_pte(ptep, __pte(0));
2948 - if (pte_inserted(old_pte))
2949 - purge_tlb_entries(mm, addr);
2950 -+ set_pte(ptep, __pte(0));
2951 - spin_unlock_irqrestore(&pa_tlb_lock, flags);
2952 -
2953 - return old_pte;
2954 -@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
2955 - {
2956 - unsigned long flags;
2957 - spin_lock_irqsave(&pa_tlb_lock, flags);
2958 -- set_pte(ptep, pte_wrprotect(*ptep));
2959 - purge_tlb_entries(mm, addr);
2960 -+ set_pte(ptep, pte_wrprotect(*ptep));
2961 - spin_unlock_irqrestore(&pa_tlb_lock, flags);
2962 - }
2963 -
2964 -diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
2965 -index c2259d4..bbb314eb 100644
2966 ---- a/arch/parisc/kernel/cache.c
2967 -+++ b/arch/parisc/kernel/cache.c
2968 -@@ -393,6 +393,15 @@ void __init parisc_setup_cache_timing(void)
2969 -
2970 - /* calculate TLB flush threshold */
2971 -
2972 -+ /* On SMP machines, skip the TLB measure of kernel text which
2973 -+ * has been mapped as huge pages. */
2974 -+ if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
2975 -+ threshold = max(cache_info.it_size, cache_info.dt_size);
2976 -+ threshold *= PAGE_SIZE;
2977 -+ threshold /= num_online_cpus();
2978 -+ goto set_tlb_threshold;
2979 -+ }
2980 -+
2981 - alltime = mfctl(16);
2982 - flush_tlb_all();
2983 - alltime = mfctl(16) - alltime;
2984 -@@ -411,6 +420,8 @@ void __init parisc_setup_cache_timing(void)
2985 - alltime, size, rangetime);
2986 -
2987 - threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
2988 -+
2989 -+set_tlb_threshold:
2990 - if (threshold)
2991 - parisc_tlb_flush_threshold = threshold;
2992 - printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
2993 -diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
2994 -index 6755219..a4761b7 100644
2995 ---- a/arch/parisc/kernel/pacache.S
2996 -+++ b/arch/parisc/kernel/pacache.S
2997 -@@ -886,19 +886,10 @@ ENTRY(flush_dcache_page_asm)
2998 - fdc,m r31(%r28)
2999 - fdc,m r31(%r28)
3000 - fdc,m r31(%r28)
3001 -- cmpb,COND(<<) %r28, %r25,1b
3002 -+ cmpb,COND(<<) %r28, %r25,1b
3003 - fdc,m r31(%r28)
3004 -
3005 - sync
3006 --
3007 --#ifdef CONFIG_PA20
3008 -- pdtlb,l %r0(%r25)
3009 --#else
3010 -- tlb_lock %r20,%r21,%r22
3011 -- pdtlb %r0(%r25)
3012 -- tlb_unlock %r20,%r21,%r22
3013 --#endif
3014 --
3015 - bv %r0(%r2)
3016 - nop
3017 - .exit
3018 -@@ -973,17 +964,6 @@ ENTRY(flush_icache_page_asm)
3019 - fic,m %r31(%sr4,%r28)
3020 -
3021 - sync
3022 --
3023 --#ifdef CONFIG_PA20
3024 -- pdtlb,l %r0(%r28)
3025 -- pitlb,l %r0(%sr4,%r25)
3026 --#else
3027 -- tlb_lock %r20,%r21,%r22
3028 -- pdtlb %r0(%r28)
3029 -- pitlb %r0(%sr4,%r25)
3030 -- tlb_unlock %r20,%r21,%r22
3031 --#endif
3032 --
3033 - bv %r0(%r2)
3034 - nop
3035 - .exit
3036 -diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
3037 -index 1a2a6e8..1894beb 100644
3038 ---- a/arch/powerpc/boot/Makefile
3039 -+++ b/arch/powerpc/boot/Makefile
3040 -@@ -78,7 +78,8 @@ src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
3041 - ns16550.c serial.c simple_alloc.c div64.S util.S \
3042 - gunzip_util.c elf_util.c $(zlib) devtree.c stdlib.c \
3043 - oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
3044 -- uartlite.c mpc52xx-psc.c opal.c opal-calls.S
3045 -+ uartlite.c mpc52xx-psc.c opal.c
3046 -+src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S
3047 - src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
3048 - src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
3049 - src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
3050 -diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
3051 -index d7b4fd4..0272570 100644
3052 ---- a/arch/powerpc/boot/opal.c
3053 -+++ b/arch/powerpc/boot/opal.c
3054 -@@ -13,7 +13,7 @@
3055 - #include <libfdt.h>
3056 - #include "../include/asm/opal-api.h"
3057 -
3058 --#ifdef __powerpc64__
3059 -+#ifdef CONFIG_PPC64_BOOT_WRAPPER
3060 -
3061 - /* Global OPAL struct used by opal-call.S */
3062 - struct opal {
3063 -diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
3064 -index 29aa8d1..248f28b 100644
3065 ---- a/arch/powerpc/kernel/eeh_driver.c
3066 -+++ b/arch/powerpc/kernel/eeh_driver.c
3067 -@@ -671,8 +671,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
3068 -
3069 - /* Clear frozen state */
3070 - rc = eeh_clear_pe_frozen_state(pe, false);
3071 -- if (rc)
3072 -+ if (rc) {
3073 -+ pci_unlock_rescan_remove();
3074 - return rc;
3075 -+ }
3076 -
3077 - /* Give the system 5 seconds to finish running the user-space
3078 - * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
3079 -diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
3080 -index 42c702b..6fa450c 100644
3081 ---- a/arch/powerpc/mm/hash64_4k.c
3082 -+++ b/arch/powerpc/mm/hash64_4k.c
3083 -@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
3084 - */
3085 - rflags = htab_convert_pte_flags(new_pte);
3086 -
3087 -- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3088 -+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3089 - !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
3090 - rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
3091 -
3092 -diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
3093 -index 3bbbea0..1a68cb1 100644
3094 ---- a/arch/powerpc/mm/hash64_64k.c
3095 -+++ b/arch/powerpc/mm/hash64_64k.c
3096 -@@ -87,7 +87,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
3097 - subpg_pte = new_pte & ~subpg_prot;
3098 - rflags = htab_convert_pte_flags(subpg_pte);
3099 -
3100 -- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3101 -+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3102 - !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
3103 -
3104 - /*
3105 -@@ -258,7 +258,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
3106 -
3107 - rflags = htab_convert_pte_flags(new_pte);
3108 -
3109 -- if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3110 -+ if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
3111 - !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
3112 - rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
3113 -
3114 -diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
3115 -index a4e070a..8c925ec 100644
3116 ---- a/arch/x86/events/core.c
3117 -+++ b/arch/x86/events/core.c
3118 -@@ -68,7 +68,7 @@ u64 x86_perf_event_update(struct perf_event *event)
3119 - int shift = 64 - x86_pmu.cntval_bits;
3120 - u64 prev_raw_count, new_raw_count;
3121 - int idx = hwc->idx;
3122 -- s64 delta;
3123 -+ u64 delta;
3124 -
3125 - if (idx == INTEL_PMC_IDX_FIXED_BTS)
3126 - return 0;
3127 -diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
3128 -index 4c9a79b..3ef34c6 100644
3129 ---- a/arch/x86/events/intel/core.c
3130 -+++ b/arch/x86/events/intel/core.c
3131 -@@ -4024,7 +4024,7 @@ __init int intel_pmu_init(void)
3132 -
3133 - /* Support full width counters using alternative MSR range */
3134 - if (x86_pmu.intel_cap.full_width_write) {
3135 -- x86_pmu.max_period = x86_pmu.cntval_mask;
3136 -+ x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
3137 - x86_pmu.perfctr = MSR_IA32_PMC0;
3138 - pr_cont("full-width counters, ");
3139 - }
3140 -diff --git a/crypto/Makefile b/crypto/Makefile
3141 -index 99cc64ac..bd6a029 100644
3142 ---- a/crypto/Makefile
3143 -+++ b/crypto/Makefile
3144 -@@ -40,6 +40,7 @@ obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
3145 -
3146 - $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
3147 - $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
3148 -+$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
3149 - clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
3150 - clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
3151 -
3152 -diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
3153 -index 86fb59b..c6e9920 100644
3154 ---- a/crypto/mcryptd.c
3155 -+++ b/crypto/mcryptd.c
3156 -@@ -254,18 +254,22 @@ static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
3157 - goto out;
3158 - }
3159 -
3160 --static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
3161 -+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
3162 - u32 *mask)
3163 - {
3164 - struct crypto_attr_type *algt;
3165 -
3166 - algt = crypto_get_attr_type(tb);
3167 - if (IS_ERR(algt))
3168 -- return;
3169 -- if ((algt->type & CRYPTO_ALG_INTERNAL))
3170 -- *type |= CRYPTO_ALG_INTERNAL;
3171 -- if ((algt->mask & CRYPTO_ALG_INTERNAL))
3172 -- *mask |= CRYPTO_ALG_INTERNAL;
3173 -+ return false;
3174 -+
3175 -+ *type |= algt->type & CRYPTO_ALG_INTERNAL;
3176 -+ *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
3177 -+
3178 -+ if (*type & *mask & CRYPTO_ALG_INTERNAL)
3179 -+ return true;
3180 -+ else
3181 -+ return false;
3182 - }
3183 -
3184 - static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
3185 -@@ -492,7 +496,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
3186 - u32 mask = 0;
3187 - int err;
3188 -
3189 -- mcryptd_check_internal(tb, &type, &mask);
3190 -+ if (!mcryptd_check_internal(tb, &type, &mask))
3191 -+ return -EINVAL;
3192 -
3193 - halg = ahash_attr_alg(tb[1], type, mask);
3194 - if (IS_ERR(halg))
3195 -diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
3196 -index 2accf78..93e0d83 100644
3197 ---- a/drivers/acpi/nfit/core.c
3198 -+++ b/drivers/acpi/nfit/core.c
3199 -@@ -94,7 +94,7 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
3200 - return to_acpi_device(acpi_desc->dev);
3201 - }
3202 -
3203 --static int xlat_status(void *buf, unsigned int cmd, u32 status)
3204 -+static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
3205 - {
3206 - struct nd_cmd_clear_error *clear_err;
3207 - struct nd_cmd_ars_status *ars_status;
3208 -@@ -113,7 +113,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3209 - flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
3210 - if ((status >> 16 & flags) == 0)
3211 - return -ENOTTY;
3212 -- break;
3213 -+ return 0;
3214 - case ND_CMD_ARS_START:
3215 - /* ARS is in progress */
3216 - if ((status & 0xffff) == NFIT_ARS_START_BUSY)
3217 -@@ -122,7 +122,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3218 - /* Command failed */
3219 - if (status & 0xffff)
3220 - return -EIO;
3221 -- break;
3222 -+ return 0;
3223 - case ND_CMD_ARS_STATUS:
3224 - ars_status = buf;
3225 - /* Command failed */
3226 -@@ -146,7 +146,8 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3227 - * then just continue with the returned results.
3228 - */
3229 - if (status == NFIT_ARS_STATUS_INTR) {
3230 -- if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
3231 -+ if (ars_status->out_length >= 40 && (ars_status->flags
3232 -+ & NFIT_ARS_F_OVERFLOW))
3233 - return -ENOSPC;
3234 - return 0;
3235 - }
3236 -@@ -154,7 +155,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3237 - /* Unknown status */
3238 - if (status >> 16)
3239 - return -EIO;
3240 -- break;
3241 -+ return 0;
3242 - case ND_CMD_CLEAR_ERROR:
3243 - clear_err = buf;
3244 - if (status & 0xffff)
3245 -@@ -163,7 +164,7 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3246 - return -EIO;
3247 - if (clear_err->length > clear_err->cleared)
3248 - return clear_err->cleared;
3249 -- break;
3250 -+ return 0;
3251 - default:
3252 - break;
3253 - }
3254 -@@ -174,6 +175,16 @@ static int xlat_status(void *buf, unsigned int cmd, u32 status)
3255 - return 0;
3256 - }
3257 -
3258 -+static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
3259 -+ u32 status)
3260 -+{
3261 -+ if (!nvdimm)
3262 -+ return xlat_bus_status(buf, cmd, status);
3263 -+ if (status)
3264 -+ return -EIO;
3265 -+ return 0;
3266 -+}
3267 -+
3268 - static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
3269 - struct nvdimm *nvdimm, unsigned int cmd, void *buf,
3270 - unsigned int buf_len, int *cmd_rc)
3271 -@@ -298,7 +309,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
3272 -
3273 - for (i = 0, offset = 0; i < desc->out_num; i++) {
3274 - u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
3275 -- (u32 *) out_obj->buffer.pointer);
3276 -+ (u32 *) out_obj->buffer.pointer,
3277 -+ out_obj->buffer.length - offset);
3278 -
3279 - if (offset + out_size > out_obj->buffer.length) {
3280 - dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
3281 -@@ -333,7 +345,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
3282 - */
3283 - rc = buf_len - offset - in_buf.buffer.length;
3284 - if (cmd_rc)
3285 -- *cmd_rc = xlat_status(buf, cmd, fw_status);
3286 -+ *cmd_rc = xlat_status(nvdimm, buf, cmd,
3287 -+ fw_status);
3288 - } else {
3289 - dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
3290 - __func__, dimm_name, cmd_name, buf_len,
3291 -@@ -343,7 +356,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
3292 - } else {
3293 - rc = 0;
3294 - if (cmd_rc)
3295 -- *cmd_rc = xlat_status(buf, cmd, fw_status);
3296 -+ *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
3297 - }
3298 -
3299 - out:
3300 -@@ -1857,19 +1870,32 @@ static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
3301 - return cmd_rc;
3302 - }
3303 -
3304 --static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
3305 -+static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
3306 - struct nd_cmd_ars_status *ars_status)
3307 - {
3308 -+ struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
3309 - int rc;
3310 - u32 i;
3311 -
3312 -+ /*
3313 -+ * First record starts at 44 byte offset from the start of the
3314 -+ * payload.
3315 -+ */
3316 -+ if (ars_status->out_length < 44)
3317 -+ return 0;
3318 - for (i = 0; i < ars_status->num_records; i++) {
3319 -+ /* only process full records */
3320 -+ if (ars_status->out_length
3321 -+ < 44 + sizeof(struct nd_ars_record) * (i + 1))
3322 -+ break;
3323 - rc = nvdimm_bus_add_poison(nvdimm_bus,
3324 - ars_status->records[i].err_address,
3325 - ars_status->records[i].length);
3326 - if (rc)
3327 - return rc;
3328 - }
3329 -+ if (i < ars_status->num_records)
3330 -+ dev_warn(acpi_desc->dev, "detected truncated ars results\n");
3331 -
3332 - return 0;
3333 - }
3334 -@@ -2122,8 +2148,7 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
3335 - if (rc < 0 && rc != -ENOSPC)
3336 - return rc;
3337 -
3338 -- if (ars_status_process_records(acpi_desc->nvdimm_bus,
3339 -- acpi_desc->ars_status))
3340 -+ if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
3341 - return -ENOMEM;
3342 -
3343 - return 0;
3344 -diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
3345 -index 2b38c1b..7a2e4d4 100644
3346 ---- a/drivers/acpi/sleep.c
3347 -+++ b/drivers/acpi/sleep.c
3348 -@@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
3349 - }
3350 - }
3351 -
3352 --static void acpi_sleep_pts_switch(u32 acpi_state)
3353 --{
3354 -- acpi_status status;
3355 --
3356 -- status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
3357 -- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
3358 -- /*
3359 -- * OS can't evaluate the _PTS object correctly. Some warning
3360 -- * message will be printed. But it won't break anything.
3361 -- */
3362 -- printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
3363 -- }
3364 --}
3365 --
3366 --static int sleep_notify_reboot(struct notifier_block *this,
3367 -+static int tts_notify_reboot(struct notifier_block *this,
3368 - unsigned long code, void *x)
3369 - {
3370 - acpi_sleep_tts_switch(ACPI_STATE_S5);
3371 --
3372 -- acpi_sleep_pts_switch(ACPI_STATE_S5);
3373 --
3374 - return NOTIFY_DONE;
3375 - }
3376 -
3377 --static struct notifier_block sleep_notifier = {
3378 -- .notifier_call = sleep_notify_reboot,
3379 -+static struct notifier_block tts_notifier = {
3380 -+ .notifier_call = tts_notify_reboot,
3381 - .next = NULL,
3382 - .priority = 0,
3383 - };
3384 -@@ -916,9 +899,9 @@ int __init acpi_sleep_init(void)
3385 - pr_info(PREFIX "(supports%s)\n", supported);
3386 -
3387 - /*
3388 -- * Register the sleep_notifier to reboot notifier list so that the _TTS
3389 -- * and _PTS object can also be evaluated when the system enters S5.
3390 -+ * Register the tts_notifier to reboot notifier list so that the _TTS
3391 -+ * object can also be evaluated when the system enters S5.
3392 - */
3393 -- register_reboot_notifier(&sleep_notifier);
3394 -+ register_reboot_notifier(&tts_notifier);
3395 - return 0;
3396 - }
3397 -diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
3398 -index 5163c8f..5497f7f 100644
3399 ---- a/drivers/block/zram/zram_drv.c
3400 -+++ b/drivers/block/zram/zram_drv.c
3401 -@@ -1413,8 +1413,14 @@ static ssize_t hot_remove_store(struct class *class,
3402 - return ret ? ret : count;
3403 - }
3404 -
3405 -+/*
3406 -+ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
3407 -+ * sense that reading from this file does alter the state of your system -- it
3408 -+ * creates a new un-initialized zram device and returns back this device's
3409 -+ * device_id (or an error code if it fails to create a new device).
3410 -+ */
3411 - static struct class_attribute zram_control_class_attrs[] = {
3412 -- __ATTR_RO(hot_add),
3413 -+ __ATTR(hot_add, 0400, hot_add_show, NULL),
3414 - __ATTR_WO(hot_remove),
3415 - __ATTR_NULL,
3416 - };
3417 -diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
3418 -index 0ec112e..2341f37 100644
3419 ---- a/drivers/crypto/caam/ctrl.c
3420 -+++ b/drivers/crypto/caam/ctrl.c
3421 -@@ -557,8 +557,9 @@ static int caam_probe(struct platform_device *pdev)
3422 - * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
3423 - * long pointers in master configuration register
3424 - */
3425 -- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
3426 -- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
3427 -+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
3428 -+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
3429 -+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
3430 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
3431 -
3432 - /*
3433 -diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
3434 -index b111e14..13e89af 100644
3435 ---- a/drivers/crypto/marvell/hash.c
3436 -+++ b/drivers/crypto/marvell/hash.c
3437 -@@ -168,12 +168,11 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
3438 - mv_cesa_adjust_op(engine, &creq->op_tmpl);
3439 - memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
3440 -
3441 -- digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
3442 -- for (i = 0; i < digsize / 4; i++)
3443 -- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
3444 --
3445 -- mv_cesa_adjust_op(engine, &creq->op_tmpl);
3446 -- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
3447 -+ if (!sreq->offset) {
3448 -+ digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
3449 -+ for (i = 0; i < digsize / 4; i++)
3450 -+ writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
3451 -+ }
3452 -
3453 - if (creq->cache_ptr)
3454 - memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
3455 -diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
3456 -index ff64313..4894199 100644
3457 ---- a/drivers/dax/dax.c
3458 -+++ b/drivers/dax/dax.c
3459 -@@ -324,7 +324,7 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
3460 - return -ENXIO;
3461 -
3462 - /* prevent private mappings from being established */
3463 -- if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
3464 -+ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
3465 - dev_info(dev, "%s: %s: fail, attempted private mapping\n",
3466 - current->comm, func);
3467 - return -EINVAL;
3468 -diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
3469 -index bfb91d8..1006af4 100644
3470 ---- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
3471 -+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
3472 -@@ -872,23 +872,25 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
3473 - static void peak_usb_disconnect(struct usb_interface *intf)
3474 - {
3475 - struct peak_usb_device *dev;
3476 -+ struct peak_usb_device *dev_prev_siblings;
3477 -
3478 - /* unregister as many netdev devices as siblings */
3479 -- for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
3480 -+ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
3481 - struct net_device *netdev = dev->netdev;
3482 - char name[IFNAMSIZ];
3483 -
3484 -+ dev_prev_siblings = dev->prev_siblings;
3485 - dev->state &= ~PCAN_USB_STATE_CONNECTED;
3486 - strncpy(name, netdev->name, IFNAMSIZ);
3487 -
3488 - unregister_netdev(netdev);
3489 -- free_candev(netdev);
3490 -
3491 - kfree(dev->cmd_buf);
3492 - dev->next_siblings = NULL;
3493 - if (dev->adapter->dev_free)
3494 - dev->adapter->dev_free(dev);
3495 -
3496 -+ free_candev(netdev);
3497 - dev_info(&intf->dev, "%s removed\n", name);
3498 - }
3499 -
3500 -diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
3501 -index a8b6949..23d4a17 100644
3502 ---- a/drivers/nvdimm/bus.c
3503 -+++ b/drivers/nvdimm/bus.c
3504 -@@ -715,7 +715,7 @@ EXPORT_SYMBOL_GPL(nd_cmd_in_size);
3505 -
3506 - u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
3507 - const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
3508 -- const u32 *out_field)
3509 -+ const u32 *out_field, unsigned long remainder)
3510 - {
3511 - if (idx >= desc->out_num)
3512 - return UINT_MAX;
3513 -@@ -727,9 +727,24 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
3514 - return in_field[1];
3515 - else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
3516 - return out_field[1];
3517 -- else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
3518 -- return out_field[1] - 8;
3519 -- else if (cmd == ND_CMD_CALL) {
3520 -+ else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
3521 -+ /*
3522 -+ * Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
3523 -+ * "Size of Output Buffer in bytes, including this
3524 -+ * field."
3525 -+ */
3526 -+ if (out_field[1] < 4)
3527 -+ return 0;
3528 -+ /*
3529 -+ * ACPI 6.1 is ambiguous if 'status' is included in the
3530 -+ * output size. If we encounter an output size that
3531 -+ * overshoots the remainder by 4 bytes, assume it was
3532 -+ * including 'status'.
3533 -+ */
3534 -+ if (out_field[1] - 8 == remainder)
3535 -+ return remainder;
3536 -+ return out_field[1] - 4;
3537 -+ } else if (cmd == ND_CMD_CALL) {
3538 - struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
3539 -
3540 - return pkg->nd_size_out;
3541 -@@ -876,7 +891,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
3542 - /* process an output envelope */
3543 - for (i = 0; i < desc->out_num; i++) {
3544 - u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
3545 -- (u32 *) in_env, (u32 *) out_env);
3546 -+ (u32 *) in_env, (u32 *) out_env, 0);
3547 - u32 copy;
3548 -
3549 - if (out_size == UINT_MAX) {
3550 -diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3551 -index 7080ce2..8214eba 100644
3552 ---- a/drivers/scsi/lpfc/lpfc_sli.c
3553 -+++ b/drivers/scsi/lpfc/lpfc_sli.c
3554 -@@ -1323,18 +1323,20 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3555 - {
3556 - lockdep_assert_held(&phba->hbalock);
3557 -
3558 -- BUG_ON(!piocb || !piocb->vport);
3559 -+ BUG_ON(!piocb);
3560 -
3561 - list_add_tail(&piocb->list, &pring->txcmplq);
3562 - piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
3563 -
3564 - if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
3565 - (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
3566 -- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
3567 -- (!(piocb->vport->load_flag & FC_UNLOADING)))
3568 -- mod_timer(&piocb->vport->els_tmofunc,
3569 -- jiffies +
3570 -- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
3571 -+ (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
3572 -+ BUG_ON(!piocb->vport);
3573 -+ if (!(piocb->vport->load_flag & FC_UNLOADING))
3574 -+ mod_timer(&piocb->vport->els_tmofunc,
3575 -+ jiffies +
3576 -+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
3577 -+ }
3578 -
3579 - return 0;
3580 - }
3581 -diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
3582 -index e3b30ea..a504e2e0 100644
3583 ---- a/drivers/vhost/vsock.c
3584 -+++ b/drivers/vhost/vsock.c
3585 -@@ -506,7 +506,7 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
3586 - * executing.
3587 - */
3588 -
3589 -- if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
3590 -+ if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
3591 - sock_set_flag(sk, SOCK_DONE);
3592 - vsk->peer_shutdown = SHUTDOWN_MASK;
3593 - sk->sk_state = SS_UNCONNECTED;
3594 -diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
3595 -index df4b3e6..93142bf 100644
3596 ---- a/fs/ceph/dir.c
3597 -+++ b/fs/ceph/dir.c
3598 -@@ -1257,26 +1257,30 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
3599 - return -ECHILD;
3600 -
3601 - op = ceph_snap(dir) == CEPH_SNAPDIR ?
3602 -- CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
3603 -+ CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_GETATTR;
3604 - req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
3605 - if (!IS_ERR(req)) {
3606 - req->r_dentry = dget(dentry);
3607 -- req->r_num_caps = 2;
3608 -+ req->r_num_caps = op == CEPH_MDS_OP_GETATTR ? 1 : 2;
3609 -
3610 - mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
3611 - if (ceph_security_xattr_wanted(dir))
3612 - mask |= CEPH_CAP_XATTR_SHARED;
3613 - req->r_args.getattr.mask = mask;
3614 -
3615 -- req->r_locked_dir = dir;
3616 - err = ceph_mdsc_do_request(mdsc, NULL, req);
3617 -- if (err == 0 || err == -ENOENT) {
3618 -- if (dentry == req->r_dentry) {
3619 -- valid = !d_unhashed(dentry);
3620 -- } else {
3621 -- d_invalidate(req->r_dentry);
3622 -- err = -EAGAIN;
3623 -- }
3624 -+ switch (err) {
3625 -+ case 0:
3626 -+ if (d_really_is_positive(dentry) &&
3627 -+ d_inode(dentry) == req->r_target_inode)
3628 -+ valid = 1;
3629 -+ break;
3630 -+ case -ENOENT:
3631 -+ if (d_really_is_negative(dentry))
3632 -+ valid = 1;
3633 -+ /* Fallthrough */
3634 -+ default:
3635 -+ break;
3636 - }
3637 - ceph_mdsc_put_request(req);
3638 - dout("d_revalidate %p lookup result=%d\n",
3639 -diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3640 -index 4ff9251..eb5373a 100644
3641 ---- a/fs/fuse/dir.c
3642 -+++ b/fs/fuse/dir.c
3643 -@@ -1709,8 +1709,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
3644 - return -EACCES;
3645 -
3646 - if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) {
3647 -- int kill;
3648 --
3649 - attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID |
3650 - ATTR_MODE);
3651 - /*
3652 -@@ -1722,12 +1720,11 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
3653 - return ret;
3654 -
3655 - attr->ia_mode = inode->i_mode;
3656 -- kill = should_remove_suid(entry);
3657 -- if (kill & ATTR_KILL_SUID) {
3658 -+ if (inode->i_mode & S_ISUID) {
3659 - attr->ia_valid |= ATTR_MODE;
3660 - attr->ia_mode &= ~S_ISUID;
3661 - }
3662 -- if (kill & ATTR_KILL_SGID) {
3663 -+ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
3664 - attr->ia_valid |= ATTR_MODE;
3665 - attr->ia_mode &= ~S_ISGID;
3666 - }
3667 -diff --git a/include/linux/cpu.h b/include/linux/cpu.h
3668 -index 797d9c8..c8938eb 100644
3669 ---- a/include/linux/cpu.h
3670 -+++ b/include/linux/cpu.h
3671 -@@ -105,22 +105,16 @@ extern bool cpuhp_tasks_frozen;
3672 - { .notifier_call = fn, .priority = pri }; \
3673 - __register_cpu_notifier(&fn##_nb); \
3674 - }
3675 --#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
3676 --#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
3677 --#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
3678 --#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
3679 -
3680 --#ifdef CONFIG_HOTPLUG_CPU
3681 - extern int register_cpu_notifier(struct notifier_block *nb);
3682 - extern int __register_cpu_notifier(struct notifier_block *nb);
3683 - extern void unregister_cpu_notifier(struct notifier_block *nb);
3684 - extern void __unregister_cpu_notifier(struct notifier_block *nb);
3685 --#else
3686 -
3687 --#ifndef MODULE
3688 --extern int register_cpu_notifier(struct notifier_block *nb);
3689 --extern int __register_cpu_notifier(struct notifier_block *nb);
3690 --#else
3691 -+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
3692 -+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
3693 -+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
3694 -+
3695 - static inline int register_cpu_notifier(struct notifier_block *nb)
3696 - {
3697 - return 0;
3698 -@@ -130,7 +124,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb)
3699 - {
3700 - return 0;
3701 - }
3702 --#endif
3703 -
3704 - static inline void unregister_cpu_notifier(struct notifier_block *nb)
3705 - {
3706 -diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
3707 -index bbfce62..d02d65d 100644
3708 ---- a/include/linux/libnvdimm.h
3709 -+++ b/include/linux/libnvdimm.h
3710 -@@ -153,7 +153,7 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
3711 - const struct nd_cmd_desc *desc, int idx, void *buf);
3712 - u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
3713 - const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
3714 -- const u32 *out_field);
3715 -+ const u32 *out_field, unsigned long remainder);
3716 - int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
3717 - struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
3718 - struct nd_region_desc *ndr_desc);
3719 -diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
3720 -index 9692cda..c48d93a 100644
3721 ---- a/include/uapi/linux/can.h
3722 -+++ b/include/uapi/linux/can.h
3723 -@@ -196,5 +196,6 @@ struct can_filter {
3724 - };
3725 -
3726 - #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
3727 -+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
3728 -
3729 - #endif /* !_UAPI_CAN_H */
3730 -diff --git a/kernel/cpu.c b/kernel/cpu.c
3731 -index 341bf80..73fb59f 100644
3732 ---- a/kernel/cpu.c
3733 -+++ b/kernel/cpu.c
3734 -@@ -578,7 +578,6 @@ void __init cpuhp_threads_init(void)
3735 - kthread_unpark(this_cpu_read(cpuhp_state.thread));
3736 - }
3737 -
3738 --#ifdef CONFIG_HOTPLUG_CPU
3739 - EXPORT_SYMBOL(register_cpu_notifier);
3740 - EXPORT_SYMBOL(__register_cpu_notifier);
3741 - void unregister_cpu_notifier(struct notifier_block *nb)
3742 -@@ -595,6 +594,7 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
3743 - }
3744 - EXPORT_SYMBOL(__unregister_cpu_notifier);
3745 -
3746 -+#ifdef CONFIG_HOTPLUG_CPU
3747 - /**
3748 - * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
3749 - * @cpu: a CPU id
3750 -diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
3751 -index 1ec0f48..2c49d76 100644
3752 ---- a/kernel/locking/rtmutex.c
3753 -+++ b/kernel/locking/rtmutex.c
3754 -@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
3755 -
3756 - static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
3757 - {
3758 -- if (!rt_mutex_has_waiters(lock))
3759 -- clear_rt_mutex_waiters(lock);
3760 -+ unsigned long owner, *p = (unsigned long *) &lock->owner;
3761 -+
3762 -+ if (rt_mutex_has_waiters(lock))
3763 -+ return;
3764 -+
3765 -+ /*
3766 -+ * The rbtree has no waiters enqueued, now make sure that the
3767 -+ * lock->owner still has the waiters bit set, otherwise the
3768 -+ * following can happen:
3769 -+ *
3770 -+ * CPU 0 CPU 1 CPU2
3771 -+ * l->owner=T1
3772 -+ * rt_mutex_lock(l)
3773 -+ * lock(l->lock)
3774 -+ * l->owner = T1 | HAS_WAITERS;
3775 -+ * enqueue(T2)
3776 -+ * boost()
3777 -+ * unlock(l->lock)
3778 -+ * block()
3779 -+ *
3780 -+ * rt_mutex_lock(l)
3781 -+ * lock(l->lock)
3782 -+ * l->owner = T1 | HAS_WAITERS;
3783 -+ * enqueue(T3)
3784 -+ * boost()
3785 -+ * unlock(l->lock)
3786 -+ * block()
3787 -+ * signal(->T2) signal(->T3)
3788 -+ * lock(l->lock)
3789 -+ * dequeue(T2)
3790 -+ * deboost()
3791 -+ * unlock(l->lock)
3792 -+ * lock(l->lock)
3793 -+ * dequeue(T3)
3794 -+ * ==> wait list is empty
3795 -+ * deboost()
3796 -+ * unlock(l->lock)
3797 -+ * lock(l->lock)
3798 -+ * fixup_rt_mutex_waiters()
3799 -+ * if (wait_list_empty(l) {
3800 -+ * l->owner = owner
3801 -+ * owner = l->owner & ~HAS_WAITERS;
3802 -+ * ==> l->owner = T1
3803 -+ * }
3804 -+ * lock(l->lock)
3805 -+ * rt_mutex_unlock(l) fixup_rt_mutex_waiters()
3806 -+ * if (wait_list_empty(l) {
3807 -+ * owner = l->owner & ~HAS_WAITERS;
3808 -+ * cmpxchg(l->owner, T1, NULL)
3809 -+ * ===> Success (l->owner = NULL)
3810 -+ *
3811 -+ * l->owner = owner
3812 -+ * ==> l->owner = T1
3813 -+ * }
3814 -+ *
3815 -+ * With the check for the waiter bit in place T3 on CPU2 will not
3816 -+ * overwrite. All tasks fiddling with the waiters bit are
3817 -+ * serialized by l->lock, so nothing else can modify the waiters
3818 -+ * bit. If the bit is set then nothing can change l->owner either
3819 -+ * so the simple RMW is safe. The cmpxchg() will simply fail if it
3820 -+ * happens in the middle of the RMW because the waiters bit is
3821 -+ * still set.
3822 -+ */
3823 -+ owner = READ_ONCE(*p);
3824 -+ if (owner & RT_MUTEX_HAS_WAITERS)
3825 -+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
3826 - }
3827 -
3828 - /*
3829 -diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
3830 -index 4f5f83c..e317e1c 100644
3831 ---- a/kernel/locking/rtmutex_common.h
3832 -+++ b/kernel/locking/rtmutex_common.h
3833 -@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
3834 -
3835 - static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
3836 - {
3837 -- return (struct task_struct *)
3838 -- ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
3839 -+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
3840 -+
3841 -+ return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
3842 - }
3843 -
3844 - /*
3845 -diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
3846 -index a5d966c..418d9b6 100644
3847 ---- a/kernel/sched/auto_group.c
3848 -+++ b/kernel/sched/auto_group.c
3849 -@@ -192,6 +192,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
3850 - {
3851 - static unsigned long next = INITIAL_JIFFIES;
3852 - struct autogroup *ag;
3853 -+ unsigned long shares;
3854 - int err;
3855 -
3856 - if (nice < MIN_NICE || nice > MAX_NICE)
3857 -@@ -210,9 +211,10 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
3858 -
3859 - next = HZ / 10 + jiffies;
3860 - ag = autogroup_task_get(p);
3861 -+ shares = scale_load(sched_prio_to_weight[nice + 20]);
3862 -
3863 - down_write(&ag->lock);
3864 -- err = sched_group_set_shares(ag->tg, sched_prio_to_weight[nice + 20]);
3865 -+ err = sched_group_set_shares(ag->tg, shares);
3866 - if (!err)
3867 - ag->nice = nice;
3868 - up_write(&ag->lock);
3869 -diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
3870 -index 7e6df7a..67f8fa9 100644
3871 ---- a/net/batman-adv/translation-table.c
3872 -+++ b/net/batman-adv/translation-table.c
3873 -@@ -2849,7 +2849,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
3874 - &tvlv_tt_data,
3875 - &tt_change,
3876 - &tt_len);
3877 -- if (!tt_len)
3878 -+ if (!tt_len || !tvlv_len)
3879 - goto unlock;
3880 -
3881 - /* Copy the last orig_node's OGM buffer */
3882 -@@ -2867,7 +2867,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
3883 - &tvlv_tt_data,
3884 - &tt_change,
3885 - &tt_len);
3886 -- if (!tt_len)
3887 -+ if (!tt_len || !tvlv_len)
3888 - goto out;
3889 -
3890 - /* fill the rest of the tvlv with the real TT entries */
3891 -diff --git a/net/can/raw.c b/net/can/raw.c
3892 -index 972c187..b075f02 100644
3893 ---- a/net/can/raw.c
3894 -+++ b/net/can/raw.c
3895 -@@ -499,6 +499,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
3896 - if (optlen % sizeof(struct can_filter) != 0)
3897 - return -EINVAL;
3898 -
3899 -+ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
3900 -+ return -EINVAL;
3901 -+
3902 - count = optlen / sizeof(struct can_filter);
3903 -
3904 - if (count > 1) {
3905
3906 diff --git a/4.8.15/4420_grsecurity-3.1-4.8.15-201612151923.patch b/4.8.15/4420_grsecurity-3.1-4.8.15-201612301949.patch
3907 similarity index 99%
3908 rename from 4.8.15/4420_grsecurity-3.1-4.8.15-201612151923.patch
3909 rename to 4.8.15/4420_grsecurity-3.1-4.8.15-201612301949.patch
3910 index f7b8b72..b16a42d 100644
3911 --- a/4.8.15/4420_grsecurity-3.1-4.8.15-201612151923.patch
3912 +++ b/4.8.15/4420_grsecurity-3.1-4.8.15-201612301949.patch
3913 @@ -22150,24 +22150,28 @@ index 6ca9fd6..4c0aa55 100644
3914 * Vectors 0x30-0x3f are used for ISA interrupts.
3915 * round up to the next 16-vector boundary
3916 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
3917 -index b77f5ed..3862b91 100644
3918 +index b77f5ed..cbf5ec6 100644
3919 --- a/arch/x86/include/asm/irqflags.h
3920 +++ b/arch/x86/include/asm/irqflags.h
3921 -@@ -23,11 +23,13 @@ static inline unsigned long native_save_fl(void)
3922 +@@ -23,11 +23,17 @@ static inline unsigned long native_save_fl(void)
3923 : /* no input */
3924 : "memory");
3925
3926 ++#if !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_HOST) || !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
3927 + BUG_ON(flags & X86_EFLAGS_AC);
3928 ++#endif
3929 return flags;
3930 }
3931
3932 static inline void native_restore_fl(unsigned long flags)
3933 {
3934 ++#if !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_HOST) || !defined(CONFIG_GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
3935 + BUG_ON(flags & X86_EFLAGS_AC);
3936 ++#endif
3937 asm volatile("push %0 ; popf"
3938 : /* no output */
3939 :"g" (flags)
3940 -@@ -137,6 +139,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
3941 +@@ -137,6 +143,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
3942 swapgs; \
3943 sysretl
3944
3945 @@ -120398,7 +120402,7 @@ index fd6be45..6be6542 100644
3946
3947 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
3948 new file mode 100644
3949 -index 0000000..307ca55
3950 +index 0000000..f1c1ee2
3951 --- /dev/null
3952 +++ b/grsecurity/Kconfig
3953 @@ -0,0 +1,1206 @@
3954 @@ -120600,7 +120604,7 @@ index 0000000..307ca55
3955 +config GRKERNSEC_HIDESYM
3956 + bool "Hide kernel symbols"
3957 + default y if GRKERNSEC_CONFIG_AUTO
3958 -+ select PAX_USERCOPY_SLABS
3959 ++ select PAX_USERCOPY
3960 + help
3961 + If you say Y here, getting information on loaded modules, and
3962 + displaying all kernel symbols through a syscall will be restricted
3963
3964 diff --git a/4.8.15/4426_default_XATTR_PAX_FLAGS.patch b/4.8.15/4426_default_XATTR_PAX_FLAGS.patch
3965 new file mode 100644
3966 index 0000000..f7e97b5
3967 --- /dev/null
3968 +++ b/4.8.15/4426_default_XATTR_PAX_FLAGS.patch
3969 @@ -0,0 +1,36 @@
3970 +diff -Naur linux-4.8.15-hardened-r1.orig/security/Kconfig linux-4.8.15-hardened-r1/security/Kconfig
3971 +--- linux-4.8.15-hardened-r1.orig/security/Kconfig 2017-01-01 12:10:19.638828792 -0500
3972 ++++ linux-4.8.15-hardened-r1/security/Kconfig 2017-01-01 12:14:05.434836657 -0500
3973 +@@ -293,7 +293,7 @@
3974 +
3975 + config PAX_PT_PAX_FLAGS
3976 + bool 'Use ELF program header marking'
3977 +- default y if GRKERNSEC_CONFIG_AUTO
3978 ++ default n
3979 + help
3980 + Enabling this option will allow you to control PaX features on
3981 + a per executable basis via the 'paxctl' utility available at
3982 +@@ -312,9 +312,12 @@
3983 + If you enable none of the marking options then all applications
3984 + will run with PaX enabled on them by default.
3985 +
3986 ++ Note for Gentoo: PT_PAX_FLAGS has been deprecated in Gentoo. Enable
3987 ++ this only for legacy systems.
3988 ++
3989 + config PAX_XATTR_PAX_FLAGS
3990 + bool 'Use filesystem extended attributes marking'
3991 +- default y if GRKERNSEC_CONFIG_AUTO
3992 ++ default y
3993 + select CIFS_XATTR if CIFS
3994 + select EXT2_FS_XATTR if EXT2_FS
3995 + select EXT3_FS_XATTR if EXT3_FS
3996 +@@ -343,6 +346,9 @@
3997 + If you enable none of the marking options then all applications
3998 + will run with PaX enabled on them by default.
3999 +
4000 ++ Note for Gentoo: XATTR_PAX_FLAGS is now the default in Gentoo. Do
4001 ++ not disable this unless you know what you're doing.
4002 ++
4003 + choice
4004 + prompt 'MAC system integration'
4005 + default PAX_HAVE_ACL_FLAGS
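
For readers unfamiliar with the xattr-based marking that 4426_default_XATTR_PAX_FLAGS.patch
makes the default, the sketch below (not part of the patchset) shows how a PaX flag ends up
stored as a "user.pax.flags" extended attribute on a binary. It is a minimal illustration
using the standard setxattr(2) call from <sys/xattr.h>; the file path and the "m" flag
(disable MPROTECT) are only examples, and on a real Gentoo system the paxctl-ng or setfattr
tools would normally be used instead of calling the syscall directly.

/*
 * Illustrative only: write a PaX marking as the "user.pax.flags"
 * extended attribute, the mechanism enabled by PAX_XATTR_PAX_FLAGS.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char *argv[])
{
	const char *path  = argc > 1 ? argv[1] : "./a.out"; /* example path */
	const char *flags = argc > 2 ? argv[2] : "m";       /* example flag: disable MPROTECT */

	/* flags argument 0: create the attribute or replace an existing value */
	if (setxattr(path, "user.pax.flags", flags, strlen(flags), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	printf("set user.pax.flags=%s on %s\n", flags, path);
	return 0;
}

Equivalent marking with user-space tools would look like
"setfattr -n user.pax.flags -v m /path/to/binary", which is what the XATTR_PAX help text
above refers to when it says applications are controlled on a per-executable basis.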