From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 4.1.3/
Date: Wed, 29 Jul 2015 13:20:36
Message-Id: 1438176152.d8f474ed8b41ce8c105ac603af9afbba9f60181d.blueness@gentoo
commit: d8f474ed8b41ce8c105ac603af9afbba9f60181d
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 29 13:22:32 2015 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Jul 29 13:22:32 2015 +0000
URL: https://gitweb.gentoo.org/proj/hardened-patchset.git/commit/?id=d8f474ed

grsecurity-3.1-4.1.3-201507281943

4.1.3/0000_README | 2 +-
...> 4420_grsecurity-3.1-4.1.3-201507281943.patch} | 770 ++++++++++++++++++---
2 files changed, 686 insertions(+), 86 deletions(-)

diff --git a/4.1.3/0000_README b/4.1.3/0000_README
index cbe10c3..68a3992 100644
--- a/4.1.3/0000_README
+++ b/4.1.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.1.3-201507261932.patch
+Patch: 4420_grsecurity-3.1-4.1.3-201507281943.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity


diff --git a/4.1.3/4420_grsecurity-3.1-4.1.3-201507261932.patch b/4.1.3/4420_grsecurity-3.1-4.1.3-201507281943.patch
similarity index 99%
rename from 4.1.3/4420_grsecurity-3.1-4.1.3-201507261932.patch
rename to 4.1.3/4420_grsecurity-3.1-4.1.3-201507281943.patch
index c2c4ded..fc096b0 100644
--- a/4.1.3/4420_grsecurity-3.1-4.1.3-201507261932.patch
+++ b/4.1.3/4420_grsecurity-3.1-4.1.3-201507281943.patch
@@ -16525,20 +16525,19 @@ index acdee09..a553db3 100644
struct compat_timespec {
compat_time_t tv_sec;
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
-index 3d6606f..5e22255 100644
+index 3d6606f..91703f1 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
-@@ -214,7 +214,8 @@
+@@ -214,7 +214,7 @@
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
-
-+#define X86_FEATURE_PCIDUDEREF ( 8*32+30) /* PaX PCID based UDEREF */
+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -222,7 +223,7 @@
+@@ -222,7 +222,7 @@
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -16547,7 +16546,7 @@ index 3d6606f..5e22255 100644
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-@@ -401,6 +402,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+@@ -401,6 +401,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
@@ -16555,7 +16554,7 @@ index 3d6606f..5e22255 100644

#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
-@@ -454,7 +456,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -454,7 +455,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
@@ -16565,7 +16564,7 @@ index 3d6606f..5e22255 100644
return false;
#endif

-@@ -475,7 +478,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+@@ -475,7 +477,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -16574,7 +16573,7 @@ index 3d6606f..5e22255 100644
"3: movb $1,%0\n"
"4:\n"
".previous\n"
-@@ -510,7 +513,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -510,7 +512,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .byte 5f - 4f\n" /* repl len */
" .byte 3b - 2b\n" /* pad len */
".previous\n"
@@ -16583,7 +16582,7 @@ index 3d6606f..5e22255 100644
"4: jmp %l[t_no]\n"
"5:\n"
".previous\n"
-@@ -545,7 +548,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
".previous\n"
@@ -16592,7 +16591,7 @@ index 3d6606f..5e22255 100644
"3: movb $0,%0\n"
"4:\n"
".previous\n"
-@@ -560,7 +563,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+@@ -560,7 +562,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
".section .discard,\"aw\",@progbits\n"
" .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
".previous\n"
@@ -17439,7 +17438,7 @@ index 09b9620..923aecd 100644
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index 883f6b93..5184058 100644
+index 883f6b93..bb405b5 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
@@ -17448,7 +17447,7 @@ index 883f6b93..5184058 100644
{
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (!(static_cpu_has(X86_FEATURE_PCIDUDEREF))) {
++ if (!(static_cpu_has(X86_FEATURE_PCID))) {
+ unsigned int i;
+ pgd_t *pgd;
+
@@ -17486,7 +17485,7 @@ index 883f6b93..5184058 100644
+ pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF))
++ if (static_cpu_has(X86_FEATURE_PCID))
+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+ else
+#endif
@@ -17497,7 +17496,7 @@ index 883f6b93..5184058 100644
+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++ if (static_cpu_has(X86_FEATURE_PCID)) {
+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
+ u64 descriptor[2];
+ descriptor[0] = PCID_USER;
@@ -17554,7 +17553,7 @@ index 883f6b93..5184058 100644
+ pax_open_kernel();
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF))
++ if (static_cpu_has(X86_FEATURE_PCID))
+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
+ else
+#endif
@@ -17565,7 +17564,7 @@ index 883f6b93..5184058 100644
+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++ if (static_cpu_has(X86_FEATURE_PCID)) {
+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
+ u64 descriptor[2];
+ descriptor[0] = PCID_USER;
@@ -19402,7 +19401,7 @@ index b4bdec3..e8af9bc 100644
#endif
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
-index cd79194..6a9956f 100644
+index cd79194..e7a9491 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
@@ -19418,7 +19417,7 @@ index cd79194..6a9956f 100644
+ }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++ if (static_cpu_has(X86_FEATURE_PCID)) {
+ unsigned int cpu = raw_get_cpu();
+
+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
@@ -19456,7 +19455,7 @@ index cd79194..6a9956f 100644
}

static inline void __native_flush_tlb_global(void)
-@@ -118,6 +144,43 @@ static inline void __native_flush_tlb_global(void)
+@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)

static inline void __native_flush_tlb_single(unsigned long addr)
{
@@ -19467,16 +19466,14 @@ index cd79194..6a9956f 100644
+ descriptor[1] = addr;
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
-+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
-+ if (addr < TASK_SIZE_MAX)
-+ descriptor[1] += pax_user_shadow_base;
-+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
-+ }
-+
-+ descriptor[0] = PCID_USER;
-+ descriptor[1] = addr;
++ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
++ if (addr < TASK_SIZE_MAX)
++ descriptor[1] += pax_user_shadow_base;
++ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
+ }
++
++ descriptor[0] = PCID_USER;
++ descriptor[1] = addr;
+#endif
+
+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
@@ -19484,7 +19481,7 @@ index cd79194..6a9956f 100644
+ }
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
-+ if (static_cpu_has(X86_FEATURE_PCIDUDEREF)) {
++ if (static_cpu_has(X86_FEATURE_PCID)) {
+ unsigned int cpu = raw_get_cpu();
+
+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
@@ -21131,7 +21128,7 @@ index e4cf633..941f450 100644
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index a62cf04..56afd65 100644
+index a62cf04..041e39c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
@@ -21195,7 +21192,7 @@ index a62cf04..56afd65 100644
static int __init x86_xsave_setup(char *s)
{
if (strlen(s))
-@@ -306,6 +252,62 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}

@@ -21216,31 +21213,26 @@ index a62cf04..56afd65 100644
+
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
-+ if (cpu_has(c, X86_FEATURE_PCID)) {
-+ printk("PAX: PCID detected\n");
-+ cr4_set_bits(X86_CR4_PCIDE);
-+ } else
++ if (!cpu_has(c, X86_FEATURE_PCID)) {
+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
+
-+ if (cpu_has(c, X86_FEATURE_INVPCID))
-+ printk("PAX: INVPCID detected\n");
-+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ if (clone_pgd_mask == ~(pgdval_t)0UL) {
-+ printk("PAX: UDEREF disabled\n");
-+ return;
-+ }
++ if (clone_pgd_mask != ~(pgdval_t)0UL) {
++ pax_open_kernel();
++ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
++ pax_close_kernel();
++ printk("PAX: slow and weak UDEREF enabled\n");
++ } else
++ printk("PAX: UDEREF disabled\n");
++#endif
+
-+ if (!cpu_has(c, X86_FEATURE_PCID)) {
-+ pax_open_kernel();
-+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
-+ pax_close_kernel();
-+ printk("PAX: slow and weak UDEREF enabled\n");
+ return;
+ }
+
-+ set_cpu_cap(c, X86_FEATURE_PCIDUDEREF);
++ printk("PAX: PCID detected\n");
++ cr4_set_bits(X86_CR4_PCIDE);
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
+ pax_open_kernel();
+ clone_pgd_mask = ~(pgdval_t)0UL;
+ pax_close_kernel();
@@ -21252,13 +21244,15 @@ index a62cf04..56afd65 100644
+ }
+#endif
+
++ if (cpu_has(c, X86_FEATURE_INVPCID))
++ printk("PAX: INVPCID detected\n");
+}
+#endif
+
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -406,7 +408,7 @@ void switch_to_new_gdt(int cpu)
+@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;

@@ -21267,7 +21261,7 @@ index a62cf04..56afd65 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -935,6 +937,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -935,6 +934,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);

@@ -21288,7 +21282,7 @@ index a62cf04..56afd65 100644
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -1009,7 +1025,7 @@ void enable_sep_cpu(void)
+@@ -1009,7 +1022,7 @@ void enable_sep_cpu(void)
int cpu;

cpu = get_cpu();
@@ -21297,7 +21291,7 @@ index a62cf04..56afd65 100644

if (!boot_cpu_has(X86_FEATURE_SEP))
goto out;
-@@ -1155,14 +1171,16 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1155,14 +1168,16 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);

@@ -21318,7 +21312,7 @@ index a62cf04..56afd65 100644

DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1367,7 +1385,7 @@ void cpu_init(void)
+@@ -1367,7 +1382,7 @@ void cpu_init(void)
*/
load_ucode_ap();

@@ -21327,7 +21321,7 @@ index a62cf04..56afd65 100644
oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
-@@ -1399,7 +1417,6 @@ void cpu_init(void)
+@@ -1399,7 +1414,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();

-
x2apic_setup();

/*
-@@ -1451,7 +1468,7 @@ void cpu_init(void)
+@@ -1451,7 +1465,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -21745,10 +21739,25 @@ index 7795f3f..3535b76 100644
__bts_event_stop(event);

diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-index e4d1b8b..2c6ffa0 100644
+index e4d1b8b..8867302 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-@@ -1352,7 +1352,9 @@ static int __init intel_cqm_init(void)
+@@ -934,6 +934,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
+ return 0;
+
+ /*
++ * Getting up-to-date values requires an SMP IPI which is not
++ * possible if we're being called in interrupt context. Return
++ * the cached values instead.
++ */
++ if (unlikely(in_interrupt()))
++ goto out;
++
++ /*
+ * Notice that we don't perform the reading of an RMID
+ * atomically, because we can't hold a spin lock across the
+ * IPIs.
+@@ -1352,7 +1360,9 @@ static int __init intel_cqm_init(void)
goto out;
}

@@ -33559,10 +33568,10 @@ index 9ca35fc..4b2b7b7 100644

return (void *)vaddr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index 70e7444..75b9a13 100644
+index 70e7444..e9904fd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
-@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+@@ -56,12 +56,10 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
unsigned long i;

for (i = 0; i < nr_pages; ++i)
@@ -33572,8 +33581,50 @@ index 70e7444..75b9a13 100644
+ !PageReserved(pfn_to_page(start_pfn + i))))
return 1;

- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-@@ -288,7 +288,7 @@ EXPORT_SYMBOL(ioremap_prot);
+- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
+-
+ return 0;
+ }
+
+@@ -91,7 +89,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ pgprot_t prot;
+ int retval;
+ void __iomem *ret_addr;
+- int ram_region;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+@@ -114,23 +111,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- /* First check if whole region can be identified as RAM or not */
+- ram_region = region_is_ram(phys_addr, size);
+- if (ram_region > 0) {
+- WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
+- (unsigned long int)phys_addr,
+- (unsigned long int)last_addr);
++ pfn = phys_addr >> PAGE_SHIFT;
++ last_pfn = last_addr >> PAGE_SHIFT;
++ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
++ __ioremap_check_ram) == 1) {
++ WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
++ phys_addr, last_addr);
+ return NULL;
+ }
+
+- /* If could not be identified(-1), check page by page */
+- if (ram_region < 0) {
+- pfn = phys_addr >> PAGE_SHIFT;
+- last_pfn = last_addr >> PAGE_SHIFT;
+- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+- __ioremap_check_ram) == 1)
+- return NULL;
+- }
+ /*
+ * Mappings have to be page-aligned
+ */
+@@ -288,7 +277,7 @@ EXPORT_SYMBOL(ioremap_prot);
*
* Caller must ensure there is only one unmapping for the same pointer.
*/
@@ -33582,7 +33633,7 @@ index 70e7444..75b9a13 100644
{
struct vm_struct *p, *o;

-@@ -351,32 +351,36 @@ int arch_ioremap_pmd_supported(void)
+@@ -351,32 +340,36 @@ int arch_ioremap_pmd_supported(void)
*/
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
@@ -33635,7 +33686,7 @@ index 70e7444..75b9a13 100644

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
-@@ -412,8 +416,7 @@ void __init early_ioremap_init(void)
+@@ -412,8 +405,7 @@ void __init early_ioremap_init(void)
early_ioremap_setup();

pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
@@ -34407,7 +34458,7 @@ index 90555bf..f5f1828 100644
}

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
-index 3250f23..7a97ba2 100644
+index 3250f23..4197ac2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -45,7 +45,11 @@ void leave_mm(int cpu)

/*
* This gets called in the idle path where RCU
* functions differently. Tracing normally
+@@ -117,7 +121,7 @@ static void flush_tlb_func(void *info)
+ } else {
+ unsigned long addr;
+ unsigned long nr_pages =
+- f->flush_end - f->flush_start / PAGE_SIZE;
++ (f->flush_end - f->flush_start) / PAGE_SIZE;
+ addr = f->flush_start;
+ while (addr < f->flush_end) {
+ __flush_tlb_single(addr);
diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
new file mode 100644
index 0000000..3fda3f3
@@ -47683,6 +47743,19 @@ index c439c82..1f20f57 100644
union axis_conversion ac; /* hw -> logical axis */
int mapped_btns[3];

+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 3e29681..e40bcd03 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -685,7 +685,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
+ /* Fill in the data structures */
+ devno = MKDEV(MAJOR(mei_devt), dev->minor);
+ cdev_init(&dev->cdev, &mei_fops);
+- dev->cdev.owner = mei_fops.owner;
++ dev->cdev.owner = parent->driver->owner;
+
+ /* Add the device */
+ ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 2f30bad..c4c13d0 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
@@ -96084,6 +96157,34 @@ index 7ee1774..72505b8 100644
}

/*
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 1da6029..6cd8c0e 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+ * SAVE_REGS. If another ops with this flag set is already registered
+ * for any of the functions that this ops will be registered for, then
+ * this ops will fail to register or set_filter_ip.
++ * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
+ */
+ enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+@@ -132,6 +133,7 @@ enum {
+ FTRACE_OPS_FL_MODIFYING = 1 << 11,
+ FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
+ FTRACE_OPS_FL_IPMODIFY = 1 << 13,
++ FTRACE_OPS_FL_PID = 1 << 14,
+ };
+
+ #ifdef CONFIG_DYNAMIC_FTRACE
+@@ -159,6 +161,7 @@ struct ftrace_ops {
+ struct ftrace_ops *next;
+ unsigned long flags;
+ void *private;
++ ftrace_func_t saved_func;
+ int __percpu *disabled;
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ int nr_trampolines;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0..e678159 100644
--- a/include/linux/genhd.h
@@ -101287,6 +101388,71 @@ index 0320bbb..938789c 100644

/** inet_connection_sock - INET connection oriented sock
*
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 8d17655..2f3246d 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -21,13 +21,11 @@ struct netns_frags {
+ * @INET_FRAG_FIRST_IN: first fragment has arrived
+ * @INET_FRAG_LAST_IN: final fragment has arrived
+ * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
+- * @INET_FRAG_EVICTED: frag queue is being evicted
+ */
+ enum {
+ INET_FRAG_FIRST_IN = BIT(0),
+ INET_FRAG_LAST_IN = BIT(1),
+ INET_FRAG_COMPLETE = BIT(2),
+- INET_FRAG_EVICTED = BIT(3)
+ };
+
+ /**
+@@ -45,6 +43,7 @@ enum {
+ * @flags: fragment queue flags
+ * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @net: namespace that this frag belongs to
++ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ */
+ struct inet_frag_queue {
+ spinlock_t lock;
+@@ -59,6 +58,7 @@ struct inet_frag_queue {
+ __u8 flags;
+ u16 max_size;
+ struct netns_frags *net;
++ struct hlist_node list_evictor;
+ };
+
+ #define INETFRAGS_HASHSZ 1024
+@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
+ inet_frag_destroy(q, f);
+ }
+
++static inline bool inet_frag_evicting(struct inet_frag_queue *q)
++{
++ return !hlist_unhashed(&q->list_evictor);
++}
++
+ /* Memory Tracking Functions. */
+
+ /* The default percpu_counter batch size is not big enough to scale to
+@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
+ return percpu_counter_read(&nf->mem);
+ }
+
+-static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
++static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
++ __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ }
+
+-static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
++static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+ {
+- __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
++ __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ }
+
+ static inline void init_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332dd..10a5c3c 100644
--- a/include/net/inetpeer.h
@@ -102761,7 +102927,7 @@ index ad1bd77..dca2c1b 100644
next_state = Reset;
return 0;
diff --git a/init/main.c b/init/main.c
-index 2a89545..eb9203f 100644
+index 2a89545..449eca2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -97,6 +97,8 @@ extern void radix_tree_init(void);

/*
* Debug helper: via this flag we know that we are in 'early bootup code'
* where only the boot processor is running with IRQ disabled. This means
-@@ -158,6 +160,84 @@ static int __init set_reset_devices(char *str)
+@@ -158,6 +160,85 @@ static int __init set_reset_devices(char *str)

__setup("reset_devices", set_reset_devices);

@@ -102826,7 +102992,8 @@ index 2a89545..eb9203f 100644
+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
+ clone_pgd_mask = ~(pgdval_t)0UL;
+ pax_user_shadow_base = 0UL;
-+ setup_clear_cpu_cap(X86_FEATURE_PCIDUDEREF);
++ setup_clear_cpu_cap(X86_FEATURE_PCID);
++ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+#endif
+
+ return 0;
@@ -102858,7 +103025,7 @@ index 2a89545..eb9203f 100644
static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;
-@@ -726,7 +806,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
+@@ -726,7 +807,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
struct blacklist_entry *entry;
char *fn_name;

@@ -102867,7 +103034,7 @@ index 2a89545..eb9203f 100644
if (!fn_name)
return false;

-@@ -778,7 +858,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
+@@ -778,7 +859,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
{
int count = preempt_count();
int ret;
@@ -102876,7 +103043,7 @@ index 2a89545..eb9203f 100644

if (initcall_blacklisted(fn))
return -EPERM;
-@@ -788,18 +868,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
+@@ -788,18 +869,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
else
ret = fn();

@@ -102899,7 +103066,7 @@ index 2a89545..eb9203f 100644
return ret;
}

-@@ -905,8 +984,8 @@ static int run_init_process(const char *init_filename)
+@@ -905,8 +985,8 @@ static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
return do_execve(getname_kernel(init_filename),
@@ -102910,7 +103077,7 @@ index 2a89545..eb9203f 100644
}

static int try_to_run_init_process(const char *init_filename)
-@@ -923,6 +1002,10 @@ static int try_to_run_init_process(const char *init_filename)
+@@ -923,6 +1003,10 @@ static int try_to_run_init_process(const char *init_filename)
return ret;
}

@@ -102921,7 +103088,7 @@ index 2a89545..eb9203f 100644
static noinline void __init kernel_init_freeable(void);

static int __ref kernel_init(void *unused)
-@@ -947,6 +1030,11 @@ static int __ref kernel_init(void *unused)
+@@ -947,6 +1031,11 @@ static int __ref kernel_init(void *unused)
ramdisk_execute_command, ret);
}

@@ -102933,7 +103100,7 @@ index 2a89545..eb9203f 100644
/*
* We try each of these until one succeeds.
*
-@@ -1002,7 +1090,7 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1002,7 +1091,7 @@ static noinline void __init kernel_init_freeable(void)
do_basic_setup();

/* Open the /dev/console on the rootfs, this should never fail */
@@ -102942,7 +103109,7 @@ index 2a89545..eb9203f 100644
pr_err("Warning: unable to open an initial console.\n");

(void) sys_dup(0);
-@@ -1015,11 +1103,13 @@ static noinline void __init kernel_init_freeable(void)
+@@ -1015,11 +1104,13 @@ static noinline void __init kernel_init_freeable(void)
if (!ramdisk_execute_command)
ramdisk_execute_command = "/init";

@@ -107685,7 +107852,7 @@ index 1f13335..77ebb7f 100644
}

diff --git a/kernel/resource.c b/kernel/resource.c
-index 90552aa..8c02098 100644
+index 90552aa..ad13346 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
return 0;
}
__initcall(ioresources_init);
+@@ -504,13 +514,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
+ {
+ struct resource *p;
+ resource_size_t end = start + size - 1;
+- int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ const char *name = "System RAM";
+ int ret = -1;
+
+ read_lock(&resource_lock);
+ for (p = iomem_resource.child; p ; p = p->sibling) {
+- if (end < p->start)
++ if (p->end < start)
+ continue;
+
+ if (p->start <= start && end <= p->end) {
+@@ -521,7 +531,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
+ ret = 1;
+ break;
+ }
+- if (p->end < start)
++ if (end < p->start)
+ break; /* not found */
+ }
+ read_unlock(&resource_lock);
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index eae160d..c9aa22e 100644
--- a/kernel/sched/auto_group.c
@@ -109195,10 +109387,108 @@ index 483cecf..ac46091 100644

ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 02bece4..f9b05af 100644
+index 02bece4..43adc29 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
+@@ -98,6 +98,13 @@ struct ftrace_pid {
+ struct pid *pid;
+ };
+
++static bool ftrace_pids_enabled(void)
++{
++ return !list_empty(&ftrace_pids);
++}
++
++static void ftrace_update_trampoline(struct ftrace_ops *ops);
++
+ /*
+ * ftrace_disabled is set when an anomaly is discovered.
+ * ftrace_disabled is much stronger than ftrace_enabled.
+@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
+ static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
+ static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
+ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+ static struct ftrace_ops global_ops;
+ static struct ftrace_ops control_ops;
+
+@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+ if (!test_tsk_trace_trace(current))
+ return;
+
+- ftrace_pid_function(ip, parent_ip, op, regs);
+-}
+-
+-static void set_ftrace_pid_function(ftrace_func_t func)
+-{
+- /* do not set ftrace_pid_function to itself! */
+- if (func != ftrace_pid_func)
+- ftrace_pid_function = func;
++ op->saved_func(ip, parent_ip, op, regs);
+ }
+
+ /**
+@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
+ void clear_ftrace_function(void)
+ {
+ ftrace_trace_function = ftrace_stub;
+- ftrace_pid_function = ftrace_stub;
+ }
+
+ static void control_ops_disable_all(struct ftrace_ops *ops)
+@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
+ } else
+ add_ftrace_ops(&ftrace_ops_list, ops);
+
++ /* Always save the function, and reset at unregistering */
++ ops->saved_func = ops->func;
++
++ if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
++ ops->func = ftrace_pid_func;
++
+ ftrace_update_trampoline(ops);
+
+ if (ftrace_enabled)
+@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ if (ftrace_enabled)
+ update_ftrace_function();
+
++ ops->func = ops->saved_func;
++
+ return 0;
+ }
+
+ static void ftrace_update_pid_func(void)
+ {
++ bool enabled = ftrace_pids_enabled();
++ struct ftrace_ops *op;
++
+ /* Only do something if we are tracing something */
+ if (ftrace_trace_function == ftrace_stub)
+ return;
+
++ do_for_each_ftrace_op(op, ftrace_ops_list) {
++ if (op->flags & FTRACE_OPS_FL_PID) {
++ op->func = enabled ? ftrace_pid_func :
++ op->saved_func;
++ ftrace_update_trampoline(op);
++ }
++ } while_for_each_ftrace_op(op);
++
+ update_ftrace_function();
+ }
+
+@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
+ .local_hash.filter_hash = EMPTY_HASH,
+ INIT_OPS_HASH(global_ops)
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+- FTRACE_OPS_FL_INITIALIZED,
++ FTRACE_OPS_FL_INITIALIZED |
++ FTRACE_OPS_FL_PID,
+ };
+
+ /*
+@@ -2395,12 +2413,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;

@@ -109218,7 +109508,7 @@ index 02bece4..f9b05af 100644
}

/*
-@@ -4789,8 +4794,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4789,8 +4812,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;

@@ -109229,7 +109519,47 @@ index 02bece4..f9b05af 100644

start_pg = ftrace_allocate_pages(count);
if (!start_pg)
+@@ -5023,7 +5048,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
+
+ static struct ftrace_ops global_ops = {
+ .func = ftrace_stub,
+- .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
++ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
++ FTRACE_OPS_FL_INITIALIZED |
++ FTRACE_OPS_FL_PID,
+ };
+
+ static int __init ftrace_nodyn_init(void)
+@@ -5080,11 +5107,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+ if (WARN_ON(tr->ops->func != ftrace_stub))
+ printk("ftrace ops had %pS for function\n",
+ tr->ops->func);
+- /* Only the top level instance does pid tracing */
+- if (!list_empty(&ftrace_pids)) {
+- set_ftrace_pid_function(func);
+- func = ftrace_pid_func;
+- }
+ }
+ tr->ops->func = func;
+ tr->ops->private = tr;
+@@ -5371,7 +5393,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
+ {
+ mutex_lock(&ftrace_lock);
+
+- if (list_empty(&ftrace_pids) && (!*pos))
++ if (!ftrace_pids_enabled() && (!*pos))
+ return (void *) 1;
+
+ return seq_list_start(&ftrace_pids, *pos);
+@@ -5610,6 +5632,7 @@ static struct ftrace_ops graph_ops = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_RECURSION_SAFE |
+ FTRACE_OPS_FL_INITIALIZED |
++ FTRACE_OPS_FL_PID |
+ FTRACE_OPS_FL_STUB,
+ #ifdef FTRACE_GRAPH_TRAMP_ADDR
+ .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
+@@ -5659,7 +5682,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)

if (t->ret_stack == NULL) {
atomic_set(&t->tracing_graph_pause, 0);
@@ -109238,7 +109568,7 @@ index 02bece4..f9b05af 100644
t->curr_ret_stack = -1;
/* Make sure the tasks see the -1 first: */
smp_wmb();
-@@ -5882,7 +5889,7 @@ static void
+@@ -5882,7 +5905,7 @@ static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
atomic_set(&t->tracing_graph_pause, 0);
@@ -116603,6 +116933,21 @@ index 8e385a0..a5bdd8e 100644

tty_port_close(&dev->port, tty, filp);
}
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 1ab3dc9..7b815bc 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ return 1;
+
+ chan = conn->smp;
++ if (!chan) {
++ BT_ERR("SMP security requested but not available");
++ return 1;
++ }
+
+ if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
+ return 1;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e29ad70b..cc00066 100644
--- a/net/bridge/br_mdb.c
@@ -117842,9 +118187,36 @@ index 0ae5822..3fe3627 100644
.priv_size = sizeof(struct lowpan_dev_info),
.setup = lowpan_setup,
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
-index f46e4d1..30231f1 100644
+index f46e4d1..dcb7f86 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -207,7 +207,7 @@ found:
+ } else {
+ fq->q.meat += skb->len;
+ }
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+ clone->data_len = clone->len;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ WARN_ON(head == NULL);
+@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&fq->q, sum_truesize);
++ sub_frag_mem_limit(fq->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
@@ -118030,6 +118402,117 @@ index 8d695b6..752d427a 100644

return nh->nh_saddr;
}
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 5e346a0..d0a7c03 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+ unsigned int evicted = 0;
+ HLIST_HEAD(expired);
+
+-evict_again:
+ spin_lock(&hb->chain_lock);
+
+ hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+ if (!inet_fragq_should_evict(fq))
+ continue;
+
+- if (!del_timer(&fq->timer)) {
+- /* q expiring right now thus increment its refcount so
+- * it won't be freed under us and wait until the timer
+- * has finished executing then destroy it
+- */
+- atomic_inc(&fq->refcnt);
+- spin_unlock(&hb->chain_lock);
+- del_timer_sync(&fq->timer);
+- inet_frag_put(fq, f);
+- goto evict_again;
+- }
++ if (!del_timer(&fq->timer))
++ continue;
+
+- fq->flags |= INET_FRAG_EVICTED;
+- hlist_del(&fq->list);
+- hlist_add_head(&fq->list, &expired);
++ hlist_add_head(&fq->list_evictor, &expired);
+ ++evicted;
+ }
+
+ spin_unlock(&hb->chain_lock);
+
+- hlist_for_each_entry_safe(fq, n, &expired, list)
++ hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
+ f->frag_expire((unsigned long) fq);
+
+ return evicted;
+@@ -240,19 +228,21 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+ int i;
+
+ nf->low_thresh = 0;
+- local_bh_disable();
+
+ evict_again:
++ local_bh_disable();
+ seq = read_seqbegin(&f->rnd_seqlock);
+
+ for (i = 0; i < INETFRAGS_HASHSZ ; i++)
+ inet_evict_bucket(f, &f->hash[i]);
+
+- if (read_seqretry(&f->rnd_seqlock, seq))
++ local_bh_enable();
++ cond_resched();
++
++ if (read_seqretry(&f->rnd_seqlock, seq) ||
++ percpu_counter_sum(&nf->mem))
+ goto evict_again;
+
+- local_bh_enable();
+-
+ percpu_counter_destroy(&nf->mem);
+ }
+ EXPORT_SYMBOL(inet_frags_exit_net);
+@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+ struct inet_frag_bucket *hb;
+
+ hb = get_frag_bucket_locked(fq, f);
+- if (!(fq->flags & INET_FRAG_EVICTED))
+- hlist_del(&fq->list);
++ hlist_del(&fq->list);
++ fq->flags |= INET_FRAG_COMPLETE;
+ spin_unlock(&hb->chain_lock);
+ }
+
+@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+ fq_unlink(fq, f);
+ atomic_dec(&fq->refcnt);
+- fq->flags |= INET_FRAG_COMPLETE;
+ }
+ }
+ EXPORT_SYMBOL(inet_frag_kill);
+@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+ fp = xp;
+ }
+ sum = sum_truesize + f->qsize;
+- sub_frag_mem_limit(q, sum);
+
+ if (f->destructor)
+ f->destructor(q);
+ kmem_cache_free(f->frags_cachep, q);
++
++ sub_frag_mem_limit(nf, sum);
+ }
+ EXPORT_SYMBOL(inet_frag_destroy);
+
+@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
+
+ q->net = nf;
+ f->constructor(q, arg);
+- add_frag_mem_limit(q, f->qsize);
++ add_frag_mem_limit(nf, f->qsize);
+
+ setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
+ spin_lock_init(&q->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c6fb80b..8705495 100644
--- a/net/ipv4/inet_hashtables.c
@@ -118074,9 +118557,18 @@ index 241afd7..31b95d5 100644
p->rate_tokens = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
-index cc1da6d..64b1534 100644
+index cc1da6d..593fc73 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
+@@ -192,7 +192,7 @@ static void ip_expire(unsigned long arg)
+ ipq_kill(qp);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+
+- if (!(qp->q.flags & INET_FRAG_EVICTED)) {
++ if (!inet_frag_evicting(&qp->q)) {
+ struct sk_buff *head = qp->q.fragments;
+ const struct iphdr *iph;
+ int err;
@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
return 0;

@@ -118086,6 +118578,51 @@ index cc1da6d..64b1534 100644
qp->rid = end;

rc = qp->q.fragments && (end - start) > max;
+@@ -301,7 +301,7 @@ static int ip_frag_reinit(struct ipq *qp)
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+- sub_frag_mem_limit(&qp->q, sum_truesize);
++ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ qp->q.flags = 0;
+ qp->q.len = 0;
+@@ -446,7 +446,7 @@ found:
+ qp->q.fragments = next;
+
+ qp->q.meat -= free_it->len;
+- sub_frag_mem_limit(&qp->q, free_it->truesize);
++ sub_frag_mem_limit(qp->q.net, free_it->truesize);
+ kfree_skb(free_it);
+ }
+ }
+@@ -470,7 +470,7 @@ found:
+ qp->q.stamp = skb->tstamp;
+ qp->q.meat += skb->len;
+ qp->ecn |= ecn;
+- add_frag_mem_limit(&qp->q, skb->truesize);
++ add_frag_mem_limit(qp->q.net, skb->truesize);
+ if (offset == 0)
+ qp->q.flags |= INET_FRAG_FIRST_IN;
+
+@@ -573,7 +573,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+- add_frag_mem_limit(&qp->q, clone->truesize);
++ add_frag_mem_limit(qp->q.net, clone->truesize);
+ }
+
+ skb_push(head, head->data - skb_network_header(head));
+@@ -601,7 +601,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&qp->q, sum_truesize);
++ sub_frag_mem_limit(qp->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -750,12 +750,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
@@ -119380,7 +119917,7 @@ index 62f5b0d..331fdb1 100644

case IP6T_SO_GET_ENTRIES:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
-index 6f187c8..34b367f 100644
+index 6f187c8..55e564f 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
@@ -119421,6 +119958,33 @@ index 6f187c8..34b367f 100644
err_alloc:
return -ENOMEM;
}
+@@ -348,7 +346,7 @@ found:
+ fq->ecn |= ecn;
+ if (payload_len > fq->q.max_size)
+ fq->q.max_size = payload_len;
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+@@ -430,7 +428,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
+ clone->ip_summed = head->ip_summed;
+
+ NFCT_FRAG6_CB(clone)->orig = NULL;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+@@ -454,7 +452,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ }
+- sub_frag_mem_limit(&fq->q, head->truesize);
++ sub_frag_mem_limit(fq->q.net, head->truesize);
+
+ head->ignore_df = 1;
+ head->next = NULL;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 263a516..692f738 100644
--- a/net/ipv6/ping.c
@@ -119570,9 +120134,45 @@ index 8072bd4..1629245 100644
return 0;
default:
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
-index 8ffa2c8..5968612 100644
+index 8ffa2c8..0db5dad 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
+@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
+
+ IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+
+- if (fq->q.flags & INET_FRAG_EVICTED)
++ if (inet_frag_evicting(&fq->q))
+ goto out_rcu_unlock;
+
+ IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+@@ -330,7 +330,7 @@ found:
+ fq->q.stamp = skb->tstamp;
+ fq->q.meat += skb->len;
+ fq->ecn |= ecn;
+- add_frag_mem_limit(&fq->q, skb->truesize);
++ add_frag_mem_limit(fq->q.net, skb->truesize);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+- add_frag_mem_limit(&fq->q, clone->truesize);
++ add_frag_mem_limit(fq->q.net, clone->truesize);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ }
+ fp = next;
+ }
+- sub_frag_mem_limit(&fq->q, sum_truesize);
++ sub_frag_mem_limit(fq->q.net, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)