
From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.2.38/, 2.6.32/, 3.8.0/, 3.7.9/, 3.2.39/
Date: Sun, 24 Feb 2013 11:55:53
Message-Id: 1361706887.6760f54871a351ed33e572e01c123f1df45c3ff4.blueness@gentoo
1 commit: 6760f54871a351ed33e572e01c123f1df45c3ff4
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Feb 24 11:54:47 2013 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Feb 24 11:54:47 2013 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=6760f548
7
8 Grsec/PaX: 2.9.1-{2.6.32.60,3.2.39,3.8.0}-20130222
9
10 ---
11 ..._grsecurity-2.9.1-2.6.32.60-201302222044.patch} | 235 +-
12 {3.2.38 => 3.2.39}/0000_README | 6 +-
13 {3.2.38 => 3.2.39}/1021_linux-3.2.22.patch | 0
14 {3.2.38 => 3.2.39}/1022_linux-3.2.23.patch | 0
15 {3.2.38 => 3.2.39}/1023_linux-3.2.24.patch | 0
16 {3.2.38 => 3.2.39}/1024_linux-3.2.25.patch | 0
17 {3.2.38 => 3.2.39}/1025_linux-3.2.26.patch | 0
18 {3.2.38 => 3.2.39}/1026_linux-3.2.27.patch | 0
19 {3.2.38 => 3.2.39}/1027_linux-3.2.28.patch | 0
20 {3.2.38 => 3.2.39}/1028_linux-3.2.29.patch | 0
21 {3.2.38 => 3.2.39}/1029_linux-3.2.30.patch | 0
22 {3.2.38 => 3.2.39}/1030_linux-3.2.31.patch | 0
23 {3.2.38 => 3.2.39}/1031_linux-3.2.32.patch | 0
24 {3.2.38 => 3.2.39}/1032_linux-3.2.33.patch | 0
25 {3.2.38 => 3.2.39}/1033_linux-3.2.34.patch | 0
26 {3.2.38 => 3.2.39}/1034_linux-3.2.35.patch | 0
27 {3.2.38 => 3.2.39}/1035_linux-3.2.36.patch | 0
28 {3.2.38 => 3.2.39}/1036_linux-3.2.37.patch | 0
29 {3.2.38 => 3.2.39}/1037_linux-3.2.38.patch | 0
30 3.2.39/1039_linux-3.2.39.patch | 2660 +++++
31 ...4420_grsecurity-2.9.1-3.2.39-201302222046.patch | 1178 ++-
32 {3.2.38 => 3.2.39}/4425_grsec_remove_EI_PAX.patch | 0
33 .../4430_grsec-remove-localversion-grsec.patch | 0
34 {3.2.38 => 3.2.39}/4435_grsec-mute-warnings.patch | 0
35 .../4440_grsec-remove-protected-paths.patch | 0
36 .../4450_grsec-kconfig-default-gids.patch | 0
37 .../4465_selinux-avc_audit-log-curr_ip.patch | 0
38 {3.2.38 => 3.2.39}/4470_disable-compat_vdso.patch | 0
39 {3.7.9 => 3.8.0}/0000_README | 2 +-
40 .../4420_grsecurity-2.9.1-3.8.0-201302231124.patch |10150 +++++++++++---------
41 {3.7.9 => 3.8.0}/4425_grsec_remove_EI_PAX.patch | 0
42 .../4430_grsec-remove-localversion-grsec.patch | 0
43 {3.7.9 => 3.8.0}/4435_grsec-mute-warnings.patch | 0
44 .../4440_grsec-remove-protected-paths.patch | 0
45 .../4450_grsec-kconfig-default-gids.patch | 0
46 .../4465_selinux-avc_audit-log-curr_ip.patch | 0
47 {3.7.9 => 3.8.0}/4470_disable-compat_vdso.patch | 0
48 37 files changed, 9252 insertions(+), 4979 deletions(-)
49
50 diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302181144.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302222044.patch
51 similarity index 99%
52 rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302181144.patch
53 rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302222044.patch
54 index 88490c1..f5ba675 100644
55 --- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302181144.patch
56 +++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201302222044.patch
57 @@ -265,7 +265,7 @@ index 334258c..1e8f4ff 100644
58 M: Liam Girdwood <lrg@××××××××××××.uk>
59 M: Mark Brown <broonie@×××××××××××××××××××××××.com>
60 diff --git a/Makefile b/Makefile
61 -index b0e245e..e5894da 100644
62 +index b0e245e..1c8b6ed 100644
63 --- a/Makefile
64 +++ b/Makefile
65 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
66 @@ -300,12 +300,16 @@ index b0e245e..e5894da 100644
67 include/linux/version.h headers_% \
68 kernelrelease kernelversion
69
70 -@@ -526,6 +527,60 @@ else
71 +@@ -526,6 +527,64 @@ else
72 KBUILD_CFLAGS += -O2
73 endif
74
75 +ifndef DISABLE_PAX_PLUGINS
76 ++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
77 ++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
78 ++else
79 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
80 ++endif
81 +ifneq ($(PLUGINCC),)
82 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
83 +ifndef CONFIG_UML
84 @@ -361,7 +365,7 @@ index b0e245e..e5894da 100644
85 include $(srctree)/arch/$(SRCARCH)/Makefile
86
87 ifneq ($(CONFIG_FRAME_WARN),0)
88 -@@ -647,7 +702,7 @@ export mod_strip_cmd
89 +@@ -647,7 +706,7 @@ export mod_strip_cmd
90
91
92 ifeq ($(KBUILD_EXTMOD),)
93 @@ -370,7 +374,7 @@ index b0e245e..e5894da 100644
94
95 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
96 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
97 -@@ -868,6 +923,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
98 +@@ -868,6 +927,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
99
100 # The actual objects are generated when descending,
101 # make sure no implicit rule kicks in
102 @@ -379,7 +383,7 @@ index b0e245e..e5894da 100644
103 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
104
105 # Handle descending into subdirectories listed in $(vmlinux-dirs)
106 -@@ -877,7 +934,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
107 +@@ -877,7 +938,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
108 # Error messages still appears in the original language
109
110 PHONY += $(vmlinux-dirs)
111 @@ -388,7 +392,7 @@ index b0e245e..e5894da 100644
112 $(Q)$(MAKE) $(build)=$@
113
114 # Build the kernel release string
115 -@@ -986,6 +1043,7 @@ prepare0: archprepare FORCE
116 +@@ -986,6 +1047,7 @@ prepare0: archprepare FORCE
117 $(Q)$(MAKE) $(build)=. missing-syscalls
118
119 # All the preparing..
120 @@ -396,7 +400,7 @@ index b0e245e..e5894da 100644
121 prepare: prepare0
122
123 # The asm symlink changes when $(ARCH) changes.
124 -@@ -1127,6 +1185,8 @@ all: modules
125 +@@ -1127,6 +1189,8 @@ all: modules
126 # using awk while concatenating to the final file.
127
128 PHONY += modules
129 @@ -405,7 +409,7 @@ index b0e245e..e5894da 100644
130 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
131 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
132 @$(kecho) ' Building modules, stage 2.';
133 -@@ -1136,7 +1196,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
134 +@@ -1136,7 +1200,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
135
136 # Target to prepare building external modules
137 PHONY += modules_prepare
138 @@ -414,7 +418,7 @@ index b0e245e..e5894da 100644
139
140 # Target to install modules
141 PHONY += modules_install
142 -@@ -1199,9 +1259,9 @@ CLEAN_FILES += vmlinux System.map \
143 +@@ -1199,9 +1263,9 @@ CLEAN_FILES += vmlinux System.map \
144 MRPROPER_DIRS += include/config include2 usr/include include/generated
145 MRPROPER_FILES += .config .config.old include/asm .version .old_version \
146 include/linux/autoconf.h include/linux/version.h \
147 @@ -426,7 +430,7 @@ index b0e245e..e5894da 100644
148
149 # clean - Delete most, but leave enough to build external modules
150 #
151 -@@ -1245,7 +1305,7 @@ distclean: mrproper
152 +@@ -1245,7 +1309,7 @@ distclean: mrproper
153 @find $(srctree) $(RCS_FIND_IGNORE) \
154 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
155 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
156 @@ -435,7 +439,7 @@ index b0e245e..e5894da 100644
157 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
158 -type f -print | xargs rm -f
159
160 -@@ -1292,6 +1352,7 @@ help:
161 +@@ -1292,6 +1356,7 @@ help:
162 @echo ' modules_prepare - Set up for building external modules'
163 @echo ' tags/TAGS - Generate tags file for editors'
164 @echo ' cscope - Generate cscope index'
165 @@ -443,7 +447,7 @@ index b0e245e..e5894da 100644
166 @echo ' kernelrelease - Output the release version string'
167 @echo ' kernelversion - Output the version stored in Makefile'
168 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
169 -@@ -1393,6 +1454,8 @@ PHONY += $(module-dirs) modules
170 +@@ -1393,6 +1458,8 @@ PHONY += $(module-dirs) modules
171 $(module-dirs): crmodverdir $(objtree)/Module.symvers
172 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
173
174 @@ -452,7 +456,7 @@ index b0e245e..e5894da 100644
175 modules: $(module-dirs)
176 @$(kecho) ' Building modules, stage 2.';
177 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
178 -@@ -1448,7 +1511,7 @@ endif # KBUILD_EXTMOD
179 +@@ -1448,7 +1515,7 @@ endif # KBUILD_EXTMOD
180 quiet_cmd_tags = GEN $@
181 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
182
183 @@ -461,7 +465,7 @@ index b0e245e..e5894da 100644
184 $(call cmd,tags)
185
186 # Scripts to check various things for consistency
187 -@@ -1513,17 +1576,21 @@ else
188 +@@ -1513,17 +1580,21 @@ else
189 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
190 endif
191
192 @@ -487,7 +491,7 @@ index b0e245e..e5894da 100644
193 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
194 %.symtypes: %.c prepare scripts FORCE
195 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
196 -@@ -1533,11 +1600,15 @@ endif
197 +@@ -1533,11 +1604,15 @@ endif
198 $(cmd_crmodverdir)
199 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
200 $(build)=$(build-dir)
201 @@ -13326,7 +13330,7 @@ index 33927d2..ccde329 100644
202
203 /*
204 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
205 -index af6fd36..60da657 100644
206 +index af6fd36..a7c3e4d 100644
207 --- a/arch/x86/include/asm/pgtable.h
208 +++ b/arch/x86/include/asm/pgtable.h
209 @@ -39,6 +39,7 @@ extern struct list_head pgd_list;
210 @@ -13389,7 +13393,7 @@ index af6fd36..60da657 100644
211 static inline int pte_dirty(pte_t pte)
212 {
213 return pte_flags(pte) & _PAGE_DIRTY;
214 -@@ -130,6 +170,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
215 +@@ -130,12 +170,16 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
216 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
217 }
218
219 @@ -13401,7 +13405,14 @@ index af6fd36..60da657 100644
220 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
221
222 static inline int pmd_large(pmd_t pte)
223 -@@ -167,9 +212,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
224 + {
225 +- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
226 +- (_PAGE_PSE | _PAGE_PRESENT);
227 ++ return pmd_flags(pte) & _PAGE_PSE;
228 + }
229 +
230 + static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
231 +@@ -167,9 +211,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
232 return pte_clear_flags(pte, _PAGE_RW);
233 }
234
235 @@ -13432,7 +13443,7 @@ index af6fd36..60da657 100644
236 }
237
238 static inline pte_t pte_mkdirty(pte_t pte)
239 -@@ -302,6 +367,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
240 +@@ -302,6 +366,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
241 #endif
242
243 #ifndef __ASSEMBLY__
244 @@ -13448,7 +13459,22 @@ index af6fd36..60da657 100644
245 #include <linux/mm_types.h>
246
247 static inline int pte_none(pte_t pte)
248 -@@ -472,7 +546,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
249 +@@ -327,7 +400,13 @@ static inline int pte_hidden(pte_t pte)
250 +
251 + static inline int pmd_present(pmd_t pmd)
252 + {
253 +- return pmd_flags(pmd) & _PAGE_PRESENT;
254 ++ /*
255 ++ * Checking for _PAGE_PSE is needed too because
256 ++ * split_huge_page will temporarily clear the present bit (but
257 ++ * the _PAGE_PSE flag will remain set at all times while the
258 ++ * _PAGE_PRESENT bit is clear).
259 ++ */
260 ++ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
261 + }
262 +
263 + static inline int pmd_none(pmd_t pmd)
264 +@@ -472,7 +551,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
265
266 static inline int pgd_bad(pgd_t pgd)
267 {
268 @@ -13457,7 +13483,7 @@ index af6fd36..60da657 100644
269 }
270
271 static inline int pgd_none(pgd_t pgd)
272 -@@ -495,7 +569,12 @@ static inline int pgd_none(pgd_t pgd)
273 +@@ -495,7 +574,12 @@ static inline int pgd_none(pgd_t pgd)
274 * pgd_offset() returns a (pgd_t *)
275 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
276 */
277 @@ -13471,7 +13497,7 @@ index af6fd36..60da657 100644
278 /*
279 * a shortcut which implies the use of the kernel's pgd, instead
280 * of a process's
281 -@@ -506,6 +585,20 @@ static inline int pgd_none(pgd_t pgd)
282 +@@ -506,6 +590,20 @@ static inline int pgd_none(pgd_t pgd)
283 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
284 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
285
286 @@ -13492,7 +13518,7 @@ index af6fd36..60da657 100644
287 #ifndef __ASSEMBLY__
288
289 extern int direct_gbpages;
290 -@@ -611,11 +704,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
291 +@@ -611,11 +709,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
292 * dst and src can be on the same page, but the range must not overlap,
293 * and must not cross a page boundary.
294 */
295 @@ -13898,6 +13924,25 @@ index fa04dea..5f823fc 100644
296 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
297
298 /* Get/set a process' ability to use the timestamp counter instruction */
299 +diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
300 +index 621f56d..f1094fd 100644
301 +--- a/arch/x86/include/asm/proto.h
302 ++++ b/arch/x86/include/asm/proto.h
303 +@@ -22,14 +22,4 @@ extern int reboot_force;
304 +
305 + long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
306 +
307 +-/*
308 +- * This looks more complex than it should be. But we need to
309 +- * get the type for the ~ right in round_down (it needs to be
310 +- * as wide as the result!), and we want to evaluate the macro
311 +- * arguments just once each.
312 +- */
313 +-#define __round_mask(x,y) ((__typeof__(x))((y)-1))
314 +-#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
315 +-#define round_down(x,y) ((x) & ~__round_mask(x,y))
316 +-
317 + #endif /* _ASM_X86_PROTO_H */
318 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
319 index 0f0d908..f2e3da2 100644
320 --- a/arch/x86/include/asm/ptrace.h
321 @@ -98186,10 +98231,27 @@ index 7922742..27306a2 100644
322 /* This macro allows us to keep printk typechecking */
323 static void __check_printsym_format(const char *fmt, ...)
324 diff --git a/include/linux/kernel.h b/include/linux/kernel.h
325 -index 3526cd4..99206e2 100644
326 +index 3526cd4..6835d45 100644
327 --- a/include/linux/kernel.h
328 +++ b/include/linux/kernel.h
329 -@@ -163,6 +163,11 @@ extern int _cond_resched(void);
330 +@@ -45,6 +45,16 @@ extern const char linux_proc_banner[];
331 +
332 + #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
333 +
334 ++/*
335 ++ * This looks more complex than it should be. But we need to
336 ++ * get the type for the ~ right in round_down (it needs to be
337 ++ * as wide as the result!), and we want to evaluate the macro
338 ++ * arguments just once each.
339 ++ */
340 ++#define __round_mask(x, y) ((__typeof__(x))((y)-1))
341 ++#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
342 ++#define round_down(x, y) ((x) & ~__round_mask(x, y))
343 ++
344 + #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
345 + #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
346 + #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
347 +@@ -163,6 +173,11 @@ extern int _cond_resched(void);
348 (__x < 0) ? -__x : __x; \
349 })
350
351 @@ -99897,7 +99959,7 @@ index 4e647bb..23b3911 100644
352 int size);
353 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
354 diff --git a/include/linux/slab.h b/include/linux/slab.h
355 -index 2da8372..740c52f 100644
356 +index 2da8372..aa58826 100644
357 --- a/include/linux/slab.h
358 +++ b/include/linux/slab.h
359 @@ -11,12 +11,20 @@
360 @@ -99947,26 +100009,15 @@ index 2da8372..740c52f 100644
361
362 /*
363 * Allocator specific definitions. These are mainly used to establish optimized
364 -@@ -217,8 +230,18 @@ size_t ksize(const void *);
365 +@@ -217,6 +230,7 @@ size_t ksize(const void *);
366 * for general use, and so are not documented here. For a full list of
367 * potential flags, always refer to linux/gfp.h.
368 */
369 +
370 -+extern void kcalloc_error(void)
371 -+#if defined(CONFIG_GCOV_KERNEL) && defined(CONFIG_PAX_SIZE_OVERFLOW)
372 -+__compiletime_warning("kcalloc called with swapped arguments?");
373 -+#else
374 -+__compiletime_error("kcalloc called with swapped arguments?");
375 -+#endif
376 -+
377 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
378 {
379 -+ if (__builtin_constant_p(n) && !__builtin_constant_p(size))
380 -+ kcalloc_error();
381 if (size != 0 && n > ULONG_MAX / size)
382 - return NULL;
383 - return __kmalloc(n * size, flags | __GFP_ZERO);
384 -@@ -263,7 +286,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
385 +@@ -263,7 +277,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
386 * request comes from.
387 */
388 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
389 @@ -99975,7 +100026,7 @@ index 2da8372..740c52f 100644
390 #define kmalloc_track_caller(size, flags) \
391 __kmalloc_track_caller(size, flags, _RET_IP_)
392 #else
393 -@@ -281,7 +304,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
394 +@@ -281,7 +295,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
395 * allocation request comes from.
396 */
397 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
398 @@ -111700,7 +111751,7 @@ index 406e8d4..53970d3 100644
399 * - not supported under NOMMU conditions
400 */
401 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
402 -index 3ecab7e..594a471 100644
403 +index 3ecab7e..be580fc 100644
404 --- a/mm/page_alloc.c
405 +++ b/mm/page_alloc.c
406 @@ -289,7 +289,7 @@ out:
407 @@ -111768,15 +111819,60 @@ index 3ecab7e..594a471 100644
408 for_each_populated_zone(zone) {
409 show_node(zone);
410 printk("%s per-cpu:\n", zone->name);
411 -@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
412 +@@ -3715,10 +3734,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
413 + * round what is now in bits to nearest long in bits, then return it in
414 + * bytes.
415 + */
416 +-static unsigned long __init usemap_size(unsigned long zonesize)
417 ++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
418 + {
419 + unsigned long usemapsize;
420 +
421 ++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
422 + usemapsize = roundup(zonesize, pageblock_nr_pages);
423 + usemapsize = usemapsize >> pageblock_order;
424 + usemapsize *= NR_PAGEBLOCK_BITS;
425 +@@ -3728,16 +3748,18 @@ static unsigned long __init usemap_size(unsigned long zonesize)
426 + }
427 +
428 + static void __init setup_usemap(struct pglist_data *pgdat,
429 +- struct zone *zone, unsigned long zonesize)
430 ++ struct zone *zone,
431 ++ unsigned long zone_start_pfn,
432 ++ unsigned long zonesize)
433 + {
434 +- unsigned long usemapsize = usemap_size(zonesize);
435 ++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
436 + zone->pageblock_flags = NULL;
437 + if (usemapsize)
438 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
439 }
440 #else
441 -static void inline setup_usemap(struct pglist_data *pgdat,
442 -+static inline void setup_usemap(struct pglist_data *pgdat,
443 - struct zone *zone, unsigned long zonesize) {}
444 +- struct zone *zone, unsigned long zonesize) {}
445 ++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
446 ++ unsigned long zone_start_pfn, unsigned long zonesize) {}
447 #endif /* CONFIG_SPARSEMEM */
448
449 + #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
450 +@@ -3869,7 +3891,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
451 + continue;
452 +
453 + set_pageblock_order(pageblock_default_order());
454 +- setup_usemap(pgdat, zone, size);
455 ++ setup_usemap(pgdat, zone, zone_start_pfn, size);
456 + ret = init_currently_empty_zone(zone, zone_start_pfn,
457 + size, MEMMAP_EARLY);
458 + BUG_ON(ret);
459 +@@ -4945,7 +4967,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
460 + pfn &= (PAGES_PER_SECTION-1);
461 + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
462 + #else
463 +- pfn = pfn - zone->zone_start_pfn;
464 ++ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
465 + return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
466 + #endif /* CONFIG_SPARSEMEM */
467 + }
468 diff --git a/mm/percpu.c b/mm/percpu.c
469 index c90614a..5f7b7b8 100644
470 --- a/mm/percpu.c
471 @@ -113913,6 +114009,47 @@ index 30e74ee..bfc6ee0 100644
472 kfree_skb(skb);
473 return NET_RX_DROP;
474 }
475 +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
476 +index 4e80f33..a815e4e 100644
477 +--- a/net/ipv4/arp.c
478 ++++ b/net/ipv4/arp.c
479 +@@ -909,23 +909,25 @@ static void parp_redo(struct sk_buff *skb)
480 + static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
481 + struct packet_type *pt, struct net_device *orig_dev)
482 + {
483 +- struct arphdr *arp;
484 ++ const struct arphdr *arp;
485 +
486 +- /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
487 +- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
488 +- goto freeskb;
489 +-
490 +- arp = arp_hdr(skb);
491 +- if (arp->ar_hln != dev->addr_len ||
492 +- dev->flags & IFF_NOARP ||
493 ++ if (dev->flags & IFF_NOARP ||
494 + skb->pkt_type == PACKET_OTHERHOST ||
495 +- skb->pkt_type == PACKET_LOOPBACK ||
496 +- arp->ar_pln != 4)
497 ++ skb->pkt_type == PACKET_LOOPBACK)
498 + goto freeskb;
499 +
500 +- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
501 ++ skb = skb_share_check(skb, GFP_ATOMIC);
502 ++ if (!skb)
503 + goto out_of_mem;
504 +
505 ++ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
506 ++ if (!pskb_may_pull(skb, arp_hdr_len(dev)))
507 ++ goto freeskb;
508 ++
509 ++ arp = arp_hdr(skb);
510 ++ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
511 ++ goto freeskb;
512 ++
513 + memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
514 +
515 + return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
516 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
517 index dba56d2..acee5d6 100644
518 --- a/net/ipv4/inet_diag.c
519 @@ -114420,7 +114557,7 @@ index 1eba160b..c35d91f 100644
520 }
521 }
522 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
523 -index db755c4..07d671b 100644
524 +index db755c4..4cf3b9d 100644
525 --- a/net/ipv4/tcp_input.c
526 +++ b/net/ipv4/tcp_input.c
527 @@ -82,6 +82,9 @@ int sysctl_tcp_dsack __read_mostly = 1;
528 @@ -114569,7 +114706,7 @@ index db755c4..07d671b 100644
529 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
530 goto csum_error;
531
532 -+ if (!th->ack)
533 ++ if (!th->ack && !th->rst)
534 + goto discard;
535 +
536 /*
537 @@ -114635,7 +114772,7 @@ index db755c4..07d671b 100644
538 - res = tcp_validate_incoming(sk, skb, th, 0);
539 - if (res <= 0)
540 - return -res;
541 -+ if (!th->ack)
542 ++ if (!th->ack && !th->rst)
543 + goto discard;
544 +
545 + if (!tcp_validate_incoming(sk, skb, th, 0))
546 @@ -118026,12 +118163,12 @@ index 6bf21f8..c0546b3 100644
547 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
548 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
549 new file mode 100644
550 -index 0000000..008ac1a
551 +index 0000000..5e0222d
552 --- /dev/null
553 +++ b/scripts/gcc-plugin.sh
554 @@ -0,0 +1,17 @@
555 +#!/bin/bash
556 -+plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
557 ++plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
558 +#include "gcc-plugin.h"
559 +#include "tree.h"
560 +#include "tm.h"
561
562 diff --git a/3.2.38/0000_README b/3.2.39/0000_README
563 similarity index 95%
564 rename from 3.2.38/0000_README
565 rename to 3.2.39/0000_README
566 index 6ecee87..4b7b629 100644
567 --- a/3.2.38/0000_README
568 +++ b/3.2.39/0000_README
569 @@ -70,7 +70,11 @@ Patch: 1037_linux-3.2.38.patch
570 From: http://www.kernel.org
571 Desc: Linux 3.2.38
572
573 -Patch: 4420_grsecurity-2.9.1-3.2.38-201302171808.patch
574 +Patch: 1039_linux-3.2.39.patch
575 +From: http://www.kernel.org
576 +Desc: Linux 3.2.39
577 +
578 +Patch: 4420_grsecurity-2.9.1-3.2.39-201302222046.patch
579 From: http://www.grsecurity.net
580 Desc: hardened-sources base patch from upstream grsecurity
581
582
583 diff --git a/3.2.38/1021_linux-3.2.22.patch b/3.2.39/1021_linux-3.2.22.patch
584 similarity index 100%
585 rename from 3.2.38/1021_linux-3.2.22.patch
586 rename to 3.2.39/1021_linux-3.2.22.patch
587
588 diff --git a/3.2.38/1022_linux-3.2.23.patch b/3.2.39/1022_linux-3.2.23.patch
589 similarity index 100%
590 rename from 3.2.38/1022_linux-3.2.23.patch
591 rename to 3.2.39/1022_linux-3.2.23.patch
592
593 diff --git a/3.2.38/1023_linux-3.2.24.patch b/3.2.39/1023_linux-3.2.24.patch
594 similarity index 100%
595 rename from 3.2.38/1023_linux-3.2.24.patch
596 rename to 3.2.39/1023_linux-3.2.24.patch
597
598 diff --git a/3.2.38/1024_linux-3.2.25.patch b/3.2.39/1024_linux-3.2.25.patch
599 similarity index 100%
600 rename from 3.2.38/1024_linux-3.2.25.patch
601 rename to 3.2.39/1024_linux-3.2.25.patch
602
603 diff --git a/3.2.38/1025_linux-3.2.26.patch b/3.2.39/1025_linux-3.2.26.patch
604 similarity index 100%
605 rename from 3.2.38/1025_linux-3.2.26.patch
606 rename to 3.2.39/1025_linux-3.2.26.patch
607
608 diff --git a/3.2.38/1026_linux-3.2.27.patch b/3.2.39/1026_linux-3.2.27.patch
609 similarity index 100%
610 rename from 3.2.38/1026_linux-3.2.27.patch
611 rename to 3.2.39/1026_linux-3.2.27.patch
612
613 diff --git a/3.2.38/1027_linux-3.2.28.patch b/3.2.39/1027_linux-3.2.28.patch
614 similarity index 100%
615 rename from 3.2.38/1027_linux-3.2.28.patch
616 rename to 3.2.39/1027_linux-3.2.28.patch
617
618 diff --git a/3.2.38/1028_linux-3.2.29.patch b/3.2.39/1028_linux-3.2.29.patch
619 similarity index 100%
620 rename from 3.2.38/1028_linux-3.2.29.patch
621 rename to 3.2.39/1028_linux-3.2.29.patch
622
623 diff --git a/3.2.38/1029_linux-3.2.30.patch b/3.2.39/1029_linux-3.2.30.patch
624 similarity index 100%
625 rename from 3.2.38/1029_linux-3.2.30.patch
626 rename to 3.2.39/1029_linux-3.2.30.patch
627
628 diff --git a/3.2.38/1030_linux-3.2.31.patch b/3.2.39/1030_linux-3.2.31.patch
629 similarity index 100%
630 rename from 3.2.38/1030_linux-3.2.31.patch
631 rename to 3.2.39/1030_linux-3.2.31.patch
632
633 diff --git a/3.2.38/1031_linux-3.2.32.patch b/3.2.39/1031_linux-3.2.32.patch
634 similarity index 100%
635 rename from 3.2.38/1031_linux-3.2.32.patch
636 rename to 3.2.39/1031_linux-3.2.32.patch
637
638 diff --git a/3.2.38/1032_linux-3.2.33.patch b/3.2.39/1032_linux-3.2.33.patch
639 similarity index 100%
640 rename from 3.2.38/1032_linux-3.2.33.patch
641 rename to 3.2.39/1032_linux-3.2.33.patch
642
643 diff --git a/3.2.38/1033_linux-3.2.34.patch b/3.2.39/1033_linux-3.2.34.patch
644 similarity index 100%
645 rename from 3.2.38/1033_linux-3.2.34.patch
646 rename to 3.2.39/1033_linux-3.2.34.patch
647
648 diff --git a/3.2.38/1034_linux-3.2.35.patch b/3.2.39/1034_linux-3.2.35.patch
649 similarity index 100%
650 rename from 3.2.38/1034_linux-3.2.35.patch
651 rename to 3.2.39/1034_linux-3.2.35.patch
652
653 diff --git a/3.2.38/1035_linux-3.2.36.patch b/3.2.39/1035_linux-3.2.36.patch
654 similarity index 100%
655 rename from 3.2.38/1035_linux-3.2.36.patch
656 rename to 3.2.39/1035_linux-3.2.36.patch
657
658 diff --git a/3.2.38/1036_linux-3.2.37.patch b/3.2.39/1036_linux-3.2.37.patch
659 similarity index 100%
660 rename from 3.2.38/1036_linux-3.2.37.patch
661 rename to 3.2.39/1036_linux-3.2.37.patch
662
663 diff --git a/3.2.38/1037_linux-3.2.38.patch b/3.2.39/1037_linux-3.2.38.patch
664 similarity index 100%
665 rename from 3.2.38/1037_linux-3.2.38.patch
666 rename to 3.2.39/1037_linux-3.2.38.patch
667
668 diff --git a/3.2.39/1039_linux-3.2.39.patch b/3.2.39/1039_linux-3.2.39.patch
669 new file mode 100644
670 index 0000000..5639e92
671 --- /dev/null
672 +++ b/3.2.39/1039_linux-3.2.39.patch
673 @@ -0,0 +1,2660 @@
674 +diff --git a/MAINTAINERS b/MAINTAINERS
675 +index 82d7fa6..83f156e 100644
676 +--- a/MAINTAINERS
677 ++++ b/MAINTAINERS
678 +@@ -2584,7 +2584,7 @@ S: Maintained
679 + F: drivers/net/ethernet/i825xx/eexpress.*
680 +
681 + ETHERNET BRIDGE
682 +-M: Stephen Hemminger <shemminger@××××××.com>
683 ++M: Stephen Hemminger <stephen@××××××××××××××.org>
684 + L: bridge@××××××××××××××××××××××.org
685 + L: netdev@×××××××××××.org
686 + W: http://www.linuxfoundation.org/en/Net:Bridge
687 +@@ -4475,7 +4475,7 @@ S: Supported
688 + F: drivers/infiniband/hw/nes/
689 +
690 + NETEM NETWORK EMULATOR
691 +-M: Stephen Hemminger <shemminger@××××××.com>
692 ++M: Stephen Hemminger <stephen@××××××××××××××.org>
693 + L: netem@××××××××××××××××××××××.org
694 + S: Maintained
695 + F: net/sched/sch_netem.c
696 +@@ -5993,7 +5993,7 @@ S: Maintained
697 + F: drivers/usb/misc/sisusbvga/
698 +
699 + SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
700 +-M: Stephen Hemminger <shemminger@××××××.com>
701 ++M: Stephen Hemminger <stephen@××××××××××××××.org>
702 + L: netdev@×××××××××××.org
703 + S: Maintained
704 + F: drivers/net/ethernet/marvell/sk*
705 +diff --git a/Makefile b/Makefile
706 +index c8c9d02..0fceb8b 100644
707 +--- a/Makefile
708 ++++ b/Makefile
709 +@@ -1,6 +1,6 @@
710 + VERSION = 3
711 + PATCHLEVEL = 2
712 +-SUBLEVEL = 38
713 ++SUBLEVEL = 39
714 + EXTRAVERSION =
715 + NAME = Saber-toothed Squirrel
716 +
717 +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
718 +index a6253ec..95b4eb3 100644
719 +--- a/arch/x86/ia32/ia32entry.S
720 ++++ b/arch/x86/ia32/ia32entry.S
721 +@@ -208,7 +208,7 @@ sysexit_from_sys_call:
722 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
723 + jnz ia32_ret_from_sys_call
724 + TRACE_IRQS_ON
725 +- sti
726 ++ ENABLE_INTERRUPTS(CLBR_NONE)
727 + movl %eax,%esi /* second arg, syscall return value */
728 + cmpl $0,%eax /* is it < 0? */
729 + setl %al /* 1 if so, 0 if not */
730 +@@ -218,7 +218,7 @@ sysexit_from_sys_call:
731 + GET_THREAD_INFO(%r10)
732 + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
733 + movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
734 +- cli
735 ++ DISABLE_INTERRUPTS(CLBR_NONE)
736 + TRACE_IRQS_OFF
737 + testl %edi,TI_flags(%r10)
738 + jz \exit
739 +diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
740 +index c346d11..d4f278e 100644
741 +--- a/arch/x86/kernel/step.c
742 ++++ b/arch/x86/kernel/step.c
743 +@@ -157,6 +157,34 @@ static int enable_single_step(struct task_struct *child)
744 + return 1;
745 + }
746 +
747 ++static void set_task_blockstep(struct task_struct *task, bool on)
748 ++{
749 ++ unsigned long debugctl;
750 ++
751 ++ /*
752 ++ * Ensure irq/preemption can't change debugctl in between.
753 ++ * Note also that both TIF_BLOCKSTEP and debugctl should
754 ++ * be changed atomically wrt preemption.
755 ++ *
756 ++ * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
757 ++ * task is current or it can't be running, otherwise we can race
758 ++ * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
759 ++ * PTRACE_KILL is not safe.
760 ++ */
761 ++ local_irq_disable();
762 ++ debugctl = get_debugctlmsr();
763 ++ if (on) {
764 ++ debugctl |= DEBUGCTLMSR_BTF;
765 ++ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
766 ++ } else {
767 ++ debugctl &= ~DEBUGCTLMSR_BTF;
768 ++ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
769 ++ }
770 ++ if (task == current)
771 ++ update_debugctlmsr(debugctl);
772 ++ local_irq_enable();
773 ++}
774 ++
775 + /*
776 + * Enable single or block step.
777 + */
778 +@@ -169,19 +197,10 @@ static void enable_step(struct task_struct *child, bool block)
779 + * So no one should try to use debugger block stepping in a program
780 + * that uses user-mode single stepping itself.
781 + */
782 +- if (enable_single_step(child) && block) {
783 +- unsigned long debugctl = get_debugctlmsr();
784 +-
785 +- debugctl |= DEBUGCTLMSR_BTF;
786 +- update_debugctlmsr(debugctl);
787 +- set_tsk_thread_flag(child, TIF_BLOCKSTEP);
788 +- } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
789 +- unsigned long debugctl = get_debugctlmsr();
790 +-
791 +- debugctl &= ~DEBUGCTLMSR_BTF;
792 +- update_debugctlmsr(debugctl);
793 +- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
794 +- }
795 ++ if (enable_single_step(child) && block)
796 ++ set_task_blockstep(child, true);
797 ++ else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
798 ++ set_task_blockstep(child, false);
799 + }
800 +
801 + void user_enable_single_step(struct task_struct *child)
802 +@@ -199,13 +218,8 @@ void user_disable_single_step(struct task_struct *child)
803 + /*
804 + * Make sure block stepping (BTF) is disabled.
805 + */
806 +- if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
807 +- unsigned long debugctl = get_debugctlmsr();
808 +-
809 +- debugctl &= ~DEBUGCTLMSR_BTF;
810 +- update_debugctlmsr(debugctl);
811 +- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
812 +- }
813 ++ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
814 ++ set_task_blockstep(child, false);
815 +
816 + /* Always clear TIF_SINGLESTEP... */
817 + clear_tsk_thread_flag(child, TIF_SINGLESTEP);
818 +diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
819 +index b040b0e..7328f71 100644
820 +--- a/arch/x86/xen/xen-asm_32.S
821 ++++ b/arch/x86/xen/xen-asm_32.S
822 +@@ -88,11 +88,11 @@ ENTRY(xen_iret)
823 + */
824 + #ifdef CONFIG_SMP
825 + GET_THREAD_INFO(%eax)
826 +- movl TI_cpu(%eax), %eax
827 +- movl __per_cpu_offset(,%eax,4), %eax
828 +- mov xen_vcpu(%eax), %eax
829 ++ movl %ss:TI_cpu(%eax), %eax
830 ++ movl %ss:__per_cpu_offset(,%eax,4), %eax
831 ++ mov %ss:xen_vcpu(%eax), %eax
832 + #else
833 +- movl xen_vcpu, %eax
834 ++ movl %ss:xen_vcpu, %eax
835 + #endif
836 +
837 + /* check IF state we're restoring */
838 +@@ -105,11 +105,11 @@ ENTRY(xen_iret)
839 + * resuming the code, so we don't have to be worried about
840 + * being preempted to another CPU.
841 + */
842 +- setz XEN_vcpu_info_mask(%eax)
843 ++ setz %ss:XEN_vcpu_info_mask(%eax)
844 + xen_iret_start_crit:
845 +
846 + /* check for unmasked and pending */
847 +- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
848 ++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
849 +
850 + /*
851 + * If there's something pending, mask events again so we can
852 +@@ -117,7 +117,7 @@ xen_iret_start_crit:
853 + * touch XEN_vcpu_info_mask.
854 + */
855 + jne 1f
856 +- movb $1, XEN_vcpu_info_mask(%eax)
857 ++ movb $1, %ss:XEN_vcpu_info_mask(%eax)
858 +
859 + 1: popl %eax
860 +
861 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
862 +index b07edc4..62c1325 100644
863 +--- a/drivers/ata/ahci.c
864 ++++ b/drivers/ata/ahci.c
865 +@@ -52,7 +52,9 @@
866 + #define DRV_VERSION "3.0"
867 +
868 + enum {
869 +- AHCI_PCI_BAR = 5,
870 ++ AHCI_PCI_BAR_STA2X11 = 0,
871 ++ AHCI_PCI_BAR_ENMOTUS = 2,
872 ++ AHCI_PCI_BAR_STANDARD = 5,
873 + };
874 +
875 + enum board_ids {
876 +@@ -375,6 +377,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
877 + { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
878 + { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
879 +
880 ++ /* ST Microelectronics */
881 ++ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
882 ++
883 + /* Marvell */
884 + { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
885 + { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
886 +@@ -400,6 +405,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
887 + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
888 + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
889 +
890 ++ /* Enmotus */
891 ++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
892 ++
893 + /* Generic, PCI class code for AHCI */
894 + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
895 + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
896 +@@ -629,6 +637,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
897 + {
898 + int rc;
899 +
900 ++ /*
901 ++ * If the device fixup already set the dma_mask to some non-standard
902 ++ * value, don't extend it here. This happens on STA2X11, for example.
903 ++ */
904 ++ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
905 ++ return 0;
906 ++
907 + if (using_dac &&
908 + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
909 + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
910 +@@ -1033,6 +1048,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
911 + struct ahci_host_priv *hpriv;
912 + struct ata_host *host;
913 + int n_ports, i, rc;
914 ++ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
915 +
916 + VPRINTK("ENTER\n");
917 +
918 +@@ -1064,6 +1080,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
919 + dev_info(&pdev->dev,
920 + "PDC42819 can only drive SATA devices with this driver\n");
921 +
922 ++ /* Both Connext and Enmotus devices use non-standard BARs */
923 ++ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
924 ++ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
925 ++ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
926 ++ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
927 ++
928 + /* acquire resources */
929 + rc = pcim_enable_device(pdev);
930 + if (rc)
931 +@@ -1072,7 +1094,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
932 + /* AHCI controllers often implement SFF compatible interface.
933 + * Grab all PCI BARs just in case.
934 + */
935 +- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
936 ++ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
937 + if (rc == -EBUSY)
938 + pcim_pin_device(pdev);
939 + if (rc)
940 +@@ -1115,7 +1137,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
941 + if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
942 + pci_intx(pdev, 1);
943 +
944 +- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
945 ++ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
946 +
947 + /* save initial config */
948 + ahci_pci_save_initial_config(pdev, hpriv);
949 +@@ -1179,8 +1201,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
950 + for (i = 0; i < host->n_ports; i++) {
951 + struct ata_port *ap = host->ports[i];
952 +
953 +- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
954 +- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
955 ++ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
956 ++ ata_port_pbar_desc(ap, ahci_pci_bar,
957 + 0x100 + ap->port_no * 0x80, "port");
958 +
959 + /* set enclosure management message type */
960 +diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
961 +index 6a0955e..53ecac5 100644
962 +--- a/drivers/atm/iphase.h
963 ++++ b/drivers/atm/iphase.h
964 +@@ -636,82 +636,82 @@ struct rx_buf_desc {
965 + #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
966 + #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
967 +
968 +-typedef volatile u_int freg_t;
969 ++typedef volatile u_int ffreg_t;
970 + typedef u_int rreg_t;
971 +
972 + typedef struct _ffredn_t {
973 +- freg_t idlehead_high; /* Idle cell header (high) */
974 +- freg_t idlehead_low; /* Idle cell header (low) */
975 +- freg_t maxrate; /* Maximum rate */
976 +- freg_t stparms; /* Traffic Management Parameters */
977 +- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
978 +- freg_t rm_type; /* */
979 +- u_int filler5[0x17 - 0x06];
980 +- freg_t cmd_reg; /* Command register */
981 +- u_int filler18[0x20 - 0x18];
982 +- freg_t cbr_base; /* CBR Pointer Base */
983 +- freg_t vbr_base; /* VBR Pointer Base */
984 +- freg_t abr_base; /* ABR Pointer Base */
985 +- freg_t ubr_base; /* UBR Pointer Base */
986 +- u_int filler24;
987 +- freg_t vbrwq_base; /* VBR Wait Queue Base */
988 +- freg_t abrwq_base; /* ABR Wait Queue Base */
989 +- freg_t ubrwq_base; /* UBR Wait Queue Base */
990 +- freg_t vct_base; /* Main VC Table Base */
991 +- freg_t vcte_base; /* Extended Main VC Table Base */
992 +- u_int filler2a[0x2C - 0x2A];
993 +- freg_t cbr_tab_beg; /* CBR Table Begin */
994 +- freg_t cbr_tab_end; /* CBR Table End */
995 +- freg_t cbr_pointer; /* CBR Pointer */
996 +- u_int filler2f[0x30 - 0x2F];
997 +- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
998 +- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
999 +- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
1000 +- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
1001 +- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
1002 +- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
1003 +- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
1004 +- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
1005 +- u_int filler38[0x40 - 0x38];
1006 +- freg_t queue_base; /* Base address for PRQ and TCQ */
1007 +- freg_t desc_base; /* Base address of descriptor table */
1008 +- u_int filler42[0x45 - 0x42];
1009 +- freg_t mode_reg_0; /* Mode register 0 */
1010 +- freg_t mode_reg_1; /* Mode register 1 */
1011 +- freg_t intr_status_reg;/* Interrupt Status register */
1012 +- freg_t mask_reg; /* Mask Register */
1013 +- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
1014 +- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
1015 +- freg_t state_reg; /* Status register */
1016 +- u_int filler4c[0x58 - 0x4c];
1017 +- freg_t curr_desc_num; /* Contains the current descriptor num */
1018 +- freg_t next_desc; /* Next descriptor */
1019 +- freg_t next_vc; /* Next VC */
1020 +- u_int filler5b[0x5d - 0x5b];
1021 +- freg_t present_slot_cnt;/* Present slot count */
1022 +- u_int filler5e[0x6a - 0x5e];
1023 +- freg_t new_desc_num; /* New descriptor number */
1024 +- freg_t new_vc; /* New VC */
1025 +- freg_t sched_tbl_ptr; /* Schedule table pointer */
1026 +- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
1027 +- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
1028 +- freg_t abrwq_wptr; /* ABR wait queue write pointer */
1029 +- freg_t abrwq_rptr; /* ABR wait queue read pointer */
1030 +- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
1031 +- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
1032 +- freg_t cbr_vc; /* CBR VC */
1033 +- freg_t vbr_sb_vc; /* VBR SB VC */
1034 +- freg_t abr_sb_vc; /* ABR SB VC */
1035 +- freg_t ubr_sb_vc; /* UBR SB VC */
1036 +- freg_t vbr_next_link; /* VBR next link */
1037 +- freg_t abr_next_link; /* ABR next link */
1038 +- freg_t ubr_next_link; /* UBR next link */
1039 +- u_int filler7a[0x7c-0x7a];
1040 +- freg_t out_rate_head; /* Out of rate head */
1041 +- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
1042 +- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
1043 +- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
1044 +- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
1045 ++ ffreg_t idlehead_high; /* Idle cell header (high) */
1046 ++ ffreg_t idlehead_low; /* Idle cell header (low) */
1047 ++ ffreg_t maxrate; /* Maximum rate */
1048 ++ ffreg_t stparms; /* Traffic Management Parameters */
1049 ++ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
1050 ++ ffreg_t rm_type; /* */
1051 ++ u_int filler5[0x17 - 0x06];
1052 ++ ffreg_t cmd_reg; /* Command register */
1053 ++ u_int filler18[0x20 - 0x18];
1054 ++ ffreg_t cbr_base; /* CBR Pointer Base */
1055 ++ ffreg_t vbr_base; /* VBR Pointer Base */
1056 ++ ffreg_t abr_base; /* ABR Pointer Base */
1057 ++ ffreg_t ubr_base; /* UBR Pointer Base */
1058 ++ u_int filler24;
1059 ++ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
1060 ++ ffreg_t abrwq_base; /* ABR Wait Queue Base */
1061 ++ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
1062 ++ ffreg_t vct_base; /* Main VC Table Base */
1063 ++ ffreg_t vcte_base; /* Extended Main VC Table Base */
1064 ++ u_int filler2a[0x2C - 0x2A];
1065 ++ ffreg_t cbr_tab_beg; /* CBR Table Begin */
1066 ++ ffreg_t cbr_tab_end; /* CBR Table End */
1067 ++ ffreg_t cbr_pointer; /* CBR Pointer */
1068 ++ u_int filler2f[0x30 - 0x2F];
1069 ++ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
1070 ++ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
1071 ++ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
1072 ++ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
1073 ++ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
1074 ++ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
1075 ++ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
1076 ++ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
1077 ++ u_int filler38[0x40 - 0x38];
1078 ++ ffreg_t queue_base; /* Base address for PRQ and TCQ */
1079 ++ ffreg_t desc_base; /* Base address of descriptor table */
1080 ++ u_int filler42[0x45 - 0x42];
1081 ++ ffreg_t mode_reg_0; /* Mode register 0 */
1082 ++ ffreg_t mode_reg_1; /* Mode register 1 */
1083 ++ ffreg_t intr_status_reg;/* Interrupt Status register */
1084 ++ ffreg_t mask_reg; /* Mask Register */
1085 ++ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
1086 ++ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
1087 ++ ffreg_t state_reg; /* Status register */
1088 ++ u_int filler4c[0x58 - 0x4c];
1089 ++ ffreg_t curr_desc_num; /* Contains the current descriptor num */
1090 ++ ffreg_t next_desc; /* Next descriptor */
1091 ++ ffreg_t next_vc; /* Next VC */
1092 ++ u_int filler5b[0x5d - 0x5b];
1093 ++ ffreg_t present_slot_cnt;/* Present slot count */
1094 ++ u_int filler5e[0x6a - 0x5e];
1095 ++ ffreg_t new_desc_num; /* New descriptor number */
1096 ++ ffreg_t new_vc; /* New VC */
1097 ++ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
1098 ++ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
1099 ++ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
1100 ++ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
1101 ++ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
1102 ++ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
1103 ++ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
1104 ++ ffreg_t cbr_vc; /* CBR VC */
1105 ++ ffreg_t vbr_sb_vc; /* VBR SB VC */
1106 ++ ffreg_t abr_sb_vc; /* ABR SB VC */
1107 ++ ffreg_t ubr_sb_vc; /* UBR SB VC */
1108 ++ ffreg_t vbr_next_link; /* VBR next link */
1109 ++ ffreg_t abr_next_link; /* ABR next link */
1110 ++ ffreg_t ubr_next_link; /* UBR next link */
1111 ++ u_int filler7a[0x7c-0x7a];
1112 ++ ffreg_t out_rate_head; /* Out of rate head */
1113 ++ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
1114 ++ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
1115 ++ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
1116 ++ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
1117 + } ffredn_t;
1118 +
1119 + typedef struct _rfredn_t {
1120 +diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1121 +index 8e3c46d..7795d1e 100644
1122 +--- a/drivers/char/virtio_console.c
1123 ++++ b/drivers/char/virtio_console.c
1124 +@@ -1789,7 +1789,8 @@ static void virtcons_remove(struct virtio_device *vdev)
1125 + /* Disable interrupts for vqs */
1126 + vdev->config->reset(vdev);
1127 + /* Finish up work that's lined up */
1128 +- cancel_work_sync(&portdev->control_work);
1129 ++ if (use_multiport(portdev))
1130 ++ cancel_work_sync(&portdev->control_work);
1131 +
1132 + list_for_each_entry_safe(port, port2, &portdev->ports, list)
1133 + unplug_port(port);
1134 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1135 +index c05e825..7817429 100644
1136 +--- a/drivers/gpu/drm/i915/intel_display.c
1137 ++++ b/drivers/gpu/drm/i915/intel_display.c
1138 +@@ -7156,8 +7156,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
1139 + OUT_RING(pf | pipesrc);
1140 +
1141 + intel_mark_page_flip_active(intel_crtc);
1142 +-
1143 +- intel_mark_page_flip_active(intel_crtc);
1144 + ADVANCE_LP_RING();
1145 + return 0;
1146 +
1147 +@@ -7193,6 +7191,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
1148 + pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
1149 + pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
1150 + OUT_RING(pf | pipesrc);
1151 ++
1152 ++ intel_mark_page_flip_active(intel_crtc);
1153 + ADVANCE_LP_RING();
1154 + return 0;
1155 +
1156 +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
1157 +index 1b98338..ec36dd9 100644
1158 +--- a/drivers/gpu/drm/radeon/radeon_combios.c
1159 ++++ b/drivers/gpu/drm/radeon/radeon_combios.c
1160 +@@ -2455,6 +2455,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1161 + 1),
1162 + ATOM_DEVICE_CRT1_SUPPORT);
1163 + }
1164 ++ /* RV100 board with external TDMS bit mis-set.
1165 ++ * Actually uses internal TMDS, clear the bit.
1166 ++ */
1167 ++ if (dev->pdev->device == 0x5159 &&
1168 ++ dev->pdev->subsystem_vendor == 0x1014 &&
1169 ++ dev->pdev->subsystem_device == 0x029A) {
1170 ++ tmp &= ~(1 << 4);
1171 ++ }
1172 + if ((tmp >> 4) & 0x1) {
1173 + devices |= ATOM_DEVICE_DFP2_SUPPORT;
1174 + radeon_add_legacy_encoder(dev,
1175 +diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
1176 +index aec8e0c..63e7143 100644
1177 +--- a/drivers/gpu/drm/radeon/radeon_display.c
1178 ++++ b/drivers/gpu/drm/radeon/radeon_display.c
1179 +@@ -1110,8 +1110,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
1180 + }
1181 +
1182 + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
1183 +- if (radeon_fb == NULL)
1184 ++ if (radeon_fb == NULL) {
1185 ++ drm_gem_object_unreference_unlocked(obj);
1186 + return ERR_PTR(-ENOMEM);
1187 ++ }
1188 +
1189 + radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
1190 +
1191 +diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
1192 +index 49d5820..65be5e8 100644
1193 +--- a/drivers/gpu/drm/radeon/radeon_ring.c
1194 ++++ b/drivers/gpu/drm/radeon/radeon_ring.c
1195 +@@ -306,6 +306,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
1196 + {
1197 + int r;
1198 +
1199 ++ /* make sure we aren't trying to allocate more space than there is on the ring */
1200 ++ if (ndw > (rdev->cp.ring_size / 4))
1201 ++ return -ENOMEM;
1202 + /* Align requested size with padding so unlock_commit can
1203 + * pad safely */
1204 + ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
1205 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1206 +index 2d41336..c15c38e 100644
1207 +--- a/drivers/hid/hid-ids.h
1208 ++++ b/drivers/hid/hid-ids.h
1209 +@@ -278,6 +278,9 @@
1210 + #define USB_VENDOR_ID_EZKEY 0x0518
1211 + #define USB_DEVICE_ID_BTC_8193 0x0002
1212 +
1213 ++#define USB_VENDOR_ID_FORMOSA 0x147a
1214 ++#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
1215 ++
1216 + #define USB_VENDOR_ID_FREESCALE 0x15A2
1217 + #define USB_DEVICE_ID_FREESCALE_MX28 0x004F
1218 +
1219 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1220 +index aec3fa3..e26eddf 100644
1221 +--- a/drivers/hid/usbhid/hid-quirks.c
1222 ++++ b/drivers/hid/usbhid/hid-quirks.c
1223 +@@ -68,6 +68,7 @@ static const struct hid_blacklist {
1224 + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
1225 + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
1226 + { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
1227 ++ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
1228 + { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
1229 + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
1230 + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
1231 +diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
1232 +index fd17bb3..08c2329 100644
1233 +--- a/drivers/isdn/gigaset/capi.c
1234 ++++ b/drivers/isdn/gigaset/capi.c
1235 +@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
1236 + CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
1237 + CAPIMSG_CONTROL(data));
1238 + l -= 12;
1239 ++ if (l <= 0)
1240 ++ return;
1241 + dbgline = kmalloc(3*l, GFP_ATOMIC);
1242 + if (!dbgline)
1243 + return;
1244 +diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
1245 +index 4fe51fd..acaef66 100644
1246 +--- a/drivers/media/video/gspca/kinect.c
1247 ++++ b/drivers/media/video/gspca/kinect.c
1248 +@@ -390,6 +390,7 @@ static const struct sd_desc sd_desc = {
1249 + /* -- module initialisation -- */
1250 + static const struct usb_device_id device_table[] = {
1251 + {USB_DEVICE(0x045e, 0x02ae)},
1252 ++ {USB_DEVICE(0x045e, 0x02bf)},
1253 + {}
1254 + };
1255 +
1256 +diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
1257 +index 21a3d77..64647d4 100644
1258 +--- a/drivers/net/can/c_can/c_can.c
1259 ++++ b/drivers/net/can/c_can/c_can.c
1260 +@@ -446,8 +446,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
1261 +
1262 + priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
1263 + IFX_WRITE_LOW_16BIT(mask));
1264 ++
1265 ++ /* According to C_CAN documentation, the reserved bit
1266 ++ * in IFx_MASK2 register is fixed 1
1267 ++ */
1268 + priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
1269 +- IFX_WRITE_HIGH_16BIT(mask));
1270 ++ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
1271 +
1272 + priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
1273 + IFX_WRITE_LOW_16BIT(id));
1274 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1275 +index 01bc102..c86fa50 100644
1276 +--- a/drivers/net/ethernet/broadcom/tg3.c
1277 ++++ b/drivers/net/ethernet/broadcom/tg3.c
1278 +@@ -1135,14 +1135,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1279 + return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1280 + }
1281 +
1282 +-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1283 +- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1284 +- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1285 +- MII_TG3_AUXCTL_ACTL_TX_6DB)
1286 ++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1287 ++{
1288 ++ u32 val;
1289 ++ int err;
1290 +
1291 +-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1292 +- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1293 +- MII_TG3_AUXCTL_ACTL_TX_6DB);
1294 ++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1295 ++
1296 ++ if (err)
1297 ++ return err;
1298 ++ if (enable)
1299 ++
1300 ++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1301 ++ else
1302 ++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1303 ++
1304 ++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1305 ++ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1306 ++
1307 ++ return err;
1308 ++}
1309 +
1310 + static int tg3_bmcr_reset(struct tg3 *tp)
1311 + {
1312 +@@ -2087,7 +2099,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
1313 +
1314 + otp = tp->phy_otp;
1315 +
1316 +- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1317 ++ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
1318 + return;
1319 +
1320 + phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1321 +@@ -2112,7 +2124,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
1322 + ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1323 + tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1324 +
1325 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1326 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1327 + }
1328 +
1329 + static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1330 +@@ -2148,9 +2160,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1331 +
1332 + if (!tp->setlpicnt) {
1333 + if (current_link_up == 1 &&
1334 +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1335 ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
1336 + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1337 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1338 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1339 + }
1340 +
1341 + val = tr32(TG3_CPMU_EEE_MODE);
1342 +@@ -2166,11 +2178,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
1343 + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1344 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1345 + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1346 +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1347 ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
1348 + val = MII_TG3_DSP_TAP26_ALNOKO |
1349 + MII_TG3_DSP_TAP26_RMRXSTO;
1350 + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1351 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1352 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1353 + }
1354 +
1355 + val = tr32(TG3_CPMU_EEE_MODE);
1356 +@@ -2314,7 +2326,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1357 + tg3_writephy(tp, MII_CTRL1000,
1358 + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1359 +
1360 +- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1361 ++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
1362 + if (err)
1363 + return err;
1364 +
1365 +@@ -2335,7 +2347,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1366 + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1367 + tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1368 +
1369 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1370 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1371 +
1372 + tg3_writephy(tp, MII_CTRL1000, phy9_orig);
1373 +
1374 +@@ -2424,10 +2436,10 @@ static int tg3_phy_reset(struct tg3 *tp)
1375 +
1376 + out:
1377 + if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
1378 +- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1379 ++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
1380 + tg3_phydsp_write(tp, 0x201f, 0x2aaa);
1381 + tg3_phydsp_write(tp, 0x000a, 0x0323);
1382 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1383 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1384 + }
1385 +
1386 + if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
1387 +@@ -2436,14 +2448,14 @@ out:
1388 + }
1389 +
1390 + if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1391 +- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1392 ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
1393 + tg3_phydsp_write(tp, 0x000a, 0x310b);
1394 + tg3_phydsp_write(tp, 0x201f, 0x9506);
1395 + tg3_phydsp_write(tp, 0x401f, 0x14e2);
1396 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1397 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1398 + }
1399 + } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1400 +- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1401 ++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
1402 + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1403 + if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
1404 + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1405 +@@ -2452,7 +2464,7 @@ out:
1406 + } else
1407 + tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1408 +
1409 +- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1410 ++ tg3_phy_toggle_auxctl_smdsp(tp, false);
1411 + }
1412 + }
1413 +
1414 +@@ -3639,7 +3651,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1415 + tw32(TG3_CPMU_EEE_MODE,
1416 + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1417 +
1418 +- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1419 ++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
1420 + if (!err) {
1421 + u32 err2;
1422 +
1423 +@@ -3671,7 +3683,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1424 + MII_TG3_DSP_CH34TP2_HIBW01);
1425 + }
1426 +
1427 +- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1428 ++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
1429 + if (!err)
1430 + err = err2;
1431 + }
1432 +@@ -6353,6 +6365,9 @@ static void tg3_poll_controller(struct net_device *dev)
1433 + int i;
1434 + struct tg3 *tp = netdev_priv(dev);
1435 +
1436 ++ if (tg3_irq_sync(tp))
1437 ++ return;
1438 ++
1439 + for (i = 0; i < tp->irq_cnt; i++)
1440 + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1441 + }
1442 +@@ -15388,6 +15403,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
1443 + tp->pm_cap = pm_cap;
1444 + tp->rx_mode = TG3_DEF_RX_MODE;
1445 + tp->tx_mode = TG3_DEF_TX_MODE;
1446 ++ tp->irq_sync = 1;
1447 +
1448 + if (tg3_debug > 0)
1449 + tp->msg_enable = tg3_debug;
1450 +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1451 +index a8259cc..5674145 100644
1452 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1453 ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
1454 +@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
1455 + buffrag->length, PCI_DMA_TODEVICE);
1456 + buffrag->dma = 0ULL;
1457 + }
1458 +- for (j = 0; j < cmd_buf->frag_count; j++) {
1459 ++ for (j = 1; j < cmd_buf->frag_count; j++) {
1460 + buffrag++;
1461 + if (buffrag->dma) {
1462 + pci_unmap_page(adapter->pdev, buffrag->dma,
1463 +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1464 +index da5204d..4a238a4 100644
1465 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1466 ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1467 +@@ -1924,10 +1924,12 @@ unwind:
1468 + while (--i >= 0) {
1469 + nf = &pbuf->frag_array[i+1];
1470 + pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1471 ++ nf->dma = 0ULL;
1472 + }
1473 +
1474 + nf = &pbuf->frag_array[0];
1475 + pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1476 ++ nf->dma = 0ULL;
1477 +
1478 + out_err:
1479 + return -ENOMEM;
1480 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1481 +index b8db4cd..a6153f1 100644
1482 +--- a/drivers/net/ethernet/realtek/r8169.c
1483 ++++ b/drivers/net/ethernet/realtek/r8169.c
1484 +@@ -5829,13 +5829,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
1485 + dev->stats.rx_bytes += pkt_size;
1486 + dev->stats.rx_packets++;
1487 + }
1488 +-
1489 +- /* Work around for AMD plateform. */
1490 +- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
1491 +- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
1492 +- desc->opts2 = 0;
1493 +- cur_rx++;
1494 +- }
1495 + }
1496 +
1497 + count = cur_rx - tp->cur_rx;
1498 +diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
1499 +index 4ce9e5f..d0893e4 100644
1500 +--- a/drivers/net/loopback.c
1501 ++++ b/drivers/net/loopback.c
1502 +@@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
1503 +
1504 + skb_orphan(skb);
1505 +
1506 ++ /* Before queueing this packet to netif_rx(),
1507 ++ * make sure dst is refcounted.
1508 ++ */
1509 ++ skb_dst_force(skb);
1510 ++
1511 + skb->protocol = eth_type_trans(skb, dev);
1512 +
1513 + /* it's OK to use per_cpu_ptr() because BHs are off */
1514 +diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
1515 +index 8d3ab37..6618dd6 100644
1516 +--- a/drivers/net/wireless/mwifiex/scan.c
1517 ++++ b/drivers/net/wireless/mwifiex/scan.c
1518 +@@ -1594,7 +1594,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1519 + dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
1520 + scan_rsp->number_of_sets);
1521 + ret = -1;
1522 +- goto done;
1523 ++ goto check_next_scan;
1524 + }
1525 +
1526 + bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
1527 +@@ -1663,7 +1663,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1528 + if (!beacon_size || beacon_size > bytes_left) {
1529 + bss_info += bytes_left;
1530 + bytes_left = 0;
1531 +- return -1;
1532 ++ ret = -1;
1533 ++ goto check_next_scan;
1534 + }
1535 +
1536 + /* Initialize the current working beacon pointer for this BSS
1537 +@@ -1716,7 +1717,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1538 + dev_err(priv->adapter->dev, "%s: in processing"
1539 + " IE, bytes left < IE length\n",
1540 + __func__);
1541 +- goto done;
1542 ++ goto check_next_scan;
1543 + }
1544 + if (element_id == WLAN_EID_DS_PARAMS) {
1545 + channel = *(u8 *) (current_ptr +
1546 +@@ -1782,6 +1783,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1547 + }
1548 + }
1549 +
1550 ++check_next_scan:
1551 + spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
1552 + if (list_empty(&adapter->scan_pending_q)) {
1553 + spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
1554 +@@ -1812,7 +1814,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1555 + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
1556 + }
1557 +
1558 +-done:
1559 + return ret;
1560 + }
1561 +
1562 +diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
1563 +index 22ed6df..2be9880 100644
1564 +--- a/drivers/net/wireless/rt2x00/rt2500usb.c
1565 ++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
1566 +@@ -1921,7 +1921,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
1567 + { USB_DEVICE(0x0b05, 0x1706) },
1568 + { USB_DEVICE(0x0b05, 0x1707) },
1569 + /* Belkin */
1570 +- { USB_DEVICE(0x050d, 0x7050) },
1571 ++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */
1572 + { USB_DEVICE(0x050d, 0x7051) },
1573 + /* Cisco Systems */
1574 + { USB_DEVICE(0x13b1, 0x000d) },
1575 +diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
1576 +index b66a61b..3d4ea1f 100644
1577 +--- a/drivers/net/wireless/rt2x00/rt2800usb.c
1578 ++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
1579 +@@ -959,6 +959,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1580 + { USB_DEVICE(0x07d1, 0x3c15) },
1581 + { USB_DEVICE(0x07d1, 0x3c16) },
1582 + { USB_DEVICE(0x2001, 0x3c1b) },
1583 ++ { USB_DEVICE(0x2001, 0x3c1e) },
1584 + /* Draytek */
1585 + { USB_DEVICE(0x07fa, 0x7712) },
1586 + /* DVICO */
1587 +@@ -1090,6 +1091,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1588 + { USB_DEVICE(0x177f, 0x0153) },
1589 + { USB_DEVICE(0x177f, 0x0302) },
1590 + { USB_DEVICE(0x177f, 0x0313) },
1591 ++ { USB_DEVICE(0x177f, 0x0323) },
1592 + /* U-Media */
1593 + { USB_DEVICE(0x157e, 0x300e) },
1594 + { USB_DEVICE(0x157e, 0x3013) },
1595 +diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
1596 +index 2ad468d..9e724eb 100644
1597 +--- a/drivers/net/wireless/rt2x00/rt73usb.c
1598 ++++ b/drivers/net/wireless/rt2x00/rt73usb.c
1599 +@@ -2421,6 +2421,7 @@ static struct usb_device_id rt73usb_device_table[] = {
1600 + { USB_DEVICE(0x0b05, 0x1723) },
1601 + { USB_DEVICE(0x0b05, 0x1724) },
1602 + /* Belkin */
1603 ++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */
1604 + { USB_DEVICE(0x050d, 0x705a) },
1605 + { USB_DEVICE(0x050d, 0x905b) },
1606 + { USB_DEVICE(0x050d, 0x905c) },
1607 +diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
1608 +index a49e848..30dd0a9 100644
1609 +--- a/drivers/net/wireless/rtlwifi/usb.c
1610 ++++ b/drivers/net/wireless/rtlwifi/usb.c
1611 +@@ -503,8 +503,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
1612 + WARN_ON(skb_queue_empty(&rx_queue));
1613 + while (!skb_queue_empty(&rx_queue)) {
1614 + _skb = skb_dequeue(&rx_queue);
1615 +- _rtl_usb_rx_process_agg(hw, skb);
1616 +- ieee80211_rx_irqsafe(hw, skb);
1617 ++ _rtl_usb_rx_process_agg(hw, _skb);
1618 ++ ieee80211_rx_irqsafe(hw, _skb);
1619 + }
1620 + }
1621 +
1622 +diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
1623 +index 94b79c3..9d7f172 100644
1624 +--- a/drivers/net/xen-netback/common.h
1625 ++++ b/drivers/net/xen-netback/common.h
1626 +@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
1627 + /* Notify xenvif that ring now has space to send an skb to the frontend */
1628 + void xenvif_notify_tx_completion(struct xenvif *vif);
1629 +
1630 ++/* Prevent the device from generating any further traffic. */
1631 ++void xenvif_carrier_off(struct xenvif *vif);
1632 ++
1633 + /* Returns number of ring slots required to send an skb to the frontend */
1634 + unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
1635 +
1636 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1637 +index 1825629..5925e0b 100644
1638 +--- a/drivers/net/xen-netback/interface.c
1639 ++++ b/drivers/net/xen-netback/interface.c
1640 +@@ -342,17 +342,22 @@ err:
1641 + return err;
1642 + }
1643 +
1644 +-void xenvif_disconnect(struct xenvif *vif)
1645 ++void xenvif_carrier_off(struct xenvif *vif)
1646 + {
1647 + struct net_device *dev = vif->dev;
1648 +- if (netif_carrier_ok(dev)) {
1649 +- rtnl_lock();
1650 +- netif_carrier_off(dev); /* discard queued packets */
1651 +- if (netif_running(dev))
1652 +- xenvif_down(vif);
1653 +- rtnl_unlock();
1654 +- xenvif_put(vif);
1655 +- }
1656 ++
1657 ++ rtnl_lock();
1658 ++ netif_carrier_off(dev); /* discard queued packets */
1659 ++ if (netif_running(dev))
1660 ++ xenvif_down(vif);
1661 ++ rtnl_unlock();
1662 ++ xenvif_put(vif);
1663 ++}
1664 ++
1665 ++void xenvif_disconnect(struct xenvif *vif)
1666 ++{
1667 ++ if (netif_carrier_ok(vif->dev))
1668 ++ xenvif_carrier_off(vif);
1669 +
1670 + atomic_dec(&vif->refcnt);
1671 + wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
1672 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
1673 +index 15e332d..b802bb3 100644
1674 +--- a/drivers/net/xen-netback/netback.c
1675 ++++ b/drivers/net/xen-netback/netback.c
1676 +@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
1677 + atomic_dec(&netbk->netfront_count);
1678 + }
1679 +
1680 +-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
1681 ++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1682 ++ u8 status);
1683 + static void make_tx_response(struct xenvif *vif,
1684 + struct xen_netif_tx_request *txp,
1685 + s8 st);
1686 +@@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif,
1687 +
1688 + do {
1689 + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1690 +- if (cons >= end)
1691 ++ if (cons == end)
1692 + break;
1693 + txp = RING_GET_REQUEST(&vif->tx, cons++);
1694 + } while (1);
1695 +@@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif,
1696 + xenvif_put(vif);
1697 + }
1698 +
1699 ++static void netbk_fatal_tx_err(struct xenvif *vif)
1700 ++{
1701 ++ netdev_err(vif->dev, "fatal error; disabling device\n");
1702 ++ xenvif_carrier_off(vif);
1703 ++ xenvif_put(vif);
1704 ++}
1705 ++
1706 + static int netbk_count_requests(struct xenvif *vif,
1707 + struct xen_netif_tx_request *first,
1708 + struct xen_netif_tx_request *txp,
1709 +@@ -873,19 +881,22 @@ static int netbk_count_requests(struct xenvif *vif,
1710 +
1711 + do {
1712 + if (frags >= work_to_do) {
1713 +- netdev_dbg(vif->dev, "Need more frags\n");
1714 ++ netdev_err(vif->dev, "Need more frags\n");
1715 ++ netbk_fatal_tx_err(vif);
1716 + return -frags;
1717 + }
1718 +
1719 + if (unlikely(frags >= MAX_SKB_FRAGS)) {
1720 +- netdev_dbg(vif->dev, "Too many frags\n");
1721 ++ netdev_err(vif->dev, "Too many frags\n");
1722 ++ netbk_fatal_tx_err(vif);
1723 + return -frags;
1724 + }
1725 +
1726 + memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
1727 + sizeof(*txp));
1728 + if (txp->size > first->size) {
1729 +- netdev_dbg(vif->dev, "Frags galore\n");
1730 ++ netdev_err(vif->dev, "Frag is bigger than frame.\n");
1731 ++ netbk_fatal_tx_err(vif);
1732 + return -frags;
1733 + }
1734 +
1735 +@@ -893,8 +904,9 @@ static int netbk_count_requests(struct xenvif *vif,
1736 + frags++;
1737 +
1738 + if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
1739 +- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
1740 ++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
1741 + txp->offset, txp->size);
1742 ++ netbk_fatal_tx_err(vif);
1743 + return -frags;
1744 + }
1745 + } while ((txp++)->flags & XEN_NETTXF_more_data);
1746 +@@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1747 + pending_idx = netbk->pending_ring[index];
1748 + page = xen_netbk_alloc_page(netbk, skb, pending_idx);
1749 + if (!page)
1750 +- return NULL;
1751 ++ goto err;
1752 +
1753 + netbk->mmap_pages[pending_idx] = page;
1754 +
1755 +@@ -962,6 +974,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
1756 + }
1757 +
1758 + return gop;
1759 ++err:
1760 ++ /* Unwind, freeing all pages and sending error responses. */
1761 ++ while (i-- > start) {
1762 ++ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
1763 ++ XEN_NETIF_RSP_ERROR);
1764 ++ }
1765 ++ /* The head too, if necessary. */
1766 ++ if (start)
1767 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1768 ++
1769 ++ return NULL;
1770 + }
1771 +
1772 + static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1773 +@@ -970,30 +993,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1774 + {
1775 + struct gnttab_copy *gop = *gopp;
1776 + u16 pending_idx = *((u16 *)skb->data);
1777 +- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
1778 +- struct xenvif *vif = pending_tx_info[pending_idx].vif;
1779 +- struct xen_netif_tx_request *txp;
1780 + struct skb_shared_info *shinfo = skb_shinfo(skb);
1781 + int nr_frags = shinfo->nr_frags;
1782 + int i, err, start;
1783 +
1784 + /* Check status of header. */
1785 + err = gop->status;
1786 +- if (unlikely(err)) {
1787 +- pending_ring_idx_t index;
1788 +- index = pending_index(netbk->pending_prod++);
1789 +- txp = &pending_tx_info[pending_idx].req;
1790 +- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1791 +- netbk->pending_ring[index] = pending_idx;
1792 +- xenvif_put(vif);
1793 +- }
1794 ++ if (unlikely(err))
1795 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1796 +
1797 + /* Skip first skb fragment if it is on same page as header fragment. */
1798 + start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1799 +
1800 + for (i = start; i < nr_frags; i++) {
1801 + int j, newerr;
1802 +- pending_ring_idx_t index;
1803 +
1804 + pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1805 +
1806 +@@ -1002,16 +1015,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1807 + if (likely(!newerr)) {
1808 + /* Had a previous error? Invalidate this fragment. */
1809 + if (unlikely(err))
1810 +- xen_netbk_idx_release(netbk, pending_idx);
1811 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1812 + continue;
1813 + }
1814 +
1815 + /* Error on this fragment: respond to client with an error. */
1816 +- txp = &netbk->pending_tx_info[pending_idx].req;
1817 +- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1818 +- index = pending_index(netbk->pending_prod++);
1819 +- netbk->pending_ring[index] = pending_idx;
1820 +- xenvif_put(vif);
1821 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1822 +
1823 + /* Not the first error? Preceding frags already invalidated. */
1824 + if (err)
1825 +@@ -1019,10 +1028,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1826 +
1827 + /* First error: invalidate header and preceding fragments. */
1828 + pending_idx = *((u16 *)skb->data);
1829 +- xen_netbk_idx_release(netbk, pending_idx);
1830 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1831 + for (j = start; j < i; j++) {
1832 + pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1833 +- xen_netbk_idx_release(netbk, pending_idx);
1834 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1835 + }
1836 +
1837 + /* Remember the error: invalidate all subsequent fragments. */
1838 +@@ -1056,7 +1065,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1839 +
1840 + /* Take an extra reference to offset xen_netbk_idx_release */
1841 + get_page(netbk->mmap_pages[pending_idx]);
1842 +- xen_netbk_idx_release(netbk, pending_idx);
1843 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1844 + }
1845 + }
1846 +
1847 +@@ -1069,7 +1078,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1848 +
1849 + do {
1850 + if (unlikely(work_to_do-- <= 0)) {
1851 +- netdev_dbg(vif->dev, "Missing extra info\n");
1852 ++ netdev_err(vif->dev, "Missing extra info\n");
1853 ++ netbk_fatal_tx_err(vif);
1854 + return -EBADR;
1855 + }
1856 +
1857 +@@ -1078,8 +1088,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
1858 + if (unlikely(!extra.type ||
1859 + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1860 + vif->tx.req_cons = ++cons;
1861 +- netdev_dbg(vif->dev,
1862 ++ netdev_err(vif->dev,
1863 + "Invalid extra type: %d\n", extra.type);
1864 ++ netbk_fatal_tx_err(vif);
1865 + return -EINVAL;
1866 + }
1867 +
1868 +@@ -1095,13 +1106,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
1869 + struct xen_netif_extra_info *gso)
1870 + {
1871 + if (!gso->u.gso.size) {
1872 +- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
1873 ++ netdev_err(vif->dev, "GSO size must not be zero.\n");
1874 ++ netbk_fatal_tx_err(vif);
1875 + return -EINVAL;
1876 + }
1877 +
1878 + /* Currently only TCPv4 S.O. is supported. */
1879 + if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1880 +- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1881 ++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1882 ++ netbk_fatal_tx_err(vif);
1883 + return -EINVAL;
1884 + }
1885 +
1886 +@@ -1238,9 +1251,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1887 +
1888 + /* Get a netif from the list with work to do. */
1889 + vif = poll_net_schedule_list(netbk);
1890 ++ /* This can sometimes happen because the test of
1891 ++ * list_empty(net_schedule_list) at the top of the
1892 ++ * loop is unlocked. Just go back and have another
1893 ++ * look.
1894 ++ */
1895 + if (!vif)
1896 + continue;
1897 +
1898 ++ if (vif->tx.sring->req_prod - vif->tx.req_cons >
1899 ++ XEN_NETIF_TX_RING_SIZE) {
1900 ++ netdev_err(vif->dev,
1901 ++ "Impossible number of requests. "
1902 ++ "req_prod %d, req_cons %d, size %ld\n",
1903 ++ vif->tx.sring->req_prod, vif->tx.req_cons,
1904 ++ XEN_NETIF_TX_RING_SIZE);
1905 ++ netbk_fatal_tx_err(vif);
1906 ++ continue;
1907 ++ }
1908 ++
1909 + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1910 + if (!work_to_do) {
1911 + xenvif_put(vif);
1912 +@@ -1268,17 +1297,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1913 + work_to_do = xen_netbk_get_extras(vif, extras,
1914 + work_to_do);
1915 + idx = vif->tx.req_cons;
1916 +- if (unlikely(work_to_do < 0)) {
1917 +- netbk_tx_err(vif, &txreq, idx);
1918 ++ if (unlikely(work_to_do < 0))
1919 + continue;
1920 +- }
1921 + }
1922 +
1923 + ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1924 +- if (unlikely(ret < 0)) {
1925 +- netbk_tx_err(vif, &txreq, idx - ret);
1926 ++ if (unlikely(ret < 0))
1927 + continue;
1928 +- }
1929 ++
1930 + idx += ret;
1931 +
1932 + if (unlikely(txreq.size < ETH_HLEN)) {
1933 +@@ -1290,11 +1316,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1934 +
1935 + /* No crossing a page as the payload mustn't fragment. */
1936 + if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1937 +- netdev_dbg(vif->dev,
1938 ++ netdev_err(vif->dev,
1939 + "txreq.offset: %x, size: %u, end: %lu\n",
1940 + txreq.offset, txreq.size,
1941 + (txreq.offset&~PAGE_MASK) + txreq.size);
1942 +- netbk_tx_err(vif, &txreq, idx);
1943 ++ netbk_fatal_tx_err(vif);
1944 + continue;
1945 + }
1946 +
1947 +@@ -1322,8 +1348,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1948 + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1949 +
1950 + if (netbk_set_skb_gso(vif, skb, gso)) {
1951 ++ /* Failure in netbk_set_skb_gso is fatal. */
1952 + kfree_skb(skb);
1953 +- netbk_tx_err(vif, &txreq, idx);
1954 + continue;
1955 + }
1956 + }
1957 +@@ -1424,7 +1450,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1958 + txp->size -= data_len;
1959 + } else {
1960 + /* Schedule a response immediately. */
1961 +- xen_netbk_idx_release(netbk, pending_idx);
1962 ++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
1963 + }
1964 +
1965 + if (txp->flags & XEN_NETTXF_csum_blank)
1966 +@@ -1479,7 +1505,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
1967 +
1968 + }
1969 +
1970 +-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1971 ++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1972 ++ u8 status)
1973 + {
1974 + struct xenvif *vif;
1975 + struct pending_tx_info *pending_tx_info;
1976 +@@ -1493,7 +1520,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1977 +
1978 + vif = pending_tx_info->vif;
1979 +
1980 +- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
1981 ++ make_tx_response(vif, &pending_tx_info->req, status);
1982 +
1983 + index = pending_index(netbk->pending_prod++);
1984 + netbk->pending_ring[index] = pending_idx;
1985 +diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
1986 +index da8beb8..627b66a 100644
1987 +--- a/drivers/rtc/rtc-isl1208.c
1988 ++++ b/drivers/rtc/rtc-isl1208.c
1989 +@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
1990 + {
1991 + unsigned long timeout = jiffies + msecs_to_jiffies(1000);
1992 + struct i2c_client *client = data;
1993 ++ struct rtc_device *rtc = i2c_get_clientdata(client);
1994 + int handled = 0, sr, err;
1995 +
1996 + /*
1997 +@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
1998 + if (sr & ISL1208_REG_SR_ALM) {
1999 + dev_dbg(&client->dev, "alarm!\n");
2000 +
2001 ++ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
2002 ++
2003 + /* Clear the alarm */
2004 + sr &= ~ISL1208_REG_SR_ALM;
2005 + sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
2006 +diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
2007 +index 1e80a48..73816d8 100644
2008 +--- a/drivers/rtc/rtc-pl031.c
2009 ++++ b/drivers/rtc/rtc-pl031.c
2010 +@@ -44,6 +44,7 @@
2011 + #define RTC_YMR 0x34 /* Year match register */
2012 + #define RTC_YLR 0x38 /* Year data load register */
2013 +
2014 ++#define RTC_CR_EN (1 << 0) /* counter enable bit */
2015 + #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
2016 +
2017 + #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
2018 +@@ -312,7 +313,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
2019 + int ret;
2020 + struct pl031_local *ldata;
2021 + struct rtc_class_ops *ops = id->data;
2022 +- unsigned long time;
2023 ++ unsigned long time, data;
2024 +
2025 + ret = amba_request_regions(adev, NULL);
2026 + if (ret)
2027 +@@ -339,10 +340,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
2028 + dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
2029 + dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
2030 +
2031 ++ data = readl(ldata->base + RTC_CR);
2032 + /* Enable the clockwatch on ST Variants */
2033 + if (ldata->hw_designer == AMBA_VENDOR_ST)
2034 +- writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
2035 +- ldata->base + RTC_CR);
2036 ++ data |= RTC_CR_CWEN;
2037 ++ writel(data | RTC_CR_EN, ldata->base + RTC_CR);
2038 +
2039 + /*
2040 + * On ST PL031 variants, the RTC reset value does not provide correct
2041 +diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2042 +index 34655d0..08e470f 100644
2043 +--- a/drivers/usb/host/ehci-sched.c
2044 ++++ b/drivers/usb/host/ehci-sched.c
2045 +@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
2046 + }
2047 +
2048 + static const unsigned char
2049 +-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
2050 ++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
2051 +
2052 + /* carryover low/fullspeed bandwidth that crosses uframe boundries */
2053 + static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
2054 +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
2055 +index 5cc401b..c7cfbce 100644
2056 +--- a/drivers/usb/host/pci-quirks.c
2057 ++++ b/drivers/usb/host/pci-quirks.c
2058 +@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
2059 + "defaulting to EHCI.\n");
2060 + dev_warn(&xhci_pdev->dev,
2061 + "USB 3.0 devices will work at USB 2.0 speeds.\n");
2062 ++ usb_disable_xhci_ports(xhci_pdev);
2063 + return;
2064 + }
2065 +
2066 +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2067 +index 2ed591d..5c1f9e7 100644
2068 +--- a/drivers/usb/host/xhci-ring.c
2069 ++++ b/drivers/usb/host/xhci-ring.c
2070 +@@ -2504,6 +2504,8 @@ cleanup:
2071 + (trb_comp_code != COMP_STALL &&
2072 + trb_comp_code != COMP_BABBLE))
2073 + xhci_urb_free_priv(xhci, urb_priv);
2074 ++ else
2075 ++ kfree(urb_priv);
2076 +
2077 + usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2078 + if ((urb->actual_length != urb->transfer_buffer_length &&
2079 +@@ -3032,7 +3034,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
2080 + * running_total.
2081 + */
2082 + packets_transferred = (running_total + trb_buff_len) /
2083 +- usb_endpoint_maxp(&urb->ep->desc);
2084 ++ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
2085 +
2086 + if ((total_packet_count - packets_transferred) > 31)
2087 + return 31 << 17;
2088 +@@ -3594,7 +3596,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2089 + td_len = urb->iso_frame_desc[i].length;
2090 + td_remain_len = td_len;
2091 + total_packet_count = DIV_ROUND_UP(td_len,
2092 +- usb_endpoint_maxp(&urb->ep->desc));
2093 ++ GET_MAX_PACKET(
2094 ++ usb_endpoint_maxp(&urb->ep->desc)));
2095 + /* A zero-length transfer still involves at least one packet. */
2096 + if (total_packet_count == 0)
2097 + total_packet_count++;
2098 +@@ -3617,9 +3620,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2099 + td = urb_priv->td[i];
2100 + for (j = 0; j < trbs_per_td; j++) {
2101 + u32 remainder = 0;
2102 +- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
2103 ++ field = 0;
2104 +
2105 + if (first_trb) {
2106 ++ field = TRB_TBC(burst_count) |
2107 ++ TRB_TLBPC(residue);
2108 + /* Queue the isoc TRB */
2109 + field |= TRB_TYPE(TRB_ISOC);
2110 + /* Assume URB_ISO_ASAP is set */
2111 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2112 +index 2cc7c18..d644a66 100644
2113 +--- a/drivers/usb/serial/ftdi_sio.c
2114 ++++ b/drivers/usb/serial/ftdi_sio.c
2115 +@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
2116 + /*
2117 + * ELV devices:
2118 + */
2119 ++ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
2120 + { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
2121 + { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
2122 + { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
2123 +@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
2124 + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
2125 + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
2126 + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
2127 ++ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
2128 + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
2129 + { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
2130 + { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
2131 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2132 +index dd6edf8..97e0a6b 100644
2133 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2134 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2135 +@@ -147,6 +147,11 @@
2136 + #define XSENS_CONVERTER_6_PID 0xD38E
2137 + #define XSENS_CONVERTER_7_PID 0xD38F
2138 +
2139 ++/**
2140 ++ * Zolix (www.zolix.com.cb) product ids
2141 ++ */
2142 ++#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
2143 ++
2144 + /*
2145 + * NDI (www.ndigital.com) product ids
2146 + */
2147 +@@ -204,7 +209,7 @@
2148 +
2149 + /*
2150 + * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
2151 +- * All of these devices use FTDI's vendor ID (0x0403).
2152 ++ * Almost all of these devices use FTDI's vendor ID (0x0403).
2153 + * Further IDs taken from ELV Windows .inf file.
2154 + *
2155 + * The previously included PID for the UO 100 module was incorrect.
2156 +@@ -212,6 +217,8 @@
2157 + *
2158 + * Armin Laeuger originally sent the PID for the UM 100 module.
2159 + */
2160 ++#define FTDI_ELV_VID 0x1B1F /* ELV AG */
2161 ++#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
2162 + #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
2163 + #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
2164 + #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
2165 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2166 +index 9db3e23..52cd814 100644
2167 +--- a/drivers/usb/serial/option.c
2168 ++++ b/drivers/usb/serial/option.c
2169 +@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
2170 + #define TELIT_PRODUCT_CC864_DUAL 0x1005
2171 + #define TELIT_PRODUCT_CC864_SINGLE 0x1006
2172 + #define TELIT_PRODUCT_DE910_DUAL 0x1010
2173 ++#define TELIT_PRODUCT_LE920 0x1200
2174 +
2175 + /* ZTE PRODUCTS */
2176 + #define ZTE_VENDOR_ID 0x19d2
2177 +@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
2178 + #define TPLINK_VENDOR_ID 0x2357
2179 + #define TPLINK_PRODUCT_MA180 0x0201
2180 +
2181 ++/* Changhong products */
2182 ++#define CHANGHONG_VENDOR_ID 0x2077
2183 ++#define CHANGHONG_PRODUCT_CH690 0x7001
2184 ++
2185 + /* some devices interfaces need special handling due to a number of reasons */
2186 + enum option_blacklist_reason {
2187 + OPTION_BLACKLIST_NONE = 0,
2188 +@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
2189 + .reserved = BIT(3) | BIT(4),
2190 + };
2191 +
2192 ++static const struct option_blacklist_info telit_le920_blacklist = {
2193 ++ .sendsetup = BIT(0),
2194 ++ .reserved = BIT(1) | BIT(5),
2195 ++};
2196 ++
2197 + static const struct usb_device_id option_ids[] = {
2198 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
2199 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
2200 +@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
2201 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
2202 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
2203 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
2204 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
2205 ++ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
2206 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2207 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
2208 + .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
2209 +@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
2210 + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
2211 + { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
2212 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2213 ++ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
2214 + { } /* Terminating entry */
2215 + };
2216 + MODULE_DEVICE_TABLE(usb, option_ids);
2217 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2218 +index 6634477..14c4a82 100644
2219 +--- a/drivers/usb/serial/qcserial.c
2220 ++++ b/drivers/usb/serial/qcserial.c
2221 +@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
2222 + {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
2223 + {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
2224 + {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
2225 ++ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
2226 +
2227 + /* Gobi 2000 devices */
2228 + {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
2229 +diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
2230 +index 105d900..16b0bf0 100644
2231 +--- a/drivers/usb/storage/initializers.c
2232 ++++ b/drivers/usb/storage/initializers.c
2233 +@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
2234 + return 0;
2235 + }
2236 +
2237 +-/* This places the HUAWEI E220 devices in multi-port mode */
2238 +-int usb_stor_huawei_e220_init(struct us_data *us)
2239 ++/* This places the HUAWEI usb dongles in multi-port mode */
2240 ++static int usb_stor_huawei_feature_init(struct us_data *us)
2241 + {
2242 + int result;
2243 +
2244 +@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
2245 + US_DEBUGP("Huawei mode set result is %d\n", result);
2246 + return 0;
2247 + }
2248 ++
2249 ++/*
2250 ++ * It will send a scsi switch command called rewind' to huawei dongle.
2251 ++ * When the dongle receives this command at the first time,
2252 ++ * it will reboot immediately. After rebooted, it will ignore this command.
2253 ++ * So it is unnecessary to read its response.
2254 ++ */
2255 ++static int usb_stor_huawei_scsi_init(struct us_data *us)
2256 ++{
2257 ++ int result = 0;
2258 ++ int act_len = 0;
2259 ++ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
2260 ++ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
2261 ++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
2262 ++
2263 ++ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
2264 ++ bcbw->Tag = 0;
2265 ++ bcbw->DataTransferLength = 0;
2266 ++ bcbw->Flags = bcbw->Lun = 0;
2267 ++ bcbw->Length = sizeof(rewind_cmd);
2268 ++ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
2269 ++ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
2270 ++
2271 ++ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
2272 ++ US_BULK_CB_WRAP_LEN, &act_len);
2273 ++ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
2274 ++ return result;
2275 ++}
2276 ++
2277 ++/*
2278 ++ * It tries to find the supported Huawei USB dongles.
2279 ++ * In Huawei, they assign the following product IDs
2280 ++ * for all of their mobile broadband dongles,
2281 ++ * including the new dongles in the future.
2282 ++ * So if the product ID is not included in this list,
2283 ++ * it means it is not Huawei's mobile broadband dongles.
2284 ++ */
2285 ++static int usb_stor_huawei_dongles_pid(struct us_data *us)
2286 ++{
2287 ++ struct usb_interface_descriptor *idesc;
2288 ++ int idProduct;
2289 ++
2290 ++ idesc = &us->pusb_intf->cur_altsetting->desc;
2291 ++ idProduct = us->pusb_dev->descriptor.idProduct;
2292 ++ /* The first port is CDROM,
2293 ++ * means the dongle in the single port mode,
2294 ++ * and a switch command is required to be sent. */
2295 ++ if (idesc && idesc->bInterfaceNumber == 0) {
2296 ++ if ((idProduct == 0x1001)
2297 ++ || (idProduct == 0x1003)
2298 ++ || (idProduct == 0x1004)
2299 ++ || (idProduct >= 0x1401 && idProduct <= 0x1500)
2300 ++ || (idProduct >= 0x1505 && idProduct <= 0x1600)
2301 ++ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
2302 ++ return 1;
2303 ++ }
2304 ++ }
2305 ++ return 0;
2306 ++}
2307 ++
2308 ++int usb_stor_huawei_init(struct us_data *us)
2309 ++{
2310 ++ int result = 0;
2311 ++
2312 ++ if (usb_stor_huawei_dongles_pid(us)) {
2313 ++ if (us->pusb_dev->descriptor.idProduct >= 0x1446)
2314 ++ result = usb_stor_huawei_scsi_init(us);
2315 ++ else
2316 ++ result = usb_stor_huawei_feature_init(us);
2317 ++ }
2318 ++ return result;
2319 ++}
2320 +diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
2321 +index 529327f..5376d4f 100644
2322 +--- a/drivers/usb/storage/initializers.h
2323 ++++ b/drivers/usb/storage/initializers.h
2324 +@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
2325 + * flash reader */
2326 + int usb_stor_ucr61s2b_init(struct us_data *us);
2327 +
2328 +-/* This places the HUAWEI E220 devices in multi-port mode */
2329 +-int usb_stor_huawei_e220_init(struct us_data *us);
2330 ++/* This places the HUAWEI usb dongles in multi-port mode */
2331 ++int usb_stor_huawei_init(struct us_data *us);
2332 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2333 +index fa8a1b2..12640ef 100644
2334 +--- a/drivers/usb/storage/unusual_devs.h
2335 ++++ b/drivers/usb/storage/unusual_devs.h
2336 +@@ -1515,335 +1515,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
2337 + /* Reported by fangxiaozhi <huananhu@××××××.com>
2338 + * This brings the HUAWEI data card devices into multi-port mode
2339 + */
2340 +-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
2341 ++UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
2342 + "HUAWEI MOBILE",
2343 + "Mass Storage",
2344 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2345 +- 0),
2346 +-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
2347 +- "HUAWEI MOBILE",
2348 +- "Mass Storage",
2349 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2350 +- 0),
2351 +-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
2352 +- "HUAWEI MOBILE",
2353 +- "Mass Storage",
2354 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2355 +- 0),
2356 +-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
2357 +- "HUAWEI MOBILE",
2358 +- "Mass Storage",
2359 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2360 +- 0),
2361 +-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
2362 +- "HUAWEI MOBILE",
2363 +- "Mass Storage",
2364 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2365 +- 0),
2366 +-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
2367 +- "HUAWEI MOBILE",
2368 +- "Mass Storage",
2369 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2370 +- 0),
2371 +-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
2372 +- "HUAWEI MOBILE",
2373 +- "Mass Storage",
2374 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2375 +- 0),
2376 +-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
2377 +- "HUAWEI MOBILE",
2378 +- "Mass Storage",
2379 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2380 +- 0),
2381 +-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
2382 +- "HUAWEI MOBILE",
2383 +- "Mass Storage",
2384 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2385 +- 0),
2386 +-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
2387 +- "HUAWEI MOBILE",
2388 +- "Mass Storage",
2389 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2390 +- 0),
2391 +-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
2392 +- "HUAWEI MOBILE",
2393 +- "Mass Storage",
2394 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2395 +- 0),
2396 +-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
2397 +- "HUAWEI MOBILE",
2398 +- "Mass Storage",
2399 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2400 +- 0),
2401 +-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
2402 +- "HUAWEI MOBILE",
2403 +- "Mass Storage",
2404 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2405 +- 0),
2406 +-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
2407 +- "HUAWEI MOBILE",
2408 +- "Mass Storage",
2409 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2410 +- 0),
2411 +-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
2412 +- "HUAWEI MOBILE",
2413 +- "Mass Storage",
2414 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2415 +- 0),
2416 +-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
2417 +- "HUAWEI MOBILE",
2418 +- "Mass Storage",
2419 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2420 +- 0),
2421 +-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
2422 +- "HUAWEI MOBILE",
2423 +- "Mass Storage",
2424 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2425 +- 0),
2426 +-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
2427 +- "HUAWEI MOBILE",
2428 +- "Mass Storage",
2429 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2430 +- 0),
2431 +-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
2432 +- "HUAWEI MOBILE",
2433 +- "Mass Storage",
2434 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2435 +- 0),
2436 +-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
2437 +- "HUAWEI MOBILE",
2438 +- "Mass Storage",
2439 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2440 +- 0),
2441 +-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
2442 +- "HUAWEI MOBILE",
2443 +- "Mass Storage",
2444 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2445 +- 0),
2446 +-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
2447 +- "HUAWEI MOBILE",
2448 +- "Mass Storage",
2449 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2450 +- 0),
2451 +-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
2452 +- "HUAWEI MOBILE",
2453 +- "Mass Storage",
2454 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2455 +- 0),
2456 +-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
2457 +- "HUAWEI MOBILE",
2458 +- "Mass Storage",
2459 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2460 +- 0),
2461 +-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
2462 +- "HUAWEI MOBILE",
2463 +- "Mass Storage",
2464 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2465 +- 0),
2466 +-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
2467 +- "HUAWEI MOBILE",
2468 +- "Mass Storage",
2469 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2470 +- 0),
2471 +-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
2472 +- "HUAWEI MOBILE",
2473 +- "Mass Storage",
2474 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2475 +- 0),
2476 +-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
2477 +- "HUAWEI MOBILE",
2478 +- "Mass Storage",
2479 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2480 +- 0),
2481 +-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
2482 +- "HUAWEI MOBILE",
2483 +- "Mass Storage",
2484 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2485 +- 0),
2486 +-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
2487 +- "HUAWEI MOBILE",
2488 +- "Mass Storage",
2489 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2490 +- 0),
2491 +-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
2492 +- "HUAWEI MOBILE",
2493 +- "Mass Storage",
2494 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2495 +- 0),
2496 +-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
2497 +- "HUAWEI MOBILE",
2498 +- "Mass Storage",
2499 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2500 +- 0),
2501 +-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
2502 +- "HUAWEI MOBILE",
2503 +- "Mass Storage",
2504 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2505 +- 0),
2506 +-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
2507 +- "HUAWEI MOBILE",
2508 +- "Mass Storage",
2509 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2510 +- 0),
2511 +-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
2512 +- "HUAWEI MOBILE",
2513 +- "Mass Storage",
2514 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2515 +- 0),
2516 +-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
2517 +- "HUAWEI MOBILE",
2518 +- "Mass Storage",
2519 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2520 +- 0),
2521 +-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
2522 +- "HUAWEI MOBILE",
2523 +- "Mass Storage",
2524 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2525 +- 0),
2526 +-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
2527 +- "HUAWEI MOBILE",
2528 +- "Mass Storage",
2529 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2530 +- 0),
2531 +-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
2532 +- "HUAWEI MOBILE",
2533 +- "Mass Storage",
2534 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2535 +- 0),
2536 +-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
2537 +- "HUAWEI MOBILE",
2538 +- "Mass Storage",
2539 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2540 +- 0),
2541 +-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
2542 +- "HUAWEI MOBILE",
2543 +- "Mass Storage",
2544 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2545 +- 0),
2546 +-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
2547 +- "HUAWEI MOBILE",
2548 +- "Mass Storage",
2549 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2550 +- 0),
2551 +-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
2552 +- "HUAWEI MOBILE",
2553 +- "Mass Storage",
2554 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2555 +- 0),
2556 +-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
2557 +- "HUAWEI MOBILE",
2558 +- "Mass Storage",
2559 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2560 +- 0),
2561 +-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
2562 +- "HUAWEI MOBILE",
2563 +- "Mass Storage",
2564 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2565 +- 0),
2566 +-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
2567 +- "HUAWEI MOBILE",
2568 +- "Mass Storage",
2569 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2570 +- 0),
2571 +-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
2572 +- "HUAWEI MOBILE",
2573 +- "Mass Storage",
2574 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2575 +- 0),
2576 +-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
2577 +- "HUAWEI MOBILE",
2578 +- "Mass Storage",
2579 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2580 +- 0),
2581 +-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
2582 +- "HUAWEI MOBILE",
2583 +- "Mass Storage",
2584 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2585 +- 0),
2586 +-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
2587 +- "HUAWEI MOBILE",
2588 +- "Mass Storage",
2589 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2590 +- 0),
2591 +-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
2592 +- "HUAWEI MOBILE",
2593 +- "Mass Storage",
2594 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2595 +- 0),
2596 +-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
2597 +- "HUAWEI MOBILE",
2598 +- "Mass Storage",
2599 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2600 +- 0),
2601 +-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
2602 +- "HUAWEI MOBILE",
2603 +- "Mass Storage",
2604 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2605 +- 0),
2606 +-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
2607 +- "HUAWEI MOBILE",
2608 +- "Mass Storage",
2609 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2610 +- 0),
2611 +-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
2612 +- "HUAWEI MOBILE",
2613 +- "Mass Storage",
2614 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2615 +- 0),
2616 +-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
2617 +- "HUAWEI MOBILE",
2618 +- "Mass Storage",
2619 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2620 +- 0),
2621 +-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
2622 +- "HUAWEI MOBILE",
2623 +- "Mass Storage",
2624 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2625 +- 0),
2626 +-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
2627 +- "HUAWEI MOBILE",
2628 +- "Mass Storage",
2629 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2630 +- 0),
2631 +-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
2632 +- "HUAWEI MOBILE",
2633 +- "Mass Storage",
2634 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2635 +- 0),
2636 +-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
2637 +- "HUAWEI MOBILE",
2638 +- "Mass Storage",
2639 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2640 +- 0),
2641 +-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
2642 +- "HUAWEI MOBILE",
2643 +- "Mass Storage",
2644 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2645 +- 0),
2646 +-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
2647 +- "HUAWEI MOBILE",
2648 +- "Mass Storage",
2649 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2650 +- 0),
2651 +-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
2652 +- "HUAWEI MOBILE",
2653 +- "Mass Storage",
2654 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2655 +- 0),
2656 +-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
2657 +- "HUAWEI MOBILE",
2658 +- "Mass Storage",
2659 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2660 +- 0),
2661 +-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
2662 +- "HUAWEI MOBILE",
2663 +- "Mass Storage",
2664 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2665 +- 0),
2666 +-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
2667 +- "HUAWEI MOBILE",
2668 +- "Mass Storage",
2669 +- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
2670 ++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
2671 + 0),
2672 +
2673 + /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
2674 +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
2675 +index db51ba1..d582af4 100644
2676 +--- a/drivers/usb/storage/usb.c
2677 ++++ b/drivers/usb/storage/usb.c
2678 +@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
2679 + .useTransport = use_transport, \
2680 + }
2681 +
2682 ++#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
2683 ++ vendor_name, product_name, use_protocol, use_transport, \
2684 ++ init_function, Flags) \
2685 ++{ \
2686 ++ .vendorName = vendor_name, \
2687 ++ .productName = product_name, \
2688 ++ .useProtocol = use_protocol, \
2689 ++ .useTransport = use_transport, \
2690 ++ .initFunction = init_function, \
2691 ++}
2692 ++
2693 + static struct us_unusual_dev us_unusual_dev_list[] = {
2694 + # include "unusual_devs.h"
2695 + { } /* Terminating entry */
2696 +@@ -128,6 +139,7 @@ static struct us_unusual_dev us_unusual_dev_list[] = {
2697 + #undef UNUSUAL_DEV
2698 + #undef COMPLIANT_DEV
2699 + #undef USUAL_DEV
2700 ++#undef UNUSUAL_VENDOR_INTF
2701 +
2702 +
2703 + #ifdef CONFIG_PM /* Minimal support for suspend and resume */
2704 +diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
2705 +index b969279..a9b5f2e 100644
2706 +--- a/drivers/usb/storage/usual-tables.c
2707 ++++ b/drivers/usb/storage/usual-tables.c
2708 +@@ -46,6 +46,20 @@
2709 + { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
2710 + .driver_info = ((useType)<<24) }
2711 +
2712 ++/* Define the device is matched with Vendor ID and interface descriptors */
2713 ++#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
2714 ++ vendorName, productName, useProtocol, useTransport, \
2715 ++ initFunction, flags) \
2716 ++{ \
2717 ++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
2718 ++ | USB_DEVICE_ID_MATCH_VENDOR, \
2719 ++ .idVendor = (id_vendor), \
2720 ++ .bInterfaceClass = (cl), \
2721 ++ .bInterfaceSubClass = (sc), \
2722 ++ .bInterfaceProtocol = (pr), \
2723 ++ .driver_info = (flags) \
2724 ++}
2725 ++
2726 + struct usb_device_id usb_storage_usb_ids[] = {
2727 + # include "unusual_devs.h"
2728 + { } /* Terminating entry */
2729 +@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
2730 + #undef UNUSUAL_DEV
2731 + #undef COMPLIANT_DEV
2732 + #undef USUAL_DEV
2733 ++#undef UNUSUAL_VENDOR_INTF
2734 +
2735 +
2736 + /*
2737 +diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
2738 +index c598cfb..2b5e695 100644
2739 +--- a/fs/nilfs2/ioctl.c
2740 ++++ b/fs/nilfs2/ioctl.c
2741 +@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
2742 + if (ret < 0)
2743 + printk(KERN_ERR "NILFS: GC failed during preparation: "
2744 + "cannot read source blocks: err=%d\n", ret);
2745 +- else
2746 ++ else {
2747 ++ if (nilfs_sb_need_update(nilfs))
2748 ++ set_nilfs_discontinued(nilfs);
2749 + ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
2750 ++ }
2751 +
2752 + nilfs_remove_all_gcinodes(nilfs);
2753 + clear_nilfs_gc_running(nilfs);
2754 +diff --git a/fs/splice.c b/fs/splice.c
2755 +index 014fcb4..58ab918 100644
2756 +--- a/fs/splice.c
2757 ++++ b/fs/splice.c
2758 +@@ -697,8 +697,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
2759 + return -EINVAL;
2760 +
2761 + more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
2762 +- if (sd->len < sd->total_len)
2763 ++
2764 ++ if (sd->len < sd->total_len && pipe->nrbufs > 1)
2765 + more |= MSG_SENDPAGE_NOTLAST;
2766 ++
2767 + return file->f_op->sendpage(file, buf->page, buf->offset,
2768 + sd->len, &pos, more);
2769 + }
2770 +diff --git a/include/linux/sched.h b/include/linux/sched.h
2771 +index 1e86bb4..8204898 100644
2772 +--- a/include/linux/sched.h
2773 ++++ b/include/linux/sched.h
2774 +@@ -2597,7 +2597,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
2775 + extern void recalc_sigpending_and_wake(struct task_struct *t);
2776 + extern void recalc_sigpending(void);
2777 +
2778 +-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2779 ++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2780 ++
2781 ++static inline void signal_wake_up(struct task_struct *t, bool resume)
2782 ++{
2783 ++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2784 ++}
2785 ++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2786 ++{
2787 ++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2788 ++}
2789 +
2790 + /*
2791 + * Wrappers for p->thread_info->cpu access. No-op on UP.
2792 +diff --git a/kernel/ptrace.c b/kernel/ptrace.c
2793 +index 78ab24a..67fedad 100644
2794 +--- a/kernel/ptrace.c
2795 ++++ b/kernel/ptrace.c
2796 +@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
2797 + * TASK_KILLABLE sleeps.
2798 + */
2799 + if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
2800 +- signal_wake_up(child, task_is_traced(child));
2801 ++ ptrace_signal_wake_up(child, true);
2802 +
2803 + spin_unlock(&child->sighand->siglock);
2804 + }
2805 +
2806 ++/* Ensure that nothing can wake it up, even SIGKILL */
2807 ++static bool ptrace_freeze_traced(struct task_struct *task)
2808 ++{
2809 ++ bool ret = false;
2810 ++
2811 ++ /* Lockless, nobody but us can set this flag */
2812 ++ if (task->jobctl & JOBCTL_LISTENING)
2813 ++ return ret;
2814 ++
2815 ++ spin_lock_irq(&task->sighand->siglock);
2816 ++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
2817 ++ task->state = __TASK_TRACED;
2818 ++ ret = true;
2819 ++ }
2820 ++ spin_unlock_irq(&task->sighand->siglock);
2821 ++
2822 ++ return ret;
2823 ++}
2824 ++
2825 ++static void ptrace_unfreeze_traced(struct task_struct *task)
2826 ++{
2827 ++ if (task->state != __TASK_TRACED)
2828 ++ return;
2829 ++
2830 ++ WARN_ON(!task->ptrace || task->parent != current);
2831 ++
2832 ++ spin_lock_irq(&task->sighand->siglock);
2833 ++ if (__fatal_signal_pending(task))
2834 ++ wake_up_state(task, __TASK_TRACED);
2835 ++ else
2836 ++ task->state = TASK_TRACED;
2837 ++ spin_unlock_irq(&task->sighand->siglock);
2838 ++}
2839 ++
2840 + /**
2841 + * ptrace_check_attach - check whether ptracee is ready for ptrace operation
2842 + * @child: ptracee to check for
2843 +@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
2844 + * be changed by us so it's not changing right after this.
2845 + */
2846 + read_lock(&tasklist_lock);
2847 +- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
2848 ++ if (child->ptrace && child->parent == current) {
2849 ++ WARN_ON(child->state == __TASK_TRACED);
2850 + /*
2851 + * child->sighand can't be NULL, release_task()
2852 + * does ptrace_unlink() before __exit_signal().
2853 + */
2854 +- spin_lock_irq(&child->sighand->siglock);
2855 +- WARN_ON_ONCE(task_is_stopped(child));
2856 +- if (ignore_state || (task_is_traced(child) &&
2857 +- !(child->jobctl & JOBCTL_LISTENING)))
2858 ++ if (ignore_state || ptrace_freeze_traced(child))
2859 + ret = 0;
2860 +- spin_unlock_irq(&child->sighand->siglock);
2861 + }
2862 + read_unlock(&tasklist_lock);
2863 +
2864 +- if (!ret && !ignore_state)
2865 +- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
2866 ++ if (!ret && !ignore_state) {
2867 ++ if (!wait_task_inactive(child, __TASK_TRACED)) {
2868 ++ /*
2869 ++ * This can only happen if may_ptrace_stop() fails and
2870 ++ * ptrace_stop() changes ->state back to TASK_RUNNING,
2871 ++ * so we should not worry about leaking __TASK_TRACED.
2872 ++ */
2873 ++ WARN_ON(child->state == __TASK_TRACED);
2874 ++ ret = -ESRCH;
2875 ++ }
2876 ++ }
2877 +
2878 +- /* All systems go.. */
2879 + return ret;
2880 + }
2881 +
2882 +@@ -307,7 +346,7 @@ static int ptrace_attach(struct task_struct *task, long request,
2883 + */
2884 + if (task_is_stopped(task) &&
2885 + task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
2886 +- signal_wake_up(task, 1);
2887 ++ signal_wake_up_state(task, __TASK_STOPPED);
2888 +
2889 + spin_unlock(&task->sighand->siglock);
2890 +
2891 +@@ -736,7 +775,7 @@ int ptrace_request(struct task_struct *child, long request,
2892 + * tracee into STOP.
2893 + */
2894 + if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
2895 +- signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
2896 ++ ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
2897 +
2898 + unlock_task_sighand(child, &flags);
2899 + ret = 0;
2900 +@@ -762,7 +801,7 @@ int ptrace_request(struct task_struct *child, long request,
2901 + * start of this trap and now. Trigger re-trap.
2902 + */
2903 + if (child->jobctl & JOBCTL_TRAP_NOTIFY)
2904 +- signal_wake_up(child, true);
2905 ++ ptrace_signal_wake_up(child, true);
2906 + ret = 0;
2907 + }
2908 + unlock_task_sighand(child, &flags);
2909 +@@ -899,6 +938,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
2910 + goto out_put_task_struct;
2911 +
2912 + ret = arch_ptrace(child, request, addr, data);
2913 ++ if (ret || request != PTRACE_DETACH)
2914 ++ ptrace_unfreeze_traced(child);
2915 +
2916 + out_put_task_struct:
2917 + put_task_struct(child);
2918 +@@ -1038,8 +1079,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
2919 +
2920 + ret = ptrace_check_attach(child, request == PTRACE_KILL ||
2921 + request == PTRACE_INTERRUPT);
2922 +- if (!ret)
2923 ++ if (!ret) {
2924 + ret = compat_arch_ptrace(child, request, addr, data);
2925 ++ if (ret || request != PTRACE_DETACH)
2926 ++ ptrace_unfreeze_traced(child);
2927 ++ }
2928 +
2929 + out_put_task_struct:
2930 + put_task_struct(child);
2931 +diff --git a/kernel/resource.c b/kernel/resource.c
2932 +index 7640b3a..08aa28e 100644
2933 +--- a/kernel/resource.c
2934 ++++ b/kernel/resource.c
2935 +@@ -757,6 +757,7 @@ static void __init __reserve_region_with_split(struct resource *root,
2936 + struct resource *parent = root;
2937 + struct resource *conflict;
2938 + struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
2939 ++ struct resource *next_res = NULL;
2940 +
2941 + if (!res)
2942 + return;
2943 +@@ -766,21 +767,46 @@ static void __init __reserve_region_with_split(struct resource *root,
2944 + res->end = end;
2945 + res->flags = IORESOURCE_BUSY;
2946 +
2947 +- conflict = __request_resource(parent, res);
2948 +- if (!conflict)
2949 +- return;
2950 ++ while (1) {
2951 +
2952 +- /* failed, split and try again */
2953 +- kfree(res);
2954 ++ conflict = __request_resource(parent, res);
2955 ++ if (!conflict) {
2956 ++ if (!next_res)
2957 ++ break;
2958 ++ res = next_res;
2959 ++ next_res = NULL;
2960 ++ continue;
2961 ++ }
2962 +
2963 +- /* conflict covered whole area */
2964 +- if (conflict->start <= start && conflict->end >= end)
2965 +- return;
2966 ++ /* conflict covered whole area */
2967 ++ if (conflict->start <= res->start &&
2968 ++ conflict->end >= res->end) {
2969 ++ kfree(res);
2970 ++ WARN_ON(next_res);
2971 ++ break;
2972 ++ }
2973 ++
2974 ++ /* failed, split and try again */
2975 ++ if (conflict->start > res->start) {
2976 ++ end = res->end;
2977 ++ res->end = conflict->start - 1;
2978 ++ if (conflict->end < end) {
2979 ++ next_res = kzalloc(sizeof(*next_res),
2980 ++ GFP_ATOMIC);
2981 ++ if (!next_res) {
2982 ++ kfree(res);
2983 ++ break;
2984 ++ }
2985 ++ next_res->name = name;
2986 ++ next_res->start = conflict->end + 1;
2987 ++ next_res->end = end;
2988 ++ next_res->flags = IORESOURCE_BUSY;
2989 ++ }
2990 ++ } else {
2991 ++ res->start = conflict->end + 1;
2992 ++ }
2993 ++ }
2994 +
2995 +- if (conflict->start > start)
2996 +- __reserve_region_with_split(root, start, conflict->start-1, name);
2997 +- if (conflict->end < end)
2998 +- __reserve_region_with_split(root, conflict->end+1, end, name);
2999 + }
3000 +
3001 + void __init reserve_region_with_split(struct resource *root,
3002 +diff --git a/kernel/sched.c b/kernel/sched.c
3003 +index fcc893f..eeeec4e 100644
3004 +--- a/kernel/sched.c
3005 ++++ b/kernel/sched.c
3006 +@@ -2924,7 +2924,8 @@ out:
3007 + */
3008 + int wake_up_process(struct task_struct *p)
3009 + {
3010 +- return try_to_wake_up(p, TASK_ALL, 0);
3011 ++ WARN_ON(task_is_stopped_or_traced(p));
3012 ++ return try_to_wake_up(p, TASK_NORMAL, 0);
3013 + }
3014 + EXPORT_SYMBOL(wake_up_process);
3015 +
3016 +diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
3017 +index 78fcacf..6ad4fb3 100644
3018 +--- a/kernel/sched_rt.c
3019 ++++ b/kernel/sched_rt.c
3020 +@@ -384,7 +384,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
3021 + static int do_balance_runtime(struct rt_rq *rt_rq)
3022 + {
3023 + struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
3024 +- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
3025 ++ struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
3026 + int i, weight, more = 0;
3027 + u64 rt_period;
3028 +
3029 +diff --git a/kernel/signal.c b/kernel/signal.c
3030 +index 08e0b97..d2f55ea 100644
3031 +--- a/kernel/signal.c
3032 ++++ b/kernel/signal.c
3033 +@@ -676,23 +676,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
3034 + * No need to set need_resched since signal event passing
3035 + * goes through ->blocked
3036 + */
3037 +-void signal_wake_up(struct task_struct *t, int resume)
3038 ++void signal_wake_up_state(struct task_struct *t, unsigned int state)
3039 + {
3040 +- unsigned int mask;
3041 +-
3042 + set_tsk_thread_flag(t, TIF_SIGPENDING);
3043 +-
3044 + /*
3045 +- * For SIGKILL, we want to wake it up in the stopped/traced/killable
3046 ++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
3047 + * case. We don't check t->state here because there is a race with it
3048 + * executing another processor and just now entering stopped state.
3049 + * By using wake_up_state, we ensure the process will wake up and
3050 + * handle its death signal.
3051 + */
3052 +- mask = TASK_INTERRUPTIBLE;
3053 +- if (resume)
3054 +- mask |= TASK_WAKEKILL;
3055 +- if (!wake_up_state(t, mask))
3056 ++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
3057 + kick_process(t);
3058 + }
3059 +
3060 +@@ -841,7 +835,7 @@ static void ptrace_trap_notify(struct task_struct *t)
3061 + assert_spin_locked(&t->sighand->siglock);
3062 +
3063 + task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
3064 +- signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
3065 ++ ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
3066 + }
3067 +
3068 + /*
3069 +@@ -1765,6 +1759,10 @@ static inline int may_ptrace_stop(void)
3070 + * If SIGKILL was already sent before the caller unlocked
3071 + * ->siglock we must see ->core_state != NULL. Otherwise it
3072 + * is safe to enter schedule().
3073 ++ *
3074 ++ * This is almost outdated, a task with the pending SIGKILL can't
3075 ++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
3076 ++ * after SIGKILL was already dequeued.
3077 + */
3078 + if (unlikely(current->mm->core_state) &&
3079 + unlikely(current->mm == current->parent->mm))
3080 +@@ -1890,6 +1888,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
3081 + if (gstop_done)
3082 + do_notify_parent_cldstop(current, false, why);
3083 +
3084 ++ /* tasklist protects us from ptrace_freeze_traced() */
3085 + __set_current_state(TASK_RUNNING);
3086 + if (clear_code)
3087 + current->exit_code = 0;
3088 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
3089 +index 6033f02..7a157b3 100644
3090 +--- a/net/bluetooth/hci_event.c
3091 ++++ b/net/bluetooth/hci_event.c
3092 +@@ -1972,7 +1972,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
3093 + if (ev->opcode != HCI_OP_NOP)
3094 + del_timer(&hdev->cmd_timer);
3095 +
3096 +- if (ev->ncmd) {
3097 ++ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3098 + atomic_set(&hdev->cmd_cnt, 1);
3099 + if (!skb_queue_empty(&hdev->cmd_q))
3100 + tasklet_schedule(&hdev->cmd_task);
3101 +diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
3102 +index 1849ee0..9ab60e6 100644
3103 +--- a/net/bluetooth/smp.c
3104 ++++ b/net/bluetooth/smp.c
3105 +@@ -642,6 +642,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3106 +
3107 + skb_pull(skb, sizeof(code));
3108 +
3109 ++ /*
3110 ++ * The SMP context must be initialized for all other PDUs except
3111 ++ * pairing and security requests. If we get any other PDU when
3112 ++ * not initialized simply disconnect (done if this function
3113 ++ * returns an error).
3114 ++ */
3115 ++ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
3116 ++ !conn->smp_chan) {
3117 ++ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
3118 ++ kfree_skb(skb);
3119 ++ return -ENOTSUPP;
3120 ++ }
3121 ++
3122 + switch (code) {
3123 + case SMP_CMD_PAIRING_REQ:
3124 + reason = smp_cmd_pairing_req(conn, skb);
3125 +diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
3126 +index 577ea5d..7c1745d 100644
3127 +--- a/net/bridge/br_netfilter.c
3128 ++++ b/net/bridge/br_netfilter.c
3129 +@@ -245,6 +245,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
3130 + struct net_device *dev = skb->dev;
3131 + u32 len;
3132 +
3133 ++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
3134 ++ goto inhdr_error;
3135 ++
3136 + iph = ip_hdr(skb);
3137 + opt = &(IPCB(skb)->opt);
3138 +
3139 +diff --git a/net/core/pktgen.c b/net/core/pktgen.c
3140 +index 7bc9991..2ef7da0 100644
3141 +--- a/net/core/pktgen.c
3142 ++++ b/net/core/pktgen.c
3143 +@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
3144 + return -EFAULT;
3145 + i += len;
3146 + mutex_lock(&pktgen_thread_lock);
3147 +- pktgen_add_device(t, f);
3148 ++ ret = pktgen_add_device(t, f);
3149 + mutex_unlock(&pktgen_thread_lock);
3150 +- ret = count;
3151 +- sprintf(pg_result, "OK: add_device=%s", f);
3152 ++ if (!ret) {
3153 ++ ret = count;
3154 ++ sprintf(pg_result, "OK: add_device=%s", f);
3155 ++ } else
3156 ++ sprintf(pg_result, "ERROR: can not add device %s", f);
3157 + goto out;
3158 + }
3159 +
3160 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
3161 +index 0106d25..3b36002 100644
3162 +--- a/net/ipv4/ip_sockglue.c
3163 ++++ b/net/ipv4/ip_sockglue.c
3164 +@@ -600,7 +600,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
3165 + case IP_TTL:
3166 + if (optlen < 1)
3167 + goto e_inval;
3168 +- if (val != -1 && (val < 0 || val > 255))
3169 ++ if (val != -1 && (val < 1 || val > 255))
3170 + goto e_inval;
3171 + inet->uc_ttl = val;
3172 + break;
3173 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
3174 +index aab8f08..e865ed1 100644
3175 +--- a/net/ipv4/tcp_input.c
3176 ++++ b/net/ipv4/tcp_input.c
3177 +@@ -3655,6 +3655,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
3178 + }
3179 + } else {
3180 + if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3181 ++ if (!tcp_packets_in_flight(tp)) {
3182 ++ tcp_enter_frto_loss(sk, 2, flag);
3183 ++ return true;
3184 ++ }
3185 ++
3186 + /* Prevent sending of new data. */
3187 + tp->snd_cwnd = min(tp->snd_cwnd,
3188 + tcp_packets_in_flight(tp));
3189 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3190 +index aef80d7..b27baed 100644
3191 +--- a/net/ipv6/addrconf.c
3192 ++++ b/net/ipv6/addrconf.c
3193 +@@ -1739,7 +1739,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
3194 + continue;
3195 + if ((rt->rt6i_flags & flags) != flags)
3196 + continue;
3197 +- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
3198 ++ if ((rt->rt6i_flags & noflags) != 0)
3199 + continue;
3200 + dst_hold(&rt->dst);
3201 + break;
3202 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3203 +index ae98e09..3ccd9b2 100644
3204 +--- a/net/ipv6/ip6_output.c
3205 ++++ b/net/ipv6/ip6_output.c
3206 +@@ -1284,10 +1284,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
3207 + cork->length = 0;
3208 + sk->sk_sndmsg_page = NULL;
3209 + sk->sk_sndmsg_off = 0;
3210 +- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
3211 ++ exthdrlen = (opt ? opt->opt_flen : 0);
3212 + length += exthdrlen;
3213 + transhdrlen += exthdrlen;
3214 +- dst_exthdrlen = rt->dst.header_len;
3215 ++ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
3216 + } else {
3217 + rt = (struct rt6_info *)cork->dst;
3218 + fl6 = &inet->cork.fl.u.ip6;
3219 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3220 +index 19724bd..791c1fa 100644
3221 +--- a/net/ipv6/route.c
3222 ++++ b/net/ipv6/route.c
3223 +@@ -819,7 +819,8 @@ restart:
3224 + dst_hold(&rt->dst);
3225 + read_unlock_bh(&table->tb6_lock);
3226 +
3227 +- if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
3228 ++ if (!dst_get_neighbour_raw(&rt->dst)
3229 ++ && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
3230 + nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
3231 + else if (!(rt->dst.flags & DST_HOST))
3232 + nrt = rt6_alloc_clone(rt, &fl6->daddr);
3233 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3234 +index 85afc13..835fcea 100644
3235 +--- a/net/packet/af_packet.c
3236 ++++ b/net/packet/af_packet.c
3237 +@@ -2422,13 +2422,15 @@ static int packet_release(struct socket *sock)
3238 +
3239 + packet_flush_mclist(sk);
3240 +
3241 +- memset(&req_u, 0, sizeof(req_u));
3242 +-
3243 +- if (po->rx_ring.pg_vec)
3244 ++ if (po->rx_ring.pg_vec) {
3245 ++ memset(&req_u, 0, sizeof(req_u));
3246 + packet_set_ring(sk, &req_u, 1, 0);
3247 ++ }
3248 +
3249 +- if (po->tx_ring.pg_vec)
3250 ++ if (po->tx_ring.pg_vec) {
3251 ++ memset(&req_u, 0, sizeof(req_u));
3252 + packet_set_ring(sk, &req_u, 1, 1);
3253 ++ }
3254 +
3255 + fanout_release(sk);
3256 +
3257 +diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
3258 +index c8cc24e..dbe5870a 100644
3259 +--- a/net/sctp/endpointola.c
3260 ++++ b/net/sctp/endpointola.c
3261 +@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
3262 + /* Final destructor for endpoint. */
3263 + static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
3264 + {
3265 ++ int i;
3266 ++
3267 + SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
3268 +
3269 + /* Free up the HMAC transform. */
3270 +@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
3271 + sctp_inq_free(&ep->base.inqueue);
3272 + sctp_bind_addr_free(&ep->base.bind_addr);
3273 +
3274 ++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
3275 ++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
3276 ++
3277 + /* Remove and free the port */
3278 + if (sctp_sk(ep->base.sk)->bind_hash)
3279 + sctp_put_port(ep->base.sk);
3280 +diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
3281 +index cfeb1d4..96eb168 100644
3282 +--- a/net/sctp/outqueue.c
3283 ++++ b/net/sctp/outqueue.c
3284 +@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
3285 +
3286 + /* Free the outqueue structure and any related pending chunks.
3287 + */
3288 +-void sctp_outq_teardown(struct sctp_outq *q)
3289 ++static void __sctp_outq_teardown(struct sctp_outq *q)
3290 + {
3291 + struct sctp_transport *transport;
3292 + struct list_head *lchunk, *temp;
3293 +@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
3294 + sctp_chunk_free(chunk);
3295 + }
3296 +
3297 +- q->error = 0;
3298 +-
3299 + /* Throw away any leftover control chunks. */
3300 + list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
3301 + list_del_init(&chunk->list);
3302 +@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
3303 + }
3304 + }
3305 +
3306 ++void sctp_outq_teardown(struct sctp_outq *q)
3307 ++{
3308 ++ __sctp_outq_teardown(q);
3309 ++ sctp_outq_init(q->asoc, q);
3310 ++}
3311 ++
3312 + /* Free the outqueue structure and any related pending chunks. */
3313 + void sctp_outq_free(struct sctp_outq *q)
3314 + {
3315 + /* Throw away leftover chunks. */
3316 +- sctp_outq_teardown(q);
3317 ++ __sctp_outq_teardown(q);
3318 +
3319 + /* If we were kmalloc()'d, free the memory. */
3320 + if (q->malloced)
3321 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3322 +index fa8333b..5e0d86e 100644
3323 +--- a/net/sctp/socket.c
3324 ++++ b/net/sctp/socket.c
3325 +@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3326 +
3327 + ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3328 + out:
3329 +- kfree(authkey);
3330 ++ kzfree(authkey);
3331 + return ret;
3332 + }
3333 +
3334
3335 diff --git a/3.2.38/4420_grsecurity-2.9.1-3.2.38-201302171808.patch b/3.2.39/4420_grsecurity-2.9.1-3.2.39-201302222046.patch
3336 similarity index 99%
3337 rename from 3.2.38/4420_grsecurity-2.9.1-3.2.38-201302171808.patch
3338 rename to 3.2.39/4420_grsecurity-2.9.1-3.2.39-201302222046.patch
3339 index ce8c16c..ed3c4f5 100644
3340 --- a/3.2.38/4420_grsecurity-2.9.1-3.2.38-201302171808.patch
3341 +++ b/3.2.39/4420_grsecurity-2.9.1-3.2.39-201302222046.patch
3342 @@ -255,7 +255,7 @@ index 88fd7f5..b318a78 100644
3343 ==============================================================
3344
3345 diff --git a/Makefile b/Makefile
3346 -index c8c9d02..7e79e3e 100644
3347 +index 0fceb8b..feec909 100644
3348 --- a/Makefile
3349 +++ b/Makefile
3350 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
3351 @@ -281,12 +281,16 @@ index c8c9d02..7e79e3e 100644
3352 $(Q)$(MAKE) $(build)=scripts/basic
3353 $(Q)rm -f .tmp_quiet_recordmcount
3354
3355 -@@ -564,6 +565,60 @@ else
3356 +@@ -564,6 +565,64 @@ else
3357 KBUILD_CFLAGS += -O2
3358 endif
3359
3360 +ifndef DISABLE_PAX_PLUGINS
3361 ++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
3362 ++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
3363 ++else
3364 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
3365 ++endif
3366 +ifneq ($(PLUGINCC),)
3367 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
3368 +ifndef CONFIG_UML
3369 @@ -342,7 +346,7 @@ index c8c9d02..7e79e3e 100644
3370 include $(srctree)/arch/$(SRCARCH)/Makefile
3371
3372 ifneq ($(CONFIG_FRAME_WARN),0)
3373 -@@ -708,7 +763,7 @@ export mod_strip_cmd
3374 +@@ -708,7 +767,7 @@ export mod_strip_cmd
3375
3376
3377 ifeq ($(KBUILD_EXTMOD),)
3378 @@ -351,7 +355,7 @@ index c8c9d02..7e79e3e 100644
3379
3380 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
3381 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
3382 -@@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
3383 +@@ -932,6 +991,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
3384
3385 # The actual objects are generated when descending,
3386 # make sure no implicit rule kicks in
3387 @@ -360,7 +364,7 @@ index c8c9d02..7e79e3e 100644
3388 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
3389
3390 # Handle descending into subdirectories listed in $(vmlinux-dirs)
3391 -@@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
3392 +@@ -941,7 +1002,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
3393 # Error messages still appears in the original language
3394
3395 PHONY += $(vmlinux-dirs)
3396 @@ -369,7 +373,7 @@ index c8c9d02..7e79e3e 100644
3397 $(Q)$(MAKE) $(build)=$@
3398
3399 # Store (new) KERNELRELASE string in include/config/kernel.release
3400 -@@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
3401 +@@ -985,6 +1046,7 @@ prepare0: archprepare FORCE
3402 $(Q)$(MAKE) $(build)=.
3403
3404 # All the preparing..
3405 @@ -377,7 +381,7 @@ index c8c9d02..7e79e3e 100644
3406 prepare: prepare0
3407
3408 # Generate some files
3409 -@@ -1089,6 +1147,8 @@ all: modules
3410 +@@ -1089,6 +1151,8 @@ all: modules
3411 # using awk while concatenating to the final file.
3412
3413 PHONY += modules
3414 @@ -386,7 +390,7 @@ index c8c9d02..7e79e3e 100644
3415 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
3416 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
3417 @$(kecho) ' Building modules, stage 2.';
3418 -@@ -1104,7 +1164,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
3419 +@@ -1104,7 +1168,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
3420
3421 # Target to prepare building external modules
3422 PHONY += modules_prepare
3423 @@ -395,7 +399,7 @@ index c8c9d02..7e79e3e 100644
3424
3425 # Target to install modules
3426 PHONY += modules_install
3427 -@@ -1163,7 +1223,7 @@ CLEAN_FILES += vmlinux System.map \
3428 +@@ -1163,7 +1227,7 @@ CLEAN_FILES += vmlinux System.map \
3429 MRPROPER_DIRS += include/config usr/include include/generated \
3430 arch/*/include/generated
3431 MRPROPER_FILES += .config .config.old .version .old_version \
3432 @@ -404,7 +408,7 @@ index c8c9d02..7e79e3e 100644
3433 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
3434
3435 # clean - Delete most, but leave enough to build external modules
3436 -@@ -1201,6 +1261,7 @@ distclean: mrproper
3437 +@@ -1201,6 +1265,7 @@ distclean: mrproper
3438 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
3439 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
3440 -o -name '.*.rej' \
3441 @@ -412,7 +416,7 @@ index c8c9d02..7e79e3e 100644
3442 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
3443 -type f -print | xargs rm -f
3444
3445 -@@ -1361,6 +1422,8 @@ PHONY += $(module-dirs) modules
3446 +@@ -1361,6 +1426,8 @@ PHONY += $(module-dirs) modules
3447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
3448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
3449
3450 @@ -421,7 +425,7 @@ index c8c9d02..7e79e3e 100644
3451 modules: $(module-dirs)
3452 @$(kecho) ' Building modules, stage 2.';
3453 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
3454 -@@ -1487,17 +1550,21 @@ else
3455 +@@ -1487,17 +1554,21 @@ else
3456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
3457 endif
3458
3459 @@ -447,7 +451,7 @@ index c8c9d02..7e79e3e 100644
3460 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
3461 %.symtypes: %.c prepare scripts FORCE
3462 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
3463 -@@ -1507,11 +1574,15 @@ endif
3464 +@@ -1507,11 +1578,15 @@ endif
3465 $(cmd_crmodverdir)
3466 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
3467 $(build)=$(build-dir)
3468 @@ -1690,6 +1694,19 @@ index 3606e85..44ba19d 100644
3469 .endm
3470
3471 #ifdef CONFIG_XIP_KERNEL
3472 +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
3473 +index 2bc1a8e..f433c88 100644
3474 +--- a/arch/arm/kernel/hw_breakpoint.c
3475 ++++ b/arch/arm/kernel/hw_breakpoint.c
3476 +@@ -986,7 +986,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
3477 + return NOTIFY_OK;
3478 + }
3479 +
3480 +-static struct notifier_block __cpuinitdata dbg_reset_nb = {
3481 ++static struct notifier_block dbg_reset_nb = {
3482 + .notifier_call = dbg_reset_notify,
3483 + };
3484 +
3485 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
3486 index 1e9be5d..4e0f470 100644
3487 --- a/arch/arm/kernel/module.c
3488 @@ -2600,6 +2617,32 @@ index 449c8c0..50cdf87 100644
3489 __cu_len; \
3490 })
3491
3492 +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
3493 +index c539c68..c95d3db 100644
3494 +--- a/arch/ia64/kernel/err_inject.c
3495 ++++ b/arch/ia64/kernel/err_inject.c
3496 +@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
3497 + return NOTIFY_OK;
3498 + }
3499 +
3500 +-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
3501 ++static struct notifier_block err_inject_cpu_notifier =
3502 + {
3503 + .notifier_call = err_inject_cpu_callback,
3504 + };
3505 +diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
3506 +index 84fb405..7f52920 100644
3507 +--- a/arch/ia64/kernel/mca.c
3508 ++++ b/arch/ia64/kernel/mca.c
3509 +@@ -1919,7 +1919,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
3510 + return NOTIFY_OK;
3511 + }
3512 +
3513 +-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
3514 ++static struct notifier_block mca_cpu_notifier = {
3515 + .notifier_call = mca_cpu_callback
3516 + };
3517 +
3518 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
3519 index 24603be..948052d 100644
3520 --- a/arch/ia64/kernel/module.c
3521 @@ -2692,6 +2735,32 @@ index 24603be..948052d 100644
3522 mod->arch.gp = gp;
3523 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
3524 }
3525 +diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
3526 +index 77597e5..6f28f3f 100644
3527 +--- a/arch/ia64/kernel/palinfo.c
3528 ++++ b/arch/ia64/kernel/palinfo.c
3529 +@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
3530 + return NOTIFY_OK;
3531 + }
3532 +
3533 +-static struct notifier_block __refdata palinfo_cpu_notifier =
3534 ++static struct notifier_block palinfo_cpu_notifier =
3535 + {
3536 + .notifier_call = palinfo_cpu_callback,
3537 + .priority = 0,
3538 +diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
3539 +index 79802e5..1a89ec5 100644
3540 +--- a/arch/ia64/kernel/salinfo.c
3541 ++++ b/arch/ia64/kernel/salinfo.c
3542 +@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
3543 + return NOTIFY_OK;
3544 + }
3545 +
3546 +-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
3547 ++static struct notifier_block salinfo_cpu_notifier =
3548 + {
3549 + .notifier_call = salinfo_cpu_callback,
3550 + .priority = 0,
3551 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
3552 index 609d500..acd0429 100644
3553 --- a/arch/ia64/kernel/sys_ia64.c
3554 @@ -2736,6 +2805,19 @@ index 609d500..acd0429 100644
3555 /* Remember the address where we stopped this search: */
3556 mm->free_area_cache = addr + len;
3557 return addr;
3558 +diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
3559 +index 9be1f11..f2eef30 100644
3560 +--- a/arch/ia64/kernel/topology.c
3561 ++++ b/arch/ia64/kernel/topology.c
3562 +@@ -444,7 +444,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
3563 + return NOTIFY_OK;
3564 + }
3565 +
3566 +-static struct notifier_block __cpuinitdata cache_cpu_notifier =
3567 ++static struct notifier_block cache_cpu_notifier =
3568 + {
3569 + .notifier_call = cache_cpu_callback
3570 + };
3571 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
3572 index 53c0ba0..2accdde 100644
3573 --- a/arch/ia64/kernel/vmlinux.lds.S
3574 @@ -4663,6 +4745,19 @@ index f2496f2..4e3cc47 100644
3575 return ret;
3576 }
3577 #endif
3578 +diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
3579 +index 55be64d..94d8783 100644
3580 +--- a/arch/powerpc/kernel/sysfs.c
3581 ++++ b/arch/powerpc/kernel/sysfs.c
3582 +@@ -517,7 +517,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
3583 + return NOTIFY_OK;
3584 + }
3585 +
3586 +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
3587 ++static struct notifier_block sysfs_cpu_nb = {
3588 + .notifier_call = sysfs_cpu_notify,
3589 + };
3590 +
3591 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3592 index 82dcd4d..a80088a 100644
3593 --- a/arch/powerpc/kernel/traps.c
3594 @@ -4896,6 +4991,32 @@ index 5a783d8..fbe4c8b 100644
3595 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3596 mm->unmap_area = arch_unmap_area_topdown;
3597 }
3598 +diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
3599 +index 5b63bd3..248942d 100644
3600 +--- a/arch/powerpc/mm/mmu_context_nohash.c
3601 ++++ b/arch/powerpc/mm/mmu_context_nohash.c
3602 +@@ -370,7 +370,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
3603 + return NOTIFY_OK;
3604 + }
3605 +
3606 +-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
3607 ++static struct notifier_block mmu_context_cpu_nb = {
3608 + .notifier_call = mmu_context_cpu_notify,
3609 + };
3610 +
3611 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
3612 +index b22a83a..e55f74f 100644
3613 +--- a/arch/powerpc/mm/numa.c
3614 ++++ b/arch/powerpc/mm/numa.c
3615 +@@ -964,7 +964,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
3616 + return ret;
3617 + }
3618 +
3619 +-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
3620 ++static struct notifier_block ppc64_numa_nb = {
3621 + .notifier_call = cpu_numa_callback,
3622 + .priority = 1 /* Must run before sched domains notifier. */
3623 + };
3624 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3625 index 73709f7..63db0f7 100644
3626 --- a/arch/powerpc/mm/slice.c
3627 @@ -4966,6 +5087,32 @@ index 73709f7..63db0f7 100644
3628 /* If hint, make sure it matches our alignment restrictions */
3629 if (!fixed && addr) {
3630 addr = _ALIGN_UP(addr, 1ul << pshift);
3631 +diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
3632 +index 3394254..8c6825c 100644
3633 +--- a/arch/powerpc/platforms/powermac/smp.c
3634 ++++ b/arch/powerpc/platforms/powermac/smp.c
3635 +@@ -886,7 +886,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
3636 + return NOTIFY_OK;
3637 + }
3638 +
3639 +-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
3640 ++static struct notifier_block smp_core99_cpu_nb = {
3641 + .notifier_call = smp_core99_cpu_notify,
3642 + };
3643 + #endif /* CONFIG_HOTPLUG_CPU */
3644 +diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
3645 +index 24bff4f..0248123 100644
3646 +--- a/arch/s390/appldata/appldata_base.c
3647 ++++ b/arch/s390/appldata/appldata_base.c
3648 +@@ -610,7 +610,7 @@ static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
3649 + return NOTIFY_OK;
3650 + }
3651 +
3652 +-static struct notifier_block __cpuinitdata appldata_nb = {
3653 ++static struct notifier_block appldata_nb = {
3654 + .notifier_call = appldata_cpu_notify,
3655 + };
3656 +
3657 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3658 index 8517d2a..d2738d4 100644
3659 --- a/arch/s390/include/asm/atomic.h
3660 @@ -5216,6 +5363,19 @@ index 53088e2..9f44a36 100644
3661 - return base;
3662 - return ret;
3663 -}
3664 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
3665 +index 1df64a8..aea2a39 100644
3666 +--- a/arch/s390/kernel/smp.c
3667 ++++ b/arch/s390/kernel/smp.c
3668 +@@ -1035,7 +1035,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
3669 + return notifier_from_errno(err);
3670 + }
3671 +
3672 +-static struct notifier_block __cpuinitdata smp_cpu_nb = {
3673 ++static struct notifier_block smp_cpu_nb = {
3674 + .notifier_call = smp_cpu_notify,
3675 + };
3676 +
3677 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3678 index c70b3d8..d01c6b3 100644
3679 --- a/arch/s390/mm/mmap.c
3680 @@ -5324,6 +5484,19 @@ index ef9e555..331bd29 100644
3681
3682 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
3683
3684 +diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
3685 +index 03f2b55..b027032 100644
3686 +--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
3687 ++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
3688 +@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
3689 + return NOTIFY_OK;
3690 + }
3691 +
3692 +-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
3693 ++static struct notifier_block shx3_cpu_notifier = {
3694 + .notifier_call = shx3_cpu_callback,
3695 + };
3696 +
3697 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3698 index afeb710..e8366ef 100644
3699 --- a/arch/sh/mm/mmap.c
3700 @@ -6464,6 +6637,19 @@ index 7f5f65d..3308382 100644
3701 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
3702
3703 2:
3704 +diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
3705 +index 7408201..b349841 100644
3706 +--- a/arch/sparc/kernel/sysfs.c
3707 ++++ b/arch/sparc/kernel/sysfs.c
3708 +@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
3709 + return NOTIFY_OK;
3710 + }
3711 +
3712 +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
3713 ++static struct notifier_block sysfs_cpu_nb = {
3714 + .notifier_call = sysfs_cpu_notify,
3715 + };
3716 +
3717 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
3718 index 591f20c..0f1b925 100644
3719 --- a/arch/sparc/kernel/traps_32.c
3720 @@ -9071,7 +9257,7 @@ index 6557769..ef6ae89 100644
3721
3722 if (err)
3723 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
3724 -index a6253ec..0a325de 100644
3725 +index 95b4eb3..ccdcbb6 100644
3726 --- a/arch/x86/ia32/ia32entry.S
3727 +++ b/arch/x86/ia32/ia32entry.S
3728 @@ -13,7 +13,9 @@
3729 @@ -9218,7 +9404,7 @@ index a6253ec..0a325de 100644
3730 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
3731 jnz ia32_ret_from_sys_call
3732 TRACE_IRQS_ON
3733 - sti
3734 + ENABLE_INTERRUPTS(CLBR_NONE)
3735 @@ -215,12 +261,12 @@ sysexit_from_sys_call:
3736 movzbl %al,%edi /* zero-extend that into %edi */
3737 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
3738 @@ -9227,7 +9413,7 @@ index a6253ec..0a325de 100644
3739 + GET_THREAD_INFO(%r11)
3740 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
3741 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
3742 - cli
3743 + DISABLE_INTERRUPTS(CLBR_NONE)
3744 TRACE_IRQS_OFF
3745 - testl %edi,TI_flags(%r10)
3746 + testl %edi,TI_flags(%r11)
3747 @@ -14451,9 +14637,18 @@ index 1911442..2424a83 100644
3748 .name = "summit",
3749 .probe = probe_summit,
3750 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
3751 -index 5007958..7a534f0 100644
3752 +index 5007958..2eba140 100644
3753 --- a/arch/x86/kernel/apic/x2apic_cluster.c
3754 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
3755 +@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
3756 + return notifier_from_errno(err);
3757 + }
3758 +
3759 +-static struct notifier_block __refdata x2apic_cpu_notifier = {
3760 ++static struct notifier_block x2apic_cpu_notifier = {
3761 + .notifier_call = update_clusterinfo,
3762 + };
3763 +
3764 @@ -208,7 +208,7 @@ static int x2apic_cluster_probe(void)
3765 return 0;
3766 }
3767 @@ -14806,7 +15001,7 @@ index 3e6ff6c..54b4992 100644
3768 }
3769 #endif
3770 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
3771 -index 0e89635..279dd37 100644
3772 +index 0e89635..f0a7525 100644
3773 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
3774 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
3775 @@ -984,6 +984,22 @@ static struct attribute *default_attrs[] = {
3776 @@ -14896,8 +15091,17 @@ index 0e89635..279dd37 100644
3777 per_cpu(ici_cache_kobject, cpu),
3778 "index%1lu", i);
3779 if (unlikely(retval)) {
3780 +@@ -1189,7 +1207,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
3781 + return NOTIFY_OK;
3782 + }
3783 +
3784 +-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
3785 ++static struct notifier_block cacheinfo_cpu_notifier = {
3786 + .notifier_call = cacheinfo_cpu_callback,
3787 + };
3788 +
3789 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
3790 -index 3b67877..6e11450 100644
3791 +index 3b67877..e41ede1 100644
3792 --- a/arch/x86/kernel/cpu/mcheck/mce.c
3793 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
3794 @@ -42,6 +42,7 @@
3795 @@ -15017,7 +15221,7 @@ index 3b67877..6e11450 100644
3796 }
3797
3798 -static struct notifier_block mce_cpu_notifier __cpuinitdata = {
3799 -+static struct notifier_block mce_cpu_notifier __cpuinitconst = {
3800 ++static struct notifier_block mce_cpu_notifier = {
3801 .notifier_call = mce_cpu_callback,
3802 };
3803
3804 @@ -15052,6 +15256,19 @@ index 5c0e653..0882b0a 100644
3805 /* Make sure the vector pointer is visible before we enable MCEs: */
3806 wmb();
3807
3808 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
3809 +index ce04b58..b84acbd 100644
3810 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
3811 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
3812 +@@ -290,7 +290,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
3813 + return notifier_from_errno(err);
3814 + }
3815 +
3816 +-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
3817 ++static struct notifier_block thermal_throttle_cpu_notifier =
3818 + {
3819 + .notifier_call = thermal_throttle_cpu_callback,
3820 + };
3821 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
3822 index 54060f5..c1a7577 100644
3823 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
3824 @@ -15113,6 +15330,19 @@ index 2bda212..78cc605 100644
3825 }
3826 }
3827
3828 +diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
3829 +index 212a6a4..322f5d9 100644
3830 +--- a/arch/x86/kernel/cpuid.c
3831 ++++ b/arch/x86/kernel/cpuid.c
3832 +@@ -172,7 +172,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
3833 + return notifier_from_errno(err);
3834 + }
3835 +
3836 +-static struct notifier_block __refdata cpuid_class_cpu_notifier =
3837 ++static struct notifier_block cpuid_class_cpu_notifier =
3838 + {
3839 + .notifier_call = cpuid_class_cpu_callback,
3840 + };
3841 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
3842 index 13ad899..f642b9a 100644
3843 --- a/arch/x86/kernel/crash.c
3844 @@ -18754,7 +18984,7 @@ index 7da647d..c828808 100644
3845 reset_current_kprobe();
3846 preempt_enable_no_resched();
3847 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
3848 -index a9c2116..a52d4fc 100644
3849 +index a9c2116..94c1e1a 100644
3850 --- a/arch/x86/kernel/kvm.c
3851 +++ b/arch/x86/kernel/kvm.c
3852 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
3853 @@ -18765,6 +18995,15 @@ index a9c2116..a52d4fc 100644
3854 #endif
3855 #endif
3856 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
3857 +@@ -579,7 +580,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
3858 + return NOTIFY_OK;
3859 + }
3860 +
3861 +-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
3862 ++static struct notifier_block kvm_cpu_notifier = {
3863 + .notifier_call = kvm_cpu_notify,
3864 + };
3865 + #endif
3866 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
3867 index ea69726..604d066 100644
3868 --- a/arch/x86/kernel/ldt.c
3869 @@ -18864,6 +19103,19 @@ index a3fa43b..8966f4c 100644
3870
3871 relocate_kernel_ptr = control_page;
3872 page_list[PA_CONTROL_PAGE] = __pa(control_page);
3873 +diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
3874 +index 29c95d7..97b7b1b 100644
3875 +--- a/arch/x86/kernel/microcode_core.c
3876 ++++ b/arch/x86/kernel/microcode_core.c
3877 +@@ -507,7 +507,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
3878 + return NOTIFY_OK;
3879 + }
3880 +
3881 +-static struct notifier_block __refdata mc_cpu_notifier = {
3882 ++static struct notifier_block mc_cpu_notifier = {
3883 + .notifier_call = mc_cpu_callback,
3884 + };
3885 +
3886 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
3887 index 3ca42d0..7cff8cc 100644
3888 --- a/arch/x86/kernel/microcode_intel.c
3889 @@ -19024,6 +19276,19 @@ index 925179f..6794bbb 100644
3890 #if 0
3891 if ((s64)val != *(s32 *)loc)
3892 goto overflow;
3893 +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
3894 +index f7d1a64..399615a 100644
3895 +--- a/arch/x86/kernel/msr.c
3896 ++++ b/arch/x86/kernel/msr.c
3897 +@@ -235,7 +235,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
3898 + return notifier_from_errno(err);
3899 + }
3900 +
3901 +-static struct notifier_block __refdata msr_class_cpu_notifier = {
3902 ++static struct notifier_block msr_class_cpu_notifier = {
3903 + .notifier_call = msr_class_cpu_callback,
3904 + };
3905 +
3906 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
3907 index e88f37b..1353db6 100644
3908 --- a/arch/x86/kernel/nmi.c
3909 @@ -20085,7 +20350,7 @@ index 9f548cb..caf76f7 100644
3910 if (err) {
3911 pr_debug("do_boot_cpu failed %d\n", err);
3912 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
3913 -index c346d11..d43b163 100644
3914 +index d4f278e..86c58c0 100644
3915 --- a/arch/x86/kernel/step.c
3916 +++ b/arch/x86/kernel/step.c
3917 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
3918 @@ -20521,7 +20786,7 @@ index 9a0e312..e6f66f2 100644
3919 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
3920 .long sys_exit
3921 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
3922 -index e2410e2..4fe3fbc 100644
3923 +index e2410e2..b98a4fd 100644
3924 --- a/arch/x86/kernel/tboot.c
3925 +++ b/arch/x86/kernel/tboot.c
3926 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
3927 @@ -20551,7 +20816,7 @@ index e2410e2..4fe3fbc 100644
3928
3929 static int tboot_wait_for_aps(int num_aps)
3930 {
3931 -@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
3932 +@@ -322,16 +322,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
3933 {
3934 switch (action) {
3935 case CPU_DYING:
3936 @@ -20563,6 +20828,14 @@ index e2410e2..4fe3fbc 100644
3937 return NOTIFY_BAD;
3938 break;
3939 }
3940 + return NOTIFY_OK;
3941 + }
3942 +
3943 +-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
3944 ++static struct notifier_block tboot_cpu_notifier =
3945 + {
3946 + .notifier_call = tboot_cpu_callback,
3947 + };
3948 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
3949
3950 tboot_create_trampoline();
3951 @@ -27418,6 +27691,19 @@ index bff89df..377758a 100644
3952 unsigned long stack = kernel_stack_pointer(regs);
3953 if (depth)
3954 dump_trace(NULL, regs, (unsigned long *)stack, 0,
3955 +diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
3956 +index 385a940..b11662d 100644
3957 +--- a/arch/x86/pci/amd_bus.c
3958 ++++ b/arch/x86/pci/amd_bus.c
3959 +@@ -355,7 +355,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
3960 + return NOTIFY_OK;
3961 + }
3962 +
3963 +-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
3964 ++static struct notifier_block amd_cpu_notifier = {
3965 + .notifier_call = amd_cpu_notify,
3966 + };
3967 +
3968 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
3969 index cb29191..036766d 100644
3970 --- a/arch/x86/pci/mrst.c
3971 @@ -28415,7 +28701,7 @@ index 153407c..611cba9 100644
3972 -}
3973 -__setup("vdso=", vdso_setup);
3974 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
3975 -index 69b9ef6..c76f1fe 100644
3976 +index 69b9ef6..30a09b1 100644
3977 --- a/arch/x86/xen/enlighten.c
3978 +++ b/arch/x86/xen/enlighten.c
3979 @@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
3980 @@ -28520,6 +28806,15 @@ index 69b9ef6..c76f1fe 100644
3981
3982 xen_smp_init();
3983
3984 +@@ -1400,7 +1399,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
3985 + return NOTIFY_OK;
3986 + }
3987 +
3988 +-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
3989 ++static struct notifier_block xen_hvm_cpu_notifier = {
3990 + .notifier_call = xen_hvm_cpu_notify,
3991 + };
3992 +
3993 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
3994 index 2b8b0de..0787f8a 100644
3995 --- a/arch/x86/xen/mmu.c
3996 @@ -28620,10 +28915,10 @@ index 9a23fff..c05e794 100644
3997 xen_init_spinlocks();
3998 }
3999 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
4000 -index b040b0e..c457aa7 100644
4001 +index 7328f71..c457aa7 100644
4002 --- a/arch/x86/xen/xen-asm_32.S
4003 +++ b/arch/x86/xen/xen-asm_32.S
4004 -@@ -83,16 +83,16 @@ ENTRY(xen_iret)
4005 +@@ -83,14 +83,14 @@ ENTRY(xen_iret)
4006 ESP_OFFSET=4 # bytes pushed onto stack
4007
4008 /*
4009 @@ -28633,43 +28928,17 @@ index b040b0e..c457aa7 100644
4010 */
4011 #ifdef CONFIG_SMP
4012 - GET_THREAD_INFO(%eax)
4013 -- movl TI_cpu(%eax), %eax
4014 -- movl __per_cpu_offset(,%eax,4), %eax
4015 -- mov xen_vcpu(%eax), %eax
4016 +- movl %ss:TI_cpu(%eax), %eax
4017 +- movl %ss:__per_cpu_offset(,%eax,4), %eax
4018 +- mov %ss:xen_vcpu(%eax), %eax
4019 + push %fs
4020 + mov $(__KERNEL_PERCPU), %eax
4021 + mov %eax, %fs
4022 + mov PER_CPU_VAR(xen_vcpu), %eax
4023 + pop %fs
4024 #else
4025 -- movl xen_vcpu, %eax
4026 -+ movl %ss:xen_vcpu, %eax
4027 + movl %ss:xen_vcpu, %eax
4028 #endif
4029 -
4030 - /* check IF state we're restoring */
4031 -@@ -105,11 +105,11 @@ ENTRY(xen_iret)
4032 - * resuming the code, so we don't have to be worried about
4033 - * being preempted to another CPU.
4034 - */
4035 -- setz XEN_vcpu_info_mask(%eax)
4036 -+ setz %ss:XEN_vcpu_info_mask(%eax)
4037 - xen_iret_start_crit:
4038 -
4039 - /* check for unmasked and pending */
4040 -- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
4041 -+ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
4042 -
4043 - /*
4044 - * If there's something pending, mask events again so we can
4045 -@@ -117,7 +117,7 @@ xen_iret_start_crit:
4046 - * touch XEN_vcpu_info_mask.
4047 - */
4048 - jne 1f
4049 -- movb $1, XEN_vcpu_info_mask(%eax)
4050 -+ movb $1, %ss:XEN_vcpu_info_mask(%eax)
4051 -
4052 - 1: popl %eax
4053 -
4054 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
4055 index aaa7291..3f77960 100644
4056 --- a/arch/x86/xen/xen-head.S
4057 @@ -28767,7 +29036,7 @@ index af00795..2bb8105 100644
4058 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
4059 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
4060 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
4061 -index 58916af..9cb880b 100644
4062 +index 58916af..eb9dbcf6 100644
4063 --- a/block/blk-iopoll.c
4064 +++ b/block/blk-iopoll.c
4065 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
4066 @@ -28779,6 +29048,15 @@ index 58916af..9cb880b 100644
4067 {
4068 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
4069 int rearm = 0, budget = blk_iopoll_budget;
4070 +@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
4071 + return NOTIFY_OK;
4072 + }
4073 +
4074 +-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
4075 ++static struct notifier_block blk_iopoll_cpu_notifier = {
4076 + .notifier_call = blk_iopoll_cpu_notify,
4077 + };
4078 +
4079 diff --git a/block/blk-map.c b/block/blk-map.c
4080 index 623e1cd..ca1e109 100644
4081 --- a/block/blk-map.c
4082 @@ -28793,7 +29071,7 @@ index 623e1cd..ca1e109 100644
4083 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
4084 else
4085 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
4086 -index 1366a89..e17f54b 100644
4087 +index 1366a89..dfb3871 100644
4088 --- a/block/blk-softirq.c
4089 +++ b/block/blk-softirq.c
4090 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
4091 @@ -28805,6 +29083,15 @@ index 1366a89..e17f54b 100644
4092 {
4093 struct list_head *cpu_list, local_list;
4094
4095 +@@ -97,7 +97,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
4096 + return NOTIFY_OK;
4097 + }
4098 +
4099 +-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
4100 ++static struct notifier_block blk_cpu_notifier = {
4101 + .notifier_call = blk_cpu_notify,
4102 + };
4103 +
4104 diff --git a/block/bsg.c b/block/bsg.c
4105 index c0ab25c..9d49f8f 100644
4106 --- a/block/bsg.c
4107 @@ -31529,7 +31816,7 @@ index 0636520..169c1d0 100644
4108 acpi_os_unmap_memory(virt, len);
4109 return 0;
4110 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
4111 -index 8e3c46d..c139b99 100644
4112 +index 7795d1e..bc6d80a 100644
4113 --- a/drivers/char/virtio_console.c
4114 +++ b/drivers/char/virtio_console.c
4115 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
4116 @@ -31550,6 +31837,32 @@ index 8e3c46d..c139b99 100644
4117 }
4118
4119 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
4120 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
4121 +index 987a165..4620e42 100644
4122 +--- a/drivers/cpufreq/cpufreq.c
4123 ++++ b/drivers/cpufreq/cpufreq.c
4124 +@@ -1790,7 +1790,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
4125 + return NOTIFY_OK;
4126 + }
4127 +
4128 +-static struct notifier_block __refdata cpufreq_cpu_notifier = {
4129 ++static struct notifier_block cpufreq_cpu_notifier = {
4130 + .notifier_call = cpufreq_cpu_callback,
4131 + };
4132 +
4133 +diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
4134 +index c5072a9..d5f3bf1 100644
4135 +--- a/drivers/cpufreq/cpufreq_stats.c
4136 ++++ b/drivers/cpufreq/cpufreq_stats.c
4137 +@@ -341,7 +341,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
4138 + }
4139 +
4140 + /* priority=1 so this will get called before cpufreq_remove_dev */
4141 +-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
4142 ++static struct notifier_block cpufreq_stat_cpu_notifier = {
4143 + .notifier_call = cpufreq_stat_cpu_callback,
4144 + .priority = 1,
4145 + };
4146 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
4147 index eb1d864..39ee5a7 100644
4148 --- a/drivers/dma/dmatest.c
4149 @@ -31563,6 +31876,19 @@ index eb1d864..39ee5a7 100644
4150 }
4151
4152 pr_info("dmatest: Started %u threads using %s\n",
4153 +diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
4154 +index 81809c2..6409470 100644
4155 +--- a/drivers/dma/shdma.c
4156 ++++ b/drivers/dma/shdma.c
4157 +@@ -1054,7 +1054,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
4158 + return ret;
4159 + }
4160 +
4161 +-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
4162 ++static struct notifier_block sh_dmae_nmi_notifier = {
4163 + .notifier_call = sh_dmae_nmi_handler,
4164 +
4165 + /* Run before NMI debug handler and KGDB */
4166 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
4167 index a9d5482..376077f 100644
4168 --- a/drivers/edac/amd64_edac.c
4169 @@ -32531,7 +32857,7 @@ index 93e74fb..4a1182d 100644
4170 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4171 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
4172 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
4173 -index c05e825..b086c8c 100644
4174 +index 7817429..b6d75d8 100644
4175 --- a/drivers/gpu/drm/i915/intel_display.c
4176 +++ b/drivers/gpu/drm/i915/intel_display.c
4177 @@ -2214,7 +2214,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
4178 @@ -32553,7 +32879,7 @@ index c05e825..b086c8c 100644
4179
4180 wake_up(&dev_priv->pending_flip_queue);
4181 schedule_work(&work->work);
4182 -@@ -7190,7 +7189,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
4183 +@@ -7188,7 +7187,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
4184 OUT_RING(fb->pitch | obj->tiling_mode);
4185 OUT_RING(obj->gtt_offset);
4186
4187 @@ -32567,7 +32893,7 @@ index c05e825..b086c8c 100644
4188 + pf = 0;
4189 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
4190 OUT_RING(pf | pipesrc);
4191 - ADVANCE_LP_RING();
4192 +
4193 @@ -7324,7 +7329,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4194 /* Block clients from rendering to the new back buffer until
4195 * the flip occurs and the object is no longer visible.
4196 @@ -33524,6 +33850,19 @@ index 66f6729..2d6de0a 100644
4197
4198 mutex_lock(&resource->lock);
4199 resource->trip[attr->index - 7] = temp;
4200 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
4201 +index 3d630bb..77756d7 100644
4202 +--- a/drivers/hwmon/coretemp.c
4203 ++++ b/drivers/hwmon/coretemp.c
4204 +@@ -787,7 +787,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
4205 + return NOTIFY_OK;
4206 + }
4207 +
4208 +-static struct notifier_block coretemp_cpu_notifier __refdata = {
4209 ++static struct notifier_block coretemp_cpu_notifier = {
4210 + .notifier_call = coretemp_cpu_callback,
4211 + };
4212 +
4213 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
4214 index 5357925..6cf0418 100644
4215 --- a/drivers/hwmon/sht15.c
4216 @@ -33576,6 +33915,19 @@ index 5357925..6cf0418 100644
4217 return;
4218 }
4219
4220 +diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
4221 +index 8eac67d..d7b2fa5 100644
4222 +--- a/drivers/hwmon/via-cputemp.c
4223 ++++ b/drivers/hwmon/via-cputemp.c
4224 +@@ -304,7 +304,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
4225 + return NOTIFY_OK;
4226 + }
4227 +
4228 +-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
4229 ++static struct notifier_block via_cputemp_cpu_notifier = {
4230 + .notifier_call = via_cputemp_cpu_callback,
4231 + };
4232 +
4233 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
4234 index 378fcb5..5e91fa8 100644
4235 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
4236 @@ -34379,6 +34731,19 @@ index 40c8353..946b0e4 100644
4237 }
4238 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
4239 __func__, stag_state, type, pdid, stag_idx);
4240 +diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
4241 +index e571e60..523c505 100644
4242 +--- a/drivers/infiniband/hw/ehca/ehca_irq.c
4243 ++++ b/drivers/infiniband/hw/ehca/ehca_irq.c
4244 +@@ -883,7 +883,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
4245 + return NOTIFY_OK;
4246 + }
4247 +
4248 +-static struct notifier_block comp_pool_callback_nb __cpuinitdata = {
4249 ++static struct notifier_block comp_pool_callback_nb = {
4250 + .notifier_call = comp_pool_callback,
4251 + .priority = 0,
4252 + };
4253 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
4254 index 79b3dbc..96e5fcc 100644
4255 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
4256 @@ -37105,6 +37470,19 @@ index 61d2bdd..7f1154a 100644
4257 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
4258 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
4259 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
4260 +diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
4261 +index ed79b2d..b17b19d 100644
4262 +--- a/drivers/net/ethernet/ibm/emac/core.c
4263 ++++ b/drivers/net/ethernet/ibm/emac/core.c
4264 +@@ -2309,7 +2309,7 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
4265 + return 0;
4266 + }
4267 +
4268 +-static struct notifier_block emac_of_bus_notifier __devinitdata = {
4269 ++static struct notifier_block emac_of_bus_notifier = {
4270 + .notifier_call = emac_of_bus_notify
4271 + };
4272 +
4273 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
4274 index e1159e5..34efe3e 100644
4275 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
4276 @@ -37348,7 +37726,7 @@ index 49b549f..13d648c 100644
4277
4278 mac->phydev = phydev;
4279 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
4280 -index b8db4cd..41bf50c 100644
4281 +index a6153f1..4bdf0c8 100644
4282 --- a/drivers/net/ethernet/realtek/r8169.c
4283 +++ b/drivers/net/ethernet/realtek/r8169.c
4284 @@ -704,17 +704,17 @@ struct rtl8169_private {
4285 @@ -37422,6 +37800,19 @@ index d4d2bc1..14b8672 100644
4286 };
4287
4288 static int stmmac_init_fs(struct net_device *dev)
4289 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
4290 +index 97f342e..63fee4d 100644
4291 +--- a/drivers/net/macvlan.c
4292 ++++ b/drivers/net/macvlan.c
4293 +@@ -850,7 +850,7 @@ static int macvlan_device_event(struct notifier_block *unused,
4294 + return NOTIFY_DONE;
4295 + }
4296 +
4297 +-static struct notifier_block macvlan_notifier_block __read_mostly = {
4298 ++static struct notifier_block macvlan_notifier_block = {
4299 + .notifier_call = macvlan_device_event,
4300 + };
4301 +
4302 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
4303 index 26106c0..4046553 100644
4304 --- a/drivers/net/macvtap.c
4305 @@ -38429,6 +38820,19 @@ index 2f0aa0f..90fab02 100644
4306 {
4307 return __oprofilefs_create_file(sb, root, name,
4308 &atomic_ro_fops, 0444, val);
4309 +diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
4310 +index 878fba1..2084bcf 100644
4311 +--- a/drivers/oprofile/timer_int.c
4312 ++++ b/drivers/oprofile/timer_int.c
4313 +@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
4314 + return NOTIFY_OK;
4315 + }
4316 +
4317 +-static struct notifier_block __refdata oprofile_cpu_notifier = {
4318 ++static struct notifier_block oprofile_cpu_notifier = {
4319 + .notifier_call = oprofile_cpu_notify,
4320 + };
4321 +
4322 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
4323 index 3f56bc0..707d642 100644
4324 --- a/drivers/parport/procfs.c
4325 @@ -52461,7 +52865,7 @@ index dba43c3..7511af2 100644
4326
4327 if (op) {
4328 diff --git a/fs/splice.c b/fs/splice.c
4329 -index 014fcb4..980206f 100644
4330 +index 58ab918..e471089 100644
4331 --- a/fs/splice.c
4332 +++ b/fs/splice.c
4333 @@ -195,7 +195,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
4334 @@ -52512,7 +52916,7 @@ index 014fcb4..980206f 100644
4335 vec[i].iov_len = this_len;
4336 spd.pages[i] = page;
4337 spd.nr_pages++;
4338 -@@ -853,10 +853,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
4339 +@@ -855,10 +855,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
4340 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
4341 {
4342 while (!pipe->nrbufs) {
4343 @@ -52525,7 +52929,7 @@ index 014fcb4..980206f 100644
4344 return 0;
4345
4346 if (sd->flags & SPLICE_F_NONBLOCK)
4347 -@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
4348 +@@ -1191,7 +1191,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
4349 * out of the pipe right after the splice_to_pipe(). So set
4350 * PIPE_READERS appropriately.
4351 */
4352 @@ -52534,7 +52938,7 @@ index 014fcb4..980206f 100644
4353
4354 current->splice_pipe = pipe;
4355 }
4356 -@@ -1742,9 +1742,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4357 +@@ -1744,9 +1744,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4358 ret = -ERESTARTSYS;
4359 break;
4360 }
4361 @@ -52546,7 +52950,7 @@ index 014fcb4..980206f 100644
4362 if (flags & SPLICE_F_NONBLOCK) {
4363 ret = -EAGAIN;
4364 break;
4365 -@@ -1776,7 +1776,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4366 +@@ -1778,7 +1778,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4367 pipe_lock(pipe);
4368
4369 while (pipe->nrbufs >= pipe->buffers) {
4370 @@ -52555,7 +52959,7 @@ index 014fcb4..980206f 100644
4371 send_sig(SIGPIPE, current, 0);
4372 ret = -EPIPE;
4373 break;
4374 -@@ -1789,9 +1789,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4375 +@@ -1791,9 +1791,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
4376 ret = -ERESTARTSYS;
4377 break;
4378 }
4379 @@ -52567,7 +52971,7 @@ index 014fcb4..980206f 100644
4380 }
4381
4382 pipe_unlock(pipe);
4383 -@@ -1827,14 +1827,14 @@ retry:
4384 +@@ -1829,14 +1829,14 @@ retry:
4385 pipe_double_lock(ipipe, opipe);
4386
4387 do {
4388 @@ -52584,7 +52988,7 @@ index 014fcb4..980206f 100644
4389 break;
4390
4391 /*
4392 -@@ -1931,7 +1931,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
4393 +@@ -1933,7 +1933,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
4394 pipe_double_lock(ipipe, opipe);
4395
4396 do {
4397 @@ -52593,7 +52997,7 @@ index 014fcb4..980206f 100644
4398 send_sig(SIGPIPE, current, 0);
4399 if (!ret)
4400 ret = -EPIPE;
4401 -@@ -1976,7 +1976,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
4402 +@@ -1978,7 +1978,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
4403 * return EAGAIN if we have the potential of some data in the
4404 * future, otherwise just return 0
4405 */
4406 @@ -64013,6 +64417,19 @@ index 320d6c9..89f1e77 100644
4407 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
4408
4409 #endif /* __LINUX_COMPILER_H */
4410 +diff --git a/include/linux/cpu.h b/include/linux/cpu.h
4411 +index c692acc..95bcc75 100644
4412 +--- a/include/linux/cpu.h
4413 ++++ b/include/linux/cpu.h
4414 +@@ -108,7 +108,7 @@ enum {
4415 + /* Need to know about CPUs going up/down? */
4416 + #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
4417 + #define cpu_notifier(fn, pri) { \
4418 +- static struct notifier_block fn##_nb __cpuinitdata = \
4419 ++ static struct notifier_block fn##_nb = \
4420 + { .notifier_call = fn, .priority = pri }; \
4421 + register_cpu_notifier(&fn##_nb); \
4422 + }
4423 diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
4424 index b936763..48685ee 100644
4425 --- a/include/linux/crash_dump.h
4426 @@ -66467,7 +66884,7 @@ index 4633b2f..988bc08 100644
4427 atomic_t refcnt;
4428 unsigned int max_seq_nr;
4429 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
4430 -index b669be6..8335421 100644
4431 +index b669be6..22773f5 100644
4432 --- a/include/linux/perf_event.h
4433 +++ b/include/linux/perf_event.h
4434 @@ -748,8 +748,8 @@ struct perf_event {
4435 @@ -66492,6 +66909,15 @@ index b669be6..8335421 100644
4436
4437 /*
4438 * Protect attach/detach and child_list:
4439 +@@ -1193,7 +1193,7 @@ static inline void perf_event_task_tick(void) { }
4440 + */
4441 + #define perf_cpu_notifier(fn) \
4442 + do { \
4443 +- static struct notifier_block fn##_nb __cpuinitdata = \
4444 ++ static struct notifier_block fn##_nb = \
4445 + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
4446 + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
4447 + (void *)(unsigned long)smp_processor_id()); \
4448 diff --git a/include/linux/personality.h b/include/linux/personality.h
4449 index 8fc7dd1a..c19d89e 100644
4450 --- a/include/linux/personality.h
4451 @@ -66770,7 +67196,7 @@ index 2148b12..519b820 100644
4452
4453 static inline void anon_vma_merge(struct vm_area_struct *vma,
4454 diff --git a/include/linux/sched.h b/include/linux/sched.h
4455 -index 1e86bb4..bcc2c30 100644
4456 +index 8204898..bcc2c30 100644
4457 --- a/include/linux/sched.h
4458 +++ b/include/linux/sched.h
4459 @@ -101,6 +101,7 @@ struct bio_list;
4460 @@ -67033,24 +67459,6 @@ index 1e86bb4..bcc2c30 100644
4461
4462 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
4463 }
4464 -@@ -2597,7 +2713,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
4465 - extern void recalc_sigpending_and_wake(struct task_struct *t);
4466 - extern void recalc_sigpending(void);
4467 -
4468 --extern void signal_wake_up(struct task_struct *t, int resume_stopped);
4469 -+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
4470 -+
4471 -+static inline void signal_wake_up(struct task_struct *t, bool resume)
4472 -+{
4473 -+ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
4474 -+}
4475 -+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
4476 -+{
4477 -+ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
4478 -+}
4479 -
4480 - /*
4481 - * Wrappers for p->thread_info->cpu access. No-op on UP.
4482 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
4483 index 899fbb4..1cb4138 100644
4484 --- a/include/linux/screen_info.h
4485 @@ -67195,7 +67603,7 @@ index 53dc7e7..e353d6b 100644
4486 int size);
4487 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
4488 diff --git a/include/linux/slab.h b/include/linux/slab.h
4489 -index 573c809..0ae46e1 100644
4490 +index 573c809..d11cea2 100644
4491 --- a/include/linux/slab.h
4492 +++ b/include/linux/slab.h
4493 @@ -11,12 +11,20 @@
4494 @@ -67252,26 +67660,15 @@ index 573c809..0ae46e1 100644
4495
4496 /*
4497 * Allocator specific definitions. These are mainly used to establish optimized
4498 -@@ -240,8 +253,18 @@ size_t ksize(const void *);
4499 +@@ -240,6 +253,7 @@ size_t ksize(const void *);
4500 * for general use, and so are not documented here. For a full list of
4501 * potential flags, always refer to linux/gfp.h.
4502 */
4503 +
4504 -+extern void kcalloc_error(void)
4505 -+#if defined(CONFIG_GCOV_KERNEL) && defined(CONFIG_PAX_SIZE_OVERFLOW)
4506 -+__compiletime_warning("kcalloc called with swapped arguments?");
4507 -+#else
4508 -+__compiletime_error("kcalloc called with swapped arguments?");
4509 -+#endif
4510 -+
4511 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
4512 {
4513 -+ if (__builtin_constant_p(n) && !__builtin_constant_p(size))
4514 -+ kcalloc_error();
4515 if (size != 0 && n > ULONG_MAX / size)
4516 - return NULL;
4517 - return __kmalloc(n * size, flags | __GFP_ZERO);
4518 -@@ -287,7 +310,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
4519 +@@ -287,7 +301,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
4520 */
4521 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
4522 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
4523 @@ -67280,7 +67677,7 @@ index 573c809..0ae46e1 100644
4524 #define kmalloc_track_caller(size, flags) \
4525 __kmalloc_track_caller(size, flags, _RET_IP_)
4526 #else
4527 -@@ -306,7 +329,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
4528 +@@ -306,7 +320,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
4529 */
4530 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
4531 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
4532 @@ -70469,7 +70866,7 @@ index 9b22d03..6295b62 100644
4533 prev->next = info->next;
4534 else
4535 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
4536 -index 6db7a5e..25b6648 100644
4537 +index 6db7a5e..0d600bd 100644
4538 --- a/kernel/hrtimer.c
4539 +++ b/kernel/hrtimer.c
4540 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
4541 @@ -70481,6 +70878,15 @@ index 6db7a5e..25b6648 100644
4542 {
4543 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
4544
4545 +@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
4546 + return NOTIFY_OK;
4547 + }
4548 +
4549 +-static struct notifier_block __cpuinitdata hrtimers_nb = {
4550 ++static struct notifier_block hrtimers_nb = {
4551 + .notifier_call = hrtimer_cpu_notify,
4552 + };
4553 +
4554 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
4555 index 66ff710..794bc5a 100644
4556 --- a/kernel/jump_label.c
4557 @@ -72221,92 +72627,10 @@ index 76b8e77..a2930e8 100644
4558 }
4559
4560 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
4561 -index 78ab24a..5333587 100644
4562 +index 67fedad..5333587 100644
4563 --- a/kernel/ptrace.c
4564 +++ b/kernel/ptrace.c
4565 -@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
4566 - * TASK_KILLABLE sleeps.
4567 - */
4568 - if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
4569 -- signal_wake_up(child, task_is_traced(child));
4570 -+ ptrace_signal_wake_up(child, true);
4571 -
4572 - spin_unlock(&child->sighand->siglock);
4573 - }
4574 -
4575 -+/* Ensure that nothing can wake it up, even SIGKILL */
4576 -+static bool ptrace_freeze_traced(struct task_struct *task)
4577 -+{
4578 -+ bool ret = false;
4579 -+
4580 -+ /* Lockless, nobody but us can set this flag */
4581 -+ if (task->jobctl & JOBCTL_LISTENING)
4582 -+ return ret;
4583 -+
4584 -+ spin_lock_irq(&task->sighand->siglock);
4585 -+ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
4586 -+ task->state = __TASK_TRACED;
4587 -+ ret = true;
4588 -+ }
4589 -+ spin_unlock_irq(&task->sighand->siglock);
4590 -+
4591 -+ return ret;
4592 -+}
4593 -+
4594 -+static void ptrace_unfreeze_traced(struct task_struct *task)
4595 -+{
4596 -+ if (task->state != __TASK_TRACED)
4597 -+ return;
4598 -+
4599 -+ WARN_ON(!task->ptrace || task->parent != current);
4600 -+
4601 -+ spin_lock_irq(&task->sighand->siglock);
4602 -+ if (__fatal_signal_pending(task))
4603 -+ wake_up_state(task, __TASK_TRACED);
4604 -+ else
4605 -+ task->state = TASK_TRACED;
4606 -+ spin_unlock_irq(&task->sighand->siglock);
4607 -+}
4608 -+
4609 - /**
4610 - * ptrace_check_attach - check whether ptracee is ready for ptrace operation
4611 - * @child: ptracee to check for
4612 -@@ -151,28 +185,34 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
4613 - * be changed by us so it's not changing right after this.
4614 - */
4615 - read_lock(&tasklist_lock);
4616 -- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
4617 -+ if (child->ptrace && child->parent == current) {
4618 -+ WARN_ON(child->state == __TASK_TRACED);
4619 - /*
4620 - * child->sighand can't be NULL, release_task()
4621 - * does ptrace_unlink() before __exit_signal().
4622 - */
4623 -- spin_lock_irq(&child->sighand->siglock);
4624 -- WARN_ON_ONCE(task_is_stopped(child));
4625 -- if (ignore_state || (task_is_traced(child) &&
4626 -- !(child->jobctl & JOBCTL_LISTENING)))
4627 -+ if (ignore_state || ptrace_freeze_traced(child))
4628 - ret = 0;
4629 -- spin_unlock_irq(&child->sighand->siglock);
4630 - }
4631 - read_unlock(&tasklist_lock);
4632 -
4633 -- if (!ret && !ignore_state)
4634 -- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
4635 -+ if (!ret && !ignore_state) {
4636 -+ if (!wait_task_inactive(child, __TASK_TRACED)) {
4637 -+ /*
4638 -+ * This can only happen if may_ptrace_stop() fails and
4639 -+ * ptrace_stop() changes ->state back to TASK_RUNNING,
4640 -+ * so we should not worry about leaking __TASK_TRACED.
4641 -+ */
4642 -+ WARN_ON(child->state == __TASK_TRACED);
4643 -+ ret = -ESRCH;
4644 -+ }
4645 -+ }
4646 -
4647 -- /* All systems go.. */
4648 +@@ -211,7 +211,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
4649 return ret;
4650 }
4651
4652 @@ -72316,7 +72640,7 @@ index 78ab24a..5333587 100644
4653 {
4654 const struct cred *cred = current_cred(), *tcred;
4655
4656 -@@ -198,7 +238,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
4657 +@@ -237,7 +238,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
4658 cred->gid == tcred->sgid &&
4659 cred->gid == tcred->gid))
4660 goto ok;
4661 @@ -72326,7 +72650,7 @@ index 78ab24a..5333587 100644
4662 goto ok;
4663 rcu_read_unlock();
4664 return -EPERM;
4665 -@@ -207,7 +248,9 @@ ok:
4666 +@@ -246,7 +248,9 @@ ok:
4667 smp_rmb();
4668 if (task->mm)
4669 dumpable = get_dumpable(task->mm);
4670 @@ -72337,7 +72661,7 @@ index 78ab24a..5333587 100644
4671 return -EPERM;
4672
4673 return security_ptrace_access_check(task, mode);
4674 -@@ -217,7 +260,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
4675 +@@ -256,7 +260,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
4676 {
4677 int err;
4678 task_lock(task);
4679 @@ -72360,7 +72684,7 @@ index 78ab24a..5333587 100644
4680 task_unlock(task);
4681 return !err;
4682 }
4683 -@@ -262,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4684 +@@ -301,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4685 goto out;
4686
4687 task_lock(task);
4688 @@ -72369,7 +72693,7 @@ index 78ab24a..5333587 100644
4689 task_unlock(task);
4690 if (retval)
4691 goto unlock_creds;
4692 -@@ -277,7 +334,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4693 +@@ -316,7 +334,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4694 task->ptrace = PT_PTRACED;
4695 if (seize)
4696 task->ptrace |= PT_SEIZED;
4697 @@ -72378,16 +72702,7 @@ index 78ab24a..5333587 100644
4698 task->ptrace |= PT_PTRACE_CAP;
4699
4700 __ptrace_link(task, current);
4701 -@@ -307,7 +364,7 @@ static int ptrace_attach(struct task_struct *task, long request,
4702 - */
4703 - if (task_is_stopped(task) &&
4704 - task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
4705 -- signal_wake_up(task, 1);
4706 -+ signal_wake_up_state(task, __TASK_STOPPED);
4707 -
4708 - spin_unlock(&task->sighand->siglock);
4709 -
4710 -@@ -483,7 +540,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
4711 +@@ -522,7 +540,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
4712 break;
4713 return -EIO;
4714 }
4715 @@ -72396,7 +72711,7 @@ index 78ab24a..5333587 100644
4716 return -EFAULT;
4717 copied += retval;
4718 src += retval;
4719 -@@ -680,7 +737,7 @@ int ptrace_request(struct task_struct *child, long request,
4720 +@@ -719,7 +737,7 @@ int ptrace_request(struct task_struct *child, long request,
4721 bool seized = child->ptrace & PT_SEIZED;
4722 int ret = -EIO;
4723 siginfo_t siginfo, *si;
4724 @@ -72405,25 +72720,7 @@ index 78ab24a..5333587 100644
4725 unsigned long __user *datalp = datavp;
4726 unsigned long flags;
4727
4728 -@@ -736,7 +793,7 @@ int ptrace_request(struct task_struct *child, long request,
4729 - * tracee into STOP.
4730 - */
4731 - if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
4732 -- signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
4733 -+ ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
4734 -
4735 - unlock_task_sighand(child, &flags);
4736 - ret = 0;
4737 -@@ -762,7 +819,7 @@ int ptrace_request(struct task_struct *child, long request,
4738 - * start of this trap and now. Trigger re-trap.
4739 - */
4740 - if (child->jobctl & JOBCTL_TRAP_NOTIFY)
4741 -- signal_wake_up(child, true);
4742 -+ ptrace_signal_wake_up(child, true);
4743 - ret = 0;
4744 - }
4745 - unlock_task_sighand(child, &flags);
4746 -@@ -882,14 +939,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
4747 +@@ -921,14 +939,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
4748 goto out;
4749 }
4750
4751 @@ -72446,16 +72743,7 @@ index 78ab24a..5333587 100644
4752 goto out_put_task_struct;
4753 }
4754
4755 -@@ -899,6 +963,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
4756 - goto out_put_task_struct;
4757 -
4758 - ret = arch_ptrace(child, request, addr, data);
4759 -+ if (ret || request != PTRACE_DETACH)
4760 -+ ptrace_unfreeze_traced(child);
4761 -
4762 - out_put_task_struct:
4763 - put_task_struct(child);
4764 -@@ -915,7 +981,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
4765 +@@ -956,7 +981,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
4766 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
4767 if (copied != sizeof(tmp))
4768 return -EIO;
4769 @@ -72464,7 +72752,7 @@ index 78ab24a..5333587 100644
4770 }
4771
4772 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
4773 -@@ -1025,21 +1091,31 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
4774 +@@ -1066,14 +1091,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
4775 goto out;
4776 }
4777
4778 @@ -72487,17 +72775,6 @@ index 78ab24a..5333587 100644
4779 goto out_put_task_struct;
4780 }
4781
4782 - ret = ptrace_check_attach(child, request == PTRACE_KILL ||
4783 - request == PTRACE_INTERRUPT);
4784 -- if (!ret)
4785 -+ if (!ret) {
4786 - ret = compat_arch_ptrace(child, request, addr, data);
4787 -+ if (ret || request != PTRACE_DETACH)
4788 -+ ptrace_unfreeze_traced(child);
4789 -+ }
4790 -
4791 - out_put_task_struct:
4792 - put_task_struct(child);
4793 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
4794 index 636af6d..8af70ab 100644
4795 --- a/kernel/rcutiny.c
4796 @@ -72894,7 +73171,7 @@ index 9feffa4..54058df 100644
4797 rdp->dynticks->dynticks_nmi_nesting,
4798 rdp->dynticks_fqs);
4799 diff --git a/kernel/resource.c b/kernel/resource.c
4800 -index 7640b3a..5879283 100644
4801 +index 08aa28e..b958c1c 100644
4802 --- a/kernel/resource.c
4803 +++ b/kernel/resource.c
4804 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
4805 @@ -73011,20 +73288,10 @@ index 3d9f31c..7fefc9e 100644
4806
4807 default:
4808 diff --git a/kernel/sched.c b/kernel/sched.c
4809 -index fcc893f..223b418 100644
4810 +index eeeec4e..403ccf3 100644
4811 --- a/kernel/sched.c
4812 +++ b/kernel/sched.c
4813 -@@ -2924,7 +2924,8 @@ out:
4814 - */
4815 - int wake_up_process(struct task_struct *p)
4816 - {
4817 -- return try_to_wake_up(p, TASK_ALL, 0);
4818 -+ WARN_ON(task_is_stopped_or_traced(p));
4819 -+ return try_to_wake_up(p, TASK_NORMAL, 0);
4820 - }
4821 - EXPORT_SYMBOL(wake_up_process);
4822 -
4823 -@@ -5290,6 +5291,8 @@ int can_nice(const struct task_struct *p, const int nice)
4824 +@@ -5291,6 +5291,8 @@ int can_nice(const struct task_struct *p, const int nice)
4825 /* convert nice value [19,-20] to rlimit style value [1,40] */
4826 int nice_rlim = 20 - nice;
4827
4828 @@ -73033,7 +73300,7 @@ index fcc893f..223b418 100644
4829 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
4830 capable(CAP_SYS_NICE));
4831 }
4832 -@@ -5323,7 +5326,8 @@ SYSCALL_DEFINE1(nice, int, increment)
4833 +@@ -5324,7 +5326,8 @@ SYSCALL_DEFINE1(nice, int, increment)
4834 if (nice > 19)
4835 nice = 19;
4836
4837 @@ -73043,7 +73310,7 @@ index fcc893f..223b418 100644
4838 return -EPERM;
4839
4840 retval = security_task_setnice(current, nice);
4841 -@@ -5480,6 +5484,7 @@ recheck:
4842 +@@ -5481,6 +5484,7 @@ recheck:
4843 unsigned long rlim_rtprio =
4844 task_rlimit(p, RLIMIT_RTPRIO);
4845
4846 @@ -73051,6 +73318,15 @@ index fcc893f..223b418 100644
4847 /* can't set/change the rt policy */
4848 if (policy != p->policy && !rlim_rtprio)
4849 return -EPERM;
4850 +@@ -6875,7 +6879,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
4851 + * happens before everything else. This has to be lower priority than
4852 + * the notifier in the perf_event subsystem, though.
4853 + */
4854 +-static struct notifier_block __cpuinitdata migration_notifier = {
4855 ++static struct notifier_block migration_notifier = {
4856 + .notifier_call = migration_call,
4857 + .priority = CPU_PRI_MIGRATION,
4858 + };
4859 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
4860 index f280df1..da1281d 100644
4861 --- a/kernel/sched_autogroup.c
4862 @@ -73087,7 +73363,7 @@ index 66e4576..d05c6d5 100644
4863 int this_cpu = smp_processor_id();
4864 struct rq *this_rq = cpu_rq(this_cpu);
4865 diff --git a/kernel/signal.c b/kernel/signal.c
4866 -index 08e0b97..4dc47a0 100644
4867 +index d2f55ea..4dc47a0 100644
4868 --- a/kernel/signal.c
4869 +++ b/kernel/signal.c
4870 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
4871 @@ -73133,34 +73409,7 @@ index 08e0b97..4dc47a0 100644
4872 if (is_global_init(tsk))
4873 return 1;
4874 if (handler != SIG_IGN && handler != SIG_DFL)
4875 -@@ -676,23 +679,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
4876 - * No need to set need_resched since signal event passing
4877 - * goes through ->blocked
4878 - */
4879 --void signal_wake_up(struct task_struct *t, int resume)
4880 -+void signal_wake_up_state(struct task_struct *t, unsigned int state)
4881 - {
4882 -- unsigned int mask;
4883 --
4884 - set_tsk_thread_flag(t, TIF_SIGPENDING);
4885 --
4886 - /*
4887 -- * For SIGKILL, we want to wake it up in the stopped/traced/killable
4888 -+ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
4889 - * case. We don't check t->state here because there is a race with it
4890 - * executing another processor and just now entering stopped state.
4891 - * By using wake_up_state, we ensure the process will wake up and
4892 - * handle its death signal.
4893 - */
4894 -- mask = TASK_INTERRUPTIBLE;
4895 -- if (resume)
4896 -- mask |= TASK_WAKEKILL;
4897 -- if (!wake_up_state(t, mask))
4898 -+ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
4899 - kick_process(t);
4900 - }
4901 -
4902 -@@ -815,6 +812,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
4903 +@@ -809,6 +812,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
4904 }
4905 }
4906
4907 @@ -73174,16 +73423,7 @@ index 08e0b97..4dc47a0 100644
4908 return security_task_kill(t, info, sig, 0);
4909 }
4910
4911 -@@ -841,7 +845,7 @@ static void ptrace_trap_notify(struct task_struct *t)
4912 - assert_spin_locked(&t->sighand->siglock);
4913 -
4914 - task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
4915 -- signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
4916 -+ ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
4917 - }
4918 -
4919 - /*
4920 -@@ -1165,7 +1169,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
4921 +@@ -1159,7 +1169,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
4922 return send_signal(sig, info, p, 1);
4923 }
4924
4925 @@ -73192,7 +73432,7 @@ index 08e0b97..4dc47a0 100644
4926 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
4927 {
4928 return send_signal(sig, info, t, 0);
4929 -@@ -1202,6 +1206,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
4930 +@@ -1196,6 +1206,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
4931 unsigned long int flags;
4932 int ret, blocked, ignored;
4933 struct k_sigaction *action;
4934 @@ -73200,7 +73440,7 @@ index 08e0b97..4dc47a0 100644
4935
4936 spin_lock_irqsave(&t->sighand->siglock, flags);
4937 action = &t->sighand->action[sig-1];
4938 -@@ -1216,9 +1221,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
4939 +@@ -1210,9 +1221,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
4940 }
4941 if (action->sa.sa_handler == SIG_DFL)
4942 t->signal->flags &= ~SIGNAL_UNKILLABLE;
4943 @@ -73219,7 +73459,7 @@ index 08e0b97..4dc47a0 100644
4944 return ret;
4945 }
4946
4947 -@@ -1285,8 +1299,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
4948 +@@ -1279,8 +1299,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
4949 ret = check_kill_permission(sig, info, p);
4950 rcu_read_unlock();
4951
4952 @@ -73232,26 +73472,7 @@ index 08e0b97..4dc47a0 100644
4953
4954 return ret;
4955 }
4956 -@@ -1765,6 +1782,10 @@ static inline int may_ptrace_stop(void)
4957 - * If SIGKILL was already sent before the caller unlocked
4958 - * ->siglock we must see ->core_state != NULL. Otherwise it
4959 - * is safe to enter schedule().
4960 -+ *
4961 -+ * This is almost outdated, a task with the pending SIGKILL can't
4962 -+ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
4963 -+ * after SIGKILL was already dequeued.
4964 - */
4965 - if (unlikely(current->mm->core_state) &&
4966 - unlikely(current->mm == current->parent->mm))
4967 -@@ -1890,6 +1911,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
4968 - if (gstop_done)
4969 - do_notify_parent_cldstop(current, false, why);
4970 -
4971 -+ /* tasklist protects us from ptrace_freeze_traced() */
4972 - __set_current_state(TASK_RUNNING);
4973 - if (clear_code)
4974 - current->exit_code = 0;
4975 -@@ -2763,7 +2785,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
4976 +@@ -2762,7 +2785,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
4977 int error = -ESRCH;
4978
4979 rcu_read_lock();
4980 @@ -73269,9 +73490,18 @@ index 08e0b97..4dc47a0 100644
4981 error = check_kill_permission(sig, info, p);
4982 /*
4983 diff --git a/kernel/smp.c b/kernel/smp.c
4984 -index 9e800b2..451c00b 100644
4985 +index 9e800b2..1533ba5 100644
4986 --- a/kernel/smp.c
4987 +++ b/kernel/smp.c
4988 +@@ -75,7 +75,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
4989 + return NOTIFY_OK;
4990 + }
4991 +
4992 +-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
4993 ++static struct notifier_block hotplug_cfd_notifier = {
4994 + .notifier_call = hotplug_cfd,
4995 + };
4996 +
4997 @@ -591,22 +591,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
4998 }
4999 EXPORT_SYMBOL(smp_call_function);
5000 @@ -73300,7 +73530,7 @@ index 9e800b2..451c00b 100644
5001 raw_spin_unlock_irq(&call_function.lock);
5002 }
5003 diff --git a/kernel/softirq.c b/kernel/softirq.c
5004 -index 2c71d91..2c2ecef 100644
5005 +index 2c71d91..f6c64a4 100644
5006 --- a/kernel/softirq.c
5007 +++ b/kernel/softirq.c
5008 @@ -52,11 +52,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
5009 @@ -73353,6 +73583,37 @@ index 2c71d91..2c2ecef 100644
5010 {
5011 struct tasklet_struct *list;
5012
5013 +@@ -712,7 +712,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
5014 + return NOTIFY_OK;
5015 + }
5016 +
5017 +-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
5018 ++static struct notifier_block remote_softirq_cpu_notifier = {
5019 + .notifier_call = remote_softirq_cpu_notify,
5020 + };
5021 +
5022 +@@ -894,7 +894,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
5023 + return NOTIFY_OK;
5024 + }
5025 +
5026 +-static struct notifier_block __cpuinitdata cpu_nfb = {
5027 ++static struct notifier_block cpu_nfb = {
5028 + .notifier_call = cpu_callback
5029 + };
5030 +
5031 +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
5032 +index 2f194e9..2c05ea9 100644
5033 +--- a/kernel/stop_machine.c
5034 ++++ b/kernel/stop_machine.c
5035 +@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
5036 + * cpu notifiers. It currently shares the same priority as sched
5037 + * migration_notifier.
5038 + */
5039 +-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
5040 ++static struct notifier_block cpu_stop_cpu_notifier = {
5041 + .notifier_call = cpu_stop_cpu_callback,
5042 + .priority = 10,
5043 + };
5044 diff --git a/kernel/sys.c b/kernel/sys.c
5045 index f5939c2..110dc5d 100644
5046 --- a/kernel/sys.c
5047 @@ -74078,7 +74339,7 @@ index 0b537f2..40d6c20 100644
5048 return -ENOMEM;
5049 return 0;
5050 diff --git a/kernel/timer.c b/kernel/timer.c
5051 -index c219db6..815c225 100644
5052 +index c219db6..90f3084 100644
5053 --- a/kernel/timer.c
5054 +++ b/kernel/timer.c
5055 @@ -1306,7 +1306,7 @@ void update_process_times(int user_tick)
5056 @@ -74095,7 +74356,7 @@ index c219db6..815c225 100644
5057 }
5058
5059 -static struct notifier_block __cpuinitdata timers_nb = {
5060 -+static struct notifier_block __cpuinitconst timers_nb = {
5061 ++static struct notifier_block timers_nb = {
5062 .notifier_call = timer_cpu_notify,
5063 };
5064
5065 @@ -74699,6 +74960,19 @@ index 209b379..7f76423 100644
5066 tsk->comm);
5067 put_task_struct(tsk);
5068 }
5069 +diff --git a/kernel/watchdog.c b/kernel/watchdog.c
5070 +index a8bc4d9..eae8357 100644
5071 +--- a/kernel/watchdog.c
5072 ++++ b/kernel/watchdog.c
5073 +@@ -574,7 +574,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
5074 + return NOTIFY_OK;
5075 + }
5076 +
5077 +-static struct notifier_block __cpuinitdata cpu_nfb = {
5078 ++static struct notifier_block cpu_nfb = {
5079 + .notifier_call = cpu_callback
5080 + };
5081 +
5082 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5083 index 7bf068a..1323074 100644
5084 --- a/kernel/workqueue.c
5085 @@ -78315,8 +78589,21 @@ index f59e170..34e2a2b 100644
5086 *region = *vma->vm_region;
5087 new->vm_region = region;
5088
5089 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
5090 +index 50f0824..97710b4 100644
5091 +--- a/mm/page-writeback.c
5092 ++++ b/mm/page-writeback.c
5093 +@@ -1380,7 +1380,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
5094 + return NOTIFY_DONE;
5095 + }
5096 +
5097 +-static struct notifier_block __cpuinitdata ratelimit_nb = {
5098 ++static struct notifier_block ratelimit_nb = {
5099 + .notifier_call = ratelimit_handler,
5100 + .next = NULL,
5101 + };
5102 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5103 -index 4d3a697..29ecee3 100644
5104 +index 4d3a697..4f0e54f 100644
5105 --- a/mm/page_alloc.c
5106 +++ b/mm/page_alloc.c
5107 @@ -341,7 +341,7 @@ out:
5108 @@ -78377,6 +78664,52 @@ index 4d3a697..29ecee3 100644
5109 return 1;
5110 }
5111 return 0;
5112 +@@ -4253,10 +4271,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
5113 + * round what is now in bits to nearest long in bits, then return it in
5114 + * bytes.
5115 + */
5116 +-static unsigned long __init usemap_size(unsigned long zonesize)
5117 ++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
5118 + {
5119 + unsigned long usemapsize;
5120 +
5121 ++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
5122 + usemapsize = roundup(zonesize, pageblock_nr_pages);
5123 + usemapsize = usemapsize >> pageblock_order;
5124 + usemapsize *= NR_PAGEBLOCK_BITS;
5125 +@@ -4266,17 +4285,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
5126 + }
5127 +
5128 + static void __init setup_usemap(struct pglist_data *pgdat,
5129 +- struct zone *zone, unsigned long zonesize)
5130 ++ struct zone *zone,
5131 ++ unsigned long zone_start_pfn,
5132 ++ unsigned long zonesize)
5133 + {
5134 +- unsigned long usemapsize = usemap_size(zonesize);
5135 ++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
5136 + zone->pageblock_flags = NULL;
5137 + if (usemapsize)
5138 + zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
5139 + usemapsize);
5140 + }
5141 + #else
5142 +-static inline void setup_usemap(struct pglist_data *pgdat,
5143 +- struct zone *zone, unsigned long zonesize) {}
5144 ++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
5145 ++ unsigned long zone_start_pfn, unsigned long zonesize) {}
5146 + #endif /* CONFIG_SPARSEMEM */
5147 +
5148 + #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
5149 +@@ -4401,7 +4422,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
5150 + continue;
5151 +
5152 + set_pageblock_order();
5153 +- setup_usemap(pgdat, zone, size);
5154 ++ setup_usemap(pgdat, zone, zone_start_pfn, size);
5155 + ret = init_currently_empty_zone(zone, zone_start_pfn,
5156 + size, MEMMAP_EARLY);
5157 + BUG_ON(ret);
5158 diff --git a/mm/percpu.c b/mm/percpu.c
5159 index 5c29750..99f6386 100644
5160 --- a/mm/percpu.c
5161 @@ -78617,7 +78950,7 @@ index 12b9e80..5118865 100644
5162 return -ENOMEM;
5163
5164 diff --git a/mm/slab.c b/mm/slab.c
5165 -index 4c3b671..fb969ec 100644
5166 +index 4c3b671..40fa2eb 100644
5167 --- a/mm/slab.c
5168 +++ b/mm/slab.c
5169 @@ -151,7 +151,7 @@
5170 @@ -78700,6 +79033,15 @@ index 4c3b671..fb969ec 100644
5171 return csizep->cs_cachep;
5172 }
5173
5174 +@@ -1370,7 +1377,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
5175 + return notifier_from_errno(err);
5176 + }
5177 +
5178 +-static struct notifier_block __cpuinitdata cpucache_notifier = {
5179 ++static struct notifier_block cpucache_notifier = {
5180 + &cpuup_callback, NULL, 0
5181 + };
5182 +
5183 @@ -1572,7 +1579,7 @@ void __init kmem_cache_init(void)
5184 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
5185 sizes[INDEX_AC].cs_size,
5186 @@ -79186,7 +79528,7 @@ index 8105be4..8d6cd07 100644
5187 EXPORT_SYMBOL(kmem_cache_free);
5188
5189 diff --git a/mm/slub.c b/mm/slub.c
5190 -index 5710788..dffead9 100644
5191 +index 5710788..dbb5d49 100644
5192 --- a/mm/slub.c
5193 +++ b/mm/slub.c
5194 @@ -208,7 +208,7 @@ struct track {
5195 @@ -79409,6 +79751,15 @@ index 5710788..dffead9 100644
5196 goto err;
5197 }
5198 up_write(&slub_lock);
5199 +@@ -3979,7 +4060,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
5200 + return NOTIFY_OK;
5201 + }
5202 +
5203 +-static struct notifier_block __cpuinitdata slab_notifier = {
5204 ++static struct notifier_block slab_notifier = {
5205 + .notifier_call = slab_cpuup_callback
5206 + };
5207 +
5208 @@ -4037,7 +4118,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
5209 }
5210 #endif
5211 @@ -79800,7 +80151,7 @@ index eeba3bb..8555cab 100644
5212 if (v->nr_pages)
5213 seq_printf(m, " pages=%d", v->nr_pages);
5214 diff --git a/mm/vmstat.c b/mm/vmstat.c
5215 -index 8fd603b..cf0d930 100644
5216 +index 8fd603b..495a5a1 100644
5217 --- a/mm/vmstat.c
5218 +++ b/mm/vmstat.c
5219 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
5220 @@ -79830,6 +80181,15 @@ index 8fd603b..cf0d930 100644
5221 }
5222
5223 #endif
5224 +@@ -1193,7 +1193,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
5225 + return NOTIFY_OK;
5226 + }
5227 +
5228 +-static struct notifier_block __cpuinitdata vmstat_notifier =
5229 ++static struct notifier_block vmstat_notifier =
5230 + { &vmstat_cpuup_callback, NULL, 0 };
5231 + #endif
5232 +
5233 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
5234 start_cpu_timer(cpu);
5235 #endif
5236 @@ -81068,6 +81428,47 @@ index 39a2d29..f39c0fe 100644
5237 ---help---
5238 Econet is a fairly old and slow networking protocol mainly used by
5239 Acorn computers to access file and print servers. It uses native
5240 +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
5241 +index 59a7041..060976d 100644
5242 +--- a/net/ipv4/arp.c
5243 ++++ b/net/ipv4/arp.c
5244 +@@ -945,24 +945,25 @@ static void parp_redo(struct sk_buff *skb)
5245 + static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
5246 + struct packet_type *pt, struct net_device *orig_dev)
5247 + {
5248 +- struct arphdr *arp;
5249 ++ const struct arphdr *arp;
5250 +
5251 +- /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
5252 +- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
5253 +- goto freeskb;
5254 +-
5255 +- arp = arp_hdr(skb);
5256 +- if (arp->ar_hln != dev->addr_len ||
5257 +- dev->flags & IFF_NOARP ||
5258 ++ if (dev->flags & IFF_NOARP ||
5259 + skb->pkt_type == PACKET_OTHERHOST ||
5260 +- skb->pkt_type == PACKET_LOOPBACK ||
5261 +- arp->ar_pln != 4)
5262 ++ skb->pkt_type == PACKET_LOOPBACK)
5263 + goto freeskb;
5264 +
5265 + skb = skb_share_check(skb, GFP_ATOMIC);
5266 +- if (skb == NULL)
5267 ++ if (!skb)
5268 + goto out_of_mem;
5269 +
5270 ++ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
5271 ++ if (!pskb_may_pull(skb, arp_hdr_len(dev)))
5272 ++ goto freeskb;
5273 ++
5274 ++ arp = arp_hdr(skb);
5275 ++ if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
5276 ++ goto freeskb;
5277 ++
5278 + memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
5279 +
5280 + return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
5281 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
5282 index e41c40f..26d7e03 100644
5283 --- a/net/ipv4/devinet.c
5284 @@ -81248,7 +81649,7 @@ index a4e7131..fe66a18f 100644
5285
5286 rc = qp->q.fragments && (end - start) > max;
5287 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
5288 -index 0106d25..cc0b33e 100644
5289 +index 3b36002..27e6634 100644
5290 --- a/net/ipv4/ip_sockglue.c
5291 +++ b/net/ipv4/ip_sockglue.c
5292 @@ -1120,7 +1120,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
5293 @@ -81520,10 +81921,10 @@ index 94cdbc5..0cb0063 100644
5294 ts = peer->tcp_ts;
5295 tsage = get_seconds() - peer->tcp_ts_stamp;
5296 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
5297 -index aab8f08..36092b1 100644
5298 +index e865ed1..457805b 100644
5299 --- a/net/ipv4/tcp_input.c
5300 +++ b/net/ipv4/tcp_input.c
5301 -@@ -4727,7 +4727,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5302 +@@ -4732,7 +4732,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5303 * simplifies code)
5304 */
5305 static void
5306 @@ -81532,17 +81933,17 @@ index aab8f08..36092b1 100644
5307 struct sk_buff *head, struct sk_buff *tail,
5308 u32 start, u32 end)
5309 {
5310 -@@ -5542,6 +5542,9 @@ slow_path:
5311 +@@ -5547,6 +5547,9 @@ slow_path:
5312 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5313 goto csum_error;
5314
5315 -+ if (!th->ack)
5316 ++ if (!th->ack && !th->rst)
5317 + goto discard;
5318 +
5319 /*
5320 * Standard slow path.
5321 */
5322 -@@ -5550,7 +5553,7 @@ slow_path:
5323 +@@ -5555,7 +5558,7 @@ slow_path:
5324 return 0;
5325
5326 step5:
5327 @@ -81551,7 +81952,7 @@ index aab8f08..36092b1 100644
5328 goto discard;
5329
5330 /* ts_recent update must be made after we are sure that the packet
5331 -@@ -5786,6 +5789,7 @@ discard:
5332 +@@ -5791,6 +5794,7 @@ discard:
5333 tcp_paws_reject(&tp->rx_opt, 0))
5334 goto discard_and_undo;
5335
5336 @@ -81559,7 +81960,7 @@ index aab8f08..36092b1 100644
5337 if (th->syn) {
5338 /* We see SYN without ACK. It is attempt of
5339 * simultaneous connect with crossed SYNs.
5340 -@@ -5834,6 +5838,7 @@ discard:
5341 +@@ -5839,6 +5843,7 @@ discard:
5342 goto discard;
5343 #endif
5344 }
5345 @@ -81567,7 +81968,7 @@ index aab8f08..36092b1 100644
5346 /* "fifth, if neither of the SYN or RST bits is set then
5347 * drop the segment and return."
5348 */
5349 -@@ -5877,7 +5882,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5350 +@@ -5882,7 +5887,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5351 goto discard;
5352
5353 if (th->syn) {
5354 @@ -81576,11 +81977,11 @@ index aab8f08..36092b1 100644
5355 goto discard;
5356 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
5357 return 1;
5358 -@@ -5916,11 +5921,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5359 +@@ -5921,11 +5926,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5360 return 0;
5361 }
5362
5363 -+ if (!th->ack)
5364 ++ if (!th->ack && !th->rst)
5365 + goto discard;
5366 +
5367 if (!tcp_validate_incoming(sk, skb, th, 0))
5368 @@ -81592,7 +81993,7 @@ index aab8f08..36092b1 100644
5369 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
5370
5371 switch (sk->sk_state) {
5372 -@@ -6025,8 +6033,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5373 +@@ -6030,8 +6038,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5374 }
5375 break;
5376 }
5377 @@ -81886,7 +82287,7 @@ index 5a65eea..bd913a1 100644
5378
5379 int udp4_seq_show(struct seq_file *seq, void *v)
5380 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
5381 -index aef80d7..1624eee 100644
5382 +index b27baed..5c4d458 100644
5383 --- a/net/ipv6/addrconf.c
5384 +++ b/net/ipv6/addrconf.c
5385 @@ -2151,7 +2151,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
5386 @@ -82423,6 +82824,19 @@ index cf98d62..7bf2972 100644
5387 }
5388
5389 write_unlock_bh(&iucv_sk_list.lock);
5390 +diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
5391 +index 403be43..87f09da 100644
5392 +--- a/net/iucv/iucv.c
5393 ++++ b/net/iucv/iucv.c
5394 +@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
5395 + return NOTIFY_OK;
5396 + }
5397 +
5398 +-static struct notifier_block __refdata iucv_cpu_notifier = {
5399 ++static struct notifier_block iucv_cpu_notifier = {
5400 + .notifier_call = iucv_cpu_notify,
5401 + };
5402 +
5403 diff --git a/net/key/af_key.c b/net/key/af_key.c
5404 index 1e733e9..3d73c9f 100644
5405 --- a/net/key/af_key.c
5406 @@ -83014,7 +83428,7 @@ index f156382..95ce7ba 100644
5407 *uaddr_len = sizeof(struct sockaddr_ax25);
5408 }
5409 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5410 -index 85afc13..d9fb2db 100644
5411 +index 835fcea..d9fb2db 100644
5412 --- a/net/packet/af_packet.c
5413 +++ b/net/packet/af_packet.c
5414 @@ -1684,7 +1684,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
5415 @@ -83035,27 +83449,7 @@ index 85afc13..d9fb2db 100644
5416 spin_unlock(&sk->sk_receive_queue.lock);
5417
5418 drop_n_restore:
5419 -@@ -2422,13 +2422,15 @@ static int packet_release(struct socket *sock)
5420 -
5421 - packet_flush_mclist(sk);
5422 -
5423 -- memset(&req_u, 0, sizeof(req_u));
5424 --
5425 -- if (po->rx_ring.pg_vec)
5426 -+ if (po->rx_ring.pg_vec) {
5427 -+ memset(&req_u, 0, sizeof(req_u));
5428 - packet_set_ring(sk, &req_u, 1, 0);
5429 -+ }
5430 -
5431 -- if (po->tx_ring.pg_vec)
5432 -+ if (po->tx_ring.pg_vec) {
5433 -+ memset(&req_u, 0, sizeof(req_u));
5434 - packet_set_ring(sk, &req_u, 1, 1);
5435 -+ }
5436 -
5437 - fanout_release(sk);
5438 -
5439 -@@ -2621,6 +2623,7 @@ out:
5440 +@@ -2623,6 +2623,7 @@ out:
5441
5442 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
5443 {
5444 @@ -83063,7 +83457,7 @@ index 85afc13..d9fb2db 100644
5445 struct sock_exterr_skb *serr;
5446 struct sk_buff *skb, *skb2;
5447 int copied, err;
5448 -@@ -2642,8 +2645,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
5449 +@@ -2644,8 +2645,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
5450 sock_recv_timestamp(msg, sk, skb);
5451
5452 serr = SKB_EXT_ERR(skb);
5453 @@ -83074,7 +83468,7 @@ index 85afc13..d9fb2db 100644
5454
5455 msg->msg_flags |= MSG_ERRQUEUE;
5456 err = copied;
5457 -@@ -3274,7 +3278,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
5458 +@@ -3276,7 +3278,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
5459 case PACKET_HDRLEN:
5460 if (len > sizeof(int))
5461 len = sizeof(int);
5462 @@ -83083,7 +83477,7 @@ index 85afc13..d9fb2db 100644
5463 return -EFAULT;
5464 switch (val) {
5465 case TPACKET_V1:
5466 -@@ -3324,7 +3328,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
5467 +@@ -3326,7 +3328,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
5468
5469 if (put_user(len, optlen))
5470 return -EFAULT;
5471 @@ -83617,29 +84011,6 @@ index bf81204..333926d 100644
5472 SCTP_DBG_OBJCNT_DEC(keys);
5473 }
5474 }
5475 -diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
5476 -index c8cc24e..dbe5870a 100644
5477 ---- a/net/sctp/endpointola.c
5478 -+++ b/net/sctp/endpointola.c
5479 -@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
5480 - /* Final destructor for endpoint. */
5481 - static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
5482 - {
5483 -+ int i;
5484 -+
5485 - SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
5486 -
5487 - /* Free up the HMAC transform. */
5488 -@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
5489 - sctp_inq_free(&ep->base.inqueue);
5490 - sctp_bind_addr_free(&ep->base.bind_addr);
5491 -
5492 -+ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
5493 -+ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
5494 -+
5495 - /* Remove and free the port */
5496 - if (sctp_sk(ep->base.sk)->bind_hash)
5497 - sctp_put_port(ep->base.sk);
5498 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
5499 index 8104278..631330b 100644
5500 --- a/net/sctp/ipv6.c
5501 @@ -83694,18 +84065,9 @@ index 6f6ad86..f80bd85 100644
5502
5503 static int sctp_v4_protosw_init(void)
5504 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5505 -index fa8333b..8633998 100644
5506 +index 5e0d86e..8633998 100644
5507 --- a/net/sctp/socket.c
5508 +++ b/net/sctp/socket.c
5509 -@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
5510 -
5511 - ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
5512 - out:
5513 -- kfree(authkey);
5514 -+ kzfree(authkey);
5515 - return ret;
5516 - }
5517 -
5518 @@ -4583,6 +4583,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
5519 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
5520 if (space_left < addrlen)
5521 @@ -84705,12 +85067,12 @@ index cb1f50c..cef2a7c 100644
5522 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
5523 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
5524 new file mode 100644
5525 -index 0000000..008ac1a
5526 +index 0000000..5e0222d
5527 --- /dev/null
5528 +++ b/scripts/gcc-plugin.sh
5529 @@ -0,0 +1,17 @@
5530 +#!/bin/bash
5531 -+plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
5532 ++plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
5533 +#include "gcc-plugin.h"
5534 +#include "tree.h"
5535 +#include "tm.h"
5536
5537 diff --git a/3.2.38/4425_grsec_remove_EI_PAX.patch b/3.2.39/4425_grsec_remove_EI_PAX.patch
5538 similarity index 100%
5539 rename from 3.2.38/4425_grsec_remove_EI_PAX.patch
5540 rename to 3.2.39/4425_grsec_remove_EI_PAX.patch
5541
5542 diff --git a/3.2.38/4430_grsec-remove-localversion-grsec.patch b/3.2.39/4430_grsec-remove-localversion-grsec.patch
5543 similarity index 100%
5544 rename from 3.2.38/4430_grsec-remove-localversion-grsec.patch
5545 rename to 3.2.39/4430_grsec-remove-localversion-grsec.patch
5546
5547 diff --git a/3.2.38/4435_grsec-mute-warnings.patch b/3.2.39/4435_grsec-mute-warnings.patch
5548 similarity index 100%
5549 rename from 3.2.38/4435_grsec-mute-warnings.patch
5550 rename to 3.2.39/4435_grsec-mute-warnings.patch
5551
5552 diff --git a/3.2.38/4440_grsec-remove-protected-paths.patch b/3.2.39/4440_grsec-remove-protected-paths.patch
5553 similarity index 100%
5554 rename from 3.2.38/4440_grsec-remove-protected-paths.patch
5555 rename to 3.2.39/4440_grsec-remove-protected-paths.patch
5556
5557 diff --git a/3.2.38/4450_grsec-kconfig-default-gids.patch b/3.2.39/4450_grsec-kconfig-default-gids.patch
5558 similarity index 100%
5559 rename from 3.2.38/4450_grsec-kconfig-default-gids.patch
5560 rename to 3.2.39/4450_grsec-kconfig-default-gids.patch
5561
5562 diff --git a/3.2.38/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.39/4465_selinux-avc_audit-log-curr_ip.patch
5563 similarity index 100%
5564 rename from 3.2.38/4465_selinux-avc_audit-log-curr_ip.patch
5565 rename to 3.2.39/4465_selinux-avc_audit-log-curr_ip.patch
5566
5567 diff --git a/3.2.38/4470_disable-compat_vdso.patch b/3.2.39/4470_disable-compat_vdso.patch
5568 similarity index 100%
5569 rename from 3.2.38/4470_disable-compat_vdso.patch
5570 rename to 3.2.39/4470_disable-compat_vdso.patch
5571
5572 diff --git a/3.7.9/0000_README b/3.8.0/0000_README
5573 similarity index 96%
5574 rename from 3.7.9/0000_README
5575 rename to 3.8.0/0000_README
5576 index bd6a050..8d7fe2e 100644
5577 --- a/3.7.9/0000_README
5578 +++ b/3.8.0/0000_README
5579 @@ -2,7 +2,7 @@ README
5580 -----------------------------------------------------------------------------
5581 Individual Patch Descriptions:
5582 -----------------------------------------------------------------------------
5583 -Patch: 4420_grsecurity-2.9.1-3.7.9-201302171808.patch
5584 +Patch: 4420_grsecurity-2.9.1-3.8.0-201302231124.patch
5585 From: http://www.grsecurity.net
5586 Desc: hardened-sources base patch from upstream grsecurity
5587
5588
5589 diff --git a/3.7.9/4420_grsecurity-2.9.1-3.7.9-201302171808.patch b/3.8.0/4420_grsecurity-2.9.1-3.8.0-201302231124.patch
5590 similarity index 93%
5591 rename from 3.7.9/4420_grsecurity-2.9.1-3.7.9-201302171808.patch
5592 rename to 3.8.0/4420_grsecurity-2.9.1-3.8.0-201302231124.patch
5593 index f81b3df..c065fb8 100644
5594 --- a/3.7.9/4420_grsecurity-2.9.1-3.7.9-201302171808.patch
5595 +++ b/3.8.0/4420_grsecurity-2.9.1-3.8.0-201302231124.patch
5596 @@ -1,5 +1,5 @@
5597 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
5598 -index 74c25c8..deadba2 100644
5599 +index b89a739..dba90c5 100644
5600 --- a/Documentation/dontdiff
5601 +++ b/Documentation/dontdiff
5602 @@ -2,9 +2,11 @@
5603 @@ -144,7 +144,7 @@ index 74c25c8..deadba2 100644
5604 mkprep
5605 mkregtable
5606 mktables
5607 -@@ -186,6 +205,8 @@ oui.c*
5608 +@@ -185,6 +204,8 @@ oui.c*
5609 page-types
5610 parse.c
5611 parse.h
5612 @@ -153,7 +153,7 @@ index 74c25c8..deadba2 100644
5613 patches*
5614 pca200e.bin
5615 pca200e_ecd.bin2
5616 -@@ -195,6 +216,7 @@ perf-archive
5617 +@@ -194,6 +215,7 @@ perf-archive
5618 piggyback
5619 piggy.gzip
5620 piggy.S
5621 @@ -161,7 +161,7 @@ index 74c25c8..deadba2 100644
5622 pnmtologo
5623 ppc_defs.h*
5624 pss_boot.h
5625 -@@ -204,7 +226,10 @@ r200_reg_safe.h
5626 +@@ -203,7 +225,10 @@ r200_reg_safe.h
5627 r300_reg_safe.h
5628 r420_reg_safe.h
5629 r600_reg_safe.h
5630 @@ -172,7 +172,7 @@ index 74c25c8..deadba2 100644
5631 relocs
5632 rlim_names.h
5633 rn50_reg_safe.h
5634 -@@ -214,8 +239,11 @@ series
5635 +@@ -213,8 +238,11 @@ series
5636 setup
5637 setup.bin
5638 setup.elf
5639 @@ -184,7 +184,7 @@ index 74c25c8..deadba2 100644
5640 split-include
5641 syscalltab.h
5642 tables.c
5643 -@@ -225,6 +253,7 @@ tftpboot.img
5644 +@@ -224,6 +252,7 @@ tftpboot.img
5645 timeconst.h
5646 times.h*
5647 trix_boot.h
5648 @@ -192,7 +192,7 @@ index 74c25c8..deadba2 100644
5649 utsrelease.h*
5650 vdso-syms.lds
5651 vdso.lds
5652 -@@ -236,13 +265,17 @@ vdso32.lds
5653 +@@ -235,13 +264,17 @@ vdso32.lds
5654 vdso32.so.dbg
5655 vdso64.lds
5656 vdso64.so.dbg
5657 @@ -210,7 +210,7 @@ index 74c25c8..deadba2 100644
5658 vmlinuz
5659 voffset.h
5660 vsyscall.lds
5661 -@@ -250,9 +283,11 @@ vsyscall_32.lds
5662 +@@ -249,9 +282,11 @@ vsyscall_32.lds
5663 wanxlfw.inc
5664 uImage
5665 unifdef
5666 @@ -223,20 +223,21 @@ index 74c25c8..deadba2 100644
5667 +zconf.lex.c
5668 zoffset.h
5669 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
5670 -index 9776f06..18b1856 100644
5671 +index 6c72381..2fe9ae4 100644
5672 --- a/Documentation/kernel-parameters.txt
5673 +++ b/Documentation/kernel-parameters.txt
5674 -@@ -905,6 +905,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
5675 - gpt [EFI] Forces disk with valid GPT signature but
5676 - invalid Protective MBR to be treated as GPT.
5677 +@@ -917,6 +917,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
5678 + Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
5679 + Default: 1024
5680
5681 -+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
5682 ++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
5683 + ignore grsecurity's /proc restrictions
5684 +
5685 ++
5686 hashdist= [KNL,NUMA] Large hashes allocated during boot
5687 are distributed across NUMA nodes. Defaults on
5688 for 64-bit NUMA, off otherwise.
5689 -@@ -2082,6 +2085,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
5690 +@@ -2116,6 +2120,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
5691 the specified number of seconds. This is to be used if
5692 your oopses keep scrolling off the screen.
5693
5694 @@ -251,7 +252,7 @@ index 9776f06..18b1856 100644
5695
5696 pcd. [PARIDE]
5697 diff --git a/Makefile b/Makefile
5698 -index 5634228..b54a897 100644
5699 +index d69266c..e4f6593 100644
5700 --- a/Makefile
5701 +++ b/Makefile
5702 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
5703 @@ -277,12 +278,16 @@ index 5634228..b54a897 100644
5704 $(Q)$(MAKE) $(build)=scripts/basic
5705 $(Q)rm -f .tmp_quiet_recordmcount
5706
5707 -@@ -575,6 +576,60 @@ else
5708 +@@ -575,6 +576,64 @@ else
5709 KBUILD_CFLAGS += -O2
5710 endif
5711
5712 +ifndef DISABLE_PAX_PLUGINS
5713 ++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
5714 ++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
5715 ++else
5716 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
5717 ++endif
5718 +ifneq ($(PLUGINCC),)
5719 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
5720 +ifndef CONFIG_UML
5721 @@ -338,7 +343,7 @@ index 5634228..b54a897 100644
5722 include $(srctree)/arch/$(SRCARCH)/Makefile
5723
5724 ifdef CONFIG_READABLE_ASM
5725 -@@ -731,7 +786,7 @@ export mod_sign_cmd
5726 +@@ -731,7 +790,7 @@ export mod_sign_cmd
5727
5728
5729 ifeq ($(KBUILD_EXTMOD),)
5730 @@ -347,7 +352,7 @@ index 5634228..b54a897 100644
5731
5732 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
5733 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
5734 -@@ -778,6 +833,8 @@ endif
5735 +@@ -778,6 +837,8 @@ endif
5736
5737 # The actual objects are generated when descending,
5738 # make sure no implicit rule kicks in
5739 @@ -356,7 +361,7 @@ index 5634228..b54a897 100644
5740 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
5741
5742 # Handle descending into subdirectories listed in $(vmlinux-dirs)
5743 -@@ -787,7 +844,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
5744 +@@ -787,7 +848,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
5745 # Error messages still appears in the original language
5746
5747 PHONY += $(vmlinux-dirs)
5748 @@ -365,7 +370,7 @@ index 5634228..b54a897 100644
5749 $(Q)$(MAKE) $(build)=$@
5750
5751 # Store (new) KERNELRELASE string in include/config/kernel.release
5752 -@@ -831,6 +888,7 @@ prepare0: archprepare FORCE
5753 +@@ -831,6 +892,7 @@ prepare0: archprepare FORCE
5754 $(Q)$(MAKE) $(build)=.
5755
5756 # All the preparing..
5757 @@ -373,7 +378,7 @@ index 5634228..b54a897 100644
5758 prepare: prepare0
5759
5760 # Generate some files
5761 -@@ -938,6 +996,8 @@ all: modules
5762 +@@ -938,6 +1000,8 @@ all: modules
5763 # using awk while concatenating to the final file.
5764
5765 PHONY += modules
5766 @@ -382,7 +387,7 @@ index 5634228..b54a897 100644
5767 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
5768 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
5769 @$(kecho) ' Building modules, stage 2.';
5770 -@@ -953,7 +1013,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
5771 +@@ -953,7 +1017,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
5772
5773 # Target to prepare building external modules
5774 PHONY += modules_prepare
5775 @@ -391,7 +396,7 @@ index 5634228..b54a897 100644
5776
5777 # Target to install modules
5778 PHONY += modules_install
5779 -@@ -1013,7 +1073,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
5780 +@@ -1019,7 +1083,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
5781 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
5782 signing_key.priv signing_key.x509 x509.genkey \
5783 extra_certificates signing_key.x509.keyid \
5784 @@ -400,7 +405,7 @@ index 5634228..b54a897 100644
5785
5786 # clean - Delete most, but leave enough to build external modules
5787 #
5788 -@@ -1053,6 +1113,7 @@ distclean: mrproper
5789 +@@ -1059,6 +1123,7 @@ distclean: mrproper
5790 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
5791 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
5792 -o -name '.*.rej' \
5793 @@ -408,7 +413,7 @@ index 5634228..b54a897 100644
5794 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
5795 -type f -print | xargs rm -f
5796
5797 -@@ -1213,6 +1274,8 @@ PHONY += $(module-dirs) modules
5798 +@@ -1219,6 +1284,8 @@ PHONY += $(module-dirs) modules
5799 $(module-dirs): crmodverdir $(objtree)/Module.symvers
5800 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
5801
5802 @@ -417,7 +422,7 @@ index 5634228..b54a897 100644
5803 modules: $(module-dirs)
5804 @$(kecho) ' Building modules, stage 2.';
5805 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
5806 -@@ -1349,17 +1412,21 @@ else
5807 +@@ -1355,17 +1422,21 @@ else
5808 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
5809 endif
5810
5811 @@ -443,7 +448,7 @@ index 5634228..b54a897 100644
5812 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
5813 %.symtypes: %.c prepare scripts FORCE
5814 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
5815 -@@ -1369,11 +1436,15 @@ endif
5816 +@@ -1375,11 +1446,15 @@ endif
5817 $(cmd_crmodverdir)
5818 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
5819 $(build)=$(build-dir)
5820 @@ -792,6 +797,19 @@ index 0c4132d..88f0d53 100644
5821 } else if (!cause) {
5822 /* Allow reads even for write-only mappings */
5823 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
5824 +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
5825 +index 67874b8..0e40765 100644
5826 +--- a/arch/arm/Kconfig
5827 ++++ b/arch/arm/Kconfig
5828 +@@ -1813,7 +1813,7 @@ config ALIGNMENT_TRAP
5829 +
5830 + config UACCESS_WITH_MEMCPY
5831 + bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
5832 +- depends on MMU
5833 ++ depends on MMU && !PAX_MEMORY_UDEREF
5834 + default y if CPU_FEROCEON
5835 + help
5836 + Implement faster copy_to_user and clear_user methods for CPU
5837 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
5838 index c79f61f..9ac0642 100644
5839 --- a/arch/arm/include/asm/atomic.h
5840 @@ -1456,6 +1474,31 @@ index e1489c5..d418304 100644
5841
5842 /*
5843 * Select the calling method
5844 +diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
5845 +index 6dcc164..b14d917 100644
5846 +--- a/arch/arm/include/asm/checksum.h
5847 ++++ b/arch/arm/include/asm/checksum.h
5848 +@@ -37,7 +37,19 @@ __wsum
5849 + csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
5850 +
5851 + __wsum
5852 +-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
5853 ++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
5854 ++
5855 ++static inline __wsum
5856 ++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
5857 ++{
5858 ++ __wsum ret;
5859 ++ pax_open_userland();
5860 ++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
5861 ++ pax_close_userland();
5862 ++ return ret;
5863 ++}
5864 ++
5865 ++
5866 +
5867 + /*
5868 + * Fold a partial checksum without adding pseudo headers
5869 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
5870 index 7eb18c1..e38b6d2 100644
5871 --- a/arch/arm/include/asm/cmpxchg.h
5872 @@ -1496,6 +1539,67 @@ index ab98fdd..6b19938 100644
5873
5874 #define udelay(n) \
5875 (__builtin_constant_p(n) ? \
5876 +diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
5877 +index 6ddbe44..758b5f2 100644
5878 +--- a/arch/arm/include/asm/domain.h
5879 ++++ b/arch/arm/include/asm/domain.h
5880 +@@ -48,18 +48,37 @@
5881 + * Domain types
5882 + */
5883 + #define DOMAIN_NOACCESS 0
5884 +-#define DOMAIN_CLIENT 1
5885 + #ifdef CONFIG_CPU_USE_DOMAINS
5886 ++#define DOMAIN_USERCLIENT 1
5887 ++#define DOMAIN_KERNELCLIENT 1
5888 + #define DOMAIN_MANAGER 3
5889 ++#define DOMAIN_VECTORS DOMAIN_USER
5890 + #else
5891 ++
5892 ++#ifdef CONFIG_PAX_KERNEXEC
5893 + #define DOMAIN_MANAGER 1
5894 ++#define DOMAIN_KERNEXEC 3
5895 ++#else
5896 ++#define DOMAIN_MANAGER 1
5897 ++#endif
5898 ++
5899 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
5900 ++#define DOMAIN_USERCLIENT 0
5901 ++#define DOMAIN_UDEREF 1
5902 ++#define DOMAIN_VECTORS DOMAIN_KERNEL
5903 ++#else
5904 ++#define DOMAIN_USERCLIENT 1
5905 ++#define DOMAIN_VECTORS DOMAIN_USER
5906 ++#endif
5907 ++#define DOMAIN_KERNELCLIENT 1
5908 ++
5909 + #endif
5910 +
5911 + #define domain_val(dom,type) ((type) << (2*(dom)))
5912 +
5913 + #ifndef __ASSEMBLY__
5914 +
5915 +-#ifdef CONFIG_CPU_USE_DOMAINS
5916 ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
5917 + static inline void set_domain(unsigned val)
5918 + {
5919 + asm volatile(
5920 +@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
5921 + isb();
5922 + }
5923 +
5924 +-#define modify_domain(dom,type) \
5925 +- do { \
5926 +- struct thread_info *thread = current_thread_info(); \
5927 +- unsigned int domain = thread->cpu_domain; \
5928 +- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
5929 +- thread->cpu_domain = domain | domain_val(dom, type); \
5930 +- set_domain(thread->cpu_domain); \
5931 +- } while (0)
5932 +-
5933 ++extern void modify_domain(unsigned int dom, unsigned int type);
5934 + #else
5935 + static inline void set_domain(unsigned val) { }
5936 + static inline void modify_domain(unsigned dom, unsigned type) { }
5937 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
5938 index 38050b1..9d90e8b 100644
5939 --- a/arch/arm/include/asm/elf.h
5940 @@ -1525,6 +1629,76 @@ index 38050b1..9d90e8b 100644
5941 -#define arch_randomize_brk arch_randomize_brk
5942 -
5943 #endif
5944 +diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
5945 +index de53547..52b9a28 100644
5946 +--- a/arch/arm/include/asm/fncpy.h
5947 ++++ b/arch/arm/include/asm/fncpy.h
5948 +@@ -81,7 +81,9 @@
5949 + BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
5950 + (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
5951 + \
5952 ++ pax_open_kernel(); \
5953 + memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
5954 ++ pax_close_kernel(); \
5955 + flush_icache_range((unsigned long)(dest_buf), \
5956 + (unsigned long)(dest_buf) + (size)); \
5957 + \
5958 +diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
5959 +index e42cf59..7b94b8f 100644
5960 +--- a/arch/arm/include/asm/futex.h
5961 ++++ b/arch/arm/include/asm/futex.h
5962 +@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5963 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
5964 + return -EFAULT;
5965 +
5966 ++ pax_open_userland();
5967 ++
5968 + smp_mb();
5969 + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
5970 + "1: ldrex %1, [%4]\n"
5971 +@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5972 + : "cc", "memory");
5973 + smp_mb();
5974 +
5975 ++ pax_close_userland();
5976 ++
5977 + *uval = val;
5978 + return ret;
5979 + }
5980 +@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5981 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
5982 + return -EFAULT;
5983 +
5984 ++ pax_open_userland();
5985 ++
5986 + __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
5987 + "1: " TUSER(ldr) " %1, [%4]\n"
5988 + " teq %1, %2\n"
5989 +@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
5990 + : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
5991 + : "cc", "memory");
5992 +
5993 ++ pax_close_userland();
5994 ++
5995 + *uval = val;
5996 + return ret;
5997 + }
5998 +@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
5999 + return -EFAULT;
6000 +
6001 + pagefault_disable(); /* implies preempt_disable() */
6002 ++ pax_open_userland();
6003 +
6004 + switch (op) {
6005 + case FUTEX_OP_SET:
6006 +@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
6007 + ret = -ENOSYS;
6008 + }
6009 +
6010 ++ pax_close_userland();
6011 + pagefault_enable(); /* subsumes preempt_enable() */
6012 +
6013 + if (!ret) {
6014 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
6015 index 83eb2f7..ed77159 100644
6016 --- a/arch/arm/include/asm/kmap_types.h
6017 @@ -1551,16 +1725,25 @@ index 9e614a1..3302cca 100644
6018 struct dma_struct {
6019 void *addr; /* single DMA address */
6020 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
6021 -index 195ac2f..2272f0d 100644
6022 +index 2fe141f..192dc01 100644
6023 --- a/arch/arm/include/asm/mach/map.h
6024 +++ b/arch/arm/include/asm/mach/map.h
6025 -@@ -34,6 +34,9 @@ struct map_desc {
6026 +@@ -27,13 +27,16 @@ struct map_desc {
6027 + #define MT_MINICLEAN 6
6028 + #define MT_LOW_VECTORS 7
6029 + #define MT_HIGH_VECTORS 8
6030 +-#define MT_MEMORY 9
6031 ++#define MT_MEMORY_RWX 9
6032 + #define MT_ROM 10
6033 +-#define MT_MEMORY_NONCACHED 11
6034 ++#define MT_MEMORY_NONCACHED_RX 11
6035 + #define MT_MEMORY_DTCM 12
6036 #define MT_MEMORY_ITCM 13
6037 #define MT_MEMORY_SO 14
6038 #define MT_MEMORY_DMA_READY 15
6039 -+#define MT_MEMORY_R 16
6040 -+#define MT_MEMORY_RW 17
6041 -+#define MT_MEMORY_RX 18
6042 ++#define MT_MEMORY_RW 16
6043 ++#define MT_MEMORY_RX 17
6044 ++#define MT_MEMORY_NONCACHED_RW 18
6045
6046 #ifdef CONFIG_MMU
6047 extern void iotable_init(struct map_desc *, int);
6048 @@ -1591,7 +1774,7 @@ index 812a494..71fc0b6 100644
6049 #ifdef MULTI_USER
6050 extern struct cpu_user_fns cpu_user;
6051 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
6052 -index 943504f..84d0f84 100644
6053 +index 943504f..c37a730 100644
6054 --- a/arch/arm/include/asm/pgalloc.h
6055 +++ b/arch/arm/include/asm/pgalloc.h
6056 @@ -17,6 +17,7 @@
6057 @@ -1622,16 +1805,19 @@ index 943504f..84d0f84 100644
6058
6059 #endif /* CONFIG_ARM_LPAE */
6060
6061 -@@ -126,6 +133,16 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
6062 +@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
6063 __free_page(pte);
6064 }
6065
6066 -+static inline void __pmd_update(pmd_t *pmdp, pmdval_t prot)
6067 ++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
6068 +{
6069 -+ pmdval_t pmdval = pmd_val(*pmdp) | prot;
6070 -+ pmdp[0] = __pmd(pmdval);
6071 -+#ifndef CONFIG_ARM_LPAE
6072 -+ pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
6073 ++#ifdef CONFIG_ARM_LPAE
6074 ++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
6075 ++#else
6076 ++ if (addr & SECTION_SIZE)
6077 ++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
6078 ++ else
6079 ++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
6080 +#endif
6081 + flush_pmd_entry(pmdp);
6082 +}
6083 @@ -1639,7 +1825,7 @@ index 943504f..84d0f84 100644
6084 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
6085 pmdval_t prot)
6086 {
6087 -@@ -155,7 +172,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
6088 +@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
6089 static inline void
6090 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
6091 {
6092 @@ -1649,14 +1835,14 @@ index 943504f..84d0f84 100644
6093 #define pmd_pgtable(pmd) pmd_page(pmd)
6094
6095 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
6096 -index 5cfba15..d437dc2 100644
6097 +index 5cfba15..f415e1a 100644
6098 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
6099 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
6100 @@ -20,12 +20,15 @@
6101 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
6102 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
6103 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
6104 -+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* PXN */
6105 ++#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
6106 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
6107 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
6108 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
6109 @@ -1664,7 +1850,7 @@ index 5cfba15..d437dc2 100644
6110 /*
6111 * - section
6112 */
6113 -+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)
6114 ++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
6115 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
6116 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
6117 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
6118 @@ -1672,27 +1858,35 @@ index 5cfba15..d437dc2 100644
6119 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
6120 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
6121 #define PMD_SECT_AF (_AT(pmdval_t, 0))
6122 -+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 0))
6123 ++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
6124
6125 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
6126 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
6127 +@@ -66,6 +70,7 @@
6128 + * - extended small page/tiny page
6129 + */
6130 + #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
6131 ++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
6132 + #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
6133 + #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
6134 + #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
6135 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
6136 -index 2317a71..1897391 100644
6137 +index f97ee02..07f1be5 100644
6138 --- a/arch/arm/include/asm/pgtable-2level.h
6139 +++ b/arch/arm/include/asm/pgtable-2level.h
6140 -@@ -123,6 +123,7 @@
6141 - #define L_PTE_USER (_AT(pteval_t, 1) << 8)
6142 +@@ -125,6 +125,7 @@
6143 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
6144 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
6145 -+#define L_PTE_PXN (_AT(pteval_t, 1) << 11) /* v7*/
6146 + #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
6147 ++#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
6148
6149 /*
6150 * These are the memory types, defined to be compatible with
6151 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
6152 -index d795282..d82ff13 100644
6153 +index d795282..a43ea90 100644
6154 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
6155 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
6156 -@@ -32,6 +32,7 @@
6157 +@@ -32,15 +32,18 @@
6158 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
6159 #define PMD_BIT4 (_AT(pmdval_t, 0))
6160 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
6161 @@ -1700,7 +1894,10 @@ index d795282..d82ff13 100644
6162
6163 /*
6164 * - section
6165 -@@ -41,9 +42,11 @@
6166 + */
6167 + #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
6168 + #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
6169 ++#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
6170 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
6171 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
6172 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
6173 @@ -1708,10 +1905,6 @@ index d795282..d82ff13 100644
6174 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
6175 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
6176 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
6177 -+#define PMD_SECT_AP_RDONLY (_AT(pmdval_t, 1) << 7)
6178 - #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
6179 -
6180 - /*
6181 @@ -66,6 +69,7 @@
6182 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
6183 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
6184 @@ -1721,10 +1914,10 @@ index d795282..d82ff13 100644
6185
6186 /*
6187 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
6188 -index b249035..4ab204b 100644
6189 +index a3f3792..7b932a6 100644
6190 --- a/arch/arm/include/asm/pgtable-3level.h
6191 +++ b/arch/arm/include/asm/pgtable-3level.h
6192 -@@ -73,6 +73,7 @@
6193 +@@ -74,6 +74,7 @@
6194 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
6195 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
6196 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
6197 @@ -1732,7 +1925,7 @@ index b249035..4ab204b 100644
6198 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
6199 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
6200 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
6201 -@@ -80,6 +81,7 @@
6202 +@@ -82,6 +83,7 @@
6203 /*
6204 * To be used in assembly code with the upper page attributes.
6205 */
6206 @@ -1741,7 +1934,7 @@ index b249035..4ab204b 100644
6207 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
6208
6209 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
6210 -index 08c1231..1031bb4 100644
6211 +index 9c82f988..514705a 100644
6212 --- a/arch/arm/include/asm/pgtable.h
6213 +++ b/arch/arm/include/asm/pgtable.h
6214 @@ -30,6 +30,9 @@
6215 @@ -1764,7 +1957,7 @@ index 08c1231..1031bb4 100644
6216 extern void __pte_error(const char *file, int line, pte_t);
6217 extern void __pmd_error(const char *file, int line, pmd_t);
6218 extern void __pgd_error(const char *file, int line, pgd_t);
6219 -@@ -53,6 +59,17 @@ extern void __pgd_error(const char *file, int line, pgd_t);
6220 +@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
6221 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
6222 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
6223
6224 @@ -1772,8 +1965,41 @@ index 08c1231..1031bb4 100644
6225 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
6226 +
6227 +#ifdef CONFIG_PAX_KERNEXEC
6228 -+static inline unsigned long pax_open_kernel(void) { return 0; /* TODO */ }
6229 -+static inline unsigned long pax_close_kernel(void) { return 0; /* TODO */ }
6230 ++#include <asm/domain.h>
6231 ++#include <linux/thread_info.h>
6232 ++#include <linux/preempt.h>
6233 ++#endif
6234 ++
6235 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6236 ++static inline int test_domain(int domain, int domaintype)
6237 ++{
6238 ++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
6239 ++}
6240 ++#endif
6241 ++
6242 ++#ifdef CONFIG_PAX_KERNEXEC
6243 ++static inline unsigned long pax_open_kernel(void) {
6244 ++#ifdef CONFIG_ARM_LPAE
6245 ++ /* TODO */
6246 ++#else
6247 ++ preempt_disable();
6248 ++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
6249 ++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
6250 ++#endif
6251 ++ return 0;
6252 ++}
6253 ++
6254 ++static inline unsigned long pax_close_kernel(void) {
6255 ++#ifdef CONFIG_ARM_LPAE
6256 ++ /* TODO */
6257 ++#else
6258 ++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
6259 ++ /* DOMAIN_MANAGER = "client" under KERNEXEC */
6260 ++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
6261 ++ preempt_enable_no_resched();
6262 ++#endif
6263 ++ return 0;
6264 ++}
6265 +#else
6266 +static inline unsigned long pax_open_kernel(void) { return 0; }
6267 +static inline unsigned long pax_close_kernel(void) { return 0; }
6268 @@ -1782,7 +2008,7 @@ index 08c1231..1031bb4 100644
6269 /*
6270 * This is the lowest virtual address we can permit any user space
6271 * mapping to be mapped at. This is particularly important for
6272 -@@ -63,8 +80,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
6273 +@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
6274 /*
6275 * The pgprot_* and protection_map entries will be fixed up in runtime
6276 * to include the cachable and bufferable bits based on memory policy,
6277 @@ -1793,12 +2019,12 @@ index 08c1231..1031bb4 100644
6278 */
6279 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
6280
6281 -@@ -242,7 +259,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
6282 +@@ -240,7 +290,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
6283
6284 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
6285 {
6286 -- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
6287 -+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | __supported_pte_mask;
6288 +- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
6289 ++ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE | __supported_pte_mask;
6290 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
6291 return pte;
6292 }
6293 @@ -1815,11 +2041,27 @@ index f3628fb..a0672dd 100644
6294
6295 #ifndef MULTI_CPU
6296 extern void cpu_proc_init(void);
6297 +diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
6298 +index 06e7d50..8a8e251 100644
6299 +--- a/arch/arm/include/asm/processor.h
6300 ++++ b/arch/arm/include/asm/processor.h
6301 +@@ -65,9 +65,8 @@ struct thread_struct {
6302 + regs->ARM_cpsr |= PSR_ENDSTATE; \
6303 + regs->ARM_pc = pc & ~1; /* pc */ \
6304 + regs->ARM_sp = sp; /* sp */ \
6305 +- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
6306 +- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
6307 +- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
6308 ++ /* r2 (envp), r1 (argv), r0 (argc) */ \
6309 ++ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
6310 + nommu_start_thread(regs); \
6311 + })
6312 +
6313 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
6314 -index 2e3be16..4dc90fc 100644
6315 +index d3a22be..3a69ad5 100644
6316 --- a/arch/arm/include/asm/smp.h
6317 +++ b/arch/arm/include/asm/smp.h
6318 -@@ -106,7 +106,7 @@ struct smp_operations {
6319 +@@ -107,7 +107,7 @@ struct smp_operations {
6320 int (*cpu_disable)(unsigned int cpu);
6321 #endif
6322 #endif
6323 @@ -1829,40 +2071,161 @@ index 2e3be16..4dc90fc 100644
6324 /*
6325 * set platform specific SMP operations
6326 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
6327 -index 8477b4c..801a6a9 100644
6328 +index cddda1f..ff357f7 100644
6329 --- a/arch/arm/include/asm/thread_info.h
6330 +++ b/arch/arm/include/asm/thread_info.h
6331 -@@ -151,6 +151,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
6332 - #define TIF_SYSCALL_TRACE 8
6333 +@@ -77,9 +77,9 @@ struct thread_info {
6334 + .flags = 0, \
6335 + .preempt_count = INIT_PREEMPT_COUNT, \
6336 + .addr_limit = KERNEL_DS, \
6337 +- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
6338 +- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
6339 +- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
6340 ++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
6341 ++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
6342 ++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
6343 + .restart_block = { \
6344 + .fn = do_no_restart_syscall, \
6345 + }, \
6346 +@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
6347 #define TIF_SYSCALL_AUDIT 9
6348 #define TIF_SYSCALL_TRACEPOINT 10
6349 + #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
6350 +
6351 +/* within 8 bits of TIF_SYSCALL_TRACE
6352 -+ to meet flexible second operand requirements
6353 -+*/
6354 -+#define TIF_GRSEC_SETXID 11
6355 ++ * to meet flexible second operand requirements
6356 ++ */
6357 ++#define TIF_GRSEC_SETXID 12
6358 +
6359 #define TIF_USING_IWMMXT 17
6360 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6361 #define TIF_RESTORE_SIGMASK 20
6362 -@@ -165,9 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
6363 +@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
6364 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
6365 - #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
6366 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
6367 + #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
6368 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
6369
6370 /* Checks for any syscall work in entry-common.S */
6371 --#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6372 -+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
6373 -+ _TIF_GRSEC_SETXID)
6374 + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6375 +- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6376 ++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
6377
6378 /*
6379 * Change these and you break ASM code in entry-common.S
6380 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
6381 -index 7e1f760..f2c37b1 100644
6382 +index 7e1f760..752fcb7 100644
6383 --- a/arch/arm/include/asm/uaccess.h
6384 +++ b/arch/arm/include/asm/uaccess.h
6385 -@@ -418,8 +418,23 @@ do { \
6386 +@@ -18,6 +18,7 @@
6387 + #include <asm/domain.h>
6388 + #include <asm/unified.h>
6389 + #include <asm/compiler.h>
6390 ++#include <asm/pgtable.h>
6391 +
6392 + #define VERIFY_READ 0
6393 + #define VERIFY_WRITE 1
6394 +@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
6395 + #define USER_DS TASK_SIZE
6396 + #define get_fs() (current_thread_info()->addr_limit)
6397 +
6398 ++static inline void pax_open_userland(void)
6399 ++{
6400 ++
6401 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6402 ++ if (get_fs() == USER_DS) {
6403 ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
6404 ++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
6405 ++ }
6406 ++#endif
6407 ++
6408 ++}
6409 ++
6410 ++static inline void pax_close_userland(void)
6411 ++{
6412 ++
6413 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6414 ++ if (get_fs() == USER_DS) {
6415 ++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
6416 ++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
6417 ++ }
6418 ++#endif
6419 ++
6420 ++}
6421 ++
6422 + static inline void set_fs(mm_segment_t fs)
6423 + {
6424 + current_thread_info()->addr_limit = fs;
6425 +- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
6426 ++ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
6427 + }
6428 +
6429 + #define segment_eq(a,b) ((a) == (b))
6430 +@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
6431 +
6432 + #define get_user(x,p) \
6433 + ({ \
6434 ++ int __e; \
6435 + might_fault(); \
6436 +- __get_user_check(x,p); \
6437 ++ pax_open_userland(); \
6438 ++ __e = __get_user_check(x,p); \
6439 ++ pax_close_userland(); \
6440 ++ __e; \
6441 + })
6442 +
6443 + extern int __put_user_1(void *, unsigned int);
6444 +@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
6445 +
6446 + #define put_user(x,p) \
6447 + ({ \
6448 ++ int __e; \
6449 + might_fault(); \
6450 +- __put_user_check(x,p); \
6451 ++ pax_open_userland(); \
6452 ++ __e = __put_user_check(x,p); \
6453 ++ pax_close_userland(); \
6454 ++ __e; \
6455 + })
6456 +
6457 + #else /* CONFIG_MMU */
6458 +@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
6459 + #define __get_user(x,ptr) \
6460 + ({ \
6461 + long __gu_err = 0; \
6462 ++ pax_open_userland(); \
6463 + __get_user_err((x),(ptr),__gu_err); \
6464 ++ pax_close_userland(); \
6465 + __gu_err; \
6466 + })
6467 +
6468 + #define __get_user_error(x,ptr,err) \
6469 + ({ \
6470 ++ pax_open_userland(); \
6471 + __get_user_err((x),(ptr),err); \
6472 ++ pax_close_userland(); \
6473 + (void) 0; \
6474 + })
6475 +
6476 +@@ -312,13 +349,17 @@ do { \
6477 + #define __put_user(x,ptr) \
6478 + ({ \
6479 + long __pu_err = 0; \
6480 ++ pax_open_userland(); \
6481 + __put_user_err((x),(ptr),__pu_err); \
6482 ++ pax_close_userland(); \
6483 + __pu_err; \
6484 + })
6485 +
6486 + #define __put_user_error(x,ptr,err) \
6487 + ({ \
6488 ++ pax_open_userland(); \
6489 + __put_user_err((x),(ptr),err); \
6490 ++ pax_close_userland(); \
6491 + (void) 0; \
6492 + })
6493 +
6494 +@@ -418,11 +459,44 @@ do { \
6495
6496
6497 #ifdef CONFIG_MMU
6498 @@ -1873,22 +2236,44 @@ index 7e1f760..f2c37b1 100644
6499 +
6500 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
6501 +{
6502 -+ check_object_size(to, n, false);
6503 ++ unsigned long ret;
6504 +
6505 -+ return ___copy_from_user(to, from, n);
6506 ++ check_object_size(to, n, false);
6507 ++ pax_open_userland();
6508 ++ ret = ___copy_from_user(to, from, n);
6509 ++ pax_close_userland();
6510 ++ return ret;
6511 +}
6512 +
6513 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
6514 +{
6515 -+ check_object_size(from, n, true);
6516 ++ unsigned long ret;
6517 +
6518 -+ return ___copy_to_user(to, from, n);
6519 ++ check_object_size(from, n, true);
6520 ++ pax_open_userland();
6521 ++ ret = ___copy_to_user(to, from, n);
6522 ++ pax_close_userland();
6523 ++ return ret;
6524 +}
6525 +
6526 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
6527 - extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
6528 +-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
6529 ++extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
6530 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
6531 -@@ -431,6 +446,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
6532 ++
6533 ++static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
6534 ++{
6535 ++ unsigned long ret;
6536 ++ pax_open_userland();
6537 ++ ret = ___clear_user(addr, n);
6538 ++ pax_close_userland();
6539 ++ return ret;
6540 ++}
6541 ++
6542 + #else
6543 + #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
6544 + #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
6545 +@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
6546
6547 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6548 {
6549 @@ -1898,7 +2283,7 @@ index 7e1f760..f2c37b1 100644
6550 if (access_ok(VERIFY_READ, from, n))
6551 n = __copy_from_user(to, from, n);
6552 else /* security hole - plug it */
6553 -@@ -440,6 +458,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
6554 +@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
6555
6556 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6557 {
6558 @@ -1936,8 +2321,364 @@ index 60d3b73..9168db0 100644
6559 EXPORT_SYMBOL(__clear_user);
6560
6561 EXPORT_SYMBOL(__get_user_1);
6562 +diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
6563 +index 0f82098..3dbd3ee 100644
6564 +--- a/arch/arm/kernel/entry-armv.S
6565 ++++ b/arch/arm/kernel/entry-armv.S
6566 +@@ -47,6 +47,87 @@
6567 + 9997:
6568 + .endm
6569 +
6570 ++ .macro pax_enter_kernel
6571 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6572 ++ @ make aligned space for saved DACR
6573 ++ sub sp, sp, #8
6574 ++ @ save regs
6575 ++ stmdb sp!, {r1, r2}
6576 ++ @ read DACR from cpu_domain into r1
6577 ++ mov r2, sp
6578 ++ @ assume 8K pages, since we have to split the immediate in two
6579 ++ bic r2, r2, #(0x1fc0)
6580 ++ bic r2, r2, #(0x3f)
6581 ++ ldr r1, [r2, #TI_CPU_DOMAIN]
6582 ++ @ store old DACR on stack
6583 ++ str r1, [sp, #8]
6584 ++#ifdef CONFIG_PAX_KERNEXEC
6585 ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
6586 ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
6587 ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
6588 ++#endif
6589 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6590 ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
6591 ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
6592 ++#endif
6593 ++ @ write r1 to current_thread_info()->cpu_domain
6594 ++ str r1, [r2, #TI_CPU_DOMAIN]
6595 ++ @ write r1 to DACR
6596 ++ mcr p15, 0, r1, c3, c0, 0
6597 ++ @ instruction sync
6598 ++ instr_sync
6599 ++ @ restore regs
6600 ++ ldmia sp!, {r1, r2}
6601 ++#endif
6602 ++ .endm
6603 ++
6604 ++ .macro pax_open_userland
6605 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6606 ++ @ save regs
6607 ++ stmdb sp!, {r0, r1}
6608 ++ @ read DACR from cpu_domain into r1
6609 ++ mov r0, sp
6610 ++ @ assume 8K pages, since we have to split the immediate in two
6611 ++ bic r0, r0, #(0x1fc0)
6612 ++ bic r0, r0, #(0x3f)
6613 ++ ldr r1, [r0, #TI_CPU_DOMAIN]
6614 ++ @ set current DOMAIN_USER to DOMAIN_CLIENT
6615 ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
6616 ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
6617 ++ @ write r1 to current_thread_info()->cpu_domain
6618 ++ str r1, [r0, #TI_CPU_DOMAIN]
6619 ++ @ write r1 to DACR
6620 ++ mcr p15, 0, r1, c3, c0, 0
6621 ++ @ instruction sync
6622 ++ instr_sync
6623 ++ @ restore regs
6624 ++ ldmia sp!, {r0, r1}
6625 ++#endif
6626 ++ .endm
6627 ++
6628 ++ .macro pax_close_userland
6629 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6630 ++ @ save regs
6631 ++ stmdb sp!, {r0, r1}
6632 ++ @ read DACR from cpu_domain into r1
6633 ++ mov r0, sp
6634 ++ @ assume 8K pages, since we have to split the immediate in two
6635 ++ bic r0, r0, #(0x1fc0)
6636 ++ bic r0, r0, #(0x3f)
6637 ++ ldr r1, [r0, #TI_CPU_DOMAIN]
6638 ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
6639 ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
6640 ++ @ write r1 to current_thread_info()->cpu_domain
6641 ++ str r1, [r0, #TI_CPU_DOMAIN]
6642 ++ @ write r1 to DACR
6643 ++ mcr p15, 0, r1, c3, c0, 0
6644 ++ @ instruction sync
6645 ++ instr_sync
6646 ++ @ restore regs
6647 ++ ldmia sp!, {r0, r1}
6648 ++#endif
6649 ++ .endm
6650 ++
6651 + .macro pabt_helper
6652 + @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
6653 + #ifdef MULTI_PABORT
6654 +@@ -89,11 +170,15 @@
6655 + * Invalid mode handlers
6656 + */
6657 + .macro inv_entry, reason
6658 ++
6659 ++ pax_enter_kernel
6660 ++
6661 + sub sp, sp, #S_FRAME_SIZE
6662 + ARM( stmib sp, {r1 - lr} )
6663 + THUMB( stmia sp, {r0 - r12} )
6664 + THUMB( str sp, [sp, #S_SP] )
6665 + THUMB( str lr, [sp, #S_LR] )
6666 ++
6667 + mov r1, #\reason
6668 + .endm
6669 +
6670 +@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
6671 + .macro svc_entry, stack_hole=0
6672 + UNWIND(.fnstart )
6673 + UNWIND(.save {r0 - pc} )
6674 ++
6675 ++ pax_enter_kernel
6676 ++
6677 + sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
6678 ++
6679 + #ifdef CONFIG_THUMB2_KERNEL
6680 + SPFIX( str r0, [sp] ) @ temporarily saved
6681 + SPFIX( mov r0, sp )
6682 +@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
6683 + ldmia r0, {r3 - r5}
6684 + add r7, sp, #S_SP - 4 @ here for interlock avoidance
6685 + mov r6, #-1 @ "" "" "" ""
6686 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6687 ++ @ offset sp by 8 as done in pax_enter_kernel
6688 ++ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
6689 ++#else
6690 + add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
6691 ++#endif
6692 + SPFIX( addeq r2, r2, #4 )
6693 + str r3, [sp, #-4]! @ save the "real" r0 copied
6694 + @ from the exception stack
6695 +@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
6696 + .macro usr_entry
6697 + UNWIND(.fnstart )
6698 + UNWIND(.cantunwind ) @ don't unwind the user space
6699 ++
6700 ++ pax_enter_kernel_user
6701 ++
6702 + sub sp, sp, #S_FRAME_SIZE
6703 + ARM( stmib sp, {r1 - r12} )
6704 + THUMB( stmia sp, {r0 - r12} )
6705 +@@ -456,7 +553,9 @@ __und_usr:
6706 + tst r3, #PSR_T_BIT @ Thumb mode?
6707 + bne __und_usr_thumb
6708 + sub r4, r2, #4 @ ARM instr at LR - 4
6709 ++ pax_open_userland
6710 + 1: ldrt r0, [r4]
6711 ++ pax_close_userland
6712 + #ifdef CONFIG_CPU_ENDIAN_BE8
6713 + rev r0, r0 @ little endian instruction
6714 + #endif
6715 +@@ -491,10 +590,14 @@ __und_usr_thumb:
6716 + */
6717 + .arch armv6t2
6718 + #endif
6719 ++ pax_open_userland
6720 + 2: ldrht r5, [r4]
6721 ++ pax_close_userland
6722 + cmp r5, #0xe800 @ 32bit instruction if xx != 0
6723 + blo __und_usr_fault_16 @ 16bit undefined instruction
6724 ++ pax_open_userland
6725 + 3: ldrht r0, [r2]
6726 ++ pax_close_userland
6727 + add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
6728 + str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
6729 + orr r0, r0, r5, lsl #16
6730 +@@ -733,7 +836,7 @@ ENTRY(__switch_to)
6731 + THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
6732 + THUMB( str sp, [ip], #4 )
6733 + THUMB( str lr, [ip], #4 )
6734 +-#ifdef CONFIG_CPU_USE_DOMAINS
6735 ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
6736 + ldr r6, [r2, #TI_CPU_DOMAIN]
6737 + #endif
6738 + set_tls r3, r4, r5
6739 +@@ -742,7 +845,7 @@ ENTRY(__switch_to)
6740 + ldr r8, =__stack_chk_guard
6741 + ldr r7, [r7, #TSK_STACK_CANARY]
6742 + #endif
6743 +-#ifdef CONFIG_CPU_USE_DOMAINS
6744 ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
6745 + mcr p15, 0, r6, c3, c0, 0 @ Set domain register
6746 + #endif
6747 + mov r5, r0
6748 +diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
6749 +index a6c301e..908821b 100644
6750 +--- a/arch/arm/kernel/entry-common.S
6751 ++++ b/arch/arm/kernel/entry-common.S
6752 +@@ -10,18 +10,46 @@
6753 +
6754 + #include <asm/unistd.h>
6755 + #include <asm/ftrace.h>
6756 ++#include <asm/domain.h>
6757 + #include <asm/unwind.h>
6758 +
6759 ++#include "entry-header.S"
6760 ++
6761 + #ifdef CONFIG_NEED_RET_TO_USER
6762 + #include <mach/entry-macro.S>
6763 + #else
6764 + .macro arch_ret_to_user, tmp1, tmp2
6765 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6766 ++ @ save regs
6767 ++ stmdb sp!, {r1, r2}
6768 ++ @ read DACR from cpu_domain into r1
6769 ++ mov r2, sp
6770 ++ @ assume 8K pages, since we have to split the immediate in two
6771 ++ bic r2, r2, #(0x1fc0)
6772 ++ bic r2, r2, #(0x3f)
6773 ++ ldr r1, [r2, #TI_CPU_DOMAIN]
6774 ++#ifdef CONFIG_PAX_KERNEXEC
6775 ++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
6776 ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
6777 ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
6778 ++#endif
6779 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6780 ++ @ set current DOMAIN_USER to DOMAIN_UDEREF
6781 ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
6782 ++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
6783 ++#endif
6784 ++ @ write r1 to current_thread_info()->cpu_domain
6785 ++ str r1, [r2, #TI_CPU_DOMAIN]
6786 ++ @ write r1 to DACR
6787 ++ mcr p15, 0, r1, c3, c0, 0
6788 ++ @ instruction sync
6789 ++ instr_sync
6790 ++ @ restore regs
6791 ++ ldmia sp!, {r1, r2}
6792 ++#endif
6793 + .endm
6794 + #endif
6795 +
6796 +-#include "entry-header.S"
6797 +-
6798 +-
6799 + .align 5
6800 + /*
6801 + * This is the fast syscall return path. We do as little as
6802 +@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
6803 +
6804 + .align 5
6805 + ENTRY(vector_swi)
6806 ++
6807 + sub sp, sp, #S_FRAME_SIZE
6808 + stmia sp, {r0 - r12} @ Calling r0 - r12
6809 + ARM( add r8, sp, #S_PC )
6810 +@@ -388,6 +417,12 @@ ENTRY(vector_swi)
6811 + ldr scno, [lr, #-4] @ get SWI instruction
6812 + #endif
6813 +
6814 ++ /*
6815 ++ * do this here to avoid a performance hit of wrapping the code above
6816 ++ * that directly dereferences userland to parse the SWI instruction
6817 ++ */
6818 ++ pax_enter_kernel_user
6819 ++
6820 + #ifdef CONFIG_ALIGNMENT_TRAP
6821 + ldr ip, __cr_alignment
6822 + ldr ip, [ip]
6823 +diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
6824 +index 9a8531e..812e287 100644
6825 +--- a/arch/arm/kernel/entry-header.S
6826 ++++ b/arch/arm/kernel/entry-header.S
6827 +@@ -73,9 +73,66 @@
6828 + msr cpsr_c, \rtemp @ switch back to the SVC mode
6829 + .endm
6830 +
6831 ++ .macro pax_enter_kernel_user
6832 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6833 ++ @ save regs
6834 ++ stmdb sp!, {r0, r1}
6835 ++ @ read DACR from cpu_domain into r1
6836 ++ mov r0, sp
6837 ++ @ assume 8K pages, since we have to split the immediate in two
6838 ++ bic r0, r0, #(0x1fc0)
6839 ++ bic r0, r0, #(0x3f)
6840 ++ ldr r1, [r0, #TI_CPU_DOMAIN]
6841 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
6842 ++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
6843 ++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
6844 ++#endif
6845 ++#ifdef CONFIG_PAX_KERNEXEC
6846 ++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
6847 ++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
6848 ++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
6849 ++#endif
6850 ++ @ write r1 to current_thread_info()->cpu_domain
6851 ++ str r1, [r0, #TI_CPU_DOMAIN]
6852 ++ @ write r1 to DACR
6853 ++ mcr p15, 0, r1, c3, c0, 0
6854 ++ @ instruction sync
6855 ++ instr_sync
6856 ++ @ restore regs
6857 ++ ldmia sp!, {r0, r1}
6858 ++#endif
6859 ++ .endm
6860 ++
6861 ++ .macro pax_exit_kernel
6862 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
6863 ++ @ save regs
6864 ++ stmdb sp!, {r0, r1}
6865 ++ @ read old DACR from stack into r1
6866 ++ ldr r1, [sp, #(8 + S_SP)]
6867 ++ sub r1, r1, #8
6868 ++ ldr r1, [r1]
6869 ++
6870 ++ @ write r1 to current_thread_info()->cpu_domain
6871 ++ mov r0, sp
6872 ++ @ assume 8K pages, since we have to split the immediate in two
6873 ++ bic r0, r0, #(0x1fc0)
6874 ++ bic r0, r0, #(0x3f)
6875 ++ str r1, [r0, #TI_CPU_DOMAIN]
6876 ++ @ write r1 to DACR
6877 ++ mcr p15, 0, r1, c3, c0, 0
6878 ++ @ instruction sync
6879 ++ instr_sync
6880 ++ @ restore regs
6881 ++ ldmia sp!, {r0, r1}
6882 ++#endif
6883 ++ .endm
6884 ++
6885 + #ifndef CONFIG_THUMB2_KERNEL
6886 + .macro svc_exit, rpsr
6887 + msr spsr_cxsf, \rpsr
6888 ++
6889 ++ pax_exit_kernel
6890 ++
6891 + #if defined(CONFIG_CPU_V6)
6892 + ldr r0, [sp]
6893 + strex r1, r2, [sp] @ clear the exclusive monitor
6894 +@@ -121,6 +178,9 @@
6895 + .endm
6896 + #else /* CONFIG_THUMB2_KERNEL */
6897 + .macro svc_exit, rpsr
6898 ++
6899 ++ pax_exit_kernel
6900 ++
6901 + ldr lr, [sp, #S_SP] @ top of the stack
6902 + ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
6903 + clrex @ clear the exclusive monitor
6904 +diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
6905 +index 2adda11..7fbe958 100644
6906 +--- a/arch/arm/kernel/fiq.c
6907 ++++ b/arch/arm/kernel/fiq.c
6908 +@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
6909 + #if defined(CONFIG_CPU_USE_DOMAINS)
6910 + memcpy((void *)0xffff001c, start, length);
6911 + #else
6912 ++ pax_open_kernel();
6913 + memcpy(vectors_page + 0x1c, start, length);
6914 ++ pax_close_kernel();
6915 + #endif
6916 + flush_icache_range(0xffff001c, 0xffff001c + length);
6917 + if (!vectors_high())
6918 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
6919 -index 486a15a..d95523a 100644
6920 +index 486a15a..2d6880e 100644
6921 --- a/arch/arm/kernel/head.S
6922 +++ b/arch/arm/kernel/head.S
6923 @@ -52,7 +52,9 @@
6924 @@ -1951,6 +2692,28 @@ index 486a15a..d95523a 100644
6925 .endm
6926
6927 /*
6928 +@@ -416,7 +418,7 @@ __enable_mmu:
6929 + mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
6930 + domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
6931 + domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
6932 +- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
6933 ++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
6934 + mcr p15, 0, r5, c3, c0, 0 @ load domain access register
6935 + mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
6936 + #endif
6937 +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
6938 +index 5ff2e77..556d030 100644
6939 +--- a/arch/arm/kernel/hw_breakpoint.c
6940 ++++ b/arch/arm/kernel/hw_breakpoint.c
6941 +@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
6942 + return NOTIFY_OK;
6943 + }
6944 +
6945 +-static struct notifier_block __cpuinitdata dbg_reset_nb = {
6946 ++static struct notifier_block dbg_reset_nb = {
6947 + .notifier_call = dbg_reset_notify,
6948 + };
6949 +
6950 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
6951 index 1e9be5d..03edbc2 100644
6952 --- a/arch/arm/kernel/module.c
6953 @@ -1995,8 +2758,21 @@ index 1e9be5d..03edbc2 100644
6954 #endif
6955
6956 int
6957 +diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
6958 +index 5f66206..dce492f 100644
6959 +--- a/arch/arm/kernel/perf_event_cpu.c
6960 ++++ b/arch/arm/kernel/perf_event_cpu.c
6961 +@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
6962 + return NOTIFY_OK;
6963 + }
6964 +
6965 +-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
6966 ++static struct notifier_block cpu_pmu_hotplug_notifier = {
6967 + .notifier_call = cpu_pmu_notify,
6968 + };
6969 +
6970 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
6971 -index 90084a6..a8b26bc 100644
6972 +index c6dec5f..f853532 100644
6973 --- a/arch/arm/kernel/process.c
6974 +++ b/arch/arm/kernel/process.c
6975 @@ -28,7 +28,6 @@
6976 @@ -2030,7 +2806,7 @@ index 90084a6..a8b26bc 100644
6977 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
6978 "sp : %08lx ip : %08lx fp : %08lx\n",
6979 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
6980 -@@ -451,12 +451,6 @@ unsigned long get_wchan(struct task_struct *p)
6981 +@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
6982 return 0;
6983 }
6984
6985 @@ -2044,22 +2820,19 @@ index 90084a6..a8b26bc 100644
6986 /*
6987 * The vectors page is always readable from user space for the
6988 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
6989 -index 739db3a..7f4a272 100644
6990 +index 03deeff..741ce88 100644
6991 --- a/arch/arm/kernel/ptrace.c
6992 +++ b/arch/arm/kernel/ptrace.c
6993 -@@ -916,6 +916,10 @@ enum ptrace_syscall_dir {
6994 - PTRACE_SYSCALL_EXIT,
6995 - };
6996 +@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
6997 + return current_thread_info()->syscall;
6998 + }
6999
7000 +#ifdef CONFIG_GRKERNSEC_SETXID
7001 +extern void gr_delayed_cred_worker(void);
7002 +#endif
7003 +
7004 - static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
7005 - enum ptrace_syscall_dir dir)
7006 + asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
7007 {
7008 -@@ -923,6 +927,11 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
7009 -
7010 current_thread_info()->syscall = scno;
7011
7012 +#ifdef CONFIG_GRKERNSEC_SETXID
7013 @@ -2067,11 +2840,11 @@ index 739db3a..7f4a272 100644
7014 + gr_delayed_cred_worker();
7015 +#endif
7016 +
7017 - if (!test_thread_flag(TIF_SYSCALL_TRACE))
7018 - return scno;
7019 -
7020 + /* Do the secure computing check first; failures should be fast. */
7021 + if (secure_computing(scno) == -1)
7022 + return -1;
7023 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
7024 -index da1d1aa..ef9bc58 100644
7025 +index 3f6cbb2..6d856f5 100644
7026 --- a/arch/arm/kernel/setup.c
7027 +++ b/arch/arm/kernel/setup.c
7028 @@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
7029 @@ -2119,7 +2892,7 @@ index da1d1aa..ef9bc58 100644
7030 (mmfr0 & 0x000000f0) == 0x00000020)
7031 cpu_arch = CPU_ARCH_ARMv6;
7032 else
7033 -@@ -455,7 +461,7 @@ static void __init setup_processor(void)
7034 +@@ -462,7 +468,7 @@ static void __init setup_processor(void)
7035 __cpu_architecture = __get_cpu_architecture();
7036
7037 #ifdef MULTI_CPU
7038 @@ -2129,7 +2902,7 @@ index da1d1aa..ef9bc58 100644
7039 #ifdef MULTI_TLB
7040 cpu_tlb = *list->tlb;
7041 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
7042 -index fbc8b26..000ded0 100644
7043 +index 84f4cbf..672f5b8 100644
7044 --- a/arch/arm/kernel/smp.c
7045 +++ b/arch/arm/kernel/smp.c
7046 @@ -70,7 +70,7 @@ enum ipi_msg_type {
7047 @@ -2142,7 +2915,7 @@ index fbc8b26..000ded0 100644
7048 void __init smp_set_ops(struct smp_operations *ops)
7049 {
7050 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
7051 -index b0179b8..7713948 100644
7052 +index b0179b8..b7b16c7 100644
7053 --- a/arch/arm/kernel/traps.c
7054 +++ b/arch/arm/kernel/traps.c
7055 @@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
7056 @@ -2173,8 +2946,29 @@ index b0179b8..7713948 100644
7057 if (signr)
7058 do_exit(signr);
7059 }
7060 +@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
7061 + * The user helper at 0xffff0fe0 must be used instead.
7062 + * (see entry-armv.S for details)
7063 + */
7064 ++ pax_open_kernel();
7065 + *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
7066 ++ pax_close_kernel();
7067 + }
7068 + return 0;
7069 +
7070 +@@ -849,5 +856,9 @@ void __init early_trap_init(void *vectors_base)
7071 + sigreturn_codes, sizeof(sigreturn_codes));
7072 +
7073 + flush_icache_range(vectors, vectors + PAGE_SIZE);
7074 +- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
7075 ++
7076 ++#ifndef CONFIG_PAX_MEMORY_UDEREF
7077 ++ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
7078 ++#endif
7079 ++
7080 + }
7081 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
7082 -index 36ff15b..75d9e9d 100644
7083 +index 11c1785..c67d54c 100644
7084 --- a/arch/arm/kernel/vmlinux.lds.S
7085 +++ b/arch/arm/kernel/vmlinux.lds.S
7086 @@ -8,7 +8,11 @@
7087 @@ -2202,7 +2996,7 @@ index 36ff15b..75d9e9d 100644
7088 .text : { /* Real text segment */
7089 _stext = .; /* Text and read-only data */
7090 __exception_text_start = .;
7091 -@@ -133,6 +142,10 @@ SECTIONS
7092 +@@ -144,6 +153,10 @@ SECTIONS
7093
7094 _etext = .; /* End of text and rodata section */
7095
7096 @@ -2213,7 +3007,7 @@ index 36ff15b..75d9e9d 100644
7097 #ifndef CONFIG_XIP_KERNEL
7098 . = ALIGN(PAGE_SIZE);
7099 __init_begin = .;
7100 -@@ -192,6 +205,11 @@ SECTIONS
7101 +@@ -203,6 +216,11 @@ SECTIONS
7102 . = PAGE_OFFSET + TEXT_OFFSET;
7103 #else
7104 __init_end = .;
7105 @@ -2225,6 +3019,36 @@ index 36ff15b..75d9e9d 100644
7106 . = ALIGN(THREAD_SIZE);
7107 __data_loc = .;
7108 #endif
7109 +diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
7110 +index 14a0d98..7771a7d 100644
7111 +--- a/arch/arm/lib/clear_user.S
7112 ++++ b/arch/arm/lib/clear_user.S
7113 +@@ -12,14 +12,14 @@
7114 +
7115 + .text
7116 +
7117 +-/* Prototype: int __clear_user(void *addr, size_t sz)
7118 ++/* Prototype: int ___clear_user(void *addr, size_t sz)
7119 + * Purpose : clear some user memory
7120 + * Params : addr - user memory address to clear
7121 + * : sz - number of bytes to clear
7122 + * Returns : number of bytes NOT cleared
7123 + */
7124 + ENTRY(__clear_user_std)
7125 +-WEAK(__clear_user)
7126 ++WEAK(___clear_user)
7127 + stmfd sp!, {r1, lr}
7128 + mov r2, #0
7129 + cmp r1, #4
7130 +@@ -44,7 +44,7 @@ WEAK(__clear_user)
7131 + USER( strnebt r2, [r0])
7132 + mov r0, #0
7133 + ldmfd sp!, {r1, pc}
7134 +-ENDPROC(__clear_user)
7135 ++ENDPROC(___clear_user)
7136 + ENDPROC(__clear_user_std)
7137 +
7138 + .pushsection .fixup,"ax"
7139 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
7140 index 66a477a..bee61d3 100644
7141 --- a/arch/arm/lib/copy_from_user.S
7142 @@ -2291,6 +3115,21 @@ index d066df6..df28194 100644
7143 ENDPROC(__copy_to_user_std)
7144
7145 .pushsection .fixup,"ax"
7146 +diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
7147 +index 7d08b43..f7ca7ea 100644
7148 +--- a/arch/arm/lib/csumpartialcopyuser.S
7149 ++++ b/arch/arm/lib/csumpartialcopyuser.S
7150 +@@ -57,8 +57,8 @@
7151 + * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
7152 + */
7153 +
7154 +-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
7155 +-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
7156 ++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
7157 ++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
7158 +
7159 + #include "csumpartialcopygeneric.S"
7160 +
7161 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
7162 index 0dc5385..45833ef 100644
7163 --- a/arch/arm/lib/delay.c
7164 @@ -2349,7 +3188,7 @@ index 025f742..8432b08 100644
7165 /*
7166 * This test is stubbed out of the main function above to keep
7167 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
7168 -index 2c6c218..2b87c2d 100644
7169 +index bac21a5..b67ef8e 100644
7170 --- a/arch/arm/mach-kirkwood/common.c
7171 +++ b/arch/arm/mach-kirkwood/common.c
7172 @@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
7173 @@ -2386,10 +3225,10 @@ index 2c6c218..2b87c2d 100644
7174
7175 if (IS_ERR(clk))
7176 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
7177 -index d95f727..12f10dd 100644
7178 +index 0abb30f..54064da 100644
7179 --- a/arch/arm/mach-omap2/board-n8x0.c
7180 +++ b/arch/arm/mach-omap2/board-n8x0.c
7181 -@@ -589,7 +589,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
7182 +@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
7183 }
7184 #endif
7185
7186 @@ -2398,14 +3237,27 @@ index d95f727..12f10dd 100644
7187 .late_init = n8x0_menelaus_late_init,
7188 };
7189
7190 +diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
7191 +index 5d3b4f4..ddba3c0 100644
7192 +--- a/arch/arm/mach-omap2/omap-wakeupgen.c
7193 ++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
7194 +@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
7195 + return NOTIFY_OK;
7196 + }
7197 +
7198 +-static struct notifier_block __refdata irq_hotplug_notifier = {
7199 ++static struct notifier_block irq_hotplug_notifier = {
7200 + .notifier_call = irq_cpu_hotplug_notify,
7201 + };
7202 +
7203 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
7204 -index 87cc6d0..fd4f248 100644
7205 +index 4653efb..8c60bf7 100644
7206 --- a/arch/arm/mach-omap2/omap_hwmod.c
7207 +++ b/arch/arm/mach-omap2/omap_hwmod.c
7208 @@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
7209 - int (*is_hardreset_asserted)(struct omap_hwmod *oh,
7210 - struct omap_hwmod_rst_info *ohri);
7211 int (*init_clkdm)(struct omap_hwmod *oh);
7212 + void (*update_context_lost)(struct omap_hwmod *oh);
7213 + int (*get_context_lost)(struct omap_hwmod *oh);
7214 -};
7215 +} __no_const;
7216
7217 @@ -2415,8 +3267,47 @@ index 87cc6d0..fd4f248 100644
7218
7219 /* omap_hwmod_list contains all registered struct omap_hwmods */
7220 static LIST_HEAD(omap_hwmod_list);
7221 +diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
7222 +index 6be4c4d..32ac32a 100644
7223 +--- a/arch/arm/mach-ux500/include/mach/setup.h
7224 ++++ b/arch/arm/mach-ux500/include/mach/setup.h
7225 +@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
7226 + .type = MT_DEVICE, \
7227 + }
7228 +
7229 +-#define __MEM_DEV_DESC(x, sz) { \
7230 +- .virtual = IO_ADDRESS(x), \
7231 +- .pfn = __phys_to_pfn(x), \
7232 +- .length = sz, \
7233 +- .type = MT_MEMORY, \
7234 +-}
7235 +-
7236 + extern struct smp_operations ux500_smp_ops;
7237 + extern void ux500_cpu_die(unsigned int cpu);
7238 +
7239 +diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
7240 +index 3fd629d..8b1aca9 100644
7241 +--- a/arch/arm/mm/Kconfig
7242 ++++ b/arch/arm/mm/Kconfig
7243 +@@ -425,7 +425,7 @@ config CPU_32v5
7244 +
7245 + config CPU_32v6
7246 + bool
7247 +- select CPU_USE_DOMAINS if CPU_V6 && MMU
7248 ++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
7249 + select TLS_REG_EMUL if !CPU_32v6K && !MMU
7250 +
7251 + config CPU_32v6K
7252 +@@ -577,6 +577,7 @@ config CPU_CP15_MPU
7253 +
7254 + config CPU_USE_DOMAINS
7255 + bool
7256 ++ depends on !ARM_LPAE && !PAX_KERNEXEC
7257 + help
7258 + This option enables or disables the use of domain switching
7259 + via the set_fs() function.
7260 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
7261 -index 5dbf13f..9be36fd 100644
7262 +index 5dbf13f..6393f55 100644
7263 --- a/arch/arm/mm/fault.c
7264 +++ b/arch/arm/mm/fault.c
7265 @@ -25,6 +25,7 @@
7266 @@ -2427,27 +3318,28 @@ index 5dbf13f..9be36fd 100644
7267
7268 #include "fault.h"
7269
7270 -@@ -138,6 +139,19 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
7271 +@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
7272 if (fixup_exception(regs))
7273 return;
7274
7275 +#ifdef CONFIG_PAX_KERNEXEC
7276 -+ if (fsr & FSR_WRITE) {
7277 -+ if (((unsigned long)_stext <= addr && addr < init_mm.end_code) || (MODULES_VADDR <= addr && addr < MODULES_END)) {
7278 -+ if (current->signal->curr_ip)
7279 -+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
7280 -+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
7281 -+ else
7282 -+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
7283 -+ current->comm, task_pid_nr(current), current_uid(), current_euid());
7284 -+ }
7285 ++ if ((fsr & FSR_WRITE) &&
7286 ++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
7287 ++ (MODULES_VADDR <= addr && addr < MODULES_END)))
7288 ++ {
7289 ++ if (current->signal->curr_ip)
7290 ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
7291 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
7292 ++ else
7293 ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
7294 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
7295 + }
7296 +#endif
7297 +
7298 /*
7299 * No handler, we'll have to terminate things with extreme prejudice.
7300 */
7301 -@@ -174,6 +188,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
7302 +@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
7303 }
7304 #endif
7305
7306 @@ -2461,7 +3353,7 @@ index 5dbf13f..9be36fd 100644
7307 tsk->thread.address = addr;
7308 tsk->thread.error_code = fsr;
7309 tsk->thread.trap_no = 14;
7310 -@@ -398,6 +419,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
7311 +@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
7312 }
7313 #endif /* CONFIG_MMU */
7314
7315 @@ -2495,20 +3387,43 @@ index 5dbf13f..9be36fd 100644
7316 /*
7317 * First Level Translation Fault Handler
7318 *
7319 -@@ -575,12 +623,41 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
7320 +@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
7321 + const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
7322 + struct siginfo info;
7323 +
7324 ++#ifdef CONFIG_PAX_MEMORY_UDEREF
7325 ++ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
7326 ++ if (current->signal->curr_ip)
7327 ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
7328 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
7329 ++ else
7330 ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
7331 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
7332 ++ goto die;
7333 ++ }
7334 ++#endif
7335 ++
7336 + if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
7337 + return;
7338 +
7339 ++die:
7340 + printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
7341 + inf->name, fsr, addr);
7342 +
7343 +@@ -575,9 +637,38 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
7344 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
7345 struct siginfo info;
7346
7347 -+#ifdef CONFIG_PAX_KERNEXEC
7348 -+ if (!user_mode(regs) && is_xn_fault(ifsr)) {
7349 ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
7350 ++ if (!user_mode(regs) && (is_domain_fault(ifsr) || is_xn_fault(ifsr))) {
7351 + if (current->signal->curr_ip)
7352 -+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
7353 -+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid(),
7354 -+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
7355 ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
7356 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
7357 ++ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
7358 + else
7359 -+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n",
7360 -+ current->comm, task_pid_nr(current), current_uid(), current_euid(),
7361 -+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
7362 ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
7363 ++ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
7364 ++ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
7365 + goto die;
7366 + }
7367 +#endif
7368 @@ -2530,15 +3445,12 @@ index 5dbf13f..9be36fd 100644
7369 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
7370 return;
7371
7372 ++die:
7373 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
7374 inf->name, ifsr, addr);
7375
7376 -+die:
7377 - info.si_signo = inf->sig;
7378 - info.si_errno = 0;
7379 - info.si_code = inf->code;
7380 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
7381 -index cf08bdf..f1a0383 100644
7382 +index cf08bdf..772656c 100644
7383 --- a/arch/arm/mm/fault.h
7384 +++ b/arch/arm/mm/fault.h
7385 @@ -3,6 +3,7 @@
7386 @@ -2549,7 +3461,7 @@ index cf08bdf..f1a0383 100644
7387 */
7388 #define FSR_LNX_PF (1 << 31)
7389 #define FSR_WRITE (1 << 11)
7390 -@@ -22,6 +23,12 @@ static inline int fsr_fs(unsigned int fsr)
7391 +@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
7392 }
7393 #endif
7394
7395 @@ -2559,50 +3471,67 @@ index cf08bdf..f1a0383 100644
7396 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
7397 +}
7398 +
7399 ++static inline int is_domain_fault(unsigned int fsr)
7400 ++{
7401 ++ return ((fsr_fs(fsr) & 0xD) == 0x9);
7402 ++}
7403 ++
7404 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
7405 unsigned long search_exception_table(unsigned long addr);
7406
7407 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
7408 -index ad722f1..46b670e 100644
7409 +index ad722f1..763fdd3 100644
7410 --- a/arch/arm/mm/init.c
7411 +++ b/arch/arm/mm/init.c
7412 -@@ -734,9 +734,43 @@ void __init mem_init(void)
7413 +@@ -30,6 +30,8 @@
7414 + #include <asm/setup.h>
7415 + #include <asm/tlb.h>
7416 + #include <asm/fixmap.h>
7417 ++#include <asm/system_info.h>
7418 ++#include <asm/cp15.h>
7419
7420 - void free_initmem(void)
7421 + #include <asm/mach/arch.h>
7422 + #include <asm/mach/map.h>
7423 +@@ -736,7 +738,46 @@ void free_initmem(void)
7424 {
7425 -+
7426 + #ifdef CONFIG_HAVE_TCM
7427 + extern char __tcm_start, __tcm_end;
7428 ++#endif
7429 +
7430 +#ifdef CONFIG_PAX_KERNEXEC
7431 + unsigned long addr;
7432 + pgd_t *pgd;
7433 + pud_t *pud;
7434 + pmd_t *pmd;
7435 -+#endif
7436 ++ int cpu_arch = cpu_architecture();
7437 ++ unsigned int cr = get_cr();
7438 +
7439 - #ifdef CONFIG_HAVE_TCM
7440 - extern char __tcm_start, __tcm_end;
7441 ++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
7442 ++ /* make pages tables, etc before .text NX */
7443 ++ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
7444 ++ pgd = pgd_offset_k(addr);
7445 ++ pud = pud_offset(pgd, addr);
7446 ++ pmd = pmd_offset(pud, addr);
7447 ++ __section_update(pmd, addr, PMD_SECT_XN);
7448 ++ }
7449 ++ /* make init NX */
7450 ++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
7451 ++ pgd = pgd_offset_k(addr);
7452 ++ pud = pud_offset(pgd, addr);
7453 ++ pmd = pmd_offset(pud, addr);
7454 ++ __section_update(pmd, addr, PMD_SECT_XN);
7455 ++ }
7456 ++ /* make kernel code/rodata RX */
7457 ++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
7458 ++ pgd = pgd_offset_k(addr);
7459 ++ pud = pud_offset(pgd, addr);
7460 ++ pmd = pmd_offset(pud, addr);
7461 ++#ifdef CONFIG_ARM_LPAE
7462 ++ __section_update(pmd, addr, PMD_SECT_RDONLY);
7463 ++#else
7464 ++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
7465 +#endif
7466 -
7467 -+#ifdef CONFIG_PAX_KERNEXEC
7468 -+ /* make pages tables, etc before .text NX */
7469 -+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += PMD_SIZE) {
7470 -+ pgd = pgd_offset_k(addr);
7471 -+ pud = pud_offset(pgd, addr);
7472 -+ pmd = pmd_offset(pud, addr);
7473 -+ __pmd_update(pmd, PMD_SECT_XN);
7474 -+ }
7475 -+ /* make init NX */
7476 -+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += PMD_SIZE) {
7477 -+ pgd = pgd_offset_k(addr);
7478 -+ pud = pud_offset(pgd, addr);
7479 -+ pmd = pmd_offset(pud, addr);
7480 -+ __pmd_update(pmd, PMD_SECT_XN);
7481 -+ }
7482 -+ /* make kernel code/rodata read-only */
7483 -+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += PMD_SIZE) {
7484 -+ pgd = pgd_offset_k(addr);
7485 -+ pud = pud_offset(pgd, addr);
7486 -+ pmd = pmd_offset(pud, addr);
7487 -+ __pmd_update(pmd, PMD_SECT_AP_RDONLY);
7488 ++ }
7489 + }
7490 +#endif
7491 +
7492 @@ -2610,19 +3539,35 @@ index ad722f1..46b670e 100644
7493 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
7494 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
7495 __phys_to_pfn(__pa(&__tcm_end)),
7496 +diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
7497 +index 88fd86c..7a224ce 100644
7498 +--- a/arch/arm/mm/ioremap.c
7499 ++++ b/arch/arm/mm/ioremap.c
7500 +@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
7501 + unsigned int mtype;
7502 +
7503 + if (cached)
7504 +- mtype = MT_MEMORY;
7505 ++ mtype = MT_MEMORY_RX;
7506 + else
7507 +- mtype = MT_MEMORY_NONCACHED;
7508 ++ mtype = MT_MEMORY_NONCACHED_RX;
7509 +
7510 + return __arm_ioremap_caller(phys_addr, size, mtype,
7511 + __builtin_return_address(0));
7512 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
7513 -index ce8cb19..061aa14 100644
7514 +index 10062ce..aa96dd7 100644
7515 --- a/arch/arm/mm/mmap.c
7516 +++ b/arch/arm/mm/mmap.c
7517 -@@ -72,6 +72,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7518 - unsigned long start_addr;
7519 +@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7520 + struct vm_area_struct *vma;
7521 int do_align = 0;
7522 int aliasing = cache_is_vipt_aliasing();
7523 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7524 + struct vm_unmapped_area_info info;
7525
7526 /*
7527 - * We only need to do colour alignment if either the I or D
7528 -@@ -93,6 +94,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7529 +@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7530 if (len > TASK_SIZE)
7531 return -ENOMEM;
7532
7533 @@ -2633,7 +3578,7 @@ index ce8cb19..061aa14 100644
7534 if (addr) {
7535 if (do_align)
7536 addr = COLOUR_ALIGN(addr, pgoff);
7537 -@@ -100,15 +105,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7538 +@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7539 addr = PAGE_ALIGN(addr);
7540
7541 vma = find_vma(mm, addr);
7542 @@ -2642,44 +3587,16 @@ index ce8cb19..061aa14 100644
7543 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7544 return addr;
7545 }
7546 - if (len > mm->cached_hole_size) {
7547 -- start_addr = addr = mm->free_area_cache;
7548 -+ start_addr = addr = mm->free_area_cache;
7549 - } else {
7550 -- start_addr = addr = mm->mmap_base;
7551 -- mm->cached_hole_size = 0;
7552 -+ start_addr = addr = mm->mmap_base;
7553 -+ mm->cached_hole_size = 0;
7554 - }
7555
7556 - full_search:
7557 -@@ -124,14 +128,14 @@ full_search:
7558 - * Start a new search - just in case we missed
7559 - * some holes.
7560 - */
7561 -- if (start_addr != TASK_UNMAPPED_BASE) {
7562 -- start_addr = addr = TASK_UNMAPPED_BASE;
7563 -+ if (start_addr != mm->mmap_base) {
7564 -+ start_addr = addr = mm->mmap_base;
7565 - mm->cached_hole_size = 0;
7566 - goto full_search;
7567 - }
7568 - return -ENOMEM;
7569 - }
7570 -- if (!vma || addr + len <= vma->vm_start) {
7571 -+ if (check_heap_stack_gap(vma, addr, len, offset)) {
7572 - /*
7573 - * Remember the place where we stopped the search:
7574 - */
7575 -@@ -156,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7576 +@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7577 unsigned long addr = addr0;
7578 int do_align = 0;
7579 int aliasing = cache_is_vipt_aliasing();
7580 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7581 + struct vm_unmapped_area_info info;
7582
7583 /*
7584 - * We only need to do colour alignment if either the I or D
7585 -@@ -175,6 +180,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7586 +@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7587 return addr;
7588 }
7589
7590 @@ -2690,7 +3607,7 @@ index ce8cb19..061aa14 100644
7591 /* requesting a specific address */
7592 if (addr) {
7593 if (do_align)
7594 -@@ -182,8 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7595 +@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7596 else
7597 addr = PAGE_ALIGN(addr);
7598 vma = find_vma(mm, addr);
7599 @@ -2700,61 +3617,31 @@ index ce8cb19..061aa14 100644
7600 return addr;
7601 }
7602
7603 -@@ -203,7 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7604 - /* make sure it can fit in the remaining address space */
7605 - if (addr > len) {
7606 - vma = find_vma(mm, addr-len);
7607 -- if (!vma || addr <= vma->vm_start)
7608 -+ if (check_heap_stack_gap(vma, addr - len, len, offset))
7609 - /* remember the address as a hint for next time */
7610 - return (mm->free_area_cache = addr-len);
7611 +@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7612 + VM_BUG_ON(addr != -ENOMEM);
7613 + info.flags = 0;
7614 + info.low_limit = mm->mmap_base;
7615 ++
7616 ++#ifdef CONFIG_PAX_RANDMMAP
7617 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
7618 ++ info.low_limit += mm->delta_mmap;
7619 ++#endif
7620 ++
7621 + info.high_limit = TASK_SIZE;
7622 + addr = vm_unmapped_area(&info);
7623 }
7624 -@@ -212,17 +220,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7625 - goto bottomup;
7626 -
7627 - addr = mm->mmap_base - len;
7628 -- if (do_align)
7629 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7630 -
7631 - do {
7632 -+ if (do_align)
7633 -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7634 - /*
7635 - * Lookup failure means no vma is above this address,
7636 - * else if new region fits below vma->vm_start,
7637 - * return with success:
7638 - */
7639 - vma = find_vma(mm, addr);
7640 -- if (!vma || addr+len <= vma->vm_start)
7641 -+ if (check_heap_stack_gap(vma, addr, len, offset))
7642 - /* remember the address as a hint for next time */
7643 - return (mm->free_area_cache = addr);
7644 -
7645 -@@ -231,10 +239,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7646 - mm->cached_hole_size = vma->vm_start - addr;
7647 -
7648 - /* try just below the current vma->vm_start */
7649 -- addr = vma->vm_start - len;
7650 -- if (do_align)
7651 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
7652 -- } while (len < vma->vm_start);
7653 -+ addr = skip_heap_stack_gap(vma, len, offset);
7654 -+ } while (!IS_ERR_VALUE(addr));
7655 -
7656 - bottomup:
7657 - /*
7658 -@@ -259,6 +265,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7659 +@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7660 {
7661 unsigned long random_factor = 0UL;
7662
7663 +#ifdef CONFIG_PAX_RANDMMAP
7664 -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7665 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7666 +#endif
7667 +
7668 /* 8 bits of randomness in 20 address space bits */
7669 if ((current->flags & PF_RANDOMIZE) &&
7670 !(current->personality & ADDR_NO_RANDOMIZE))
7671 -@@ -266,10 +276,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7672 +@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7673
7674 if (mmap_is_legacy()) {
7675 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7676 @@ -2778,89 +3665,243 @@ index ce8cb19..061aa14 100644
7677 mm->unmap_area = arch_unmap_area_topdown;
7678 }
7679 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
7680 -index 99b47b9..579b667 100644
7681 +index ce328c7..f82bebb 100644
7682 --- a/arch/arm/mm/mmu.c
7683 +++ b/arch/arm/mm/mmu.c
7684 -@@ -227,16 +227,16 @@ static struct mem_type mem_types[] = {
7685 +@@ -35,6 +35,23 @@
7686 +
7687 + #include "mm.h"
7688 +
7689 ++
7690 ++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
7691 ++void modify_domain(unsigned int dom, unsigned int type)
7692 ++{
7693 ++ struct thread_info *thread = current_thread_info();
7694 ++ unsigned int domain = thread->cpu_domain;
7695 ++ /*
7696 ++ * DOMAIN_MANAGER might be defined to some other value,
7697 ++ * use the arch-defined constant
7698 ++ */
7699 ++ domain &= ~domain_val(dom, 3);
7700 ++ thread->cpu_domain = domain | domain_val(dom, type);
7701 ++ set_domain(thread->cpu_domain);
7702 ++}
7703 ++EXPORT_SYMBOL(modify_domain);
7704 ++#endif
7705 ++
7706 + /*
7707 + * empty_zero_page is a special page that is used for
7708 + * zero-initialized data and COW.
7709 +@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
7710 + }
7711 + #endif
7712 +
7713 +-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
7714 ++#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
7715 + #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
7716 +
7717 +-static struct mem_type mem_types[] = {
7718 ++#ifdef CONFIG_PAX_KERNEXEC
7719 ++#define L_PTE_KERNEXEC L_PTE_RDONLY
7720 ++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
7721 ++#else
7722 ++#define L_PTE_KERNEXEC L_PTE_DIRTY
7723 ++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
7724 ++#endif
7725 ++
7726 ++static struct mem_type mem_types[] __read_only = {
7727 + [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
7728 + .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
7729 + L_PTE_SHARED,
7730 +@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
7731 [MT_UNCACHED] = {
7732 .prot_pte = PROT_PTE_DEVICE,
7733 .prot_l1 = PMD_TYPE_TABLE,
7734 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
7735 -+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_XN,
7736 ++ .prot_sect = PROT_SECT_DEVICE,
7737 .domain = DOMAIN_IO,
7738 },
7739 [MT_CACHECLEAN] = {
7740 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
7741 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
7742 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
7743 .domain = DOMAIN_KERNEL,
7744 },
7745 #ifndef CONFIG_ARM_LPAE
7746 [MT_MINICLEAN] = {
7747 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
7748 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE | PMD_SECT_AP_RDONLY,
7749 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
7750 .domain = DOMAIN_KERNEL,
7751 },
7752 #endif
7753 -@@ -258,8 +258,26 @@ static struct mem_type mem_types[] = {
7754 +@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
7755 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
7756 + L_PTE_RDONLY,
7757 + .prot_l1 = PMD_TYPE_TABLE,
7758 +- .domain = DOMAIN_USER,
7759 ++ .domain = DOMAIN_VECTORS,
7760 + },
7761 + [MT_HIGH_VECTORS] = {
7762 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
7763 + L_PTE_USER | L_PTE_RDONLY,
7764 + .prot_l1 = PMD_TYPE_TABLE,
7765 +- .domain = DOMAIN_USER,
7766 ++ .domain = DOMAIN_VECTORS,
7767 + },
7768 +- [MT_MEMORY] = {
7769 ++ [MT_MEMORY_RWX] = {
7770 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
7771 + .prot_l1 = PMD_TYPE_TABLE,
7772 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
7773 .domain = DOMAIN_KERNEL,
7774 },
7775 -+ [MT_MEMORY_R] = {
7776 -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY | L_PTE_XN,
7777 -+ .prot_l1 = PMD_TYPE_TABLE,
7778 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY | PMD_SECT_XN,
7779 -+ .domain = DOMAIN_KERNEL,
7780 -+ },
7781 + [MT_MEMORY_RW] = {
7782 -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN,
7783 ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
7784 + .prot_l1 = PMD_TYPE_TABLE,
7785 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
7786 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
7787 + .domain = DOMAIN_KERNEL,
7788 + },
7789 + [MT_MEMORY_RX] = {
7790 -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_RDONLY,
7791 ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
7792 + .prot_l1 = PMD_TYPE_TABLE,
7793 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
7794 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
7795 + .domain = DOMAIN_KERNEL,
7796 + },
7797 [MT_ROM] = {
7798 - .prot_sect = PMD_TYPE_SECT,
7799 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_RDONLY,
7800 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
7801 .domain = DOMAIN_KERNEL,
7802 },
7803 - [MT_MEMORY_NONCACHED] = {
7804 -@@ -273,7 +291,7 @@ static struct mem_type mem_types[] = {
7805 +- [MT_MEMORY_NONCACHED] = {
7806 ++ [MT_MEMORY_NONCACHED_RW] = {
7807 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
7808 - L_PTE_XN,
7809 + L_PTE_MT_BUFFERABLE,
7810 + .prot_l1 = PMD_TYPE_TABLE,
7811 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
7812 + .domain = DOMAIN_KERNEL,
7813 + },
7814 ++ [MT_MEMORY_NONCACHED_RX] = {
7815 ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
7816 ++ L_PTE_MT_BUFFERABLE,
7817 ++ .prot_l1 = PMD_TYPE_TABLE,
7818 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
7819 ++ .domain = DOMAIN_KERNEL,
7820 ++ },
7821 + [MT_MEMORY_DTCM] = {
7822 +- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
7823 +- L_PTE_XN,
7824 ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
7825 .prot_l1 = PMD_TYPE_TABLE,
7826 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
7827 -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_RDONLY,
7828 ++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
7829 .domain = DOMAIN_KERNEL,
7830 },
7831 [MT_MEMORY_ITCM] = {
7832 -@@ -432,6 +450,8 @@ static void __init build_mem_type_table(void)
7833 +@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
7834 + },
7835 + [MT_MEMORY_SO] = {
7836 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
7837 +- L_PTE_MT_UNCACHED | L_PTE_XN,
7838 ++ L_PTE_MT_UNCACHED,
7839 + .prot_l1 = PMD_TYPE_TABLE,
7840 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
7841 +- PMD_SECT_UNCACHED | PMD_SECT_XN,
7842 ++ PMD_SECT_UNCACHED,
7843 + .domain = DOMAIN_KERNEL,
7844 + },
7845 + [MT_MEMORY_DMA_READY] = {
7846 +@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
7847 + * to prevent speculative instruction fetches.
7848 + */
7849 + mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
7850 ++ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
7851 + mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
7852 ++ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
7853 + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
7854 ++ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
7855 + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
7856 ++ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
7857 ++
7858 ++ /* Mark other regions on ARMv6+ as execute-never */
7859 ++
7860 ++#ifdef CONFIG_PAX_KERNEXEC
7861 ++ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
7862 ++ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
7863 ++ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
7864 ++ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
7865 ++#ifndef CONFIG_ARM_LPAE
7866 ++ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
7867 ++ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
7868 ++#endif
7869 ++ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
7870 ++ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
7871 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
7872 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
7873 ++ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
7874 ++ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
7875 ++#endif
7876 ++
7877 ++ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
7878 ++ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
7879 + }
7880 + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
7881 + /*
7882 +@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
7883 * from SVC mode and no access from userspace.
7884 */
7885 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
7886 ++#ifdef CONFIG_PAX_KERNEXEC
7887 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
7888 -+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
7889 ++#endif
7890 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
7891 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
7892 #endif
7893 -@@ -450,6 +470,12 @@ static void __init build_mem_type_table(void)
7894 +@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
7895 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
7896 + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
7897 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
7898 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
7899 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
7900 -+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
7901 -+ mem_types[MT_MEMORY_R].prot_pte |= L_PTE_SHARED;
7902 +- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
7903 +- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
7904 ++ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
7905 ++ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
7906 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
7907 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
7908 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
7909 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
7910 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
7911 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
7912 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
7913 -@@ -487,6 +513,8 @@ static void __init build_mem_type_table(void)
7914 +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
7915 +- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
7916 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
7917 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
7918 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
7919 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
7920 + }
7921 + }
7922 +
7923 +@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
7924 + if (cpu_arch >= CPU_ARCH_ARMv6) {
7925 + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
7926 + /* Non-cacheable Normal is XCB = 001 */
7927 +- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
7928 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
7929 ++ PMD_SECT_BUFFERED;
7930 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
7931 + PMD_SECT_BUFFERED;
7932 + } else {
7933 + /* For both ARMv6 and non-TEX-remapping ARMv7 */
7934 +- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
7935 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
7936 ++ PMD_SECT_TEX(1);
7937 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
7938 + PMD_SECT_TEX(1);
7939 + }
7940 + } else {
7941 +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
7942 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
7943 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
7944 + }
7945 +
7946 + #ifdef CONFIG_ARM_LPAE
7947 +@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
7948 vecs_pgprot |= PTE_EXT_AF;
7949 #endif
7950
7951 @@ -2869,24 +3910,63 @@ index 99b47b9..579b667 100644
7952 for (i = 0; i < 16; i++) {
7953 pteval_t v = pgprot_val(protection_map[i]);
7954 protection_map[i] = __pgprot(v | user_pgprot);
7955 -@@ -503,6 +531,12 @@ static void __init build_mem_type_table(void)
7956 +@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
7957 +
7958 + mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
7959 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
7960 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
7961 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
7962 -+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
7963 -+ mem_types[MT_MEMORY_R].prot_pte |= kern_pgprot;
7964 +- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
7965 +- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
7966 ++ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
7967 ++ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
7968 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
7969 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
7970 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
7971 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
7972 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
7973 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
7974 +- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
7975 ++ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
7976 ++ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
7977 mem_types[MT_ROM].prot_sect |= cp->pmd;
7978 -@@ -1198,7 +1232,41 @@ static void __init map_lowmem(void)
7979 +
7980 + switch (cp->pmd) {
7981 +@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
7982 + * called function. This means you can't use any function or debugging
7983 + * method which may touch any device, otherwise the kernel _will_ crash.
7984 + */
7985 ++
7986 ++static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
7987 ++
7988 + static void __init devicemaps_init(struct machine_desc *mdesc)
7989 + {
7990 + struct map_desc map;
7991 + unsigned long addr;
7992 +- void *vectors;
7993 +
7994 +- /*
7995 +- * Allocate the vector page early.
7996 +- */
7997 +- vectors = early_alloc(PAGE_SIZE);
7998 +-
7999 +- early_trap_init(vectors);
8000 ++ early_trap_init(&vectors);
8001 +
8002 + for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
8003 + pmd_clear(pmd_off_k(addr));
8004 +@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
8005 + * location (0xffff0000). If we aren't using high-vectors, also
8006 + * create a mapping at the low-vectors virtual address.
8007 + */
8008 +- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
8009 ++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
8010 + map.virtual = 0xffff0000;
8011 + map.length = PAGE_SIZE;
8012 + map.type = MT_HIGH_VECTORS;
8013 +@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
8014 map.pfn = __phys_to_pfn(start);
8015 map.virtual = __phys_to_virt(start);
8016 map.length = end - start;
8017 -+
8018 +- map.type = MT_MEMORY;
8019 +
8020 +#ifdef CONFIG_PAX_KERNEXEC
8021 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
8022 + struct map_desc kernel;
8023 @@ -2896,19 +3976,19 @@ index 99b47b9..579b667 100644
8024 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
8025 + initmap.virtual = (unsigned long)__init_begin;
8026 + initmap.length = _sdata - __init_begin;
8027 -+ initmap.type = MT_MEMORY;
8028 ++ initmap.type = MT_MEMORY_RWX;
8029 + create_mapping(&initmap);
8030 +
8031 + /* when freeing initmem we will make this RX */
8032 + kernel.pfn = __phys_to_pfn(__pa(_stext));
8033 + kernel.virtual = (unsigned long)_stext;
8034 + kernel.length = __init_begin - _stext;
8035 -+ kernel.type = MT_MEMORY;
8036 ++ kernel.type = MT_MEMORY_RWX;
8037 + create_mapping(&kernel);
8038 +
8039 + if (map.virtual < (unsigned long)_stext) {
8040 + map.length = (unsigned long)_stext - map.virtual;
8041 -+ map.type = MT_MEMORY;
8042 ++ map.type = MT_MEMORY_RWX;
8043 + create_mapping(&map);
8044 + }
8045 +
8046 @@ -2916,19 +3996,44 @@ index 99b47b9..579b667 100644
8047 + map.virtual = (unsigned long)_sdata;
8048 + map.length = end - __pa(_sdata);
8049 + }
8050 ++#endif
8051 +
8052 + map.type = MT_MEMORY_RW;
8053 -+#else
8054 - map.type = MT_MEMORY;
8055 -+#endif
8056 -
8057 create_mapping(&map);
8058 }
8059 + }
8060 +diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
8061 +index 6d98c13..3cfb174 100644
8062 +--- a/arch/arm/mm/proc-v7-2level.S
8063 ++++ b/arch/arm/mm/proc-v7-2level.S
8064 +@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
8065 + tst r1, #L_PTE_XN
8066 + orrne r3, r3, #PTE_EXT_XN
8067 +
8068 ++ tst r1, #L_PTE_PXN
8069 ++ orrne r3, r3, #PTE_EXT_PXN
8070 ++
8071 + tst r1, #L_PTE_YOUNG
8072 + tstne r1, #L_PTE_VALID
8073 + #ifndef CONFIG_CPU_USE_DOMAINS
8074 +diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
8075 +index a5bc92d..0bb4730 100644
8076 +--- a/arch/arm/plat-omap/sram.c
8077 ++++ b/arch/arm/plat-omap/sram.c
8078 +@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
8079 + * Looks like we need to preserve some bootloader code at the
8080 + * beginning of SRAM for jumping to flash for reboot to work...
8081 + */
8082 ++ pax_open_kernel();
8083 + memset_io(omap_sram_base + omap_sram_skip, 0,
8084 + omap_sram_size - omap_sram_skip);
8085 ++ pax_close_kernel();
8086 + }
8087 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
8088 -index ec63e4a..62aa5f1d 100644
8089 +index b76c065..b6e766b 100644
8090 --- a/arch/arm/plat-orion/include/plat/addr-map.h
8091 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
8092 -@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
8093 +@@ -27,7 +27,7 @@ struct orion_addr_map_cfg {
8094 value in bridge_virt_base */
8095 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
8096 const int win);
8097 @@ -2950,6 +4055,32 @@ index f5144cd..71f6d1f 100644
8098
8099 extern void *samsung_dmadev_get_ops(void);
8100 extern void *s3c_dma_get_ops(void);
8101 +diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
8102 +index 0c3ba9f..95722b3 100644
8103 +--- a/arch/arm64/kernel/debug-monitors.c
8104 ++++ b/arch/arm64/kernel/debug-monitors.c
8105 +@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
8106 + return NOTIFY_OK;
8107 + }
8108 +
8109 +-static struct notifier_block __cpuinitdata os_lock_nb = {
8110 ++static struct notifier_block os_lock_nb = {
8111 + .notifier_call = os_lock_notify,
8112 + };
8113 +
8114 +diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
8115 +index 5ab825c..96aaec8 100644
8116 +--- a/arch/arm64/kernel/hw_breakpoint.c
8117 ++++ b/arch/arm64/kernel/hw_breakpoint.c
8118 +@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
8119 + return NOTIFY_OK;
8120 + }
8121 +
8122 +-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
8123 ++static struct notifier_block hw_breakpoint_reset_nb = {
8124 + .notifier_call = hw_breakpoint_reset_notify,
8125 + };
8126 +
8127 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
8128 index c3a58a1..78fbf54 100644
8129 --- a/arch/avr32/include/asm/cache.h
8130 @@ -3414,6 +4545,32 @@ index 449c8c0..50cdf87 100644
8131 __cu_len; \
8132 })
8133
8134 +diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
8135 +index 2d67317..07d8bfa 100644
8136 +--- a/arch/ia64/kernel/err_inject.c
8137 ++++ b/arch/ia64/kernel/err_inject.c
8138 +@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
8139 + return NOTIFY_OK;
8140 + }
8141 +
8142 +-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
8143 ++static struct notifier_block err_inject_cpu_notifier =
8144 + {
8145 + .notifier_call = err_inject_cpu_callback,
8146 + };
8147 +diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
8148 +index 65bf9cd..794f06b 100644
8149 +--- a/arch/ia64/kernel/mca.c
8150 ++++ b/arch/ia64/kernel/mca.c
8151 +@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
8152 + return NOTIFY_OK;
8153 + }
8154 +
8155 +-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
8156 ++static struct notifier_block mca_cpu_notifier = {
8157 + .notifier_call = mca_cpu_callback
8158 + };
8159 +
8160 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
8161 index 24603be..948052d 100644
8162 --- a/arch/ia64/kernel/module.c
8163 @@ -3506,6 +4663,32 @@ index 24603be..948052d 100644
8164 mod->arch.gp = gp;
8165 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
8166 }
8167 +diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
8168 +index 77597e5..6f28f3f 100644
8169 +--- a/arch/ia64/kernel/palinfo.c
8170 ++++ b/arch/ia64/kernel/palinfo.c
8171 +@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
8172 + return NOTIFY_OK;
8173 + }
8174 +
8175 +-static struct notifier_block __refdata palinfo_cpu_notifier =
8176 ++static struct notifier_block palinfo_cpu_notifier =
8177 + {
8178 + .notifier_call = palinfo_cpu_callback,
8179 + .priority = 0,
8180 +diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
8181 +index 79802e5..1a89ec5 100644
8182 +--- a/arch/ia64/kernel/salinfo.c
8183 ++++ b/arch/ia64/kernel/salinfo.c
8184 +@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
8185 + return NOTIFY_OK;
8186 + }
8187 +
8188 +-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
8189 ++static struct notifier_block salinfo_cpu_notifier =
8190 + {
8191 + .notifier_call = salinfo_cpu_callback,
8192 + .priority = 0,
8193 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
8194 index d9439ef..d0cac6b 100644
8195 --- a/arch/ia64/kernel/sys_ia64.c
8196 @@ -3551,7 +4734,7 @@ index d9439ef..d0cac6b 100644
8197 mm->free_area_cache = addr + len;
8198 return addr;
8199 diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
8200 -index c64460b..4d250a6 100644
8201 +index dc00b2c..cce53c2 100644
8202 --- a/arch/ia64/kernel/topology.c
8203 +++ b/arch/ia64/kernel/topology.c
8204 @@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
8205 @@ -3649,7 +4832,7 @@ index 5ca674b..127c3cb 100644
8206 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
8207 }
8208 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
8209 -index 082e383..fb7be80 100644
8210 +index b755ea9..b9a969e 100644
8211 --- a/arch/ia64/mm/init.c
8212 +++ b/arch/ia64/mm/init.c
8213 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
8214 @@ -3834,10 +5017,10 @@ index c1f6afa..38cc6e9 100644
8215
8216 #endif /* _ASM_EXEC_H */
8217 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
8218 -index da9bd7d..91aa7ab 100644
8219 +index dbaec94..6a14935 100644
8220 --- a/arch/mips/include/asm/page.h
8221 +++ b/arch/mips/include/asm/page.h
8222 -@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
8223 +@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
8224 #ifdef CONFIG_CPU_MIPS32
8225 typedef struct { unsigned long pte_low, pte_high; } pte_t;
8226 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
8227 @@ -3863,10 +5046,10 @@ index 881d18b..cea38bc 100644
8228
8229 /*
8230 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
8231 -index 18806a5..141ffcf 100644
8232 +index b2050b9..d71bb1b 100644
8233 --- a/arch/mips/include/asm/thread_info.h
8234 +++ b/arch/mips/include/asm/thread_info.h
8235 -@@ -110,6 +110,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
8236 +@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
8237 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
8238 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
8239 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
8240 @@ -3875,7 +5058,7 @@ index 18806a5..141ffcf 100644
8241 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
8242
8243 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8244 -@@ -125,15 +127,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
8245 +@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
8246 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
8247 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
8248 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
8249 @@ -3933,10 +5116,10 @@ index ff44823..97f8906 100644
8250
8251 /*
8252 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
8253 -index 69b17a9..9db82f9 100644
8254 +index a11c6f9..be5e164 100644
8255 --- a/arch/mips/kernel/process.c
8256 +++ b/arch/mips/kernel/process.c
8257 -@@ -478,15 +478,3 @@ unsigned long get_wchan(struct task_struct *task)
8258 +@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
8259 out:
8260 return pc;
8261 }
8262 @@ -3980,7 +5163,7 @@ index 4812c6d..2069554 100644
8263 goto out;
8264
8265 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
8266 -index 374f66e..1c882a0 100644
8267 +index d20a4bc..7096ae5 100644
8268 --- a/arch/mips/kernel/scall32-o32.S
8269 +++ b/arch/mips/kernel/scall32-o32.S
8270 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
8271 @@ -3993,7 +5176,7 @@ index 374f66e..1c882a0 100644
8272 bnez t0, syscall_trace_entry # -> yes
8273
8274 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
8275 -index 169de6a..f594a89 100644
8276 +index b64f642..0fe6eab 100644
8277 --- a/arch/mips/kernel/scall64-64.S
8278 +++ b/arch/mips/kernel/scall64-64.S
8279 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
8280 @@ -4006,10 +5189,10 @@ index 169de6a..f594a89 100644
8281 and t0, t1, t0
8282 bnez t0, syscall_trace_entry
8283 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
8284 -index 86ec03f..1235baf 100644
8285 +index c29ac19..c592d05 100644
8286 --- a/arch/mips/kernel/scall64-n32.S
8287 +++ b/arch/mips/kernel/scall64-n32.S
8288 -@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
8289 +@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
8290
8291 sd a3, PT_R26(sp) # save a3 for syscall restarting
8292
8293 @@ -4019,7 +5202,7 @@ index 86ec03f..1235baf 100644
8294 and t0, t1, t0
8295 bnez t0, n32_syscall_trace_entry
8296 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
8297 -index 53c2d72..3734584 100644
8298 +index cf3e75e..72e93fe 100644
8299 --- a/arch/mips/kernel/scall64-o32.S
8300 +++ b/arch/mips/kernel/scall64-o32.S
8301 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
8302 @@ -4060,18 +5243,18 @@ index ddcec1e..c7f983e 100644
8303 * This routine handles page faults. It determines the address,
8304 * and the problem, and then passes it off to one of the appropriate
8305 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
8306 -index 302d779..6459dc0 100644
8307 +index 7e5fe27..479a219 100644
8308 --- a/arch/mips/mm/mmap.c
8309 +++ b/arch/mips/mm/mmap.c
8310 -@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8311 +@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8312 struct vm_area_struct *vma;
8313 unsigned long addr = addr0;
8314 int do_color_align;
8315 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8316 + struct vm_unmapped_area_info info;
8317
8318 if (unlikely(len > TASK_SIZE))
8319 - return -ENOMEM;
8320 -@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8321 +@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8322 do_color_align = 1;
8323
8324 /* requesting a specific address */
8325 @@ -4083,7 +5266,7 @@ index 302d779..6459dc0 100644
8326 if (addr) {
8327 if (do_color_align)
8328 addr = COLOUR_ALIGN(addr, pgoff);
8329 -@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8330 +@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8331 addr = PAGE_ALIGN(addr);
8332
8333 vma = find_vma(mm, addr);
8334 @@ -4093,70 +5276,18 @@ index 302d779..6459dc0 100644
8335 return addr;
8336 }
8337
8338 -@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8339 - /* At this point: (!vma || addr < vma->vm_end). */
8340 - if (TASK_SIZE - len < addr)
8341 - return -ENOMEM;
8342 -- if (!vma || addr + len <= vma->vm_start)
8343 -+ if (check_heap_stack_gap(vmm, addr, len, offset))
8344 - return addr;
8345 - addr = vma->vm_end;
8346 - if (do_color_align)
8347 -@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8348 - /* make sure it can fit in the remaining address space */
8349 - if (likely(addr > len)) {
8350 - vma = find_vma(mm, addr - len);
8351 -- if (!vma || addr <= vma->vm_start) {
8352 -+ if (check_heap_stack_gap(vmm, addr - len, len, offset))
8353 - /* cache the address as a hint for next time */
8354 - return mm->free_area_cache = addr - len;
8355 - }
8356 -@@ -155,17 +160,17 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8357 - goto bottomup;
8358 -
8359 - addr = mm->mmap_base - len;
8360 -- if (do_color_align)
8361 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8362 -
8363 - do {
8364 -+ if (do_color_align)
8365 -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8366 - /*
8367 - * Lookup failure means no vma is above this address,
8368 - * else if new region fits below vma->vm_start,
8369 - * return with success:
8370 - */
8371 - vma = find_vma(mm, addr);
8372 -- if (likely(!vma || addr + len <= vma->vm_start)) {
8373 -+ if (check_heap_stack_gap(vmm, addr, len, offset)) {
8374 - /* cache the address as a hint for next time */
8375 - return mm->free_area_cache = addr;
8376 - }
8377 -@@ -175,10 +180,8 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
8378 - mm->cached_hole_size = vma->vm_start - addr;
8379 -
8380 - /* try just below the current vma->vm_start */
8381 -- addr = vma->vm_start - len;
8382 -- if (do_color_align)
8383 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8384 -- } while (likely(len < vma->vm_start));
8385 -+ addr = skip_heap_stack_gap(vma, len, offset);
8386 -+ } while (!IS_ERR_VALUE(addr));
8387 -
8388 - bottomup:
8389 - /*
8390 -@@ -223,6 +226,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8391 +@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8392 {
8393 unsigned long random_factor = 0UL;
8394
8395 +#ifdef CONFIG_PAX_RANDMMAP
8396 -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
8397 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8398 +#endif
8399 +
8400 if (current->flags & PF_RANDOMIZE) {
8401 random_factor = get_random_int();
8402 random_factor = random_factor << PAGE_SHIFT;
8403 -@@ -234,38 +241,23 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8404 +@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8405
8406 if (mmap_is_legacy()) {
8407 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8408 @@ -4180,7 +5311,7 @@ index 302d779..6459dc0 100644
8409 mm->unmap_area = arch_unmap_area_topdown;
8410 }
8411 }
8412 --
8413 +
8414 -static inline unsigned long brk_rnd(void)
8415 -{
8416 - unsigned long rnd = get_random_int();
8417 @@ -4207,6 +5338,10 @@ index 302d779..6459dc0 100644
8418 -
8419 - return ret;
8420 -}
8421 +-
8422 + int __virt_addr_valid(const volatile void *kaddr)
8423 + {
8424 + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
8425 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
8426 index 967d144..db12197 100644
8427 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
8428 @@ -4398,7 +5533,7 @@ index 4ba2c93..f5e3974 100644
8429 else
8430 copy_from_user_overflow();
8431 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
8432 -index 5e34ccf..672bc9c 100644
8433 +index 2a625fb..9908930 100644
8434 --- a/arch/parisc/kernel/module.c
8435 +++ b/arch/parisc/kernel/module.c
8436 @@ -98,16 +98,38 @@
8437 @@ -4444,7 +5579,7 @@ index 5e34ccf..672bc9c 100644
8438 }
8439
8440 static inline int in_local(struct module *me, void *loc)
8441 -@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8442 +@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
8443 }
8444
8445 /* align things a bit */
8446 @@ -4464,7 +5599,7 @@ index 5e34ccf..672bc9c 100644
8447
8448 me->arch.got_max = gots;
8449 me->arch.fdesc_max = fdescs;
8450 -@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8451 +@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8452
8453 BUG_ON(value == 0);
8454
8455 @@ -4473,7 +5608,7 @@ index 5e34ccf..672bc9c 100644
8456 for (i = 0; got[i].addr; i++)
8457 if (got[i].addr == value)
8458 goto out;
8459 -@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8460 +@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
8461 #ifdef CONFIG_64BIT
8462 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8463 {
8464 @@ -4482,7 +5617,7 @@ index 5e34ccf..672bc9c 100644
8465
8466 if (!value) {
8467 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
8468 -@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8469 +@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
8470
8471 /* Create new one */
8472 fdesc->addr = value;
8473 @@ -4491,7 +5626,7 @@ index 5e34ccf..672bc9c 100644
8474 return (Elf_Addr)fdesc;
8475 }
8476 #endif /* CONFIG_64BIT */
8477 -@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
8478 +@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
8479
8480 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
8481 end = table + sechdrs[me->arch.unwind_section].sh_size;
8482 @@ -4982,7 +6117,7 @@ index 4aad413..85d86bf 100644
8483 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8484 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8485 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8486 -index d24c141..b60696e 100644
8487 +index 3d5c9dc..62f8414 100644
8488 --- a/arch/powerpc/include/asm/reg.h
8489 +++ b/arch/powerpc/include/asm/reg.h
8490 @@ -215,6 +215,7 @@
8491 @@ -5231,10 +6366,10 @@ index 4684e33..acc4d19e 100644
8492 ld r4,_DAR(r1)
8493 bl .bad_page_fault
8494 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8495 -index 10b658a..e542888 100644
8496 +index 4665e82..080ea99 100644
8497 --- a/arch/powerpc/kernel/exceptions-64s.S
8498 +++ b/arch/powerpc/kernel/exceptions-64s.S
8499 -@@ -1013,10 +1013,10 @@ handle_page_fault:
8500 +@@ -1206,10 +1206,10 @@ handle_page_fault:
8501 11: ld r4,_DAR(r1)
8502 ld r5,_DSISR(r1)
8503 addi r3,r1,STACK_FRAME_OVERHEAD
8504 @@ -5280,7 +6415,7 @@ index 2e3200c..72095ce 100644
8505 /* Find this entry, or if that fails, the next avail. entry */
8506 while (entry->jump[0]) {
8507 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8508 -index ba48233..16ac31d 100644
8509 +index 8143067..21ae55b 100644
8510 --- a/arch/powerpc/kernel/process.c
8511 +++ b/arch/powerpc/kernel/process.c
8512 @@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
8513 @@ -5294,7 +6429,7 @@ index ba48233..16ac31d 100644
8514 #endif
8515 show_stack(current, (unsigned long *) regs->gpr[1]);
8516 if (!user_mode(regs))
8517 -@@ -1175,10 +1175,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8518 +@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8519 newsp = stack[0];
8520 ip = stack[STACK_FRAME_LR_SAVE];
8521 if (!firstframe || ip != lr) {
8522 @@ -5307,7 +6442,7 @@ index ba48233..16ac31d 100644
8523 (void *)current->ret_stack[curr_frame].ret);
8524 curr_frame--;
8525 }
8526 -@@ -1198,7 +1198,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8527 +@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8528 struct pt_regs *regs = (struct pt_regs *)
8529 (sp + STACK_FRAME_OVERHEAD);
8530 lr = regs->link;
8531 @@ -5316,7 +6451,7 @@ index ba48233..16ac31d 100644
8532 regs->trap, (void *)regs->nip, (void *)lr);
8533 firstframe = 1;
8534 }
8535 -@@ -1240,58 +1240,3 @@ void __ppc64_runlatch_off(void)
8536 +@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
8537 mtspr(SPRN_CTRLT, ctrl);
8538 }
8539 #endif /* CONFIG_PPC64 */
8540 @@ -5376,10 +6511,10 @@ index ba48233..16ac31d 100644
8541 - return ret;
8542 -}
8543 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8544 -index 79d8e56..38ffcbb 100644
8545 +index c497000..8fde506 100644
8546 --- a/arch/powerpc/kernel/ptrace.c
8547 +++ b/arch/powerpc/kernel/ptrace.c
8548 -@@ -1663,6 +1663,10 @@ long arch_ptrace(struct task_struct *child, long request,
8549 +@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
8550 return ret;
8551 }
8552
8553 @@ -5390,7 +6525,7 @@ index 79d8e56..38ffcbb 100644
8554 /*
8555 * We must return the syscall number to actually look up in the table.
8556 * This can be -1L to skip running any syscall at all.
8557 -@@ -1673,6 +1677,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8558 +@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8559
8560 secure_computing_strict(regs->gpr[0]);
8561
8562 @@ -5402,7 +6537,7 @@ index 79d8e56..38ffcbb 100644
8563 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8564 tracehook_report_syscall_entry(regs))
8565 /*
8566 -@@ -1707,6 +1716,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8567 +@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8568 {
8569 int step;
8570
8571 @@ -5428,7 +6563,7 @@ index 804e323..79181c1 100644
8572 goto badframe;
8573 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8574 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8575 -index d183f87..1867f1a 100644
8576 +index 1ca045d..139c3f7 100644
8577 --- a/arch/powerpc/kernel/signal_64.c
8578 +++ b/arch/powerpc/kernel/signal_64.c
8579 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8580 @@ -5440,6 +6575,19 @@ index d183f87..1867f1a 100644
8581 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8582 } else {
8583 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8584 +diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
8585 +index 3ce1f86..c30e629 100644
8586 +--- a/arch/powerpc/kernel/sysfs.c
8587 ++++ b/arch/powerpc/kernel/sysfs.c
8588 +@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8589 + return NOTIFY_OK;
8590 + }
8591 +
8592 +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8593 ++static struct notifier_block sysfs_cpu_nb = {
8594 + .notifier_call = sysfs_cpu_notify,
8595 + };
8596 +
8597 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8598 index 3251840..3f7c77a 100644
8599 --- a/arch/powerpc/kernel/traps.c
8600 @@ -5529,7 +6677,7 @@ index 5eea6f3..5d10396 100644
8601 EXPORT_SYMBOL(copy_in_user);
8602
8603 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8604 -index 0a6b283..7674925 100644
8605 +index 3a8489a..6a63b3b 100644
8606 --- a/arch/powerpc/mm/fault.c
8607 +++ b/arch/powerpc/mm/fault.c
8608 @@ -32,6 +32,10 @@
8609 @@ -5577,7 +6725,7 @@ index 0a6b283..7674925 100644
8610 /*
8611 * Check whether the instruction at regs->nip is a store using
8612 * an update addressing form which will update r1.
8613 -@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8614 +@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8615 * indicate errors in DSISR but can validly be set in SRR1.
8616 */
8617 if (trap == 0x400)
8618 @@ -5586,7 +6734,7 @@ index 0a6b283..7674925 100644
8619 else
8620 is_write = error_code & DSISR_ISSTORE;
8621 #else
8622 -@@ -367,7 +398,7 @@ good_area:
8623 +@@ -364,7 +395,7 @@ good_area:
8624 * "undefined". Of those that can be set, this is the only
8625 * one which seems bad.
8626 */
8627 @@ -5595,7 +6743,7 @@ index 0a6b283..7674925 100644
8628 /* Guarded storage error. */
8629 goto bad_area;
8630 #endif /* CONFIG_8xx */
8631 -@@ -382,7 +413,7 @@ good_area:
8632 +@@ -379,7 +410,7 @@ good_area:
8633 * processors use the same I/D cache coherency mechanism
8634 * as embedded.
8635 */
8636 @@ -5604,7 +6752,7 @@ index 0a6b283..7674925 100644
8637 goto bad_area;
8638 #endif /* CONFIG_PPC_STD_MMU */
8639
8640 -@@ -465,6 +496,23 @@ bad_area:
8641 +@@ -462,6 +493,23 @@ bad_area:
8642 bad_area_nosemaphore:
8643 /* User mode accesses cause a SIGSEGV */
8644 if (user_mode(regs)) {
8645 @@ -5629,7 +6777,7 @@ index 0a6b283..7674925 100644
8646 return 0;
8647 }
8648 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
8649 -index 67a42ed..c16ef80 100644
8650 +index 67a42ed..cd463e0 100644
8651 --- a/arch/powerpc/mm/mmap_64.c
8652 +++ b/arch/powerpc/mm/mmap_64.c
8653 @@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8654 @@ -5637,7 +6785,7 @@ index 67a42ed..c16ef80 100644
8655 unsigned long rnd = 0;
8656
8657 +#ifdef CONFIG_PAX_RANDMMAP
8658 -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
8659 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8660 +#endif
8661 +
8662 if (current->flags & PF_RANDOMIZE) {
8663 @@ -5666,8 +6814,34 @@ index 67a42ed..c16ef80 100644
8664 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8665 mm->unmap_area = arch_unmap_area_topdown;
8666 }
8667 +diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
8668 +index e779642..e5bb889 100644
8669 +--- a/arch/powerpc/mm/mmu_context_nohash.c
8670 ++++ b/arch/powerpc/mm/mmu_context_nohash.c
8671 +@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
8672 + return NOTIFY_OK;
8673 + }
8674 +
8675 +-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
8676 ++static struct notifier_block mmu_context_cpu_nb = {
8677 + .notifier_call = mmu_context_cpu_notify,
8678 + };
8679 +
8680 +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
8681 +index bba87ca..c346a33 100644
8682 +--- a/arch/powerpc/mm/numa.c
8683 ++++ b/arch/powerpc/mm/numa.c
8684 +@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
8685 + return ret;
8686 + }
8687 +
8688 +-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
8689 ++static struct notifier_block ppc64_numa_nb = {
8690 + .notifier_call = cpu_numa_callback,
8691 + .priority = 1 /* Must run before sched domains notifier. */
8692 + };
8693 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8694 -index 5829d2a..af84242 100644
8695 +index cf9dada..241529f 100644
8696 --- a/arch/powerpc/mm/slice.c
8697 +++ b/arch/powerpc/mm/slice.c
8698 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8699 @@ -5736,6 +6910,19 @@ index 5829d2a..af84242 100644
8700 /* If hint, make sure it matches our alignment restrictions */
8701 if (!fixed && addr) {
8702 addr = _ALIGN_UP(addr, 1ul << pshift);
8703 +diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
8704 +index bdb738a..49c9f95 100644
8705 +--- a/arch/powerpc/platforms/powermac/smp.c
8706 ++++ b/arch/powerpc/platforms/powermac/smp.c
8707 +@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
8708 + return NOTIFY_OK;
8709 + }
8710 +
8711 +-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
8712 ++static struct notifier_block smp_core99_cpu_nb = {
8713 + .notifier_call = smp_core99_cpu_notify,
8714 + };
8715 + #endif /* CONFIG_HOTPLUG_CPU */
8716 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8717 index c797832..ce575c8 100644
8718 --- a/arch/s390/include/asm/atomic.h
8719 @@ -5934,10 +7121,10 @@ index 4610dea..cf0af21 100644
8720 if (r_type == R_390_GOTPC)
8721 *(unsigned int *) loc = val;
8722 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8723 -index cd31ad4..201c5a3 100644
8724 +index 536d645..4a5bd9e 100644
8725 --- a/arch/s390/kernel/process.c
8726 +++ b/arch/s390/kernel/process.c
8727 -@@ -283,39 +283,3 @@ unsigned long get_wchan(struct task_struct *p)
8728 +@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
8729 }
8730 return 0;
8731 }
8732 @@ -6055,10 +7242,10 @@ index f9f3cd5..58ff438 100644
8733
8734 #endif /* _ASM_SCORE_EXEC_H */
8735 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8736 -index 637970c..0b6556b 100644
8737 +index 7956846..5f37677 100644
8738 --- a/arch/score/kernel/process.c
8739 +++ b/arch/score/kernel/process.c
8740 -@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
8741 +@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
8742
8743 return task_pt_regs(task)->cp0_epc;
8744 }
8745 @@ -6084,19 +7271,43 @@ index ef9e555..331bd29 100644
8746
8747 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8748
8749 +diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8750 +index 03f2b55..b027032 100644
8751 +--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8752 ++++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
8753 +@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
8754 + return NOTIFY_OK;
8755 + }
8756 +
8757 +-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
8758 ++static struct notifier_block shx3_cpu_notifier = {
8759 + .notifier_call = shx3_cpu_callback,
8760 + };
8761 +
8762 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8763 -index afeb710..e8366ef 100644
8764 +index 6777177..cb5e44f 100644
8765 --- a/arch/sh/mm/mmap.c
8766 +++ b/arch/sh/mm/mmap.c
8767 -@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8768 +@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8769 + struct mm_struct *mm = current->mm;
8770 struct vm_area_struct *vma;
8771 - unsigned long start_addr;
8772 int do_colour_align;
8773 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8774 + struct vm_unmapped_area_info info;
8775
8776 if (flags & MAP_FIXED) {
8777 - /* We do not accept a shared mapping if it would violate
8778 -@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8779 +@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8780 + if (filp || (flags & MAP_SHARED))
8781 + do_colour_align = 1;
8782 +
8783 ++#ifdef CONFIG_PAX_RANDMMAP
8784 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8785 ++#endif
8786 ++
8787 + if (addr) {
8788 + if (do_colour_align)
8789 + addr = COLOUR_ALIGN(addr, pgoff);
8790 +@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8791 addr = PAGE_ALIGN(addr);
8792
8793 vma = find_vma(mm, addr);
8794 @@ -6106,24 +7317,33 @@ index afeb710..e8366ef 100644
8795 return addr;
8796 }
8797
8798 -@@ -106,7 +106,7 @@ full_search:
8799 - }
8800 - return -ENOMEM;
8801 - }
8802 -- if (likely(!vma || addr + len <= vma->vm_start)) {
8803 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8804 - /*
8805 - * Remember the place where we stopped the search:
8806 - */
8807 -@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8808 + info.flags = 0;
8809 + info.length = len;
8810 +- info.low_limit = TASK_UNMAPPED_BASE;
8811 ++ info.low_limit = mm->mmap_base;
8812 + info.high_limit = TASK_SIZE;
8813 + info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8814 + info.align_offset = pgoff << PAGE_SHIFT;
8815 +@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8816 struct mm_struct *mm = current->mm;
8817 unsigned long addr = addr0;
8818 int do_colour_align;
8819 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8820 + struct vm_unmapped_area_info info;
8821
8822 if (flags & MAP_FIXED) {
8823 - /* We do not accept a shared mapping if it would violate
8824 -@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8825 +@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8826 + if (filp || (flags & MAP_SHARED))
8827 + do_colour_align = 1;
8828 +
8829 ++#ifdef CONFIG_PAX_RANDMMAP
8830 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8831 ++#endif
8832 ++
8833 + /* requesting a specific address */
8834 + if (addr) {
8835 + if (do_colour_align)
8836 +@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8837 addr = PAGE_ALIGN(addr);
8838
8839 vma = find_vma(mm, addr);
8840 @@ -6133,51 +7353,19 @@ index afeb710..e8366ef 100644
8841 return addr;
8842 }
8843
8844 -@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8845 - /* make sure it can fit in the remaining address space */
8846 - if (likely(addr > len)) {
8847 - vma = find_vma(mm, addr-len);
8848 -- if (!vma || addr <= vma->vm_start) {
8849 -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
8850 - /* remember the address as a hint for next time */
8851 - return (mm->free_area_cache = addr-len);
8852 - }
8853 -@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8854 - if (unlikely(mm->mmap_base < len))
8855 - goto bottomup;
8856 -
8857 -- addr = mm->mmap_base-len;
8858 -- if (do_colour_align)
8859 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8860 -+ addr = mm->mmap_base - len;
8861 -
8862 - do {
8863 -+ if (do_colour_align)
8864 -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8865 - /*
8866 - * Lookup failure means no vma is above this address,
8867 - * else if new region fits below vma->vm_start,
8868 - * return with success:
8869 - */
8870 - vma = find_vma(mm, addr);
8871 -- if (likely(!vma || addr+len <= vma->vm_start)) {
8872 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
8873 - /* remember the address as a hint for next time */
8874 - return (mm->free_area_cache = addr);
8875 - }
8876 -@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8877 - mm->cached_hole_size = vma->vm_start - addr;
8878 -
8879 - /* try just below the current vma->vm_start */
8880 -- addr = vma->vm_start-len;
8881 -- if (do_colour_align)
8882 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
8883 -- } while (likely(len < vma->vm_start));
8884 -+ addr = skip_heap_stack_gap(vma, len, offset);
8885 -+ } while (!IS_ERR_VALUE(addr));
8886 -
8887 - bottomup:
8888 - /*
8889 +@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8890 + VM_BUG_ON(addr != -ENOMEM);
8891 + info.flags = 0;
8892 + info.low_limit = TASK_UNMAPPED_BASE;
8893 ++
8894 ++#ifdef CONFIG_PAX_RANDMMAP
8895 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
8896 ++ info.low_limit += mm->delta_mmap;
8897 ++#endif
8898 ++
8899 + info.high_limit = TASK_SIZE;
8900 + addr = vm_unmapped_area(&info);
8901 + }
8902 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8903 index be56a24..443328f 100644
8904 --- a/arch/sparc/include/asm/atomic_64.h
8905 @@ -6616,7 +7804,7 @@ index 25849ae..924c54b 100644
8906
8907 /*
8908 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8909 -index a3fe4dc..cae132a 100644
8910 +index 269bd92..e46a9b8 100644
8911 --- a/arch/sparc/include/asm/thread_info_64.h
8912 +++ b/arch/sparc/include/asm/thread_info_64.h
8913 @@ -63,6 +63,8 @@ struct thread_info {
8914 @@ -6628,7 +7816,7 @@ index a3fe4dc..cae132a 100644
8915 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8916 };
8917
8918 -@@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8919 +@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8920 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8921 /* flag bit 6 is available */
8922 #define TIF_32BIT 7 /* 32-bit binary */
8923 @@ -6641,7 +7829,7 @@ index a3fe4dc..cae132a 100644
8924 /* NOTE: Thread flags >= 12 should be ones we have no interest
8925 * in using in assembly, else we can't use the mask as
8926 * an immediate value in instructions such as andcc.
8927 -@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8928 +@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8929 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8930 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8931 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8932 @@ -6734,7 +7922,7 @@ index 53a28dd..50c38c3 100644
8933 }
8934
8935 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8936 -index 73083e1..2bc62a6 100644
8937 +index e562d3c..191f176 100644
8938 --- a/arch/sparc/include/asm/uaccess_64.h
8939 +++ b/arch/sparc/include/asm/uaccess_64.h
8940 @@ -10,6 +10,7 @@
8941 @@ -6793,7 +7981,7 @@ index 6cf591b..b49e65a 100644
8942 extra-y := head_$(BITS).o
8943
8944 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8945 -index 487bffb..955a925 100644
8946 +index be8e862..5b50b12 100644
8947 --- a/arch/sparc/kernel/process_32.c
8948 +++ b/arch/sparc/kernel/process_32.c
8949 @@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8950 @@ -6823,7 +8011,7 @@ index 487bffb..955a925 100644
8951 } while (++count < 16);
8952 printk("\n");
8953 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8954 -index c6e0c29..052832b 100644
8955 +index cdb80b2..5ca141d 100644
8956 --- a/arch/sparc/kernel/process_64.c
8957 +++ b/arch/sparc/kernel/process_64.c
8958 @@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8959 @@ -6901,44 +8089,28 @@ index 7ff45e4..a58f271 100644
8960
8961 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8962 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8963 -index 0c9b31b..55a8ba6 100644
8964 +index 2da0bdc..79128d2 100644
8965 --- a/arch/sparc/kernel/sys_sparc_32.c
8966 +++ b/arch/sparc/kernel/sys_sparc_32.c
8967 -@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
8968 - unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
8969 - {
8970 - struct vm_area_struct * vmm;
8971 -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
8972 -
8973 - if (flags & MAP_FIXED) {
8974 - /* We do not accept a shared mapping if it would violate
8975 -@@ -54,7 +55,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8976 +@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8977 if (len > TASK_SIZE - PAGE_SIZE)
8978 return -ENOMEM;
8979 if (!addr)
8980 - addr = TASK_UNMAPPED_BASE;
8981 + addr = current->mm->mmap_base;
8982
8983 - if (flags & MAP_SHARED)
8984 - addr = COLOUR_ALIGN(addr);
8985 -@@ -65,7 +66,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8986 - /* At this point: (!vmm || addr < vmm->vm_end). */
8987 - if (TASK_SIZE - PAGE_SIZE - len < addr)
8988 - return -ENOMEM;
8989 -- if (!vmm || addr + len <= vmm->vm_start)
8990 -+ if (check_heap_stack_gap(vmm, addr, len, offset))
8991 - return addr;
8992 - addr = vmm->vm_end;
8993 - if (flags & MAP_SHARED)
8994 + info.flags = 0;
8995 + info.length = len;
8996 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8997 -index 878ef3d..f100719 100644
8998 +index 708bc29..f0129cb 100644
8999 --- a/arch/sparc/kernel/sys_sparc_64.c
9000 +++ b/arch/sparc/kernel/sys_sparc_64.c
9001 -@@ -102,12 +102,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9002 +@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9003 + struct vm_area_struct * vma;
9004 unsigned long task_size = TASK_SIZE;
9005 - unsigned long start_addr;
9006 int do_color_align;
9007 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9008 + struct vm_unmapped_area_info info;
9009
9010 if (flags & MAP_FIXED) {
9011 /* We do not accept a shared mapping if it would violate
9012 @@ -6949,7 +8121,7 @@ index 878ef3d..f100719 100644
9013 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9014 return -EINVAL;
9015 return addr;
9016 -@@ -122,6 +123,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9017 +@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9018 if (filp || (flags & MAP_SHARED))
9019 do_color_align = 1;
9020
9021 @@ -6959,8 +8131,8 @@ index 878ef3d..f100719 100644
9022 +
9023 if (addr) {
9024 if (do_color_align)
9025 - addr = COLOUR_ALIGN(addr, pgoff);
9026 -@@ -129,15 +134,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9027 + addr = COLOR_ALIGN(addr, pgoff);
9028 +@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9029 addr = PAGE_ALIGN(addr);
9030
9031 vma = find_vma(mm, addr);
9032 @@ -6970,42 +8142,35 @@ index 878ef3d..f100719 100644
9033 return addr;
9034 }
9035
9036 - if (len > mm->cached_hole_size) {
9037 -- start_addr = addr = mm->free_area_cache;
9038 -+ start_addr = addr = mm->free_area_cache;
9039 - } else {
9040 -- start_addr = addr = TASK_UNMAPPED_BASE;
9041 -+ start_addr = addr = mm->mmap_base;
9042 - mm->cached_hole_size = 0;
9043 + info.flags = 0;
9044 + info.length = len;
9045 +- info.low_limit = TASK_UNMAPPED_BASE;
9046 ++ info.low_limit = mm->mmap_base;
9047 + info.high_limit = min(task_size, VA_EXCLUDE_START);
9048 + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9049 + info.align_offset = pgoff << PAGE_SHIFT;
9050 +@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9051 + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9052 + VM_BUG_ON(addr != -ENOMEM);
9053 + info.low_limit = VA_EXCLUDE_END;
9054 ++
9055 ++#ifdef CONFIG_PAX_RANDMMAP
9056 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
9057 ++ info.low_limit += mm->delta_mmap;
9058 ++#endif
9059 ++
9060 + info.high_limit = task_size;
9061 + addr = vm_unmapped_area(&info);
9062 }
9063 -
9064 -@@ -157,14 +161,14 @@ full_search:
9065 - vma = find_vma(mm, VA_EXCLUDE_END);
9066 - }
9067 - if (unlikely(task_size < addr)) {
9068 -- if (start_addr != TASK_UNMAPPED_BASE) {
9069 -- start_addr = addr = TASK_UNMAPPED_BASE;
9070 -+ if (start_addr != mm->mmap_base) {
9071 -+ start_addr = addr = mm->mmap_base;
9072 - mm->cached_hole_size = 0;
9073 - goto full_search;
9074 - }
9075 - return -ENOMEM;
9076 - }
9077 -- if (likely(!vma || addr + len <= vma->vm_start)) {
9078 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
9079 - /*
9080 - * Remember the place where we stopped the search:
9081 - */
9082 -@@ -190,6 +194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9083 +@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9084 unsigned long task_size = STACK_TOP32;
9085 unsigned long addr = addr0;
9086 int do_color_align;
9087 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9088 + struct vm_unmapped_area_info info;
9089
9090 /* This should only ever run for 32-bit processes. */
9091 - BUG_ON(!test_thread_flag(TIF_32BIT));
9092 -@@ -198,7 +203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9093 +@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9094 /* We do not accept a shared mapping if it would violate
9095 * cache aliasing constraints.
9096 */
9097 @@ -7014,7 +8179,18 @@ index 878ef3d..f100719 100644
9098 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9099 return -EINVAL;
9100 return addr;
9101 -@@ -219,8 +224,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9102 +@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9103 + if (filp || (flags & MAP_SHARED))
9104 + do_color_align = 1;
9105 +
9106 ++#ifdef CONFIG_PAX_RANDMMAP
9107 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9108 ++#endif
9109 ++
9110 + /* requesting a specific address */
9111 + if (addr) {
9112 + if (do_color_align)
9113 +@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9114 addr = PAGE_ALIGN(addr);
9115
9116 vma = find_vma(mm, addr);
9117 @@ -7024,63 +8200,31 @@ index 878ef3d..f100719 100644
9118 return addr;
9119 }
9120
9121 -@@ -241,7 +245,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9122 - /* make sure it can fit in the remaining address space */
9123 - if (likely(addr > len)) {
9124 - vma = find_vma(mm, addr-len);
9125 -- if (!vma || addr <= vma->vm_start) {
9126 -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
9127 - /* remember the address as a hint for next time */
9128 - return (mm->free_area_cache = addr-len);
9129 - }
9130 -@@ -250,18 +254,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9131 - if (unlikely(mm->mmap_base < len))
9132 - goto bottomup;
9133 -
9134 -- addr = mm->mmap_base-len;
9135 -- if (do_color_align)
9136 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
9137 -+ addr = mm->mmap_base - len;
9138 -
9139 - do {
9140 -+ if (do_color_align)
9141 -+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
9142 - /*
9143 - * Lookup failure means no vma is above this address,
9144 - * else if new region fits below vma->vm_start,
9145 - * return with success:
9146 - */
9147 - vma = find_vma(mm, addr);
9148 -- if (likely(!vma || addr+len <= vma->vm_start)) {
9149 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
9150 - /* remember the address as a hint for next time */
9151 - return (mm->free_area_cache = addr);
9152 - }
9153 -@@ -271,10 +275,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9154 - mm->cached_hole_size = vma->vm_start - addr;
9155 -
9156 - /* try just below the current vma->vm_start */
9157 -- addr = vma->vm_start-len;
9158 -- if (do_color_align)
9159 -- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
9160 -- } while (likely(len < vma->vm_start));
9161 -+ addr = skip_heap_stack_gap(vma, len, offset);
9162 -+ } while (!IS_ERR_VALUE(addr));
9163 -
9164 - bottomup:
9165 - /*
9166 -@@ -348,6 +350,10 @@ static unsigned long mmap_rnd(void)
9167 +@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9168 + VM_BUG_ON(addr != -ENOMEM);
9169 + info.flags = 0;
9170 + info.low_limit = TASK_UNMAPPED_BASE;
9171 ++
9172 ++#ifdef CONFIG_PAX_RANDMMAP
9173 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
9174 ++ info.low_limit += mm->delta_mmap;
9175 ++#endif
9176 ++
9177 + info.high_limit = STACK_TOP32;
9178 + addr = vm_unmapped_area(&info);
9179 + }
9180 +@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
9181 {
9182 unsigned long rnd = 0UL;
9183
9184 +#ifdef CONFIG_PAX_RANDMMAP
9185 -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
9186 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9187 +#endif
9188 +
9189 if (current->flags & PF_RANDOMIZE) {
9190 unsigned long val = get_random_int();
9191 if (test_thread_flag(TIF_32BIT))
9192 -@@ -373,6 +379,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9193 +@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9194 gap == RLIM_INFINITY ||
9195 sysctl_legacy_va_layout) {
9196 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9197 @@ -7093,7 +8237,7 @@ index 878ef3d..f100719 100644
9198 mm->get_unmapped_area = arch_get_unmapped_area;
9199 mm->unmap_area = arch_unmap_area;
9200 } else {
9201 -@@ -385,6 +397,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9202 +@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9203 gap = (task_size / 6 * 5);
9204
9205 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9206 @@ -7107,10 +8251,10 @@ index 878ef3d..f100719 100644
9207 mm->unmap_area = arch_unmap_area_topdown;
9208 }
9209 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9210 -index bf23477..b7425a6 100644
9211 +index e0fed77..604a7e5 100644
9212 --- a/arch/sparc/kernel/syscalls.S
9213 +++ b/arch/sparc/kernel/syscalls.S
9214 -@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
9215 +@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
9216 #endif
9217 .align 32
9218 1: ldx [%g6 + TI_FLAGS], %l5
9219 @@ -7119,7 +8263,7 @@ index bf23477..b7425a6 100644
9220 be,pt %icc, rtrap
9221 nop
9222 call syscall_trace_leave
9223 -@@ -189,7 +189,7 @@ linux_sparc_syscall32:
9224 +@@ -190,7 +190,7 @@ linux_sparc_syscall32:
9225
9226 srl %i5, 0, %o5 ! IEU1
9227 srl %i2, 0, %o2 ! IEU0 Group
9228 @@ -7128,7 +8272,7 @@ index bf23477..b7425a6 100644
9229 bne,pn %icc, linux_syscall_trace32 ! CTI
9230 mov %i0, %l5 ! IEU1
9231 call %l7 ! CTI Group brk forced
9232 -@@ -212,7 +212,7 @@ linux_sparc_syscall:
9233 +@@ -213,7 +213,7 @@ linux_sparc_syscall:
9234
9235 mov %i3, %o3 ! IEU1
9236 mov %i4, %o4 ! IEU0 Group
9237 @@ -7137,7 +8281,7 @@ index bf23477..b7425a6 100644
9238 bne,pn %icc, linux_syscall_trace ! CTI Group
9239 mov %i0, %l5 ! IEU0
9240 2: call %l7 ! CTI Group brk forced
9241 -@@ -228,7 +228,7 @@ ret_sys_call:
9242 +@@ -229,7 +229,7 @@ ret_sys_call:
9243
9244 cmp %o0, -ERESTART_RESTARTBLOCK
9245 bgeu,pn %xcc, 1f
9246 @@ -7146,6 +8290,19 @@ index bf23477..b7425a6 100644
9247 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9248
9249 2:
9250 +diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
9251 +index 654e8aa..45f431b 100644
9252 +--- a/arch/sparc/kernel/sysfs.c
9253 ++++ b/arch/sparc/kernel/sysfs.c
9254 +@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
9255 + return NOTIFY_OK;
9256 + }
9257 +
9258 +-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
9259 ++static struct notifier_block sysfs_cpu_nb = {
9260 + .notifier_call = sysfs_cpu_notify,
9261 + };
9262 +
9263 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9264 index a5785ea..405c5f7 100644
9265 --- a/arch/sparc/kernel/traps_32.c
9266 @@ -7180,7 +8337,7 @@ index a5785ea..405c5f7 100644
9267 }
9268
9269 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9270 -index b66a779..8e8d66c 100644
9271 +index e7ecf15..6520e65 100644
9272 --- a/arch/sparc/kernel/traps_64.c
9273 +++ b/arch/sparc/kernel/traps_64.c
9274 @@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9275 @@ -8434,83 +9591,60 @@ index 097aee7..5ca6697 100644
9276 * load/store/atomic was a write or not, it only says that there
9277 * was no match. So in such a case we (carefully) read the
9278 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9279 -index f76f83d..ee0d859 100644
9280 +index d2b5944..bd813f2 100644
9281 --- a/arch/sparc/mm/hugetlbpage.c
9282 +++ b/arch/sparc/mm/hugetlbpage.c
9283 -@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9284 - struct vm_area_struct * vma;
9285 - unsigned long task_size = TASK_SIZE;
9286 - unsigned long start_addr;
9287 -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9288 -
9289 - if (test_thread_flag(TIF_32BIT))
9290 - task_size = STACK_TOP32;
9291 -@@ -67,7 +68,7 @@ full_search:
9292 - }
9293 - return -ENOMEM;
9294 - }
9295 -- if (likely(!vma || addr + len <= vma->vm_start)) {
9296 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
9297 - /*
9298 - * Remember the place where we stopped the search:
9299 - */
9300 -@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9301 - struct vm_area_struct *vma;
9302 - struct mm_struct *mm = current->mm;
9303 - unsigned long addr = addr0;
9304 -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9305 -
9306 - /* This should only ever run for 32-bit processes. */
9307 - BUG_ON(!test_thread_flag(TIF_32BIT));
9308 -@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9309 - /* make sure it can fit in the remaining address space */
9310 - if (likely(addr > len)) {
9311 - vma = find_vma(mm, addr-len);
9312 -- if (!vma || addr <= vma->vm_start) {
9313 -+ if (check_heap_stack_gap(vma, addr - len, len, offset)) {
9314 - /* remember the address as a hint for next time */
9315 - return (mm->free_area_cache = addr-len);
9316 - }
9317 -@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9318 - if (unlikely(mm->mmap_base < len))
9319 - goto bottomup;
9320 -
9321 -- addr = (mm->mmap_base-len) & HPAGE_MASK;
9322 -+ addr = mm->mmap_base - len;
9323 -
9324 - do {
9325 -+ addr &= HPAGE_MASK;
9326 - /*
9327 - * Lookup failure means no vma is above this address,
9328 - * else if new region fits below vma->vm_start,
9329 - * return with success:
9330 - */
9331 - vma = find_vma(mm, addr);
9332 -- if (likely(!vma || addr+len <= vma->vm_start)) {
9333 -+ if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
9334 - /* remember the address as a hint for next time */
9335 - return (mm->free_area_cache = addr);
9336 - }
9337 -@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9338 - mm->cached_hole_size = vma->vm_start - addr;
9339 -
9340 - /* try just below the current vma->vm_start */
9341 -- addr = (vma->vm_start-len) & HPAGE_MASK;
9342 -- } while (likely(len < vma->vm_start));
9343 -+ addr = skip_heap_stack_gap(vma, len, offset);
9344 -+ } while (!IS_ERR_VALUE(addr));
9345 -
9346 - bottomup:
9347 - /*
9348 -@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9349 +@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9350 +
9351 + info.flags = 0;
9352 + info.length = len;
9353 +- info.low_limit = TASK_UNMAPPED_BASE;
9354 ++ info.low_limit = mm->mmap_base;
9355 + info.high_limit = min(task_size, VA_EXCLUDE_START);
9356 + info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9357 + info.align_offset = 0;
9358 +@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9359 + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9360 + VM_BUG_ON(addr != -ENOMEM);
9361 + info.low_limit = VA_EXCLUDE_END;
9362 ++
9363 ++#ifdef CONFIG_PAX_RANDMMAP
9364 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
9365 ++ info.low_limit += mm->delta_mmap;
9366 ++#endif
9367 ++
9368 + info.high_limit = task_size;
9369 + addr = vm_unmapped_area(&info);
9370 + }
9371 +@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9372 + VM_BUG_ON(addr != -ENOMEM);
9373 + info.flags = 0;
9374 + info.low_limit = TASK_UNMAPPED_BASE;
9375 ++
9376 ++#ifdef CONFIG_PAX_RANDMMAP
9377 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
9378 ++ info.low_limit += mm->delta_mmap;
9379 ++#endif
9380 ++
9381 + info.high_limit = STACK_TOP32;
9382 + addr = vm_unmapped_area(&info);
9383 + }
9384 +@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9385 struct mm_struct *mm = current->mm;
9386 struct vm_area_struct *vma;
9387 unsigned long task_size = TASK_SIZE;
9388 -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9389 ++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9390
9391 if (test_thread_flag(TIF_32BIT))
9392 task_size = STACK_TOP32;
9393 -@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9394 +@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9395 + return addr;
9396 + }
9397 +
9398 ++#ifdef CONFIG_PAX_RANDMMAP
9399 ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9400 ++#endif
9401 ++
9402 if (addr) {
9403 addr = ALIGN(addr, HPAGE_SIZE);
9404 vma = find_vma(mm, addr);
9405 @@ -8649,10 +9783,10 @@ index 0032f92..cd151e0 100644
9406 #ifdef CONFIG_64BIT
9407 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
9408 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
9409 -index b6d699c..df7ac1d 100644
9410 +index b462b13..e7a19aa 100644
9411 --- a/arch/um/kernel/process.c
9412 +++ b/arch/um/kernel/process.c
9413 -@@ -387,22 +387,6 @@ int singlestepping(void * t)
9414 +@@ -386,22 +386,6 @@ int singlestepping(void * t)
9415 return 2;
9416 }
9417
9418 @@ -8693,10 +9827,10 @@ index ad8f795..2c7eec6 100644
9419 /*
9420 * Memory returned by kmalloc() may be used for DMA, so we must make
9421 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
9422 -index 46c3bff..b82f26b 100644
9423 +index 225543b..f12405b 100644
9424 --- a/arch/x86/Kconfig
9425 +++ b/arch/x86/Kconfig
9426 -@@ -241,7 +241,7 @@ config X86_HT
9427 +@@ -238,7 +238,7 @@ config X86_HT
9428
9429 config X86_32_LAZY_GS
9430 def_bool y
9431 @@ -8705,7 +9839,7 @@ index 46c3bff..b82f26b 100644
9432
9433 config ARCH_HWEIGHT_CFLAGS
9434 string
9435 -@@ -1033,6 +1033,7 @@ config MICROCODE_OLD_INTERFACE
9436 +@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
9437
9438 config X86_MSR
9439 tristate "/dev/cpu/*/msr - Model-specific register support"
9440 @@ -8713,7 +9847,7 @@ index 46c3bff..b82f26b 100644
9441 ---help---
9442 This device gives privileged processes access to the x86
9443 Model-Specific Registers (MSRs). It is a character device with
9444 -@@ -1056,7 +1057,7 @@ choice
9445 +@@ -1054,7 +1055,7 @@ choice
9446
9447 config NOHIGHMEM
9448 bool "off"
9449 @@ -8722,7 +9856,7 @@ index 46c3bff..b82f26b 100644
9450 ---help---
9451 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
9452 However, the address space of 32-bit x86 processors is only 4
9453 -@@ -1093,7 +1094,7 @@ config NOHIGHMEM
9454 +@@ -1091,7 +1092,7 @@ config NOHIGHMEM
9455
9456 config HIGHMEM4G
9457 bool "4GB"
9458 @@ -8731,7 +9865,7 @@ index 46c3bff..b82f26b 100644
9459 ---help---
9460 Select this if you have a 32-bit processor and between 1 and 4
9461 gigabytes of physical RAM.
9462 -@@ -1147,7 +1148,7 @@ config PAGE_OFFSET
9463 +@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
9464 hex
9465 default 0xB0000000 if VMSPLIT_3G_OPT
9466 default 0x80000000 if VMSPLIT_2G
9467 @@ -8740,7 +9874,7 @@ index 46c3bff..b82f26b 100644
9468 default 0x40000000 if VMSPLIT_1G
9469 default 0xC0000000
9470 depends on X86_32
9471 -@@ -1548,6 +1549,7 @@ config SECCOMP
9472 +@@ -1546,6 +1547,7 @@ config SECCOMP
9473
9474 config CC_STACKPROTECTOR
9475 bool "Enable -fstack-protector buffer overflow detection"
9476 @@ -8748,7 +9882,7 @@ index 46c3bff..b82f26b 100644
9477 ---help---
9478 This option turns on the -fstack-protector GCC feature. This
9479 feature puts, at the beginning of functions, a canary value on
9480 -@@ -1605,6 +1607,7 @@ config KEXEC_JUMP
9481 +@@ -1603,6 +1605,7 @@ config KEXEC_JUMP
9482 config PHYSICAL_START
9483 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
9484 default "0x1000000"
9485 @@ -8756,7 +9890,7 @@ index 46c3bff..b82f26b 100644
9486 ---help---
9487 This gives the physical address where the kernel is loaded.
9488
9489 -@@ -1668,6 +1671,7 @@ config X86_NEED_RELOCS
9490 +@@ -1666,6 +1669,7 @@ config X86_NEED_RELOCS
9491 config PHYSICAL_ALIGN
9492 hex "Alignment value to which kernel should be aligned" if X86_32
9493 default "0x1000000"
9494 @@ -8764,8 +9898,8 @@ index 46c3bff..b82f26b 100644
9495 range 0x2000 0x1000000
9496 ---help---
9497 This value puts the alignment restrictions on physical address
9498 -@@ -1699,9 +1703,10 @@ config HOTPLUG_CPU
9499 - Say N if you want to disable CPU hotplug.
9500 +@@ -1741,9 +1745,10 @@ config DEBUG_HOTPLUG_CPU0
9501 + If unsure, say N.
9502
9503 config COMPAT_VDSO
9504 - def_bool y
9505 @@ -8777,19 +9911,19 @@ index 46c3bff..b82f26b 100644
9506 Map the 32-bit VDSO to the predictable old-style address too.
9507
9508 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
9509 -index f3b86d0..17fd30f 100644
9510 +index c026cca..14657ae 100644
9511 --- a/arch/x86/Kconfig.cpu
9512 +++ b/arch/x86/Kconfig.cpu
9513 -@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
9514 +@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
9515
9516 config X86_F00F_BUG
9517 def_bool y
9518 -- depends on M586MMX || M586TSC || M586 || M486 || M386
9519 -+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
9520 +- depends on M586MMX || M586TSC || M586 || M486
9521 ++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
9522
9523 config X86_INVD_BUG
9524 def_bool y
9525 -@@ -359,7 +359,7 @@ config X86_POPAD_OK
9526 +@@ -327,7 +327,7 @@ config X86_INVD_BUG
9527
9528 config X86_ALIGNMENT_16
9529 def_bool y
9530 @@ -8798,7 +9932,7 @@ index f3b86d0..17fd30f 100644
9531
9532 config X86_INTEL_USERCOPY
9533 def_bool y
9534 -@@ -405,7 +405,7 @@ config X86_CMPXCHG64
9535 +@@ -373,7 +373,7 @@ config X86_CMPXCHG64
9536 # generates cmov.
9537 config X86_CMOV
9538 def_bool y
9539 @@ -8839,7 +9973,7 @@ index b322f12..652d0d9 100644
9540 Enabling this option turns a certain set of sanity checks for user
9541 copy operations into compile time failures.
9542 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
9543 -index 05afcca..b6ecb51 100644
9544 +index e71fc42..7829607 100644
9545 --- a/arch/x86/Makefile
9546 +++ b/arch/x86/Makefile
9547 @@ -50,6 +50,7 @@ else
9548 @@ -8850,7 +9984,7 @@ index 05afcca..b6ecb51 100644
9549 KBUILD_AFLAGS += -m64
9550 KBUILD_CFLAGS += -m64
9551
9552 -@@ -229,3 +230,12 @@ define archhelp
9553 +@@ -230,3 +231,12 @@ define archhelp
9554 echo ' FDARGS="..." arguments for the booted kernel'
9555 echo ' FDINITRD=file initrd for the booted kernel'
9556 endef
9557 @@ -8864,7 +9998,7 @@ index 05afcca..b6ecb51 100644
9558 +archprepare:
9559 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
9560 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
9561 -index ccce0ed..fd9da25 100644
9562 +index 379814b..add62ce 100644
9563 --- a/arch/x86/boot/Makefile
9564 +++ b/arch/x86/boot/Makefile
9565 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
9566 @@ -8936,10 +10070,10 @@ index 8a84501..b2d165f 100644
9567 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
9568 GCOV_PROFILE := n
9569 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
9570 -index ccae7e2..8ac70be 100644
9571 +index f8fa411..c570c53 100644
9572 --- a/arch/x86/boot/compressed/eboot.c
9573 +++ b/arch/x86/boot/compressed/eboot.c
9574 -@@ -144,7 +144,6 @@ again:
9575 +@@ -145,7 +145,6 @@ again:
9576 *addr = max_addr;
9577 }
9578
9579 @@ -8947,7 +10081,7 @@ index ccae7e2..8ac70be 100644
9580 efi_call_phys1(sys_table->boottime->free_pool, map);
9581
9582 fail:
9583 -@@ -208,7 +207,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
9584 +@@ -209,7 +208,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
9585 if (i == map_size / desc_size)
9586 status = EFI_NOT_FOUND;
9587
9588 @@ -8956,7 +10090,7 @@ index ccae7e2..8ac70be 100644
9589 fail:
9590 return status;
9591 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
9592 -index ccb2f4a..e49b20e 100644
9593 +index 1e3184f..0d11e2e 100644
9594 --- a/arch/x86/boot/compressed/head_32.S
9595 +++ b/arch/x86/boot/compressed/head_32.S
9596 @@ -118,7 +118,7 @@ preferred_addr:
9597 @@ -8988,7 +10122,7 @@ index ccb2f4a..e49b20e 100644
9598 jmp 1b
9599 2:
9600 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
9601 -index 2c4b171..e1fa5b1 100644
9602 +index f5d1aaa..cce11dc 100644
9603 --- a/arch/x86/boot/compressed/head_64.S
9604 +++ b/arch/x86/boot/compressed/head_64.S
9605 @@ -91,7 +91,7 @@ ENTRY(startup_32)
9606 @@ -9131,10 +10265,10 @@ index 4d3ff03..e4972ff 100644
9607 err = check_flags();
9608 }
9609 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
9610 -index 8c132a6..13e5c96 100644
9611 +index 944ce59..87ee37a 100644
9612 --- a/arch/x86/boot/header.S
9613 +++ b/arch/x86/boot/header.S
9614 -@@ -387,10 +387,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
9615 +@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
9616 # single linked list of
9617 # struct setup_data
9618
9619 @@ -9515,7 +10649,7 @@ index 0b33743..7a56206 100644
9620 + pax_force_retaddr 0, 1
9621 ret;
9622 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9623 -index a41a3aa..bdf5753 100644
9624 +index 15b00ac..2071784 100644
9625 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9626 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
9627 @@ -23,6 +23,8 @@
9628 @@ -9526,33 +10660,55 @@ index a41a3aa..bdf5753 100644
9629 +
9630 .file "cast5-avx-x86_64-asm_64.S"
9631
9632 - .extern cast5_s1
9633 -@@ -293,6 +295,7 @@ __skip_enc:
9634 - leaq 3*(2*4*4)(%r11), %rax;
9635 - outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9636 + .extern cast_s1
9637 +@@ -281,6 +283,7 @@ __skip_enc:
9638 + outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
9639 + outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
9640
9641 + pax_force_retaddr 0, 1
9642 ret;
9643
9644 - __enc_xor16:
9645 -@@ -303,6 +306,7 @@ __enc_xor16:
9646 - leaq 3*(2*4*4)(%r11), %rax;
9647 - outunpack_xor_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9648 + .align 16
9649 +@@ -353,6 +356,7 @@ __dec_tail:
9650 + outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
9651 + outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
9652
9653 + pax_force_retaddr 0, 1
9654 ret;
9655
9656 + __skip_dec:
9657 +@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
9658 + vmovdqu RR4, (6*4*4)(%r11);
9659 + vmovdqu RL4, (7*4*4)(%r11);
9660 +
9661 ++ pax_force_retaddr
9662 + ret;
9663 +
9664 .align 16
9665 -@@ -369,6 +373,7 @@ __dec_tail:
9666 - leaq 3*(2*4*4)(%r11), %rax;
9667 - outunpack_blocks(%rax, RR4, RL4, RTMP, RX, RKM);
9668 +@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
9669 + vmovdqu RR4, (6*4*4)(%r11);
9670 + vmovdqu RL4, (7*4*4)(%r11);
9671
9672 -+ pax_force_retaddr 0, 1
9673 ++ pax_force_retaddr
9674 ret;
9675
9676 - __skip_dec:
9677 + .align 16
9678 +@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
9679 +
9680 + popq %r12;
9681 +
9682 ++ pax_force_retaddr
9683 + ret;
9684 +
9685 + .align 16
9686 +@@ -555,4 +562,5 @@ cast5_ctr_16way:
9687 +
9688 + popq %r12;
9689 +
9690 ++ pax_force_retaddr
9691 + ret;
9692 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9693 -index 218d283..819e6da 100644
9694 +index 2569d0d..637c289 100644
9695 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9696 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
9697 @@ -23,6 +23,8 @@
9698 @@ -9561,29 +10717,54 @@ index 218d283..819e6da 100644
9699
9700 +#include <asm/alternative-asm.h>
9701 +
9702 - .file "cast6-avx-x86_64-asm_64.S"
9703 + #include "glue_helper-asm-avx.S"
9704
9705 - .extern cast6_s1
9706 -@@ -324,12 +326,14 @@ __cast6_enc_blk_8way:
9707 - outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9708 - outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9709 + .file "cast6-avx-x86_64-asm_64.S"
9710 +@@ -294,6 +296,7 @@ __cast6_enc_blk8:
9711 + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9712 + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9713
9714 + pax_force_retaddr 0, 1
9715 ret;
9716
9717 - __enc_xor8:
9718 - outunpack_xor_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9719 - outunpack_xor_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9720 + .align 8
9721 +@@ -340,6 +343,7 @@ __cast6_dec_blk8:
9722 + outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9723 + outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9724
9725 + pax_force_retaddr 0, 1
9726 ret;
9727
9728 - .align 16
9729 -@@ -380,4 +384,5 @@ cast6_dec_blk_8way:
9730 - outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
9731 - outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
9732 + .align 8
9733 +@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
9734
9735 -+ pax_force_retaddr 0, 1
9736 + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
9737 +
9738 ++ pax_force_retaddr
9739 + ret;
9740 +
9741 + .align 8
9742 +@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
9743 +
9744 + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
9745 +
9746 ++ pax_force_retaddr
9747 + ret;
9748 +
9749 + .align 8
9750 +@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
9751 +
9752 + popq %r12;
9753 +
9754 ++ pax_force_retaddr
9755 + ret;
9756 +
9757 + .align 8
9758 +@@ -436,4 +443,5 @@ cast6_ctr_8way:
9759 +
9760 + popq %r12;
9761 +
9762 ++ pax_force_retaddr
9763 ret;
9764 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
9765 index 6214a9b..1f4fc9a 100644
9766 @@ -9618,7 +10799,7 @@ index 6214a9b..1f4fc9a 100644
9767 + pax_force_retaddr
9768 ret
9769 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9770 -index 504106b..4e50951 100644
9771 +index 02b0e9f..cf4cf5c 100644
9772 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9773 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
9774 @@ -24,6 +24,8 @@
9775 @@ -9627,27 +10808,52 @@ index 504106b..4e50951 100644
9776
9777 +#include <asm/alternative-asm.h>
9778 +
9779 + #include "glue_helper-asm-avx.S"
9780 +
9781 .file "serpent-avx-x86_64-asm_64.S"
9782 - .text
9783 +@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
9784 + write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9785 + write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9786
9787 -@@ -638,12 +640,14 @@ __serpent_enc_blk_8way_avx:
9788 - write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9789 - write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9790 ++ pax_force_retaddr
9791 + ret;
9792 +
9793 + .align 8
9794 +@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
9795 + write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9796 + write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9797
9798 + pax_force_retaddr
9799 ret;
9800
9801 - __enc_xor8:
9802 - xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
9803 - xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
9804 + .align 8
9805 +@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
9806 +
9807 + store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
9808
9809 + pax_force_retaddr
9810 ret;
9811
9812 .align 8
9813 -@@ -701,4 +705,5 @@ serpent_dec_blk_8way_avx:
9814 - write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
9815 - write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
9816 +@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
9817 +
9818 + store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
9819 +
9820 ++ pax_force_retaddr
9821 + ret;
9822 +
9823 + .align 8
9824 +@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
9825 +
9826 + store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
9827 +
9828 ++ pax_force_retaddr
9829 + ret;
9830 +
9831 + .align 8
9832 +@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
9833 +
9834 + store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
9835
9836 + pax_force_retaddr
9837 ret;
9838 @@ -9707,7 +10913,7 @@ index 49d6987..df66bd4 100644
9839
9840 .size \name, .-\name
9841 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9842 -index 1585abb..1ff9d9b 100644
9843 +index ebac16b..8092eb9 100644
9844 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9845 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
9846 @@ -23,6 +23,8 @@
9847 @@ -9716,27 +10922,52 @@ index 1585abb..1ff9d9b 100644
9848
9849 +#include <asm/alternative-asm.h>
9850 +
9851 + #include "glue_helper-asm-avx.S"
9852 +
9853 .file "twofish-avx-x86_64-asm_64.S"
9854 - .text
9855 +@@ -283,6 +285,7 @@ __twofish_enc_blk8:
9856 + outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9857 + outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9858 +
9859 ++ pax_force_retaddr 0, 1
9860 + ret;
9861
9862 -@@ -303,12 +305,14 @@ __twofish_enc_blk_8way:
9863 - outunpack_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9864 - outunpack_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9865 + .align 8
9866 +@@ -324,6 +327,7 @@ __twofish_dec_blk8:
9867 + outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9868 + outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9869
9870 + pax_force_retaddr 0, 1
9871 ret;
9872
9873 - __enc_xor8:
9874 - outunpack_xor_blocks(%r11, RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
9875 - outunpack_xor_blocks(%rax, RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
9876 + .align 8
9877 +@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
9878 +
9879 + store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
9880 +
9881 ++ pax_force_retaddr 0, 1
9882 + ret;
9883 +
9884 + .align 8
9885 +@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
9886 +
9887 + store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
9888
9889 + pax_force_retaddr 0, 1
9890 ret;
9891
9892 .align 8
9893 -@@ -354,4 +358,5 @@ twofish_dec_blk_8way:
9894 - outunpack_blocks(%r11, RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
9895 - outunpack_blocks(%rax, RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
9896 +@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
9897 +
9898 + popq %r12;
9899 +
9900 ++ pax_force_retaddr 0, 1
9901 + ret;
9902 +
9903 + .align 8
9904 +@@ -420,4 +427,5 @@ twofish_ctr_8way:
9905 +
9906 + popq %r12;
9907
9908 + pax_force_retaddr 0, 1
9909 ret;
9910 @@ -9803,7 +11034,7 @@ index 7bcf3fc..f53832f 100644
9911 + pax_force_retaddr 0, 1
9912 ret
9913 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
9914 -index 07b3a68..bd2a388 100644
9915 +index a703af1..f5b9c36 100644
9916 --- a/arch/x86/ia32/ia32_aout.c
9917 +++ b/arch/x86/ia32/ia32_aout.c
9918 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
9919 @@ -9816,21 +11047,10 @@ index 07b3a68..bd2a388 100644
9920 set_fs(KERNEL_DS);
9921 has_dumped = 1;
9922 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
9923 -index efc6a95..95abfe2 100644
9924 +index a1daf4a..f8c4537 100644
9925 --- a/arch/x86/ia32/ia32_signal.c
9926 +++ b/arch/x86/ia32/ia32_signal.c
9927 -@@ -163,8 +163,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
9928 - }
9929 - seg = get_fs();
9930 - set_fs(KERNEL_DS);
9931 -- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
9932 -- (stack_t __force __user *) &uoss, regs->sp);
9933 -+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
9934 -+ (stack_t __force_user *) &uoss, regs->sp);
9935 - set_fs(seg);
9936 - if (ret >= 0 && uoss_ptr) {
9937 - if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
9938 -@@ -396,7 +396,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9939 +@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
9940 sp -= frame_size;
9941 /* Align the stack pointer according to the i386 ABI,
9942 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
9943 @@ -9839,7 +11059,7 @@ index efc6a95..95abfe2 100644
9944 return (void __user *) sp;
9945 }
9946
9947 -@@ -454,7 +454,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9948 +@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
9949 * These are actually not used anymore, but left because some
9950 * gdb versions depend on them as a marker.
9951 */
9952 @@ -9848,7 +11068,7 @@ index efc6a95..95abfe2 100644
9953 } put_user_catch(err);
9954
9955 if (err)
9956 -@@ -496,7 +496,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9957 +@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9958 0xb8,
9959 __NR_ia32_rt_sigreturn,
9960 0x80cd,
9961 @@ -9857,7 +11077,7 @@ index efc6a95..95abfe2 100644
9962 };
9963
9964 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
9965 -@@ -522,16 +522,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9966 +@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
9967
9968 if (ka->sa.sa_flags & SA_RESTORER)
9969 restorer = ka->sa.sa_restorer;
9970 @@ -9880,7 +11100,7 @@ index efc6a95..95abfe2 100644
9971
9972 err |= copy_siginfo_to_user32(&frame->info, info);
9973 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
9974 -index e7fa545..9e6fe1a 100644
9975 +index 142c4ce..19b683f 100644
9976 --- a/arch/x86/ia32/ia32entry.S
9977 +++ b/arch/x86/ia32/ia32entry.S
9978 @@ -15,8 +15,10 @@
9979 @@ -10192,7 +11412,7 @@ index e7fa545..9e6fe1a 100644
9980 END(ia32_syscall)
9981
9982 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9983 -index 86d68d1..f9960fe 100644
9984 +index d0b689b..34be51d 100644
9985 --- a/arch/x86/ia32/sys_ia32.c
9986 +++ b/arch/x86/ia32/sys_ia32.c
9987 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9988 @@ -10350,7 +11570,7 @@ index 20370c6..a2eb9b0 100644
9989 "popl %%ebp\n\t"
9990 "popl %%edi\n\t"
9991 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9992 -index b6c3b82..b4c077a 100644
9993 +index 722aa3b..3a0bb27 100644
9994 --- a/arch/x86/include/asm/atomic.h
9995 +++ b/arch/x86/include/asm/atomic.h
9996 @@ -22,7 +22,18 @@
9997 @@ -10603,19 +11823,14 @@ index b6c3b82..b4c077a 100644
9998 : "+m" (v->counter), "=qm" (c)
9999 : "ir" (i) : "memory");
10000 return c;
10001 -@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
10002 - goto no_xadd;
10003 - #endif
10004 - /* Modern 486+ processor */
10005 -- return i + xadd(&v->counter, i);
10006 +@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10007 + */
10008 + static inline int atomic_add_return(int i, atomic_t *v)
10009 + {
10010 + return i + xadd_check_overflow(&v->counter, i);
10011 -
10012 - #ifdef CONFIG_M386
10013 - no_xadd: /* Legacy 386 processor */
10014 -@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
10015 - }
10016 -
10017 - /**
10018 ++}
10019 ++
10020 ++/**
10021 + * atomic_add_return_unchecked - add integer and return
10022 + * @i: integer value to add
10023 + * @v: pointer of type atomic_unchecked_t
10024 @@ -10624,30 +11839,10 @@ index b6c3b82..b4c077a 100644
10025 + */
10026 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10027 +{
10028 -+#ifdef CONFIG_M386
10029 -+ int __i;
10030 -+ unsigned long flags;
10031 -+ if (unlikely(boot_cpu_data.x86 <= 3))
10032 -+ goto no_xadd;
10033 -+#endif
10034 -+ /* Modern 486+ processor */
10035 -+ return i + xadd(&v->counter, i);
10036 -+
10037 -+#ifdef CONFIG_M386
10038 -+no_xadd: /* Legacy 386 processor */
10039 -+ raw_local_irq_save(flags);
10040 -+ __i = atomic_read_unchecked(v);
10041 -+ atomic_set_unchecked(v, i + __i);
10042 -+ raw_local_irq_restore(flags);
10043 -+ return i + __i;
10044 -+#endif
10045 -+}
10046 -+
10047 -+/**
10048 - * atomic_sub_return - subtract integer and return
10049 - * @v: pointer of type atomic_t
10050 - * @i: integer value to subtract
10051 -@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10052 + return i + xadd(&v->counter, i);
10053 + }
10054 +
10055 +@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10056 }
10057
10058 #define atomic_inc_return(v) (atomic_add_return(1, v))
10059 @@ -10658,7 +11853,7 @@ index b6c3b82..b4c077a 100644
10060 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10061
10062 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10063 -@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10064 +@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
10065 return cmpxchg(&v->counter, old, new);
10066 }
10067
10068 @@ -10680,7 +11875,7 @@ index b6c3b82..b4c077a 100644
10069 /**
10070 * __atomic_add_unless - add unless the number is already a given value
10071 * @v: pointer of type atomic_t
10072 -@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10073 +@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
10074 */
10075 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10076 {
10077 @@ -10709,7 +11904,7 @@ index b6c3b82..b4c077a 100644
10078 if (likely(old == c))
10079 break;
10080 c = old;
10081 -@@ -241,6 +458,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10082 +@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10083 }
10084
10085 /**
10086 @@ -10759,7 +11954,7 @@ index b6c3b82..b4c077a 100644
10087 * atomic_inc_short - increment of a short integer
10088 * @v: pointer to type int
10089 *
10090 -@@ -269,14 +529,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10091 +@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
10092 #endif
10093
10094 /* These are x86-specific, used by some header files */
10095 @@ -11304,11 +12499,11 @@ index 6dfd019..0c6699f 100644
10096
10097 /**
10098 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10099 -index b13fe63..0dab13a 100644
10100 +index 4fa687a..60f2d39 100644
10101 --- a/arch/x86/include/asm/boot.h
10102 +++ b/arch/x86/include/asm/boot.h
10103 -@@ -11,10 +11,15 @@
10104 - #include <asm/pgtable_types.h>
10105 +@@ -6,10 +6,15 @@
10106 + #include <uapi/asm/boot.h>
10107
10108 /* Physical address where kernel should be loaded. */
10109 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10110 @@ -11457,10 +12652,10 @@ index 8d871ea..c1a0dc9 100644
10111 ({ \
10112 __typeof__ (*(ptr)) __ret = (inc); \
10113 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10114 -index 8c297aa..7a90f03 100644
10115 +index 2d9075e..b75a844 100644
10116 --- a/arch/x86/include/asm/cpufeature.h
10117 +++ b/arch/x86/include/asm/cpufeature.h
10118 -@@ -205,7 +205,7 @@
10119 +@@ -206,7 +206,7 @@
10120 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
10121 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
10122 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
10123 @@ -11469,7 +12664,7 @@ index 8c297aa..7a90f03 100644
10124 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
10125 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
10126 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
10127 -@@ -379,7 +379,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10128 +@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10129 ".section .discard,\"aw\",@progbits\n"
10130 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10131 ".previous\n"
10132 @@ -11689,21 +12884,8 @@ index 278441f..b95a174 100644
10133 };
10134 } __attribute__((packed));
10135
10136 -diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10137 -index 3778256..c5d4fce 100644
10138 ---- a/arch/x86/include/asm/e820.h
10139 -+++ b/arch/x86/include/asm/e820.h
10140 -@@ -69,7 +69,7 @@ struct e820map {
10141 - #define ISA_START_ADDRESS 0xa0000
10142 - #define ISA_END_ADDRESS 0x100000
10143 -
10144 --#define BIOS_BEGIN 0x000a0000
10145 -+#define BIOS_BEGIN 0x000c0000
10146 - #define BIOS_END 0x00100000
10147 -
10148 - #define BIOS_ROM_BASE 0xffe00000
10149 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10150 -index 5939f44..f8845f6 100644
10151 +index 9c999c1..3860cb8 100644
10152 --- a/arch/x86/include/asm/elf.h
10153 +++ b/arch/x86/include/asm/elf.h
10154 @@ -243,7 +243,25 @@ extern int force_personality32;
10155 @@ -11807,7 +12989,7 @@ index 41ab26e..a88c9e6 100644
10156 return fpu_restore_checking(&tsk->thread.fpu);
10157 }
10158 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10159 -index f373046..02653e2 100644
10160 +index be27ba1..8f13ff9 100644
10161 --- a/arch/x86/include/asm/futex.h
10162 +++ b/arch/x86/include/asm/futex.h
10163 @@ -12,6 +12,7 @@
10164 @@ -11846,7 +13028,7 @@ index f373046..02653e2 100644
10165 : "r" (oparg), "i" (-EFAULT), "1" (0))
10166
10167 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10168 -@@ -65,10 +67,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10169 +@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10170
10171 switch (op) {
10172 case FUTEX_OP_SET:
10173 @@ -11859,7 +13041,7 @@ index f373046..02653e2 100644
10174 uaddr, oparg);
10175 break;
10176 case FUTEX_OP_OR:
10177 -@@ -128,14 +130,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10178 +@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10179 return -EFAULT;
10180
10181 asm volatile("\t" ASM_STAC "\n"
10182 @@ -11959,7 +13141,7 @@ index d3ddd17..c9fb0cc 100644
10183 #define flush_insn_slot(p) do { } while (0)
10184
10185 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10186 -index c8bed0d..85c03fd 100644
10187 +index 2d89e39..baee879 100644
10188 --- a/arch/x86/include/asm/local.h
10189 +++ b/arch/x86/include/asm/local.h
10190 @@ -10,33 +10,97 @@ typedef struct {
10191 @@ -12136,11 +13318,10 @@ index c8bed0d..85c03fd 100644
10192 : "+m" (l->a.counter), "=qm" (c)
10193 : "ir" (i) : "memory");
10194 return c;
10195 -@@ -132,7 +232,15 @@ static inline long local_add_return(long i, local_t *l)
10196 - #endif
10197 - /* Modern 486+ processor */
10198 - __i = i;
10199 -- asm volatile(_ASM_XADD "%0, %1;"
10200 +@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
10201 + static inline long local_add_return(long i, local_t *l)
10202 + {
10203 + long __i = i;
10204 + asm volatile(_ASM_XADD "%0, %1\n"
10205 +
10206 +#ifdef CONFIG_PAX_REFCOUNT
10207 @@ -12150,13 +13331,11 @@ index c8bed0d..85c03fd 100644
10208 + _ASM_EXTABLE(0b, 0b)
10209 +#endif
10210 +
10211 - : "+r" (i), "+m" (l->a.counter)
10212 - : : "memory");
10213 - return i + __i;
10214 -@@ -147,6 +255,38 @@ no_xadd: /* Legacy 386 processor */
10215 - #endif
10216 - }
10217 -
10218 ++ : "+r" (i), "+m" (l->a.counter)
10219 ++ : : "memory");
10220 ++ return i + __i;
10221 ++}
10222 ++
10223 +/**
10224 + * local_add_return_unchecked - add and return
10225 + * @i: integer value to add
10226 @@ -12166,33 +13345,11 @@ index c8bed0d..85c03fd 100644
10227 + */
10228 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
10229 +{
10230 -+ long __i;
10231 -+#ifdef CONFIG_M386
10232 -+ unsigned long flags;
10233 -+ if (unlikely(boot_cpu_data.x86 <= 3))
10234 -+ goto no_xadd;
10235 -+#endif
10236 -+ /* Modern 486+ processor */
10237 -+ __i = i;
10238 -+ asm volatile(_ASM_XADD "%0, %1\n"
10239 -+ : "+r" (i), "+m" (l->a.counter)
10240 -+ : : "memory");
10241 -+ return i + __i;
10242 -+
10243 -+#ifdef CONFIG_M386
10244 -+no_xadd: /* Legacy 386 processor */
10245 -+ local_irq_save(flags);
10246 -+ __i = local_read_unchecked(l);
10247 -+ local_set_unchecked(l, i + __i);
10248 -+ local_irq_restore(flags);
10249 -+ return i + __i;
10250 -+#endif
10251 -+}
10252 -+
10253 - static inline long local_sub_return(long i, local_t *l)
10254 - {
10255 - return local_add_return(-i, l);
10256 -@@ -157,6 +297,8 @@ static inline long local_sub_return(long i, local_t *l)
10257 ++ long __i = i;
10258 + asm volatile(_ASM_XADD "%0, %1;"
10259 + : "+r" (i), "+m" (l->a.counter)
10260 + : : "memory");
10261 +@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
10262
10263 #define local_cmpxchg(l, o, n) \
10264 (cmpxchg_local(&((l)->a.counter), (o), (n)))
10265 @@ -12202,24 +13359,26 @@ index c8bed0d..85c03fd 100644
10266 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
10267
10268 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10269 -index 593e51d..fa69c9a 100644
10270 ---- a/arch/x86/include/asm/mman.h
10271 +new file mode 100644
10272 +index 0000000..2bfd3ba
10273 +--- /dev/null
10274 +++ b/arch/x86/include/asm/mman.h
10275 -@@ -5,4 +5,14 @@
10276 -
10277 - #include <asm-generic/mman.h>
10278 -
10279 +@@ -0,0 +1,15 @@
10280 ++#ifndef _X86_MMAN_H
10281 ++#define _X86_MMAN_H
10282 ++
10283 ++#include <uapi/asm/mman.h>
10284 ++
10285 +#ifdef __KERNEL__
10286 +#ifndef __ASSEMBLY__
10287 +#ifdef CONFIG_X86_32
10288 +#define arch_mmap_check i386_mmap_check
10289 -+int i386_mmap_check(unsigned long addr, unsigned long len,
10290 -+ unsigned long flags);
10291 ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
10292 +#endif
10293 +#endif
10294 +#endif
10295 +
10296 - #endif /* _ASM_X86_MMAN_H */
10297 ++#endif /* X86_MMAN_H */
10298 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10299 index 5f55e69..e20bfb1 100644
10300 --- a/arch/x86/include/asm/mmu.h
10301 @@ -12379,7 +13538,7 @@ index cdbf367..adb37ac 100644
10302
10303 #define activate_mm(prev, next) \
10304 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10305 -index 9eae775..c914fea 100644
10306 +index e3b7819..b257c64 100644
10307 --- a/arch/x86/include/asm/module.h
10308 +++ b/arch/x86/include/asm/module.h
10309 @@ -5,6 +5,7 @@
10310 @@ -12387,10 +13546,10 @@ index 9eae775..c914fea 100644
10311 #ifdef CONFIG_X86_64
10312 /* X86_64 does not define MODULE_PROC_FAMILY */
10313 +#define MODULE_PROC_FAMILY ""
10314 - #elif defined CONFIG_M386
10315 - #define MODULE_PROC_FAMILY "386 "
10316 #elif defined CONFIG_M486
10317 -@@ -59,8 +60,20 @@
10318 + #define MODULE_PROC_FAMILY "486 "
10319 + #elif defined CONFIG_M586
10320 +@@ -57,8 +58,20 @@
10321 #error unknown processor family
10322 #endif
10323
10324 @@ -12427,10 +13586,10 @@ index 320f7bb..e89f8f8 100644
10325 extern unsigned long __phys_addr(unsigned long);
10326 #define __phys_reloc_hide(x) (x)
10327 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10328 -index a0facf3..c017b15 100644
10329 +index 5edd174..9cf5821 100644
10330 --- a/arch/x86/include/asm/paravirt.h
10331 +++ b/arch/x86/include/asm/paravirt.h
10332 -@@ -632,6 +632,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10333 +@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10334 val);
10335 }
10336
10337 @@ -12449,7 +13608,7 @@ index a0facf3..c017b15 100644
10338 static inline void pgd_clear(pgd_t *pgdp)
10339 {
10340 set_pgd(pgdp, __pgd(0));
10341 -@@ -713,6 +725,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10342 +@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10343 pv_mmu_ops.set_fixmap(idx, phys, flags);
10344 }
10345
10346 @@ -12471,7 +13630,7 @@ index a0facf3..c017b15 100644
10347 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10348
10349 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10350 -@@ -929,7 +956,7 @@ extern void default_banner(void);
10351 +@@ -927,7 +954,7 @@ extern void default_banner(void);
10352
10353 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10354 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10355 @@ -12480,7 +13639,7 @@ index a0facf3..c017b15 100644
10356 #endif
10357
10358 #define INTERRUPT_RETURN \
10359 -@@ -1004,6 +1031,21 @@ extern void default_banner(void);
10360 +@@ -1002,6 +1029,21 @@ extern void default_banner(void);
10361 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10362 CLBR_NONE, \
10363 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10364 @@ -12662,7 +13821,7 @@ index 4cc9f2b..5fd9226 100644
10365
10366 /*
10367 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10368 -index 796ed83..9f6c8dd 100644
10369 +index 1c1a955..50f828c 100644
10370 --- a/arch/x86/include/asm/pgtable.h
10371 +++ b/arch/x86/include/asm/pgtable.h
10372 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10373 @@ -12772,7 +13931,7 @@ index 796ed83..9f6c8dd 100644
10374 #include <linux/mm_types.h>
10375
10376 static inline int pte_none(pte_t pte)
10377 -@@ -570,7 +639,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10378 +@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10379
10380 static inline int pgd_bad(pgd_t pgd)
10381 {
10382 @@ -12781,7 +13940,7 @@ index 796ed83..9f6c8dd 100644
10383 }
10384
10385 static inline int pgd_none(pgd_t pgd)
10386 -@@ -593,7 +662,12 @@ static inline int pgd_none(pgd_t pgd)
10387 +@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
10388 * pgd_offset() returns a (pgd_t *)
10389 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10390 */
10391 @@ -12795,7 +13954,7 @@ index 796ed83..9f6c8dd 100644
10392 /*
10393 * a shortcut which implies the use of the kernel's pgd, instead
10394 * of a process's
10395 -@@ -604,6 +678,20 @@ static inline int pgd_none(pgd_t pgd)
10396 +@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
10397 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10398 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10399
10400 @@ -12816,7 +13975,7 @@ index 796ed83..9f6c8dd 100644
10401 #ifndef __ASSEMBLY__
10402
10403 extern int direct_gbpages;
10404 -@@ -768,11 +856,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10405 +@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10406 * dst and src can be on the same page, but the range must not overlap,
10407 * and must not cross a page boundary.
10408 */
10409 @@ -12993,7 +14152,7 @@ index 766ea16..5b96cb3 100644
10410
10411 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
10412 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
10413 -index ec8a1fc..7ccb593 100644
10414 +index 3c32db8..1ddccf5 100644
10415 --- a/arch/x86/include/asm/pgtable_types.h
10416 +++ b/arch/x86/include/asm/pgtable_types.h
10417 @@ -16,13 +16,12 @@
10418 @@ -13033,7 +14192,7 @@ index ec8a1fc..7ccb593 100644
10419 #endif
10420
10421 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
10422 -@@ -96,6 +96,9 @@
10423 +@@ -116,6 +116,9 @@
10424 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
10425 _PAGE_ACCESSED)
10426
10427 @@ -13043,7 +14202,7 @@ index ec8a1fc..7ccb593 100644
10428 #define __PAGE_KERNEL_EXEC \
10429 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
10430 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
10431 -@@ -106,7 +109,7 @@
10432 +@@ -126,7 +129,7 @@
10433 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
10434 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
10435 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
10436 @@ -13052,7 +14211,7 @@ index ec8a1fc..7ccb593 100644
10437 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
10438 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
10439 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
10440 -@@ -168,8 +171,8 @@
10441 +@@ -188,8 +191,8 @@
10442 * bits are combined, this will alow user to access the high address mapped
10443 * VDSO in the presence of CONFIG_COMPAT_VDSO
10444 */
10445 @@ -13063,7 +14222,7 @@ index ec8a1fc..7ccb593 100644
10446 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
10447 #endif
10448
10449 -@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
10450 +@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
10451 {
10452 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
10453 }
10454 @@ -13081,7 +14240,7 @@ index ec8a1fc..7ccb593 100644
10455 #if PAGETABLE_LEVELS > 3
10456 typedef struct { pudval_t pud; } pud_t;
10457
10458 -@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
10459 +@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
10460 return pud.pud;
10461 }
10462 #else
10463 @@ -13090,7 +14249,7 @@ index ec8a1fc..7ccb593 100644
10464 static inline pudval_t native_pud_val(pud_t pud)
10465 {
10466 return native_pgd_val(pud.pgd);
10467 -@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
10468 +@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
10469 return pmd.pmd;
10470 }
10471 #else
10472 @@ -13099,7 +14258,7 @@ index ec8a1fc..7ccb593 100644
10473 static inline pmdval_t native_pmd_val(pmd_t pmd)
10474 {
10475 return native_pgd_val(pmd.pud.pgd);
10476 -@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
10477 +@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
10478
10479 extern pteval_t __supported_pte_mask;
10480 extern void set_nx(void);
10481 @@ -13108,10 +14267,10 @@ index ec8a1fc..7ccb593 100644
10482 #define pgprot_writecombine pgprot_writecombine
10483 extern pgprot_t pgprot_writecombine(pgprot_t prot);
10484 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
10485 -index ad1fc85..0b15fe1 100644
10486 +index 888184b..a07ac89 100644
10487 --- a/arch/x86/include/asm/processor.h
10488 +++ b/arch/x86/include/asm/processor.h
10489 -@@ -289,7 +289,7 @@ struct tss_struct {
10490 +@@ -287,7 +287,7 @@ struct tss_struct {
10491
10492 } ____cacheline_aligned;
10493
10494 @@ -13120,7 +14279,7 @@ index ad1fc85..0b15fe1 100644
10495
10496 /*
10497 * Save the original ist values for checking stack pointers during debugging
10498 -@@ -818,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
10499 +@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
10500 */
10501 #define TASK_SIZE PAGE_OFFSET
10502 #define TASK_SIZE_MAX TASK_SIZE
10503 @@ -13141,7 +14300,7 @@ index ad1fc85..0b15fe1 100644
10504 .vm86_info = NULL, \
10505 .sysenter_cs = __KERNEL_CS, \
10506 .io_bitmap_ptr = NULL, \
10507 -@@ -836,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
10508 +@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
10509 */
10510 #define INIT_TSS { \
10511 .x86_tss = { \
10512 @@ -13150,7 +14309,7 @@ index ad1fc85..0b15fe1 100644
10513 .ss0 = __KERNEL_DS, \
10514 .ss1 = __KERNEL_CS, \
10515 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
10516 -@@ -847,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
10517 +@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
10518 extern unsigned long thread_saved_pc(struct task_struct *tsk);
10519
10520 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
10521 @@ -13163,7 +14322,7 @@ index ad1fc85..0b15fe1 100644
10522
10523 /*
10524 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
10525 -@@ -866,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10526 +@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10527 #define task_pt_regs(task) \
10528 ({ \
10529 struct pt_regs *__regs__; \
10530 @@ -13172,7 +14331,7 @@ index ad1fc85..0b15fe1 100644
10531 __regs__ - 1; \
10532 })
10533
10534 -@@ -876,13 +879,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10535 +@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10536 /*
10537 * User space process size. 47bits minus one guard page.
10538 */
10539 @@ -13188,7 +14347,7 @@ index ad1fc85..0b15fe1 100644
10540
10541 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
10542 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
10543 -@@ -893,11 +896,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10544 +@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
10545 #define STACK_TOP_MAX TASK_SIZE_MAX
10546
10547 #define INIT_THREAD { \
10548 @@ -13202,7 +14361,7 @@ index ad1fc85..0b15fe1 100644
10549 }
10550
10551 /*
10552 -@@ -925,6 +928,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10553 +@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
10554 */
10555 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
10556
10557 @@ -13213,7 +14372,7 @@ index ad1fc85..0b15fe1 100644
10558 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
10559
10560 /* Get/set a process' ability to use the timestamp counter instruction */
10561 -@@ -985,12 +992,12 @@ extern bool cpu_has_amd_erratum(const int *);
10562 +@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
10563 #define cpu_has_amd_erratum(x) (false)
10564 #endif /* CONFIG_CPU_SUP_AMD */
10565
10566 @@ -13229,10 +14388,10 @@ index ad1fc85..0b15fe1 100644
10567
10568 #endif /* _ASM_X86_PROCESSOR_H */
10569 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
10570 -index 19f16eb..b50624b 100644
10571 +index 942a086..6c26446 100644
10572 --- a/arch/x86/include/asm/ptrace.h
10573 +++ b/arch/x86/include/asm/ptrace.h
10574 -@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10575 +@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
10576 }
10577
10578 /*
10579 @@ -13268,7 +14427,7 @@ index 19f16eb..b50624b 100644
10580 #endif
10581 }
10582
10583 -@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10584 +@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
10585 #ifdef CONFIG_X86_64
10586 static inline bool user_64bit_mode(struct pt_regs *regs)
10587 {
10588 @@ -13286,7 +14445,22 @@ index 19f16eb..b50624b 100644
10589 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
10590 #endif
10591 }
10592 +
10593 +@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
10594 + * Traps from the kernel do not save sp and ss.
10595 + * Use the helper function to retrieve sp.
10596 + */
10597 +- if (offset == offsetof(struct pt_regs, sp) &&
10598 +- regs->cs == __KERNEL_CS)
10599 +- return kernel_stack_pointer(regs);
10600 ++ if (offset == offsetof(struct pt_regs, sp)) {
10601 ++ unsigned long cs = regs->cs & 0xffff;
10602 ++ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
10603 ++ return kernel_stack_pointer(regs);
10604 ++ }
10605 #endif
10606 + return *(unsigned long *)((unsigned long)regs + offset);
10607 + }
10608 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
10609 index fe1ec5b..dc5c3fe 100644
10610 --- a/arch/x86/include/asm/realmode.h
10611 @@ -13532,7 +14706,7 @@ index c48a950..c6d7468 100644
10612
10613 #endif /* !__ASSEMBLY__ */
10614 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10615 -index 4f19a15..9e14f27 100644
10616 +index b073aae..39f9bdd 100644
10617 --- a/arch/x86/include/asm/smp.h
10618 +++ b/arch/x86/include/asm/smp.h
10619 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
10620 @@ -13553,7 +14727,7 @@ index 4f19a15..9e14f27 100644
10621
10622 /* Globals due to paravirt */
10623 extern void set_cpu_sibling_map(int cpu);
10624 -@@ -190,14 +190,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10625 +@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10626 extern int safe_smp_processor_id(void);
10627
10628 #elif defined(CONFIG_X86_64_SMP)
10629 @@ -13937,7 +15111,7 @@ index 2d946e6..e453ec4 100644
10630 #endif
10631 #endif /* _ASM_X86_THREAD_INFO_H */
10632 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10633 -index 7ccf8d1..9a18110 100644
10634 +index 1709801..0a60f2f 100644
10635 --- a/arch/x86/include/asm/uaccess.h
10636 +++ b/arch/x86/include/asm/uaccess.h
10637 @@ -7,6 +7,7 @@
10638 @@ -14033,7 +15207,7 @@ index 7ccf8d1..9a18110 100644
10639 "3: " ASM_CLAC "\n" \
10640 _ASM_EXTABLE_EX(1b, 2b) \
10641 _ASM_EXTABLE_EX(2b, 3b) \
10642 -@@ -261,7 +300,7 @@ extern void __put_user_8(void);
10643 +@@ -259,7 +298,7 @@ extern void __put_user_8(void);
10644 __typeof__(*(ptr)) __pu_val; \
10645 __chk_user_ptr(ptr); \
10646 might_fault(); \
10647 @@ -14042,7 +15216,7 @@ index 7ccf8d1..9a18110 100644
10648 switch (sizeof(*(ptr))) { \
10649 case 1: \
10650 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10651 -@@ -383,7 +422,7 @@ do { \
10652 +@@ -358,7 +397,7 @@ do { \
10653
10654 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10655 asm volatile(ASM_STAC "\n" \
10656 @@ -14051,7 +15225,7 @@ index 7ccf8d1..9a18110 100644
10657 "2: " ASM_CLAC "\n" \
10658 ".section .fixup,\"ax\"\n" \
10659 "3: mov %3,%0\n" \
10660 -@@ -391,7 +430,7 @@ do { \
10661 +@@ -366,7 +405,7 @@ do { \
10662 " jmp 2b\n" \
10663 ".previous\n" \
10664 _ASM_EXTABLE(1b, 3b) \
10665 @@ -14060,7 +15234,7 @@ index 7ccf8d1..9a18110 100644
10666 : "m" (__m(addr)), "i" (errret), "0" (err))
10667
10668 #define __get_user_size_ex(x, ptr, size) \
10669 -@@ -416,7 +455,7 @@ do { \
10670 +@@ -391,7 +430,7 @@ do { \
10671 } while (0)
10672
10673 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10674 @@ -14069,7 +15243,7 @@ index 7ccf8d1..9a18110 100644
10675 "2:\n" \
10676 _ASM_EXTABLE_EX(1b, 2b) \
10677 : ltype(x) : "m" (__m(addr)))
10678 -@@ -433,13 +472,24 @@ do { \
10679 +@@ -408,13 +447,24 @@ do { \
10680 int __gu_err; \
10681 unsigned long __gu_val; \
10682 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10683 @@ -14096,7 +15270,7 @@ index 7ccf8d1..9a18110 100644
10684
10685 /*
10686 * Tell gcc we read from memory instead of writing: this is because
10687 -@@ -448,7 +498,7 @@ struct __large_struct { unsigned long buf[100]; };
10688 +@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 */
10690 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10691 asm volatile(ASM_STAC "\n" \
10692 @@ -14105,7 +15279,7 @@ index 7ccf8d1..9a18110 100644
10693 "2: " ASM_CLAC "\n" \
10694 ".section .fixup,\"ax\"\n" \
10695 "3: mov %3,%0\n" \
10696 -@@ -456,10 +506,10 @@ struct __large_struct { unsigned long buf[100]; };
10697 +@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701 @@ -14118,7 +15292,7 @@ index 7ccf8d1..9a18110 100644
10702 "2:\n" \
10703 _ASM_EXTABLE_EX(1b, 2b) \
10704 : : ltype(x), "m" (__m(addr)))
10705 -@@ -498,8 +548,12 @@ struct __large_struct { unsigned long buf[100]; };
10706 +@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
10707 * On error, the variable @x is set to zero.
10708 */
10709
10710 @@ -14131,7 +15305,7 @@ index 7ccf8d1..9a18110 100644
10711
10712 /**
10713 * __put_user: - Write a simple value into user space, with less checking.
10714 -@@ -521,8 +575,12 @@ struct __large_struct { unsigned long buf[100]; };
10715 +@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
10716 * Returns zero on success, or -EFAULT on error.
10717 */
10718
10719 @@ -14144,7 +15318,7 @@ index 7ccf8d1..9a18110 100644
10720
10721 #define __get_user_unaligned __get_user
10722 #define __put_user_unaligned __put_user
10723 -@@ -540,7 +598,7 @@ struct __large_struct { unsigned long buf[100]; };
10724 +@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
10725 #define get_user_ex(x, ptr) do { \
10726 unsigned long __gue_val; \
10727 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10728 @@ -14152,8 +15326,8 @@ index 7ccf8d1..9a18110 100644
10729 + (x) = (__typeof__(*(ptr)))__gue_val; \
10730 } while (0)
10731
10732 - #ifdef CONFIG_X86_WP_WORKS_OK
10733 -@@ -574,8 +632,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
10734 + #define put_user_try uaccess_try
10735 +@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
10736 extern __must_check long strlen_user(const char __user *str);
10737 extern __must_check long strnlen_user(const char __user *str, long n);
10738
10739 @@ -14339,7 +15513,7 @@ index 7f760a9..04b1c65 100644
10740 }
10741
10742 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10743 -index 142810c..4b68a3e 100644
10744 +index 142810c..747941a 100644
10745 --- a/arch/x86/include/asm/uaccess_64.h
10746 +++ b/arch/x86/include/asm/uaccess_64.h
10747 @@ -10,6 +10,9 @@
10748 @@ -14370,7 +15544,7 @@ index 142810c..4b68a3e 100644
10749 copy_user_generic(void *to, const void *from, unsigned len)
10750 {
10751 unsigned ret;
10752 -@@ -41,142 +44,203 @@ copy_user_generic(void *to, const void *from, unsigned len)
10753 +@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
10754 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
10755 "=d" (len)),
10756 "1" (to), "2" (from), "3" (len)
10757 @@ -14422,6 +15596,7 @@ index 142810c..4b68a3e 100644
10758 -#endif
10759 +
10760 + check_object_size(to, n, false);
10761 ++
10762 + if (access_ok(VERIFY_READ, from, n))
10763 + n = __copy_from_user(to, from, n);
10764 + else if (n < INT_MAX)
10765 @@ -14619,7 +15794,7 @@ index 142810c..4b68a3e 100644
10766 ret, "b", "b", "=q", 1);
10767 if (likely(!ret))
10768 __put_user_asm(tmp, (u8 __user *)dst,
10769 -@@ -185,7 +249,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10770 +@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10771 }
10772 case 2: {
10773 u16 tmp;
10774 @@ -14628,7 +15803,7 @@ index 142810c..4b68a3e 100644
10775 ret, "w", "w", "=r", 2);
10776 if (likely(!ret))
10777 __put_user_asm(tmp, (u16 __user *)dst,
10778 -@@ -195,7 +259,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10779 +@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10780
10781 case 4: {
10782 u32 tmp;
10783 @@ -14637,7 +15812,7 @@ index 142810c..4b68a3e 100644
10784 ret, "l", "k", "=r", 4);
10785 if (likely(!ret))
10786 __put_user_asm(tmp, (u32 __user *)dst,
10787 -@@ -204,7 +268,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10788 +@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10789 }
10790 case 8: {
10791 u64 tmp;
10792 @@ -14646,7 +15821,7 @@ index 142810c..4b68a3e 100644
10793 ret, "q", "", "=r", 8);
10794 if (likely(!ret))
10795 __put_user_asm(tmp, (u64 __user *)dst,
10796 -@@ -212,41 +276,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10797 +@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10798 return ret;
10799 }
10800 default:
10801 @@ -14828,11 +16003,24 @@ index 0415cda..b43d877 100644
10802 "2: " ASM_CLAC "\n"
10803 ".section .fixup,\"ax\"\n"
10804 "3: movl $-1,%[err]\n"
10805 +diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
10806 +index bbae024..e1528f9 100644
10807 +--- a/arch/x86/include/uapi/asm/e820.h
10808 ++++ b/arch/x86/include/uapi/asm/e820.h
10809 +@@ -63,7 +63,7 @@ struct e820map {
10810 + #define ISA_START_ADDRESS 0xa0000
10811 + #define ISA_END_ADDRESS 0x100000
10812 +
10813 +-#define BIOS_BEGIN 0x000a0000
10814 ++#define BIOS_BEGIN 0x000c0000
10815 + #define BIOS_END 0x00100000
10816 +
10817 + #define BIOS_ROM_BASE 0xffe00000
10818 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
10819 -index 91ce48f..a48ea05 100644
10820 +index 34e923a..0c6bb6e 100644
10821 --- a/arch/x86/kernel/Makefile
10822 +++ b/arch/x86/kernel/Makefile
10823 -@@ -23,7 +23,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
10824 +@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
10825 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
10826 obj-$(CONFIG_IRQ_WORK) += irq_work.o
10827 obj-y += probe_roms.o
10828 @@ -14842,7 +16030,7 @@ index 91ce48f..a48ea05 100644
10829 obj-y += syscall_$(BITS).o
10830 obj-$(CONFIG_X86_64) += vsyscall_64.o
10831 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
10832 -index 11676cf..a8cf3ec 100644
10833 +index d5e0d71..6533e08 100644
10834 --- a/arch/x86/kernel/acpi/sleep.c
10835 +++ b/arch/x86/kernel/acpi/sleep.c
10836 @@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
10837 @@ -15016,10 +16204,19 @@ index ef5ccca..bd83949 100644
10838 }
10839
10840 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
10841 -index b17416e..5ed0f3e 100644
10842 +index b994cc8..812b537 100644
10843 --- a/arch/x86/kernel/apic/apic.c
10844 +++ b/arch/x86/kernel/apic/apic.c
10845 -@@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
10846 +@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
10847 + {
10848 + if (config_enabled(CONFIG_X86_32) && !arg)
10849 + force_enable_local_apic = 1;
10850 +- else if (!strncmp(arg, "notscdeadline", 13))
10851 ++ else if (arg && !strncmp(arg, "notscdeadline", 13))
10852 + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
10853 + return 0;
10854 + }
10855 +@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
10856 /*
10857 * Debug level, exported for io_apic.c
10858 */
10859 @@ -15028,7 +16225,7 @@ index b17416e..5ed0f3e 100644
10860
10861 int pic_mode;
10862
10863 -@@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
10864 +@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
10865 apic_write(APIC_ESR, 0);
10866 v1 = apic_read(APIC_ESR);
10867 ack_APIC_irq();
10868 @@ -15096,7 +16293,7 @@ index 0874799..24a836e 100644
10869 .name = "es7000",
10870 .probe = probe_es7000,
10871 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
10872 -index 1817fa9..7bff097 100644
10873 +index b739d39..6e4f1db 100644
10874 --- a/arch/x86/kernel/apic/io_apic.c
10875 +++ b/arch/x86/kernel/apic/io_apic.c
10876 @@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
10877 @@ -15117,7 +16314,7 @@ index 1817fa9..7bff097 100644
10878 {
10879 raw_spin_unlock(&vector_lock);
10880 }
10881 -@@ -2411,7 +2411,7 @@ static void ack_apic_edge(struct irq_data *data)
10882 +@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
10883 ack_APIC_irq();
10884 }
10885
10886 @@ -15126,7 +16323,7 @@ index 1817fa9..7bff097 100644
10887
10888 #ifdef CONFIG_GENERIC_PENDING_IRQ
10889 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
10890 -@@ -2552,7 +2552,7 @@ static void ack_apic_level(struct irq_data *data)
10891 +@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
10892 * at the cpu.
10893 */
10894 if (!(v & (1 << (i & 0x1f)))) {
10895 @@ -15176,9 +16373,18 @@ index 77c95c0..434f8a4 100644
10896 .name = "summit",
10897 .probe = probe_summit,
10898 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
10899 -index c88baa4..a89def0 100644
10900 +index c88baa4..757aee1 100644
10901 --- a/arch/x86/kernel/apic/x2apic_cluster.c
10902 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
10903 +@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
10904 + return notifier_from_errno(err);
10905 + }
10906 +
10907 +-static struct notifier_block __refdata x2apic_cpu_notifier = {
10908 ++static struct notifier_block x2apic_cpu_notifier = {
10909 + .notifier_call = update_clusterinfo,
10910 + };
10911 +
10912 @@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
10913 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
10914 }
10915 @@ -15357,10 +16563,10 @@ index a0e067d..9c7db16 100644
10916 obj-y += proc.o capflags.o powerflags.o common.o
10917 obj-y += vmware.o hypervisor.o mshyperv.o
10918 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
10919 -index 1b7d165..b9e2627 100644
10920 +index 15239ff..e23e04e 100644
10921 --- a/arch/x86/kernel/cpu/amd.c
10922 +++ b/arch/x86/kernel/cpu/amd.c
10923 -@@ -738,7 +738,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
10924 +@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
10925 unsigned int size)
10926 {
10927 /* AMD errata T13 (order #21922) */
10928 @@ -15370,7 +16576,7 @@ index 1b7d165..b9e2627 100644
10929 if (c->x86_model == 3 && c->x86_mask == 0)
10930 size = 64;
10931 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
10932 -index 7505f7b..d59dac0 100644
10933 +index 9c3ab43..51e6366 100644
10934 --- a/arch/x86/kernel/cpu/common.c
10935 +++ b/arch/x86/kernel/cpu/common.c
10936 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
10937 @@ -15478,16 +16684,7 @@ index 7505f7b..d59dac0 100644
10938 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10939
10940 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10941 -@@ -1178,7 +1130,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
10942 - {
10943 - memset(regs, 0, sizeof(struct pt_regs));
10944 - regs->fs = __KERNEL_PERCPU;
10945 -- regs->gs = __KERNEL_STACK_CANARY;
10946 -+ savesegment(gs, regs->gs);
10947 -
10948 - return regs;
10949 - }
10950 -@@ -1233,7 +1185,7 @@ void __cpuinit cpu_init(void)
10951 +@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
10952 int i;
10953
10954 cpu = stack_smp_processor_id();
10955 @@ -15496,7 +16693,7 @@ index 7505f7b..d59dac0 100644
10956 oist = &per_cpu(orig_ist, cpu);
10957
10958 #ifdef CONFIG_NUMA
10959 -@@ -1259,7 +1211,7 @@ void __cpuinit cpu_init(void)
10960 +@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
10961 switch_to_new_gdt(cpu);
10962 loadsegment(fs, 0);
10963
10964 @@ -15505,15 +16702,15 @@ index 7505f7b..d59dac0 100644
10965
10966 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10967 syscall_init();
10968 -@@ -1268,7 +1220,6 @@ void __cpuinit cpu_init(void)
10969 +@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
10970 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10971 barrier();
10972
10973 - x86_configure_nx();
10974 - if (cpu != 0)
10975 - enable_x2apic();
10976 + enable_x2apic();
10977
10978 -@@ -1321,7 +1272,7 @@ void __cpuinit cpu_init(void)
10979 + /*
10980 +@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
10981 {
10982 int cpu = smp_processor_id();
10983 struct task_struct *curr = current;
10984 @@ -15523,7 +16720,7 @@ index 7505f7b..d59dac0 100644
10985
10986 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10987 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
10988 -index 198e019..867575e 100644
10989 +index fcaabd0..7b55a26 100644
10990 --- a/arch/x86/kernel/cpu/intel.c
10991 +++ b/arch/x86/kernel/cpu/intel.c
10992 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
10993 @@ -15536,10 +16733,10 @@ index 198e019..867575e 100644
10994 }
10995 #endif
10996 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
10997 -index 93c5451..3887433 100644
10998 +index 84c1309..39b7224 100644
10999 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
11000 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
11001 -@@ -983,6 +983,22 @@ static struct attribute *default_attrs[] = {
11002 +@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
11003 };
11004
11005 #ifdef CONFIG_AMD_NB
11006 @@ -15562,7 +16759,7 @@ index 93c5451..3887433 100644
11007 static struct attribute ** __cpuinit amd_l3_attrs(void)
11008 {
11009 static struct attribute **attrs;
11010 -@@ -993,18 +1009,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
11011 +@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
11012
11013 n = ARRAY_SIZE(default_attrs);
11014
11015 @@ -15582,7 +16779,7 @@ index 93c5451..3887433 100644
11016
11017 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
11018 attrs[n++] = &cache_disable_0.attr;
11019 -@@ -1055,6 +1060,13 @@ static struct kobj_type ktype_cache = {
11020 +@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
11021 .default_attrs = default_attrs,
11022 };
11023
11024 @@ -15596,7 +16793,7 @@ index 93c5451..3887433 100644
11025 static struct kobj_type ktype_percpu_entry = {
11026 .sysfs_ops = &sysfs_ops,
11027 };
11028 -@@ -1120,20 +1132,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
11029 +@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
11030 return retval;
11031 }
11032
11033 @@ -15626,8 +16823,17 @@ index 93c5451..3887433 100644
11034 per_cpu(ici_cache_kobject, cpu),
11035 "index%1lu", i);
11036 if (unlikely(retval)) {
11037 +@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
11038 + return NOTIFY_OK;
11039 + }
11040 +
11041 +-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
11042 ++static struct notifier_block cacheinfo_cpu_notifier = {
11043 + .notifier_call = cacheinfo_cpu_callback,
11044 + };
11045 +
11046 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
11047 -index 46cbf86..55c7292 100644
11048 +index 80dbda8..b45ebad 100644
11049 --- a/arch/x86/kernel/cpu/mcheck/mce.c
11050 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
11051 @@ -45,6 +45,7 @@
11052 @@ -15638,7 +16844,7 @@ index 46cbf86..55c7292 100644
11053
11054 #include "mce-internal.h"
11055
11056 -@@ -254,7 +255,7 @@ static void print_mce(struct mce *m)
11057 +@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
11058 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11059 m->cs, m->ip);
11060
11061 @@ -15647,7 +16853,7 @@ index 46cbf86..55c7292 100644
11062 print_symbol("{%s}", m->ip);
11063 pr_cont("\n");
11064 }
11065 -@@ -287,10 +288,10 @@ static void print_mce(struct mce *m)
11066 +@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
11067
11068 #define PANIC_TIMEOUT 5 /* 5 seconds */
11069
11070 @@ -15660,7 +16866,7 @@ index 46cbf86..55c7292 100644
11071
11072 /* Panic in progress. Enable interrupts and wait for final IPI */
11073 static void wait_for_panic(void)
11074 -@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
11075 +@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
11076 /*
11077 * Make sure only one CPU runs in machine check panic
11078 */
11079 @@ -15669,7 +16875,7 @@ index 46cbf86..55c7292 100644
11080 wait_for_panic();
11081 barrier();
11082
11083 -@@ -322,7 +323,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
11084 +@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
11085 console_verbose();
11086 } else {
11087 /* Don't log too much for fake panic */
11088 @@ -15678,16 +16884,16 @@ index 46cbf86..55c7292 100644
11089 return;
11090 }
11091 /* First print corrected ones that are still unlogged */
11092 -@@ -694,7 +695,7 @@ static int mce_timed_out(u64 *t)
11093 +@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
11094 * might have been modified by someone else.
11095 */
11096 rmb();
11097 - if (atomic_read(&mce_paniced))
11098 + if (atomic_read_unchecked(&mce_paniced))
11099 wait_for_panic();
11100 - if (!monarch_timeout)
11101 + if (!mca_cfg.monarch_timeout)
11102 goto out;
11103 -@@ -1659,7 +1660,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
11104 +@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
11105 }
11106
11107 /* Call the installed machine check handler for this CPU setup. */
11108 @@ -15696,7 +16902,7 @@ index 46cbf86..55c7292 100644
11109 unexpected_machine_check;
11110
11111 /*
11112 -@@ -1682,7 +1683,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
11113 +@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
11114 return;
11115 }
11116
11117 @@ -15706,7 +16912,7 @@ index 46cbf86..55c7292 100644
11118
11119 __mcheck_cpu_init_generic();
11120 __mcheck_cpu_init_vendor(c);
11121 -@@ -1696,7 +1699,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
11122 +@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
11123 */
11124
11125 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
11126 @@ -15715,7 +16921,7 @@ index 46cbf86..55c7292 100644
11127 static int mce_chrdev_open_exclu; /* already open exclusive? */
11128
11129 static int mce_chrdev_open(struct inode *inode, struct file *file)
11130 -@@ -1704,7 +1707,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
11131 +@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
11132 spin_lock(&mce_chrdev_state_lock);
11133
11134 if (mce_chrdev_open_exclu ||
11135 @@ -15724,7 +16930,7 @@ index 46cbf86..55c7292 100644
11136 spin_unlock(&mce_chrdev_state_lock);
11137
11138 return -EBUSY;
11139 -@@ -1712,7 +1715,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
11140 +@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
11141
11142 if (file->f_flags & O_EXCL)
11143 mce_chrdev_open_exclu = 1;
11144 @@ -15733,7 +16939,7 @@ index 46cbf86..55c7292 100644
11145
11146 spin_unlock(&mce_chrdev_state_lock);
11147
11148 -@@ -1723,7 +1726,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
11149 +@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
11150 {
11151 spin_lock(&mce_chrdev_state_lock);
11152
11153 @@ -15742,16 +16948,16 @@ index 46cbf86..55c7292 100644
11154 mce_chrdev_open_exclu = 0;
11155
11156 spin_unlock(&mce_chrdev_state_lock);
11157 -@@ -2367,7 +2370,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
11158 +@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
11159 return NOTIFY_OK;
11160 }
11161
11162 -static struct notifier_block mce_cpu_notifier __cpuinitdata = {
11163 -+static struct notifier_block mce_cpu_notifier __cpuinitconst = {
11164 ++static struct notifier_block mce_cpu_notifier = {
11165 .notifier_call = mce_cpu_callback,
11166 };
11167
11168 -@@ -2445,7 +2448,7 @@ struct dentry *mce_get_debugfs_dir(void)
11169 +@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
11170 static void mce_reset(void)
11171 {
11172 cpu_missing = 0;
11173 @@ -15782,6 +16988,19 @@ index 2d5454c..51987eb 100644
11174 /* Make sure the vector pointer is visible before we enable MCEs: */
11175 wmb();
11176
11177 +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
11178 +index 47a1870..8c019a7 100644
11179 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
11180 ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
11181 +@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
11182 + return notifier_from_errno(err);
11183 + }
11184 +
11185 +-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
11186 ++static struct notifier_block thermal_throttle_cpu_notifier =
11187 + {
11188 + .notifier_call = thermal_throttle_cpu_callback,
11189 + };
11190 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
11191 index 2d7998f..17c9de1 100644
11192 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
11193 @@ -15805,7 +17024,7 @@ index 2d7998f..17c9de1 100644
11194 wmb();
11195
11196 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
11197 -index 6b96110..0da73eb 100644
11198 +index 726bf96..81f0526 100644
11199 --- a/arch/x86/kernel/cpu/mtrr/main.c
11200 +++ b/arch/x86/kernel/cpu/mtrr/main.c
11201 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
11202 @@ -15831,10 +17050,10 @@ index df5e41f..816c719 100644
11203 extern int generic_get_free_region(unsigned long base, unsigned long size,
11204 int replace_reg);
11205 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
11206 -index d18b2b8..d3b834c 100644
11207 +index 6774c17..a691911 100644
11208 --- a/arch/x86/kernel/cpu/perf_event.c
11209 +++ b/arch/x86/kernel/cpu/perf_event.c
11210 -@@ -1759,7 +1759,7 @@ static unsigned long get_segment_base(unsigned int segment)
11211 +@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
11212 if (idx > GDT_ENTRIES)
11213 return 0;
11214
11215 @@ -15843,7 +17062,7 @@ index d18b2b8..d3b834c 100644
11216 }
11217
11218 return get_desc_base(desc + idx);
11219 -@@ -1849,7 +1849,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
11220 +@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
11221 break;
11222
11223 perf_callchain_store(entry, frame.return_address);
11224 @@ -15853,10 +17072,10 @@ index d18b2b8..d3b834c 100644
11225 }
11226
11227 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
11228 -index 324bb52..1a93d85 100644
11229 +index 4914e94..60b06e3 100644
11230 --- a/arch/x86/kernel/cpu/perf_event_intel.c
11231 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
11232 -@@ -1949,10 +1949,10 @@ __init int intel_pmu_init(void)
11233 +@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
11234 * v2 and above have a perf capabilities MSR
11235 */
11236 if (version > 1) {
11237 @@ -15870,11 +17089,37 @@ index 324bb52..1a93d85 100644
11238 }
11239
11240 intel_ds_init();
11241 +diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
11242 +index b43200d..62cddfe 100644
11243 +--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
11244 ++++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
11245 +@@ -2826,7 +2826,7 @@ static int
11246 + return NOTIFY_OK;
11247 + }
11248 +
11249 +-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
11250 ++static struct notifier_block uncore_cpu_nb = {
11251 + .notifier_call = uncore_cpu_notifier,
11252 + /*
11253 + * to migrate uncore events, our notifier should be executed
11254 +diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
11255 +index 60c7891..9e911d3 100644
11256 +--- a/arch/x86/kernel/cpuid.c
11257 ++++ b/arch/x86/kernel/cpuid.c
11258 +@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
11259 + return notifier_from_errno(err);
11260 + }
11261 +
11262 +-static struct notifier_block __refdata cpuid_class_cpu_notifier =
11263 ++static struct notifier_block cpuid_class_cpu_notifier =
11264 + {
11265 + .notifier_call = cpuid_class_cpu_callback,
11266 + };
11267 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
11268 -index 13ad899..f642b9a 100644
11269 +index 74467fe..18793d5 100644
11270 --- a/arch/x86/kernel/crash.c
11271 +++ b/arch/x86/kernel/crash.c
11272 -@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
11273 +@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
11274 {
11275 #ifdef CONFIG_X86_32
11276 struct pt_regs fixed_regs;
11277 @@ -16305,7 +17550,7 @@ index 9b9f18b..9fcaa04 100644
11278 #include <asm/processor.h>
11279 #include <asm/fcntl.h>
11280 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
11281 -index cf8639b..6c6a674 100644
11282 +index 6ed91d9..6cc365b 100644
11283 --- a/arch/x86/kernel/entry_32.S
11284 +++ b/arch/x86/kernel/entry_32.S
11285 @@ -177,13 +177,153 @@
11286 @@ -16779,7 +18024,7 @@ index cf8639b..6c6a674 100644
11287 CFI_ENDPROC
11288 /*
11289 * End of kprobes section
11290 -@@ -772,8 +1004,15 @@ ENDPROC(ptregs_clone)
11291 +@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
11292 * normal stack and adjusts ESP with the matching offset.
11293 */
11294 /* fixup the stack */
11295 @@ -16797,7 +18042,7 @@ index cf8639b..6c6a674 100644
11296 shl $16, %eax
11297 addl %esp, %eax /* the adjusted stack pointer */
11298 pushl_cfi $__KERNEL_DS
11299 -@@ -826,7 +1065,7 @@ vector=vector+1
11300 +@@ -807,7 +1046,7 @@ vector=vector+1
11301 .endr
11302 2: jmp common_interrupt
11303 .endr
11304 @@ -16806,7 +18051,7 @@ index cf8639b..6c6a674 100644
11305
11306 .previous
11307 END(interrupt)
11308 -@@ -877,7 +1116,7 @@ ENTRY(coprocessor_error)
11309 +@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
11310 pushl_cfi $do_coprocessor_error
11311 jmp error_code
11312 CFI_ENDPROC
11313 @@ -16815,7 +18060,7 @@ index cf8639b..6c6a674 100644
11314
11315 ENTRY(simd_coprocessor_error)
11316 RING0_INT_FRAME
11317 -@@ -899,7 +1138,7 @@ ENTRY(simd_coprocessor_error)
11318 +@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
11319 #endif
11320 jmp error_code
11321 CFI_ENDPROC
11322 @@ -16824,7 +18069,7 @@ index cf8639b..6c6a674 100644
11323
11324 ENTRY(device_not_available)
11325 RING0_INT_FRAME
11326 -@@ -908,18 +1147,18 @@ ENTRY(device_not_available)
11327 +@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
11328 pushl_cfi $do_device_not_available
11329 jmp error_code
11330 CFI_ENDPROC
11331 @@ -16846,7 +18091,7 @@ index cf8639b..6c6a674 100644
11332 #endif
11333
11334 ENTRY(overflow)
11335 -@@ -929,7 +1168,7 @@ ENTRY(overflow)
11336 +@@ -910,7 +1149,7 @@ ENTRY(overflow)
11337 pushl_cfi $do_overflow
11338 jmp error_code
11339 CFI_ENDPROC
11340 @@ -16855,7 +18100,7 @@ index cf8639b..6c6a674 100644
11341
11342 ENTRY(bounds)
11343 RING0_INT_FRAME
11344 -@@ -938,7 +1177,7 @@ ENTRY(bounds)
11345 +@@ -919,7 +1158,7 @@ ENTRY(bounds)
11346 pushl_cfi $do_bounds
11347 jmp error_code
11348 CFI_ENDPROC
11349 @@ -16864,7 +18109,7 @@ index cf8639b..6c6a674 100644
11350
11351 ENTRY(invalid_op)
11352 RING0_INT_FRAME
11353 -@@ -947,7 +1186,7 @@ ENTRY(invalid_op)
11354 +@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
11355 pushl_cfi $do_invalid_op
11356 jmp error_code
11357 CFI_ENDPROC
11358 @@ -16873,7 +18118,7 @@ index cf8639b..6c6a674 100644
11359
11360 ENTRY(coprocessor_segment_overrun)
11361 RING0_INT_FRAME
11362 -@@ -956,7 +1195,7 @@ ENTRY(coprocessor_segment_overrun)
11363 +@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
11364 pushl_cfi $do_coprocessor_segment_overrun
11365 jmp error_code
11366 CFI_ENDPROC
11367 @@ -16882,7 +18127,7 @@ index cf8639b..6c6a674 100644
11368
11369 ENTRY(invalid_TSS)
11370 RING0_EC_FRAME
11371 -@@ -964,7 +1203,7 @@ ENTRY(invalid_TSS)
11372 +@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
11373 pushl_cfi $do_invalid_TSS
11374 jmp error_code
11375 CFI_ENDPROC
11376 @@ -16891,7 +18136,7 @@ index cf8639b..6c6a674 100644
11377
11378 ENTRY(segment_not_present)
11379 RING0_EC_FRAME
11380 -@@ -972,7 +1211,7 @@ ENTRY(segment_not_present)
11381 +@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
11382 pushl_cfi $do_segment_not_present
11383 jmp error_code
11384 CFI_ENDPROC
11385 @@ -16900,7 +18145,7 @@ index cf8639b..6c6a674 100644
11386
11387 ENTRY(stack_segment)
11388 RING0_EC_FRAME
11389 -@@ -980,7 +1219,7 @@ ENTRY(stack_segment)
11390 +@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
11391 pushl_cfi $do_stack_segment
11392 jmp error_code
11393 CFI_ENDPROC
11394 @@ -16909,7 +18154,7 @@ index cf8639b..6c6a674 100644
11395
11396 ENTRY(alignment_check)
11397 RING0_EC_FRAME
11398 -@@ -988,7 +1227,7 @@ ENTRY(alignment_check)
11399 +@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
11400 pushl_cfi $do_alignment_check
11401 jmp error_code
11402 CFI_ENDPROC
11403 @@ -16918,7 +18163,7 @@ index cf8639b..6c6a674 100644
11404
11405 ENTRY(divide_error)
11406 RING0_INT_FRAME
11407 -@@ -997,7 +1236,7 @@ ENTRY(divide_error)
11408 +@@ -978,7 +1217,7 @@ ENTRY(divide_error)
11409 pushl_cfi $do_divide_error
11410 jmp error_code
11411 CFI_ENDPROC
11412 @@ -16927,7 +18172,7 @@ index cf8639b..6c6a674 100644
11413
11414 #ifdef CONFIG_X86_MCE
11415 ENTRY(machine_check)
11416 -@@ -1007,7 +1246,7 @@ ENTRY(machine_check)
11417 +@@ -988,7 +1227,7 @@ ENTRY(machine_check)
11418 pushl_cfi machine_check_vector
11419 jmp error_code
11420 CFI_ENDPROC
11421 @@ -16936,7 +18181,7 @@ index cf8639b..6c6a674 100644
11422 #endif
11423
11424 ENTRY(spurious_interrupt_bug)
11425 -@@ -1017,7 +1256,7 @@ ENTRY(spurious_interrupt_bug)
11426 +@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
11427 pushl_cfi $do_spurious_interrupt_bug
11428 jmp error_code
11429 CFI_ENDPROC
11430 @@ -16945,7 +18190,7 @@ index cf8639b..6c6a674 100644
11431 /*
11432 * End of kprobes section
11433 */
11434 -@@ -1120,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
11435 +@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
11436
11437 ENTRY(mcount)
11438 ret
11439 @@ -16954,7 +18199,7 @@ index cf8639b..6c6a674 100644
11440
11441 ENTRY(ftrace_caller)
11442 cmpl $0, function_trace_stop
11443 -@@ -1153,7 +1392,7 @@ ftrace_graph_call:
11444 +@@ -1134,7 +1373,7 @@ ftrace_graph_call:
11445 .globl ftrace_stub
11446 ftrace_stub:
11447 ret
11448 @@ -16963,7 +18208,7 @@ index cf8639b..6c6a674 100644
11449
11450 ENTRY(ftrace_regs_caller)
11451 pushf /* push flags before compare (in cs location) */
11452 -@@ -1254,7 +1493,7 @@ trace:
11453 +@@ -1235,7 +1474,7 @@ trace:
11454 popl %ecx
11455 popl %eax
11456 jmp ftrace_stub
11457 @@ -16972,7 +18217,7 @@ index cf8639b..6c6a674 100644
11458 #endif /* CONFIG_DYNAMIC_FTRACE */
11459 #endif /* CONFIG_FUNCTION_TRACER */
11460
11461 -@@ -1272,7 +1511,7 @@ ENTRY(ftrace_graph_caller)
11462 +@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
11463 popl %ecx
11464 popl %eax
11465 ret
11466 @@ -16981,7 +18226,7 @@ index cf8639b..6c6a674 100644
11467
11468 .globl return_to_handler
11469 return_to_handler:
11470 -@@ -1328,15 +1567,18 @@ error_code:
11471 +@@ -1309,15 +1548,18 @@ error_code:
11472 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11473 REG_TO_PTGS %ecx
11474 SET_KERNEL_GS %ecx
11475 @@ -17002,7 +18247,7 @@ index cf8639b..6c6a674 100644
11476
11477 /*
11478 * Debug traps and NMI can happen at the one SYSENTER instruction
11479 -@@ -1379,7 +1621,7 @@ debug_stack_correct:
11480 +@@ -1360,7 +1602,7 @@ debug_stack_correct:
11481 call do_debug
11482 jmp ret_from_exception
11483 CFI_ENDPROC
11484 @@ -17011,7 +18256,7 @@ index cf8639b..6c6a674 100644
11485
11486 /*
11487 * NMI is doubly nasty. It can happen _while_ we're handling
11488 -@@ -1417,6 +1659,9 @@ nmi_stack_correct:
11489 +@@ -1398,6 +1640,9 @@ nmi_stack_correct:
11490 xorl %edx,%edx # zero error code
11491 movl %esp,%eax # pt_regs pointer
11492 call do_nmi
11493 @@ -17021,7 +18266,7 @@ index cf8639b..6c6a674 100644
11494 jmp restore_all_notrace
11495 CFI_ENDPROC
11496
11497 -@@ -1453,12 +1698,15 @@ nmi_espfix_stack:
11498 +@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
11499 FIXUP_ESPFIX_STACK # %eax == %esp
11500 xorl %edx,%edx # zero error code
11501 call do_nmi
11502 @@ -17038,7 +18283,7 @@ index cf8639b..6c6a674 100644
11503
11504 ENTRY(int3)
11505 RING0_INT_FRAME
11506 -@@ -1471,14 +1719,14 @@ ENTRY(int3)
11507 +@@ -1452,14 +1700,14 @@ ENTRY(int3)
11508 call do_int3
11509 jmp ret_from_exception
11510 CFI_ENDPROC
11511 @@ -17055,7 +18300,7 @@ index cf8639b..6c6a674 100644
11512
11513 #ifdef CONFIG_KVM_GUEST
11514 ENTRY(async_page_fault)
11515 -@@ -1487,7 +1735,7 @@ ENTRY(async_page_fault)
11516 +@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
11517 pushl_cfi $do_async_page_fault
11518 jmp error_code
11519 CFI_ENDPROC
11520 @@ -17065,11 +18310,11 @@ index cf8639b..6c6a674 100644
11521
11522 /*
11523 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
11524 -index 1328fe4..cb03298 100644
11525 +index cb3c591..bc63707 100644
11526 --- a/arch/x86/kernel/entry_64.S
11527 +++ b/arch/x86/kernel/entry_64.S
11528 @@ -59,6 +59,8 @@
11529 - #include <asm/rcu.h>
11530 + #include <asm/context_tracking.h>
11531 #include <asm/smap.h>
11532 #include <linux/err.h>
11533 +#include <asm/pgtable.h>
11534 @@ -17631,8 +18876,20 @@ index 1328fe4..cb03298 100644
11535 +ENDPROC(\label)
11536 .endm
11537
11538 - PTREGSCALL stub_clone, sys_clone, %r8
11539 -@@ -860,9 +1158,10 @@ ENTRY(ptregscall_common)
11540 + .macro FORK_LIKE func
11541 +@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
11542 + DEFAULT_FRAME 0 8 /* offset 8: return address */
11543 + call sys_\func
11544 + RESTORE_TOP_OF_STACK %r11, 8
11545 ++ pax_force_retaddr
11546 + ret $REST_SKIP /* pop extended registers */
11547 + CFI_ENDPROC
11548 +-END(stub_\func)
11549 ++ENDPROC(stub_\func)
11550 + .endm
11551 +
11552 + FORK_LIKE clone
11553 +@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
11554 movq_cfi_restore R12+8, r12
11555 movq_cfi_restore RBP+8, rbp
11556 movq_cfi_restore RBX+8, rbx
11557 @@ -17644,7 +18901,7 @@ index 1328fe4..cb03298 100644
11558
11559 ENTRY(stub_execve)
11560 CFI_STARTPROC
11561 -@@ -876,7 +1175,7 @@ ENTRY(stub_execve)
11562 +@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
11563 RESTORE_REST
11564 jmp int_ret_from_sys_call
11565 CFI_ENDPROC
11566 @@ -17653,7 +18910,7 @@ index 1328fe4..cb03298 100644
11567
11568 /*
11569 * sigreturn is special because it needs to restore all registers on return.
11570 -@@ -894,7 +1193,7 @@ ENTRY(stub_rt_sigreturn)
11571 +@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
11572 RESTORE_REST
11573 jmp int_ret_from_sys_call
11574 CFI_ENDPROC
11575 @@ -17661,8 +18918,8 @@ index 1328fe4..cb03298 100644
11576 +ENDPROC(stub_rt_sigreturn)
11577
11578 #ifdef CONFIG_X86_X32_ABI
11579 - PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
11580 -@@ -962,7 +1261,7 @@ vector=vector+1
11581 + ENTRY(stub_x32_rt_sigreturn)
11582 +@@ -975,7 +1275,7 @@ vector=vector+1
11583 2: jmp common_interrupt
11584 .endr
11585 CFI_ENDPROC
11586 @@ -17671,7 +18928,7 @@ index 1328fe4..cb03298 100644
11587
11588 .previous
11589 END(interrupt)
11590 -@@ -982,6 +1281,16 @@ END(interrupt)
11591 +@@ -995,6 +1295,16 @@ END(interrupt)
11592 subq $ORIG_RAX-RBP, %rsp
11593 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
11594 SAVE_ARGS_IRQ
11595 @@ -17688,7 +18945,7 @@ index 1328fe4..cb03298 100644
11596 call \func
11597 .endm
11598
11599 -@@ -1014,7 +1323,7 @@ ret_from_intr:
11600 +@@ -1027,7 +1337,7 @@ ret_from_intr:
11601
11602 exit_intr:
11603 GET_THREAD_INFO(%rcx)
11604 @@ -17697,7 +18954,7 @@ index 1328fe4..cb03298 100644
11605 je retint_kernel
11606
11607 /* Interrupt came from user space */
11608 -@@ -1036,12 +1345,16 @@ retint_swapgs: /* return to user-space */
11609 +@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
11610 * The iretq could re-enable interrupts:
11611 */
11612 DISABLE_INTERRUPTS(CLBR_ANY)
11613 @@ -17714,7 +18971,7 @@ index 1328fe4..cb03298 100644
11614 /*
11615 * The iretq could re-enable interrupts:
11616 */
11617 -@@ -1124,7 +1437,7 @@ ENTRY(retint_kernel)
11618 +@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
11619 #endif
11620
11621 CFI_ENDPROC
11622 @@ -17723,7 +18980,7 @@ index 1328fe4..cb03298 100644
11623 /*
11624 * End of kprobes section
11625 */
11626 -@@ -1142,7 +1455,7 @@ ENTRY(\sym)
11627 +@@ -1155,7 +1469,7 @@ ENTRY(\sym)
11628 interrupt \do_sym
11629 jmp ret_from_intr
11630 CFI_ENDPROC
11631 @@ -17732,7 +18989,7 @@ index 1328fe4..cb03298 100644
11632 .endm
11633
11634 #ifdef CONFIG_SMP
11635 -@@ -1198,12 +1511,22 @@ ENTRY(\sym)
11636 +@@ -1211,12 +1525,22 @@ ENTRY(\sym)
11637 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
11638 call error_entry
11639 DEFAULT_FRAME 0
11640 @@ -17756,7 +19013,7 @@ index 1328fe4..cb03298 100644
11641 .endm
11642
11643 .macro paranoidzeroentry sym do_sym
11644 -@@ -1216,15 +1539,25 @@ ENTRY(\sym)
11645 +@@ -1229,15 +1553,25 @@ ENTRY(\sym)
11646 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
11647 call save_paranoid
11648 TRACE_IRQS_OFF
11649 @@ -17784,7 +19041,7 @@ index 1328fe4..cb03298 100644
11650 .macro paranoidzeroentry_ist sym do_sym ist
11651 ENTRY(\sym)
11652 INTR_FRAME
11653 -@@ -1235,14 +1568,30 @@ ENTRY(\sym)
11654 +@@ -1248,14 +1582,30 @@ ENTRY(\sym)
11655 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
11656 call save_paranoid
11657 TRACE_IRQS_OFF_DEBUG
11658 @@ -17816,7 +19073,7 @@ index 1328fe4..cb03298 100644
11659 .endm
11660
11661 .macro errorentry sym do_sym
11662 -@@ -1254,13 +1603,23 @@ ENTRY(\sym)
11663 +@@ -1267,13 +1617,23 @@ ENTRY(\sym)
11664 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
11665 call error_entry
11666 DEFAULT_FRAME 0
11667 @@ -17841,7 +19098,7 @@ index 1328fe4..cb03298 100644
11668 .endm
11669
11670 /* error code is on the stack already */
11671 -@@ -1274,13 +1633,23 @@ ENTRY(\sym)
11672 +@@ -1287,13 +1647,23 @@ ENTRY(\sym)
11673 call save_paranoid
11674 DEFAULT_FRAME 0
11675 TRACE_IRQS_OFF
11676 @@ -17866,7 +19123,7 @@ index 1328fe4..cb03298 100644
11677 .endm
11678
11679 zeroentry divide_error do_divide_error
11680 -@@ -1310,9 +1679,10 @@ gs_change:
11681 +@@ -1323,9 +1693,10 @@ gs_change:
11682 2: mfence /* workaround */
11683 SWAPGS
11684 popfq_cfi
11685 @@ -17878,7 +19135,7 @@ index 1328fe4..cb03298 100644
11686
11687 _ASM_EXTABLE(gs_change,bad_gs)
11688 .section .fixup,"ax"
11689 -@@ -1340,9 +1710,10 @@ ENTRY(call_softirq)
11690 +@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
11691 CFI_DEF_CFA_REGISTER rsp
11692 CFI_ADJUST_CFA_OFFSET -8
11693 decl PER_CPU_VAR(irq_count)
11694 @@ -17890,7 +19147,7 @@ index 1328fe4..cb03298 100644
11695
11696 #ifdef CONFIG_XEN
11697 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
11698 -@@ -1380,7 +1751,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
11699 +@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
11700 decl PER_CPU_VAR(irq_count)
11701 jmp error_exit
11702 CFI_ENDPROC
11703 @@ -17899,7 +19156,7 @@ index 1328fe4..cb03298 100644
11704
11705 /*
11706 * Hypervisor uses this for application faults while it executes.
11707 -@@ -1439,7 +1810,7 @@ ENTRY(xen_failsafe_callback)
11708 +@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
11709 SAVE_ALL
11710 jmp error_exit
11711 CFI_ENDPROC
11712 @@ -17908,7 +19165,7 @@ index 1328fe4..cb03298 100644
11713
11714 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
11715 xen_hvm_callback_vector xen_evtchn_do_upcall
11716 -@@ -1488,16 +1859,31 @@ ENTRY(paranoid_exit)
11717 +@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
11718 TRACE_IRQS_OFF_DEBUG
11719 testl %ebx,%ebx /* swapgs needed? */
11720 jnz paranoid_restore
11721 @@ -17941,7 +19198,7 @@ index 1328fe4..cb03298 100644
11722 jmp irq_return
11723 paranoid_userspace:
11724 GET_THREAD_INFO(%rcx)
11725 -@@ -1526,7 +1912,7 @@ paranoid_schedule:
11726 +@@ -1539,7 +1926,7 @@ paranoid_schedule:
11727 TRACE_IRQS_OFF
11728 jmp paranoid_userspace
11729 CFI_ENDPROC
11730 @@ -17950,7 +19207,7 @@ index 1328fe4..cb03298 100644
11731
11732 /*
11733 * Exception entry point. This expects an error code/orig_rax on the stack.
11734 -@@ -1553,12 +1939,13 @@ ENTRY(error_entry)
11735 +@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
11736 movq_cfi r14, R14+8
11737 movq_cfi r15, R15+8
11738 xorl %ebx,%ebx
11739 @@ -17965,7 +19222,7 @@ index 1328fe4..cb03298 100644
11740 ret
11741
11742 /*
11743 -@@ -1585,7 +1972,7 @@ bstep_iret:
11744 +@@ -1598,7 +1986,7 @@ bstep_iret:
11745 movq %rcx,RIP+8(%rsp)
11746 jmp error_swapgs
11747 CFI_ENDPROC
11748 @@ -17974,7 +19231,7 @@ index 1328fe4..cb03298 100644
11749
11750
11751 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
11752 -@@ -1605,7 +1992,7 @@ ENTRY(error_exit)
11753 +@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
11754 jnz retint_careful
11755 jmp retint_swapgs
11756 CFI_ENDPROC
11757 @@ -17983,7 +19240,7 @@ index 1328fe4..cb03298 100644
11758
11759 /*
11760 * Test if a given stack is an NMI stack or not.
11761 -@@ -1663,9 +2050,11 @@ ENTRY(nmi)
11762 +@@ -1676,9 +2064,11 @@ ENTRY(nmi)
11763 * If %cs was not the kernel segment, then the NMI triggered in user
11764 * space, which means it is definitely not nested.
11765 */
11766 @@ -17996,7 +19253,7 @@ index 1328fe4..cb03298 100644
11767 /*
11768 * Check the special variable on the stack to see if NMIs are
11769 * executing.
11770 -@@ -1824,6 +2213,17 @@ end_repeat_nmi:
11771 +@@ -1847,6 +2237,17 @@ end_repeat_nmi:
11772 */
11773 movq %cr2, %r12
11774
11775 @@ -18014,7 +19271,7 @@ index 1328fe4..cb03298 100644
11776 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
11777 movq %rsp,%rdi
11778 movq $-1,%rsi
11779 -@@ -1839,21 +2239,32 @@ end_repeat_nmi:
11780 +@@ -1862,23 +2263,34 @@ end_repeat_nmi:
11781 testl %ebx,%ebx /* swapgs needed? */
11782 jnz nmi_restore
11783 nmi_swapgs:
11784 @@ -18024,16 +19281,18 @@ index 1328fe4..cb03298 100644
11785 + pax_exit_kernel
11786 +#endif
11787 SWAPGS_UNSAFE_STACK
11788 -+ RESTORE_ALL 8
11789 ++ RESTORE_ALL 6*8
11790 + /* Clear the NMI executing stack variable */
11791 -+ movq $0, 10*8(%rsp)
11792 ++ movq $0, 5*8(%rsp)
11793 + jmp irq_return
11794 nmi_restore:
11795 + pax_exit_kernel
11796 - RESTORE_ALL 8
11797 + /* Pop the extra iret frame at once */
11798 + RESTORE_ALL 6*8
11799 + pax_force_retaddr_bts
11800 +
11801 /* Clear the NMI executing stack variable */
11802 - movq $0, 10*8(%rsp)
11803 + movq $0, 5*8(%rsp)
11804 jmp irq_return
11805 CFI_ENDPROC
11806 -END(nmi)
11807 @@ -18139,7 +19398,7 @@ index c18f59d..9c0c9f6 100644
11808 #ifdef CONFIG_BLK_DEV_INITRD
11809 /* Reserve INITRD */
11810 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
11811 -index 4dac2f6..bc6a335 100644
11812 +index c8932c7..d56b622 100644
11813 --- a/arch/x86/kernel/head_32.S
11814 +++ b/arch/x86/kernel/head_32.S
11815 @@ -26,6 +26,12 @@
11816 @@ -18309,7 +19568,7 @@ index 4dac2f6..bc6a335 100644
11817 num_subarch_entries = (. - subarch_entries) / 4
11818 .previous
11819 #else
11820 -@@ -316,6 +388,7 @@ default_entry:
11821 +@@ -335,6 +407,7 @@ default_entry:
11822 movl pa(mmu_cr4_features),%eax
11823 movl %eax,%cr4
11824
11825 @@ -18317,7 +19576,7 @@ index 4dac2f6..bc6a335 100644
11826 testb $X86_CR4_PAE, %al # check if PAE is enabled
11827 jz 6f
11828
11829 -@@ -344,6 +417,9 @@ default_entry:
11830 +@@ -363,6 +436,9 @@ default_entry:
11831 /* Make changes effective */
11832 wrmsr
11833
11834 @@ -18327,7 +19586,7 @@ index 4dac2f6..bc6a335 100644
11835 6:
11836
11837 /*
11838 -@@ -442,14 +518,20 @@ is386: movl $2,%ecx # set MP
11839 +@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
11840 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
11841 movl %eax,%ss # after changing gdt.
11842
11843 @@ -18349,7 +19608,7 @@ index 4dac2f6..bc6a335 100644
11844 movl %eax,%gs
11845
11846 xorl %eax,%eax # Clear LDT
11847 -@@ -526,8 +608,11 @@ setup_once:
11848 +@@ -544,8 +626,11 @@ setup_once:
11849 * relocation. Manually set base address in stack canary
11850 * segment descriptor.
11851 */
11852 @@ -18362,7 +19621,7 @@ index 4dac2f6..bc6a335 100644
11853 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
11854 shrl $16, %ecx
11855 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
11856 -@@ -558,7 +643,7 @@ ENDPROC(early_idt_handlers)
11857 +@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
11858 /* This is global to keep gas from relaxing the jumps */
11859 ENTRY(early_idt_handler)
11860 cld
11861 @@ -18371,7 +19630,7 @@ index 4dac2f6..bc6a335 100644
11862 je hlt_loop
11863 incl %ss:early_recursion_flag
11864
11865 -@@ -596,8 +681,8 @@ ENTRY(early_idt_handler)
11866 +@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
11867 pushl (20+6*4)(%esp) /* trapno */
11868 pushl $fault_msg
11869 call printk
11870 @@ -18381,7 +19640,7 @@ index 4dac2f6..bc6a335 100644
11871 hlt_loop:
11872 hlt
11873 jmp hlt_loop
11874 -@@ -616,8 +701,11 @@ ENDPROC(early_idt_handler)
11875 +@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
11876 /* This is the default interrupt "handler" :-) */
11877 ALIGN
11878 ignore_int:
11879 @@ -18394,7 +19653,7 @@ index 4dac2f6..bc6a335 100644
11880 pushl %eax
11881 pushl %ecx
11882 pushl %edx
11883 -@@ -626,9 +714,6 @@ ignore_int:
11884 +@@ -644,9 +732,6 @@ ignore_int:
11885 movl $(__KERNEL_DS),%eax
11886 movl %eax,%ds
11887 movl %eax,%es
11888 @@ -18404,7 +19663,7 @@ index 4dac2f6..bc6a335 100644
11889 pushl 16(%esp)
11890 pushl 24(%esp)
11891 pushl 32(%esp)
11892 -@@ -662,29 +747,43 @@ ENTRY(setup_once_ref)
11893 +@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
11894 /*
11895 * BSS section
11896 */
11897 @@ -18453,7 +19712,7 @@ index 4dac2f6..bc6a335 100644
11898 ENTRY(initial_page_table)
11899 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
11900 # if KPMDS == 3
11901 -@@ -703,12 +802,20 @@ ENTRY(initial_page_table)
11902 +@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
11903 # error "Kernel PMDs should be 1, 2 or 3"
11904 # endif
11905 .align PAGE_SIZE /* needs to be page-sized too */
11906 @@ -18475,7 +19734,7 @@ index 4dac2f6..bc6a335 100644
11907
11908 __INITRODATA
11909 int_msg:
11910 -@@ -736,7 +843,7 @@ fault_msg:
11911 +@@ -754,7 +861,7 @@ fault_msg:
11912 * segment size, and 32-bit linear address value:
11913 */
11914
11915 @@ -18484,7 +19743,7 @@ index 4dac2f6..bc6a335 100644
11916 .globl boot_gdt_descr
11917 .globl idt_descr
11918
11919 -@@ -745,7 +852,7 @@ fault_msg:
11920 +@@ -763,7 +870,7 @@ fault_msg:
11921 .word 0 # 32 bit align gdt_desc.address
11922 boot_gdt_descr:
11923 .word __BOOT_DS+7
11924 @@ -18493,7 +19752,7 @@ index 4dac2f6..bc6a335 100644
11925
11926 .word 0 # 32-bit align idt_desc.address
11927 idt_descr:
11928 -@@ -756,7 +863,7 @@ idt_descr:
11929 +@@ -774,7 +881,7 @@ idt_descr:
11930 .word 0 # 32 bit align gdt_desc.address
11931 ENTRY(early_gdt_descr)
11932 .word GDT_ENTRIES*8-1
11933 @@ -18502,7 +19761,7 @@ index 4dac2f6..bc6a335 100644
11934
11935 /*
11936 * The boot_gdt must mirror the equivalent in setup.S and is
11937 -@@ -765,5 +872,65 @@ ENTRY(early_gdt_descr)
11938 +@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
11939 .align L1_CACHE_BYTES
11940 ENTRY(boot_gdt)
11941 .fill GDT_ENTRY_BOOT_CS,8,0
11942 @@ -18571,7 +19830,7 @@ index 4dac2f6..bc6a335 100644
11943 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
11944 + .endr
11945 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
11946 -index 94bf9cc..400455a 100644
11947 +index 980053c..74d3b44 100644
11948 --- a/arch/x86/kernel/head_64.S
11949 +++ b/arch/x86/kernel/head_64.S
11950 @@ -20,6 +20,8 @@
11951 @@ -18681,7 +19940,7 @@ index 94bf9cc..400455a 100644
11952 movq initial_code(%rip),%rax
11953 pushq $0 # fake return address to stop unwinder
11954 pushq $__KERNEL_CS # set correct cs
11955 -@@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
11956 +@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
11957 bad_address:
11958 jmp bad_address
11959
11960 @@ -18690,7 +19949,16 @@ index 94bf9cc..400455a 100644
11961 .globl early_idt_handlers
11962 early_idt_handlers:
11963 # 104(%rsp) %rflags
11964 -@@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
11965 +@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
11966 + call dump_stack
11967 + #ifdef CONFIG_KALLSYMS
11968 + leaq early_idt_ripmsg(%rip),%rdi
11969 +- movq 40(%rsp),%rsi # %rip again
11970 ++ movq 88(%rsp),%rsi # %rip again
11971 + call __print_symbol
11972 + #endif
11973 + #endif /* EARLY_PRINTK */
11974 +@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
11975 addq $16,%rsp # drop vector number and error code
11976 decl early_recursion_flag(%rip)
11977 INTERRUPT_RETURN
11978 @@ -18706,7 +19974,7 @@ index 94bf9cc..400455a 100644
11979 #ifdef CONFIG_EARLY_PRINTK
11980 early_idt_msg:
11981 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
11982 -@@ -360,6 +369,7 @@ early_idt_ripmsg:
11983 +@@ -376,6 +385,7 @@ early_idt_ripmsg:
11984 #endif /* CONFIG_EARLY_PRINTK */
11985 .previous
11986
11987 @@ -18714,7 +19982,7 @@ index 94bf9cc..400455a 100644
11988 #define NEXT_PAGE(name) \
11989 .balign PAGE_SIZE; \
11990 ENTRY(name)
11991 -@@ -372,7 +382,6 @@ ENTRY(name)
11992 +@@ -388,7 +398,6 @@ ENTRY(name)
11993 i = i + 1 ; \
11994 .endr
11995
11996 @@ -18722,7 +19990,7 @@ index 94bf9cc..400455a 100644
11997 /*
11998 * This default setting generates an ident mapping at address 0x100000
11999 * and a mapping for the kernel that precisely maps virtual address
12000 -@@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
12001 +@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
12002 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12003 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12004 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12005 @@ -18764,7 +20032,7 @@ index 94bf9cc..400455a 100644
12006
12007 NEXT_PAGE(level3_kernel_pgt)
12008 .fill L3_START_KERNEL,8,0
12009 -@@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
12010 +@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
12011 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
12012 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
12013
12014 @@ -18796,7 +20064,7 @@ index 94bf9cc..400455a 100644
12015
12016 NEXT_PAGE(level2_kernel_pgt)
12017 /*
12018 -@@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
12019 +@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
12020 * If you want to increase this then increase MODULES_VADDR
12021 * too.)
12022 */
12023 @@ -18893,7 +20161,7 @@ index 9c3bd4a..e1d9b35 100644
12024 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
12025 +#endif
12026 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
12027 -index 675a050..95febfd 100644
12028 +index 245a71d..89d9ce4 100644
12029 --- a/arch/x86/kernel/i387.c
12030 +++ b/arch/x86/kernel/i387.c
12031 @@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
12032 @@ -19440,6 +20708,19 @@ index 57916c0..9e0b9d0 100644
12033 return ret;
12034
12035 switch (val) {
12036 +diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
12037 +index 9c2bd8b..bb1131c 100644
12038 +--- a/arch/x86/kernel/kvm.c
12039 ++++ b/arch/x86/kernel/kvm.c
12040 +@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
12041 + return NOTIFY_OK;
12042 + }
12043 +
12044 +-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
12045 ++static struct notifier_block kvm_cpu_notifier = {
12046 + .notifier_call = kvm_cpu_notify,
12047 + };
12048 + #endif
12049 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
12050 index ebc9873..1b9724b 100644
12051 --- a/arch/x86/kernel/ldt.c
12052 @@ -19539,6 +20820,19 @@ index 5b19e4d..6476a76 100644
12053
12054 relocate_kernel_ptr = control_page;
12055 page_list[PA_CONTROL_PAGE] = __pa(control_page);
12056 +diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
12057 +index 3a04b22..1d2eb09 100644
12058 +--- a/arch/x86/kernel/microcode_core.c
12059 ++++ b/arch/x86/kernel/microcode_core.c
12060 +@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
12061 + return NOTIFY_OK;
12062 + }
12063 +
12064 +-static struct notifier_block __refdata mc_cpu_notifier = {
12065 ++static struct notifier_block mc_cpu_notifier = {
12066 + .notifier_call = mc_cpu_callback,
12067 + };
12068 +
12069 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
12070 index 3544aed..01ddc1c 100644
12071 --- a/arch/x86/kernel/microcode_intel.c
12072 @@ -19696,6 +20990,19 @@ index 216a4d7..228255a 100644
12073 #if 0
12074 if ((s64)val != *(s32 *)loc)
12075 goto overflow;
12076 +diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
12077 +index 4929502..686c291 100644
12078 +--- a/arch/x86/kernel/msr.c
12079 ++++ b/arch/x86/kernel/msr.c
12080 +@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
12081 + return notifier_from_errno(err);
12082 + }
12083 +
12084 +-static struct notifier_block __refdata msr_class_cpu_notifier = {
12085 ++static struct notifier_block msr_class_cpu_notifier = {
12086 + .notifier_call = msr_class_cpu_callback,
12087 + };
12088 +
12089 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
12090 index f84f5c5..e27e54b 100644
12091 --- a/arch/x86/kernel/nmi.c
12092 @@ -19880,7 +21187,7 @@ index 35ccf75..7a15747 100644
12093 #define DEBUG 1
12094
12095 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
12096 -index b644e1c..4a6d379 100644
12097 +index 2ed787f..f70c9f6 100644
12098 --- a/arch/x86/kernel/process.c
12099 +++ b/arch/x86/kernel/process.c
12100 @@ -36,7 +36,8 @@
12101 @@ -19930,7 +21237,7 @@ index b644e1c..4a6d379 100644
12102 flush_ptrace_hw_breakpoint(tsk);
12103 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
12104 drop_init_fpu(tsk);
12105 -@@ -336,7 +340,7 @@ static void __exit_idle(void)
12106 +@@ -301,7 +305,7 @@ static void __exit_idle(void)
12107 void exit_idle(void)
12108 {
12109 /* idle loop has pid 0 */
12110 @@ -19939,7 +21246,7 @@ index b644e1c..4a6d379 100644
12111 return;
12112 __exit_idle();
12113 }
12114 -@@ -445,7 +449,7 @@ bool set_pm_idle_to_default(void)
12115 +@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
12116
12117 return ret;
12118 }
12119 @@ -19948,7 +21255,7 @@ index b644e1c..4a6d379 100644
12120 {
12121 local_irq_disable();
12122 /*
12123 -@@ -673,16 +677,37 @@ static int __init idle_setup(char *str)
12124 +@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
12125 }
12126 early_param("idle", idle_setup);
12127
12128 @@ -19997,7 +21304,7 @@ index b644e1c..4a6d379 100644
12129 +}
12130 +#endif
12131 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
12132 -index 44e0bff..5ceb99c 100644
12133 +index b5a8905..d9cacac 100644
12134 --- a/arch/x86/kernel/process_32.c
12135 +++ b/arch/x86/kernel/process_32.c
12136 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
12137 @@ -20033,9 +21340,9 @@ index 44e0bff..5ceb99c 100644
12138 print_symbol("EIP is at %s\n", regs->ip);
12139
12140 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
12141 -@@ -131,20 +131,21 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
12142 - unsigned long arg,
12143 - struct task_struct *p, struct pt_regs *regs)
12144 +@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
12145 + int copy_thread(unsigned long clone_flags, unsigned long sp,
12146 + unsigned long arg, struct task_struct *p)
12147 {
12148 - struct pt_regs *childregs = task_pt_regs(p);
12149 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
12150 @@ -20046,7 +21353,7 @@ index 44e0bff..5ceb99c 100644
12151 p->thread.sp0 = (unsigned long) (childregs+1);
12152 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
12153
12154 - if (unlikely(!regs)) {
12155 + if (unlikely(p->flags & PF_KTHREAD)) {
12156 /* kernel thread */
12157 memset(childregs, 0, sizeof(struct pt_regs));
12158 p->thread.ip = (unsigned long) ret_from_kernel_thread;
12159 @@ -20104,10 +21411,10 @@ index 44e0bff..5ceb99c 100644
12160 }
12161 -
12162 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
12163 -index 16c6365..5d32218 100644
12164 +index 6e68a61..955a9a5 100644
12165 --- a/arch/x86/kernel/process_64.c
12166 +++ b/arch/x86/kernel/process_64.c
12167 -@@ -153,10 +153,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
12168 +@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
12169 struct pt_regs *childregs;
12170 struct task_struct *me = current;
12171
12172 @@ -20158,10 +21465,10 @@ index 16c6365..5d32218 100644
12173 ip = *(u64 *)(fp+8);
12174 if (!in_sched_functions(ip))
12175 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
12176 -index 974b67e..53bdb6c 100644
12177 +index b629bbe..0fa615a 100644
12178 --- a/arch/x86/kernel/ptrace.c
12179 +++ b/arch/x86/kernel/ptrace.c
12180 -@@ -183,14 +183,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
12181 +@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
12182 {
12183 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
12184 unsigned long sp = (unsigned long)&regs->sp;
12185 @@ -20180,7 +21487,7 @@ index 974b67e..53bdb6c 100644
12186
12187 return (unsigned long)regs;
12188 }
12189 -@@ -587,7 +586,7 @@ static void ptrace_triggered(struct perf_event *bp,
12190 +@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
12191 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
12192 {
12193 int i;
12194 @@ -20189,7 +21496,7 @@ index 974b67e..53bdb6c 100644
12195 struct arch_hw_breakpoint *info;
12196
12197 for (i = 0; i < HBP_NUM; i++) {
12198 -@@ -855,7 +854,7 @@ long arch_ptrace(struct task_struct *child, long request,
12199 +@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
12200 unsigned long addr, unsigned long data)
12201 {
12202 int ret;
12203 @@ -20198,7 +21505,7 @@ index 974b67e..53bdb6c 100644
12204
12205 switch (request) {
12206 /* read the word at location addr in the USER area. */
12207 -@@ -940,14 +939,14 @@ long arch_ptrace(struct task_struct *child, long request,
12208 +@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
12209 if ((int) addr < 0)
12210 return -EIO;
12211 ret = do_get_thread_area(child, addr,
12212 @@ -20215,7 +21522,7 @@ index 974b67e..53bdb6c 100644
12213 break;
12214 #endif
12215
12216 -@@ -1325,7 +1324,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
12217 +@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
12218
12219 #ifdef CONFIG_X86_64
12220
12221 @@ -20224,7 +21531,7 @@ index 974b67e..53bdb6c 100644
12222 [REGSET_GENERAL] = {
12223 .core_note_type = NT_PRSTATUS,
12224 .n = sizeof(struct user_regs_struct) / sizeof(long),
12225 -@@ -1366,7 +1365,7 @@ static const struct user_regset_view user_x86_64_view = {
12226 +@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
12227 #endif /* CONFIG_X86_64 */
12228
12229 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
12230 @@ -20233,7 +21540,7 @@ index 974b67e..53bdb6c 100644
12231 [REGSET_GENERAL] = {
12232 .core_note_type = NT_PRSTATUS,
12233 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
12234 -@@ -1419,7 +1418,7 @@ static const struct user_regset_view user_x86_32_view = {
12235 +@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
12236 */
12237 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
12238
12239 @@ -20242,7 +21549,7 @@ index 974b67e..53bdb6c 100644
12240 {
12241 #ifdef CONFIG_X86_64
12242 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
12243 -@@ -1454,7 +1453,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
12244 +@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
12245 memset(info, 0, sizeof(*info));
12246 info->si_signo = SIGTRAP;
12247 info->si_code = si_code;
12248 @@ -20251,7 +21558,7 @@ index 974b67e..53bdb6c 100644
12249 }
12250
12251 void user_single_step_siginfo(struct task_struct *tsk,
12252 -@@ -1483,6 +1482,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
12253 +@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
12254 # define IS_IA32 0
12255 #endif
12256
12257 @@ -20262,9 +21569,9 @@ index 974b67e..53bdb6c 100644
12258 /*
12259 * We must return the syscall number to actually look up in the table.
12260 * This can be -1L to skip running any syscall at all.
12261 -@@ -1493,6 +1496,11 @@ long syscall_trace_enter(struct pt_regs *regs)
12262 +@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
12263
12264 - rcu_user_exit();
12265 + user_exit();
12266
12267 +#ifdef CONFIG_GRKERNSEC_SETXID
12268 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
12269 @@ -20274,9 +21581,9 @@ index 974b67e..53bdb6c 100644
12270 /*
12271 * If we stepped into a sysenter/syscall insn, it trapped in
12272 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
12273 -@@ -1548,6 +1556,11 @@ void syscall_trace_leave(struct pt_regs *regs)
12274 +@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
12275 */
12276 - rcu_user_exit();
12277 + user_exit();
12278
12279 +#ifdef CONFIG_GRKERNSEC_SETXID
12280 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
12281 @@ -20287,10 +21594,10 @@ index 974b67e..53bdb6c 100644
12282
12283 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
12284 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
12285 -index 42eb330..139955c 100644
12286 +index 85c3959..76b89f9 100644
12287 --- a/arch/x86/kernel/pvclock.c
12288 +++ b/arch/x86/kernel/pvclock.c
12289 -@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
12290 +@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
12291 return pv_tsc_khz;
12292 }
12293
12294 @@ -20303,8 +21610,8 @@ index 42eb330..139955c 100644
12295 + atomic64_set_unchecked(&last_value, 0);
12296 }
12297
12298 - cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
12299 -@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
12300 + u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
12301 +@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
12302 * updating at the same time, and one of them could be slightly behind,
12303 * making the assumption that last_value always go forward fail to hold.
12304 */
12305 @@ -20458,10 +21765,10 @@ index 7a6f3b3..bed145d7 100644
12306
12307 1:
12308 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
12309 -index aeacb0e..f9d4c02 100644
12310 +index 8b24289..d37b58b 100644
12311 --- a/arch/x86/kernel/setup.c
12312 +++ b/arch/x86/kernel/setup.c
12313 -@@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
12314 +@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
12315
12316 switch (data->type) {
12317 case SETUP_E820_EXT:
12318 @@ -20470,7 +21777,7 @@ index aeacb0e..f9d4c02 100644
12319 break;
12320 case SETUP_DTB:
12321 add_dtb(pa_data);
12322 -@@ -710,7 +710,7 @@ static void __init trim_bios_range(void)
12323 +@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
12324 * area (640->1Mb) as ram even though it is not.
12325 * take them out.
12326 */
12327 @@ -20479,7 +21786,7 @@ index aeacb0e..f9d4c02 100644
12328
12329 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
12330 }
12331 -@@ -834,14 +834,14 @@ void __init setup_arch(char **cmdline_p)
12332 +@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
12333
12334 if (!boot_params.hdr.root_flags)
12335 root_mountflags &= ~MS_RDONLY;
12336 @@ -20567,10 +21874,10 @@ index 5cdff03..5810740 100644
12337 * Up to this point, the boot CPU has been using .init.data
12338 * area. Reload any changed state for the boot CPU.
12339 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
12340 -index 70b27ee..fcf827f 100644
12341 +index d6bf1f3..3ffce5a 100644
12342 --- a/arch/x86/kernel/signal.c
12343 +++ b/arch/x86/kernel/signal.c
12344 -@@ -195,7 +195,7 @@ static unsigned long align_sigframe(unsigned long sp)
12345 +@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
12346 * Align the stack pointer according to the i386 ABI,
12347 * i.e. so that on function entry ((sp + 4) & 15) == 0.
12348 */
12349 @@ -20579,7 +21886,7 @@ index 70b27ee..fcf827f 100644
12350 #else /* !CONFIG_X86_32 */
12351 sp = round_down(sp, 16) - 8;
12352 #endif
12353 -@@ -303,9 +303,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
12354 +@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
12355 }
12356
12357 if (current->mm->context.vdso)
12358 @@ -20591,7 +21898,7 @@ index 70b27ee..fcf827f 100644
12359 if (ka->sa.sa_flags & SA_RESTORER)
12360 restorer = ka->sa.sa_restorer;
12361
12362 -@@ -319,7 +319,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
12363 +@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
12364 * reasons and because gdb uses it as a signature to notice
12365 * signal handler stack frames.
12366 */
12367 @@ -20600,8 +21907,8 @@ index 70b27ee..fcf827f 100644
12368
12369 if (err)
12370 return -EFAULT;
12371 -@@ -369,7 +369,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12372 - put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
12373 +@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12374 + err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
12375
12376 /* Set up to return from userspace. */
12377 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
12378 @@ -20612,7 +21919,7 @@ index 70b27ee..fcf827f 100644
12379 if (ka->sa.sa_flags & SA_RESTORER)
12380 restorer = ka->sa.sa_restorer;
12381 put_user_ex(restorer, &frame->pretcode);
12382 -@@ -381,7 +384,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12383 +@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
12384 * reasons and because gdb uses it as a signature to notice
12385 * signal handler stack frames.
12386 */
12387 @@ -20635,10 +21942,10 @@ index 48d2b7d..90d328a 100644
12388 .smp_prepare_cpus = native_smp_prepare_cpus,
12389 .smp_cpus_done = native_smp_cpus_done,
12390 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
12391 -index f3e2ec8..ad5287a 100644
12392 +index ed0fe38..87fc692 100644
12393 --- a/arch/x86/kernel/smpboot.c
12394 +++ b/arch/x86/kernel/smpboot.c
12395 -@@ -673,6 +673,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
12396 +@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
12397 idle->thread.sp = (unsigned long) (((struct pt_regs *)
12398 (THREAD_SIZE + task_stack_page(idle))) - 1);
12399 per_cpu(current_task, cpu) = idle;
12400 @@ -20646,7 +21953,7 @@ index f3e2ec8..ad5287a 100644
12401
12402 #ifdef CONFIG_X86_32
12403 /* Stack for startup_32 can be just as for start_secondary onwards */
12404 -@@ -680,11 +681,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
12405 +@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
12406 #else
12407 clear_tsk_thread_flag(idle, TIF_FORK);
12408 initial_gs = per_cpu_offset(cpu);
12409 @@ -20663,7 +21970,7 @@ index f3e2ec8..ad5287a 100644
12410 initial_code = (unsigned long)start_secondary;
12411 stack_start = idle->thread.sp;
12412
12413 -@@ -823,6 +826,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
12414 +@@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
12415 /* the FPU context is blank, nobody can own it */
12416 __cpu_disable_lazy_restore(cpu);
12417
12418 @@ -20972,10 +22279,10 @@ index 0000000..26bb1af
12419 + return addr;
12420 +}
12421 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
12422 -index b4d3c39..d699d77 100644
12423 +index 97ef74b..57a1882 100644
12424 --- a/arch/x86/kernel/sys_x86_64.c
12425 +++ b/arch/x86/kernel/sys_x86_64.c
12426 -@@ -95,8 +95,8 @@ out:
12427 +@@ -81,8 +81,8 @@ out:
12428 return error;
12429 }
12430
12431 @@ -20986,7 +22293,7 @@ index b4d3c39..d699d77 100644
12432 {
12433 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
12434 unsigned long new_begin;
12435 -@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
12436 +@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
12437 *begin = new_begin;
12438 }
12439 } else {
12440 @@ -20995,9 +22302,9 @@ index b4d3c39..d699d77 100644
12441 *end = TASK_SIZE;
12442 }
12443 }
12444 -@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
12445 +@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
12446 struct vm_area_struct *vma;
12447 - unsigned long start_addr;
12448 + struct vm_unmapped_area_info info;
12449 unsigned long begin, end;
12450 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
12451
12452 @@ -21022,27 +22329,8 @@ index b4d3c39..d699d77 100644
12453 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12454 return addr;
12455 }
12456 - if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
12457 -@@ -172,7 +176,7 @@ full_search:
12458 - }
12459 - return -ENOMEM;
12460 - }
12461 -- if (!vma || addr + len <= vma->vm_start) {
12462 -+ if (check_heap_stack_gap(vma, addr, len, offset)) {
12463 - /*
12464 - * Remember the place where we stopped the search:
12465 - */
12466 -@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12467 - {
12468 - struct vm_area_struct *vma;
12469 - struct mm_struct *mm = current->mm;
12470 -- unsigned long addr = addr0, start_addr;
12471 -+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
12472 -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
12473
12474 - /* requested length too big for entire address space */
12475 - if (len > TASK_SIZE)
12476 -@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12477 +@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12478 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
12479 goto bottomup;
12480
12481 @@ -21053,64 +22341,8 @@ index b4d3c39..d699d77 100644
12482 /* requesting a specific address */
12483 if (addr) {
12484 addr = PAGE_ALIGN(addr);
12485 -- vma = find_vma(mm, addr);
12486 -- if (TASK_SIZE - len >= addr &&
12487 -- (!vma || addr + len <= vma->vm_start))
12488 -- return addr;
12489 -+ if (TASK_SIZE - len >= addr) {
12490 -+ vma = find_vma(mm, addr);
12491 -+ if (check_heap_stack_gap(vma, addr, len, offset))
12492 -+ return addr;
12493 -+ }
12494 - }
12495 -
12496 - /* check if free_area_cache is useful for us */
12497 -@@ -240,7 +250,7 @@ try_again:
12498 - * return with success:
12499 - */
12500 - vma = find_vma(mm, addr);
12501 -- if (!vma || addr+len <= vma->vm_start)
12502 -+ if (check_heap_stack_gap(vma, addr, len, offset))
12503 - /* remember the address as a hint for next time */
12504 - return mm->free_area_cache = addr;
12505 -
12506 -@@ -249,8 +259,8 @@ try_again:
12507 - mm->cached_hole_size = vma->vm_start - addr;
12508 -
12509 - /* try just below the current vma->vm_start */
12510 -- addr = vma->vm_start-len;
12511 -- } while (len < vma->vm_start);
12512 -+ addr = skip_heap_stack_gap(vma, len, offset);
12513 -+ } while (!IS_ERR_VALUE(addr));
12514 -
12515 - fail:
12516 - /*
12517 -@@ -270,13 +280,21 @@ bottomup:
12518 - * can happen with large stack limits and large mmap()
12519 - * allocations.
12520 - */
12521 -+ mm->mmap_base = TASK_UNMAPPED_BASE;
12522 -+
12523 -+#ifdef CONFIG_PAX_RANDMMAP
12524 -+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12525 -+ mm->mmap_base += mm->delta_mmap;
12526 -+#endif
12527 -+
12528 -+ mm->free_area_cache = mm->mmap_base;
12529 - mm->cached_hole_size = ~0UL;
12530 -- mm->free_area_cache = TASK_UNMAPPED_BASE;
12531 - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
12532 - /*
12533 - * Restore the topdown base:
12534 - */
12535 -- mm->free_area_cache = mm->mmap_base;
12536 -+ mm->mmap_base = base;
12537 -+ mm->free_area_cache = base;
12538 - mm->cached_hole_size = ~0UL;
12539 -
12540 - return addr;
12541 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
12542 -index f84fe00..93fe08f 100644
12543 +index f84fe00..f41d9f1 100644
12544 --- a/arch/x86/kernel/tboot.c
12545 +++ b/arch/x86/kernel/tboot.c
12546 @@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
12547 @@ -21140,7 +22372,7 @@ index f84fe00..93fe08f 100644
12548
12549 static int tboot_wait_for_aps(int num_aps)
12550 {
12551 -@@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
12552 +@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
12553 {
12554 switch (action) {
12555 case CPU_DYING:
12556 @@ -21152,6 +22384,14 @@ index f84fe00..93fe08f 100644
12557 return NOTIFY_BAD;
12558 break;
12559 }
12560 + return NOTIFY_OK;
12561 + }
12562 +
12563 +-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
12564 ++static struct notifier_block tboot_cpu_notifier =
12565 + {
12566 + .notifier_call = tboot_cpu_callback,
12567 + };
12568 @@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
12569
12570 tboot_create_trampoline();
12571 @@ -21221,13 +22461,13 @@ index 9d9d2f9..cad418a 100644
12572 else
12573 info = infobuf;
12574 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
12575 -index 8276dc6..4ca48a2 100644
12576 +index ecffca1..95c4d13 100644
12577 --- a/arch/x86/kernel/traps.c
12578 +++ b/arch/x86/kernel/traps.c
12579 -@@ -71,12 +71,6 @@ asmlinkage int system_call(void);
12580 +@@ -68,12 +68,6 @@
12581 + #include <asm/setup.h>
12582
12583 - /* Do we ignore FPU interrupts ? */
12584 - char ignore_fpu_irq;
12585 + asmlinkage int system_call(void);
12586 -
12587 -/*
12588 - * The IDT has to be page-aligned to simplify the Pentium
12589 @@ -21237,7 +22477,7 @@ index 8276dc6..4ca48a2 100644
12590 #endif
12591
12592 DECLARE_BITMAP(used_vectors, NR_VECTORS);
12593 -@@ -109,11 +103,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
12594 +@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
12595 }
12596
12597 static int __kprobes
12598 @@ -21251,7 +22491,7 @@ index 8276dc6..4ca48a2 100644
12599 /*
12600 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
12601 * On nmi (interrupt 2), do_trap should not be called.
12602 -@@ -126,12 +120,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
12603 +@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
12604 return -1;
12605 }
12606 #endif
12607 @@ -21277,7 +22517,7 @@ index 8276dc6..4ca48a2 100644
12608 return 0;
12609 }
12610
12611 -@@ -139,7 +145,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
12612 +@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
12613 }
12614
12615 static void __kprobes
12616 @@ -21286,7 +22526,7 @@ index 8276dc6..4ca48a2 100644
12617 long error_code, siginfo_t *info)
12618 {
12619 struct task_struct *tsk = current;
12620 -@@ -163,7 +169,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
12621 +@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
12622 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
12623 printk_ratelimit()) {
12624 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
12625 @@ -21295,7 +22535,7 @@ index 8276dc6..4ca48a2 100644
12626 regs->ip, regs->sp, error_code);
12627 print_vma_addr(" in ", regs->ip);
12628 pr_cont("\n");
12629 -@@ -269,7 +275,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
12630 +@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
12631 conditional_sti(regs);
12632
12633 #ifdef CONFIG_X86_32
12634 @@ -21304,7 +22544,7 @@ index 8276dc6..4ca48a2 100644
12635 local_irq_enable();
12636 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
12637 goto exit;
12638 -@@ -277,18 +283,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
12639 +@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
12640 #endif
12641
12642 tsk = current;
12643 @@ -21349,7 +22589,7 @@ index 8276dc6..4ca48a2 100644
12644 tsk->thread.error_code = error_code;
12645 tsk->thread.trap_nr = X86_TRAP_GP;
12646
12647 -@@ -443,7 +473,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
12648 +@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
12649 /* It's safe to allow irq's after DR6 has been saved */
12650 preempt_conditional_sti(regs);
12651
12652 @@ -21358,7 +22598,7 @@ index 8276dc6..4ca48a2 100644
12653 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
12654 X86_TRAP_DB);
12655 preempt_conditional_cli(regs);
12656 -@@ -458,7 +488,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
12657 +@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
12658 * We already checked v86 mode above, so we can check for kernel mode
12659 * by just checking the CPL of CS.
12660 */
12661 @@ -21367,7 +22607,7 @@ index 8276dc6..4ca48a2 100644
12662 tsk->thread.debugreg6 &= ~DR_STEP;
12663 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
12664 regs->flags &= ~X86_EFLAGS_TF;
12665 -@@ -490,7 +520,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
12666 +@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
12667 return;
12668 conditional_sti(regs);
12669
12670 @@ -21377,10 +22617,10 @@ index 8276dc6..4ca48a2 100644
12671 if (!fixup_exception(regs)) {
12672 task->thread.error_code = error_code;
12673 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
12674 -index aafa555..a04691a 100644
12675 +index c71025b..b117501 100644
12676 --- a/arch/x86/kernel/uprobes.c
12677 +++ b/arch/x86/kernel/uprobes.c
12678 -@@ -614,7 +614,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
12679 +@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
12680 int ret = NOTIFY_DONE;
12681
12682 /* We are only interested in userspace traps */
12683 @@ -21402,7 +22642,7 @@ index b9242ba..50c5edd 100644
12684 * verify_cpu, returns the status of longmode and SSE in register %eax.
12685 * 0: Success 1: Failure
12686 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
12687 -index 5c9687b..5f857d3 100644
12688 +index 1dfe69c..a3df6f6 100644
12689 --- a/arch/x86/kernel/vm86_32.c
12690 +++ b/arch/x86/kernel/vm86_32.c
12691 @@ -43,6 +43,7 @@
12692 @@ -21735,7 +22975,7 @@ index 22a1530..8fbaaad 100644
12693
12694 #ifdef CONFIG_SMP
12695 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
12696 -index 3a3e8c9..1af9465 100644
12697 +index 9a907a6..f83f921 100644
12698 --- a/arch/x86/kernel/vsyscall_64.c
12699 +++ b/arch/x86/kernel/vsyscall_64.c
12700 @@ -56,15 +56,13 @@
12701 @@ -21755,7 +22995,7 @@ index 3a3e8c9..1af9465 100644
12702 else if (!strcmp("none", str))
12703 vsyscall_mode = NONE;
12704 else
12705 -@@ -315,8 +313,7 @@ done:
12706 +@@ -323,8 +321,7 @@ do_ret:
12707 return true;
12708
12709 sigsegv:
12710 @@ -21765,7 +23005,7 @@ index 3a3e8c9..1af9465 100644
12711 }
12712
12713 /*
12714 -@@ -369,10 +366,7 @@ void __init map_vsyscall(void)
12715 +@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
12716 extern char __vvar_page;
12717 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
12718
12719 @@ -21850,7 +23090,7 @@ index ada87a3..afea76d 100644
12720 if ((unsigned long)buf % 64 || fx_only) {
12721 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
12722 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
12723 -index ec79e77..420f5cc 100644
12724 +index a20ecb5..d0e2194 100644
12725 --- a/arch/x86/kvm/cpuid.c
12726 +++ b/arch/x86/kvm/cpuid.c
12727 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
12728 @@ -21901,7 +23141,7 @@ index ec79e77..420f5cc 100644
12729
12730 out:
12731 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
12732 -index bba39bf..296540a 100644
12733 +index a27e763..54bfe43 100644
12734 --- a/arch/x86/kvm/emulate.c
12735 +++ b/arch/x86/kvm/emulate.c
12736 @@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
12737 @@ -21930,7 +23170,7 @@ index bba39bf..296540a 100644
12738 case 1: \
12739 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
12740 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
12741 -index 43e9fad..3b7c059 100644
12742 +index 9392f52..0e56d77 100644
12743 --- a/arch/x86/kvm/lapic.c
12744 +++ b/arch/x86/kvm/lapic.c
12745 @@ -55,7 +55,7 @@
12746 @@ -21943,7 +23183,7 @@ index 43e9fad..3b7c059 100644
12747 #define APIC_LVT_NUM 6
12748 /* 14 is the version for Xeon and Pentium 8.4.8*/
12749 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
12750 -index 714e2c0..3f7a086 100644
12751 +index 891eb6d..e027900 100644
12752 --- a/arch/x86/kvm/paging_tmpl.h
12753 +++ b/arch/x86/kvm/paging_tmpl.h
12754 @@ -208,7 +208,7 @@ retry_walk:
12755 @@ -21956,10 +23196,10 @@ index 714e2c0..3f7a086 100644
12756 goto error;
12757 walker->ptep_user[walker->level - 1] = ptep_user;
12758 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
12759 -index d017df3..61ae42e 100644
12760 +index d29d3cd..ec9d522 100644
12761 --- a/arch/x86/kvm/svm.c
12762 +++ b/arch/x86/kvm/svm.c
12763 -@@ -3500,7 +3500,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
12764 +@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
12765 int cpu = raw_smp_processor_id();
12766
12767 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
12768 @@ -21971,7 +23211,7 @@ index d017df3..61ae42e 100644
12769 load_TR_desc();
12770 }
12771
12772 -@@ -3874,6 +3878,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
12773 +@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
12774 #endif
12775 #endif
12776
12777 @@ -21983,10 +23223,10 @@ index d017df3..61ae42e 100644
12778
12779 local_irq_disable();
12780 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
12781 -index f858159..4ab7dba 100644
12782 +index 9120ae1..238abc0 100644
12783 --- a/arch/x86/kvm/vmx.c
12784 +++ b/arch/x86/kvm/vmx.c
12785 -@@ -1332,7 +1332,11 @@ static void reload_tss(void)
12786 +@@ -1370,7 +1370,11 @@ static void reload_tss(void)
12787 struct desc_struct *descs;
12788
12789 descs = (void *)gdt->address;
12790 @@ -21998,7 +23238,7 @@ index f858159..4ab7dba 100644
12791 load_TR_desc();
12792 }
12793
12794 -@@ -1546,6 +1550,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
12795 +@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
12796 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
12797 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
12798
12799 @@ -22009,7 +23249,7 @@ index f858159..4ab7dba 100644
12800 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
12801 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
12802 vmx->loaded_vmcs->cpu = cpu;
12803 -@@ -2669,8 +2677,11 @@ static __init int hardware_setup(void)
12804 +@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
12805 if (!cpu_has_vmx_flexpriority())
12806 flexpriority_enabled = 0;
12807
12808 @@ -22023,7 +23263,7 @@ index f858159..4ab7dba 100644
12809
12810 if (enable_ept && !cpu_has_vmx_ept_2m_page())
12811 kvm_disable_largepages();
12812 -@@ -3712,7 +3723,10 @@ static void vmx_set_constant_host_state(void)
12813 +@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
12814
12815 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
12816 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
12817 @@ -22034,7 +23274,7 @@ index f858159..4ab7dba 100644
12818
12819 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
12820 #ifdef CONFIG_X86_64
12821 -@@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
12822 +@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
12823 native_store_idt(&dt);
12824 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
12825
12826 @@ -22043,7 +23283,7 @@ index f858159..4ab7dba 100644
12827
12828 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
12829 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
12830 -@@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12831 +@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12832 "jmp 2f \n\t"
12833 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
12834 "2: "
12835 @@ -22056,7 +23296,7 @@ index f858159..4ab7dba 100644
12836 /* Save guest registers, load host registers, keep flags */
12837 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
12838 "pop %0 \n\t"
12839 -@@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12840 +@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12841 #endif
12842 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
12843 [wordsize]"i"(sizeof(ulong))
12844 @@ -22068,7 +23308,7 @@ index f858159..4ab7dba 100644
12845 : "cc", "memory"
12846 #ifdef CONFIG_X86_64
12847 , "rax", "rbx", "rdi", "rsi"
12848 -@@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12849 +@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12850 if (debugctlmsr)
12851 update_debugctlmsr(debugctlmsr);
12852
12853 @@ -22077,7 +23317,7 @@ index f858159..4ab7dba 100644
12854 /*
12855 * The sysexit path does not restore ds/es, so we must set them to
12856 * a reasonable value ourselves.
12857 -@@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12858 +@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
12859 * may be executed in interrupt context, which saves and restore segments
12860 * around it, nullifying its effect.
12861 */
12862 @@ -22099,10 +23339,10 @@ index f858159..4ab7dba 100644
12863
12864 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
12865 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
12866 -index 4f76417..93429b5 100644
12867 +index c243b81..9eb193f 100644
12868 --- a/arch/x86/kvm/x86.c
12869 +++ b/arch/x86/kvm/x86.c
12870 -@@ -1390,8 +1390,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
12871 +@@ -1692,8 +1692,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
12872 {
12873 struct kvm *kvm = vcpu->kvm;
12874 int lm = is_long_mode(vcpu);
12875 @@ -22113,7 +23353,7 @@ index 4f76417..93429b5 100644
12876 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
12877 : kvm->arch.xen_hvm_config.blob_size_32;
12878 u32 page_num = data & ~PAGE_MASK;
12879 -@@ -2255,6 +2255,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
12880 +@@ -2571,6 +2571,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
12881 if (n < msr_list.nmsrs)
12882 goto out;
12883 r = -EFAULT;
12884 @@ -22122,7 +23362,7 @@ index 4f76417..93429b5 100644
12885 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
12886 num_msrs_to_save * sizeof(u32)))
12887 goto out;
12888 -@@ -2379,7 +2381,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
12889 +@@ -2700,7 +2702,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
12890 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
12891 struct kvm_interrupt *irq)
12892 {
12893 @@ -22131,9 +23371,9 @@ index 4f76417..93429b5 100644
12894 return -EINVAL;
12895 if (irqchip_in_kernel(vcpu->kvm))
12896 return -ENXIO;
12897 -@@ -4881,7 +4883,7 @@ static void kvm_set_mmio_spte_mask(void)
12898 - kvm_mmu_set_mmio_spte_mask(mask);
12899 - }
12900 +@@ -5213,7 +5215,7 @@ static struct notifier_block pvclock_gtod_notifier = {
12901 + };
12902 + #endif
12903
12904 -int kvm_arch_init(void *opaque)
12905 +int kvm_arch_init(const void *opaque)
12906 @@ -22141,7 +23381,7 @@ index 4f76417..93429b5 100644
12907 int r;
12908 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
12909 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
12910 -index 642d880..44e0f3f 100644
12911 +index df4176c..23ce092 100644
12912 --- a/arch/x86/lguest/boot.c
12913 +++ b/arch/x86/lguest/boot.c
12914 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
12915 @@ -22944,92 +24184,92 @@ index 1e572c5..2a162cd 100644
12916
12917 CFI_ENDPROC
12918 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
12919 -index 6b34d04..dccb07f 100644
12920 +index 176cca6..1166c50 100644
12921 --- a/arch/x86/lib/copy_page_64.S
12922 +++ b/arch/x86/lib/copy_page_64.S
12923 -@@ -9,6 +9,7 @@ copy_page_c:
12924 +@@ -9,6 +9,7 @@ copy_page_rep:
12925 CFI_STARTPROC
12926 - movl $4096/8,%ecx
12927 - rep movsq
12928 + movl $4096/8, %ecx
12929 + rep movsq
12930 + pax_force_retaddr
12931 ret
12932 CFI_ENDPROC
12933 - ENDPROC(copy_page_c)
12934 -@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
12935 + ENDPROC(copy_page_rep)
12936 +@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
12937
12938 ENTRY(copy_page)
12939 CFI_STARTPROC
12940 -- subq $2*8,%rsp
12941 +- subq $2*8, %rsp
12942 - CFI_ADJUST_CFA_OFFSET 2*8
12943 -+ subq $3*8,%rsp
12944 ++ subq $3*8, %rsp
12945 + CFI_ADJUST_CFA_OFFSET 3*8
12946 - movq %rbx,(%rsp)
12947 + movq %rbx, (%rsp)
12948 CFI_REL_OFFSET rbx, 0
12949 - movq %r12,1*8(%rsp)
12950 + movq %r12, 1*8(%rsp)
12951 CFI_REL_OFFSET r12, 1*8
12952 -+ movq %r13,2*8(%rsp)
12953 ++ movq %r13, 2*8(%rsp)
12954 + CFI_REL_OFFSET r13, 2*8
12955
12956 - movl $(4096/64)-5,%ecx
12957 + movl $(4096/64)-5, %ecx
12958 .p2align 4
12959 -@@ -37,7 +40,7 @@ ENTRY(copy_page)
12960 - movq 16 (%rsi), %rdx
12961 - movq 24 (%rsi), %r8
12962 - movq 32 (%rsi), %r9
12963 -- movq 40 (%rsi), %r10
12964 -+ movq 40 (%rsi), %r13
12965 - movq 48 (%rsi), %r11
12966 - movq 56 (%rsi), %r12
12967 -
12968 -@@ -48,7 +51,7 @@ ENTRY(copy_page)
12969 - movq %rdx, 16 (%rdi)
12970 - movq %r8, 24 (%rdi)
12971 - movq %r9, 32 (%rdi)
12972 -- movq %r10, 40 (%rdi)
12973 -+ movq %r13, 40 (%rdi)
12974 - movq %r11, 48 (%rdi)
12975 - movq %r12, 56 (%rdi)
12976 -
12977 -@@ -67,7 +70,7 @@ ENTRY(copy_page)
12978 - movq 16 (%rsi), %rdx
12979 - movq 24 (%rsi), %r8
12980 - movq 32 (%rsi), %r9
12981 -- movq 40 (%rsi), %r10
12982 -+ movq 40 (%rsi), %r13
12983 - movq 48 (%rsi), %r11
12984 - movq 56 (%rsi), %r12
12985 -
12986 -@@ -76,7 +79,7 @@ ENTRY(copy_page)
12987 - movq %rdx, 16 (%rdi)
12988 - movq %r8, 24 (%rdi)
12989 - movq %r9, 32 (%rdi)
12990 -- movq %r10, 40 (%rdi)
12991 -+ movq %r13, 40 (%rdi)
12992 - movq %r11, 48 (%rdi)
12993 - movq %r12, 56 (%rdi)
12994 -
12995 -@@ -89,8 +92,11 @@ ENTRY(copy_page)
12996 +@@ -36,7 +39,7 @@ ENTRY(copy_page)
12997 + movq 0x8*2(%rsi), %rdx
12998 + movq 0x8*3(%rsi), %r8
12999 + movq 0x8*4(%rsi), %r9
13000 +- movq 0x8*5(%rsi), %r10
13001 ++ movq 0x8*5(%rsi), %r13
13002 + movq 0x8*6(%rsi), %r11
13003 + movq 0x8*7(%rsi), %r12
13004 +
13005 +@@ -47,7 +50,7 @@ ENTRY(copy_page)
13006 + movq %rdx, 0x8*2(%rdi)
13007 + movq %r8, 0x8*3(%rdi)
13008 + movq %r9, 0x8*4(%rdi)
13009 +- movq %r10, 0x8*5(%rdi)
13010 ++ movq %r13, 0x8*5(%rdi)
13011 + movq %r11, 0x8*6(%rdi)
13012 + movq %r12, 0x8*7(%rdi)
13013 +
13014 +@@ -66,7 +69,7 @@ ENTRY(copy_page)
13015 + movq 0x8*2(%rsi), %rdx
13016 + movq 0x8*3(%rsi), %r8
13017 + movq 0x8*4(%rsi), %r9
13018 +- movq 0x8*5(%rsi), %r10
13019 ++ movq 0x8*5(%rsi), %r13
13020 + movq 0x8*6(%rsi), %r11
13021 + movq 0x8*7(%rsi), %r12
13022 +
13023 +@@ -75,7 +78,7 @@ ENTRY(copy_page)
13024 + movq %rdx, 0x8*2(%rdi)
13025 + movq %r8, 0x8*3(%rdi)
13026 + movq %r9, 0x8*4(%rdi)
13027 +- movq %r10, 0x8*5(%rdi)
13028 ++ movq %r13, 0x8*5(%rdi)
13029 + movq %r11, 0x8*6(%rdi)
13030 + movq %r12, 0x8*7(%rdi)
13031 +
13032 +@@ -87,8 +90,11 @@ ENTRY(copy_page)
13033 CFI_RESTORE rbx
13034 - movq 1*8(%rsp),%r12
13035 + movq 1*8(%rsp), %r12
13036 CFI_RESTORE r12
13037 -- addq $2*8,%rsp
13038 +- addq $2*8, %rsp
13039 - CFI_ADJUST_CFA_OFFSET -2*8
13040 -+ movq 2*8(%rsp),%r13
13041 ++ movq 2*8(%rsp), %r13
13042 + CFI_RESTORE r13
13043 -+ addq $3*8,%rsp
13044 ++ addq $3*8, %rsp
13045 + CFI_ADJUST_CFA_OFFSET -3*8
13046 + pax_force_retaddr
13047 ret
13048 .Lcopy_page_end:
13049 CFI_ENDPROC
13050 -@@ -101,7 +107,7 @@ ENDPROC(copy_page)
13051 +@@ -99,7 +105,7 @@ ENDPROC(copy_page)
13052
13053 #include <asm/cpufeature.h>
13054
13055 - .section .altinstr_replacement,"ax"
13056 + .section .altinstr_replacement,"a"
13057 1: .byte 0xeb /* jmp <disp8> */
13058 - .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
13059 + .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
13060 2:
13061 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
13062 index a30ca15..d25fab6 100644
13063 @@ -24351,7 +25591,7 @@ index a63efd6..ccecad8 100644
13064 ret
13065 CFI_ENDPROC
13066 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
13067 -index 98f6d6b6..d27f045 100644
13068 +index f0312d7..9c39d63 100644
13069 --- a/arch/x86/lib/usercopy_32.c
13070 +++ b/arch/x86/lib/usercopy_32.c
13071 @@ -42,11 +42,13 @@ do { \
13072 @@ -24844,8 +26084,8 @@ index 98f6d6b6..d27f045 100644
13073 "2:\n" \
13074 ".section .fixup,\"ax\"\n" \
13075 "5: addl %3,%0\n" \
13076 -@@ -629,9 +741,9 @@ survive:
13077 - #endif
13078 +@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
13079 + {
13080 stac();
13081 if (movsl_is_ok(to, from, n))
13082 - __copy_user(to, from, n);
13083 @@ -24856,7 +26096,7 @@ index 98f6d6b6..d27f045 100644
13084 clac();
13085 return n;
13086 }
13087 -@@ -655,10 +767,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
13088 +@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
13089 {
13090 stac();
13091 if (movsl_is_ok(to, from, n))
13092 @@ -24869,7 +26109,7 @@ index 98f6d6b6..d27f045 100644
13093 clac();
13094 return n;
13095 }
13096 -@@ -689,66 +800,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
13097 +@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
13098 if (n > 64 && cpu_has_xmm2)
13099 n = __copy_user_intel_nocache(to, from, n);
13100 else
13101 @@ -25082,7 +26322,7 @@ index 903ec1e..c4166b2 100644
13102 }
13103
13104 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
13105 -index 79ff7da..610cf70 100644
13106 +index fb674fd..272f369 100644
13107 --- a/arch/x86/mm/fault.c
13108 +++ b/arch/x86/mm/fault.c
13109 @@ -13,12 +13,19 @@
13110 @@ -25096,7 +26336,7 @@ index 79ff7da..610cf70 100644
13111 #include <asm/pgalloc.h> /* pgd_*(), ... */
13112 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
13113 #include <asm/fixmap.h> /* VSYSCALL_START */
13114 - #include <asm/rcu.h> /* exception_enter(), ... */
13115 + #include <asm/context_tracking.h> /* exception_enter(), ... */
13116 +#include <asm/tlbflush.h>
13117 +
13118 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13119 @@ -25309,7 +26549,7 @@ index 79ff7da..610cf70 100644
13120 /* Kernel addresses are always protection faults: */
13121 if (address >= TASK_SIZE)
13122 error_code |= PF_PROT;
13123 -@@ -847,7 +945,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
13124 +@@ -833,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
13125 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
13126 printk(KERN_ERR
13127 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
13128 @@ -25318,7 +26558,7 @@ index 79ff7da..610cf70 100644
13129 code = BUS_MCEERR_AR;
13130 }
13131 #endif
13132 -@@ -903,6 +1001,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
13133 +@@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
13134 return 1;
13135 }
13136
13137 @@ -25418,7 +26658,7 @@ index 79ff7da..610cf70 100644
13138 /*
13139 * Handle a spurious fault caused by a stale TLB entry.
13140 *
13141 -@@ -975,6 +1166,9 @@ int show_unhandled_signals = 1;
13142 +@@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
13143 static inline int
13144 access_error(unsigned long error_code, struct vm_area_struct *vma)
13145 {
13146 @@ -25428,7 +26668,7 @@ index 79ff7da..610cf70 100644
13147 if (error_code & PF_WRITE) {
13148 /* write, present and write, not present: */
13149 if (unlikely(!(vma->vm_flags & VM_WRITE)))
13150 -@@ -1003,7 +1197,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
13151 +@@ -996,7 +1190,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
13152 if (error_code & PF_USER)
13153 return false;
13154
13155 @@ -25437,7 +26677,7 @@ index 79ff7da..610cf70 100644
13156 return false;
13157
13158 return true;
13159 -@@ -1019,18 +1213,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
13160 +@@ -1012,18 +1206,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
13161 {
13162 struct vm_area_struct *vma;
13163 struct task_struct *tsk;
13164 @@ -25476,7 +26716,7 @@ index 79ff7da..610cf70 100644
13165
13166 /*
13167 * Detect and handle instructions that would cause a page fault for
13168 -@@ -1091,7 +1300,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
13169 +@@ -1084,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
13170 * User-mode registers count as a user access even for any
13171 * potential system fault or CPU buglet:
13172 */
13173 @@ -25485,7 +26725,7 @@ index 79ff7da..610cf70 100644
13174 local_irq_enable();
13175 error_code |= PF_USER;
13176 } else {
13177 -@@ -1153,6 +1362,11 @@ retry:
13178 +@@ -1146,6 +1355,11 @@ retry:
13179 might_sleep();
13180 }
13181
13182 @@ -25497,7 +26737,7 @@ index 79ff7da..610cf70 100644
13183 vma = find_vma(mm, address);
13184 if (unlikely(!vma)) {
13185 bad_area(regs, error_code, address);
13186 -@@ -1164,18 +1378,24 @@ retry:
13187 +@@ -1157,18 +1371,24 @@ retry:
13188 bad_area(regs, error_code, address);
13189 return;
13190 }
13191 @@ -25533,7 +26773,7 @@ index 79ff7da..610cf70 100644
13192 if (unlikely(expand_stack(vma, address))) {
13193 bad_area(regs, error_code, address);
13194 return;
13195 -@@ -1239,3 +1459,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
13196 +@@ -1232,3 +1452,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
13197 __do_page_fault(regs, error_code);
13198 exception_exit(regs);
13199 }
13200 @@ -25856,172 +27096,36 @@ index 6f31ee5..8ee4164 100644
13201
13202 return (void *)vaddr;
13203 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
13204 -index 937bff5..dce75ff 100644
13205 +index ae1aa71..56316db 100644
13206 --- a/arch/x86/mm/hugetlbpage.c
13207 +++ b/arch/x86/mm/hugetlbpage.c
13208 -@@ -276,13 +276,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13209 - struct hstate *h = hstate_file(file);
13210 - struct mm_struct *mm = current->mm;
13211 - struct vm_area_struct *vma;
13212 -- unsigned long start_addr;
13213 -+ unsigned long start_addr, pax_task_size = TASK_SIZE;
13214 -+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
13215 -+
13216 -+#ifdef CONFIG_PAX_SEGMEXEC
13217 -+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
13218 -+ pax_task_size = SEGMEXEC_TASK_SIZE;
13219 -+#endif
13220 -+
13221 -+ pax_task_size -= PAGE_SIZE;
13222 -
13223 - if (len > mm->cached_hole_size) {
13224 -- start_addr = mm->free_area_cache;
13225 -+ start_addr = mm->free_area_cache;
13226 - } else {
13227 -- start_addr = TASK_UNMAPPED_BASE;
13228 -- mm->cached_hole_size = 0;
13229 -+ start_addr = mm->mmap_base;
13230 -+ mm->cached_hole_size = 0;
13231 - }
13232 -
13233 - full_search:
13234 -@@ -290,26 +298,27 @@ full_search:
13235 -
13236 - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
13237 - /* At this point: (!vma || addr < vma->vm_end). */
13238 -- if (TASK_SIZE - len < addr) {
13239 -+ if (pax_task_size - len < addr) {
13240 - /*
13241 - * Start a new search - just in case we missed
13242 - * some holes.
13243 - */
13244 -- if (start_addr != TASK_UNMAPPED_BASE) {
13245 -- start_addr = TASK_UNMAPPED_BASE;
13246 -+ if (start_addr != mm->mmap_base) {
13247 -+ start_addr = mm->mmap_base;
13248 - mm->cached_hole_size = 0;
13249 - goto full_search;
13250 - }
13251 - return -ENOMEM;
13252 - }
13253 -- if (!vma || addr + len <= vma->vm_start) {
13254 -- mm->free_area_cache = addr + len;
13255 -- return addr;
13256 -- }
13257 -+ if (check_heap_stack_gap(vma, addr, len, offset))
13258 -+ break;
13259 - if (addr + mm->cached_hole_size < vma->vm_start)
13260 - mm->cached_hole_size = vma->vm_start - addr;
13261 - addr = ALIGN(vma->vm_end, huge_page_size(h));
13262 - }
13263 +@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13264 + info.flags = 0;
13265 + info.length = len;
13266 + info.low_limit = TASK_UNMAPPED_BASE;
13267 +
13268 -+ mm->free_area_cache = addr + len;
13269 -+ return addr;
13270 - }
13271 -
13272 - static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13273 -@@ -320,9 +329,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13274 - struct mm_struct *mm = current->mm;
13275 - struct vm_area_struct *vma;
13276 - unsigned long base = mm->mmap_base;
13277 -- unsigned long addr = addr0;
13278 -+ unsigned long addr;
13279 - unsigned long largest_hole = mm->cached_hole_size;
13280 -- unsigned long start_addr;
13281 -+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
13282 -
13283 - /* don't allow allocations above current base */
13284 - if (mm->free_area_cache > base)
13285 -@@ -332,16 +341,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13286 - largest_hole = 0;
13287 - mm->free_area_cache = base;
13288 - }
13289 --try_again:
13290 -- start_addr = mm->free_area_cache;
13291 -
13292 - /* make sure it can fit in the remaining address space */
13293 - if (mm->free_area_cache < len)
13294 - goto fail;
13295 -
13296 - /* either no address requested or can't fit in requested address hole */
13297 -- addr = (mm->free_area_cache - len) & huge_page_mask(h);
13298 -+ addr = mm->free_area_cache - len;
13299 - do {
13300 -+ addr &= huge_page_mask(h);
13301 - /*
13302 - * Lookup failure means no vma is above this address,
13303 - * i.e. return with success:
13304 -@@ -350,10 +358,10 @@ try_again:
13305 - if (!vma)
13306 - return addr;
13307 -
13308 -- if (addr + len <= vma->vm_start) {
13309 -+ if (check_heap_stack_gap(vma, addr, len, offset)) {
13310 - /* remember the address as a hint for next time */
13311 -- mm->cached_hole_size = largest_hole;
13312 -- return (mm->free_area_cache = addr);
13313 -+ mm->cached_hole_size = largest_hole;
13314 -+ return (mm->free_area_cache = addr);
13315 - } else if (mm->free_area_cache == vma->vm_end) {
13316 - /* pull free_area_cache down to the first hole */
13317 - mm->free_area_cache = vma->vm_start;
13318 -@@ -362,29 +370,34 @@ try_again:
13319 -
13320 - /* remember the largest hole we saw so far */
13321 - if (addr + largest_hole < vma->vm_start)
13322 -- largest_hole = vma->vm_start - addr;
13323 -+ largest_hole = vma->vm_start - addr;
13324 -
13325 - /* try just below the current vma->vm_start */
13326 -- addr = (vma->vm_start - len) & huge_page_mask(h);
13327 -- } while (len <= vma->vm_start);
13328 -+ addr = skip_heap_stack_gap(vma, len, offset);
13329 -+ } while (!IS_ERR_VALUE(addr));
13330 -
13331 - fail:
13332 - /*
13333 -- * if hint left us with no space for the requested
13334 -- * mapping then try again:
13335 -- */
13336 -- if (start_addr != base) {
13337 -- mm->free_area_cache = base;
13338 -- largest_hole = 0;
13339 -- goto try_again;
13340 -- }
13341 -- /*
13342 - * A failed mmap() very likely causes application failure,
13343 - * so fall back to the bottom-up function here. This scenario
13344 - * can happen with large stack limits and large mmap()
13345 - * allocations.
13346 - */
13347 -- mm->free_area_cache = TASK_UNMAPPED_BASE;
13348 -+
13349 -+#ifdef CONFIG_PAX_SEGMEXEC
13350 -+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
13351 -+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
13352 -+ else
13353 ++#ifdef CONFIG_PAX_RANDMMAP
13354 ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
13355 ++ info.low_limit += current->mm->delta_mmap;
13356 +#endif
13357 +
13358 -+ mm->mmap_base = TASK_UNMAPPED_BASE;
13359 + info.high_limit = TASK_SIZE;
13360 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13361 + info.align_offset = 0;
13362 +@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13363 + VM_BUG_ON(addr != -ENOMEM);
13364 + info.flags = 0;
13365 + info.low_limit = TASK_UNMAPPED_BASE;
13366 +
13367 +#ifdef CONFIG_PAX_RANDMMAP
13368 -+ if (mm->pax_flags & MF_PAX_RANDMMAP)
13369 -+ mm->mmap_base += mm->delta_mmap;
13370 ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
13371 ++ info.low_limit += current->mm->delta_mmap;
13372 +#endif
13373 +
13374 -+ mm->free_area_cache = mm->mmap_base;
13375 - mm->cached_hole_size = ~0UL;
13376 - addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
13377 - len, pgoff, flags);
13378 -@@ -392,6 +405,7 @@ fail:
13379 - /*
13380 - * Restore the topdown base:
13381 - */
13382 -+ mm->mmap_base = base;
13383 - mm->free_area_cache = base;
13384 - mm->cached_hole_size = ~0UL;
13385 -
13386 -@@ -405,10 +419,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13387 + info.high_limit = TASK_SIZE;
13388 + addr = vm_unmapped_area(&info);
13389 + }
13390 +@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13391 struct hstate *h = hstate_file(file);
13392 struct mm_struct *mm = current->mm;
13393 struct vm_area_struct *vma;
13394 @@ -26043,7 +27147,7 @@ index 937bff5..dce75ff 100644
13395 return -ENOMEM;
13396
13397 if (flags & MAP_FIXED) {
13398 -@@ -417,11 +441,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13399 +@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
13400 return addr;
13401 }
13402
13403 @@ -26251,7 +27355,7 @@ index d7aea41..0fc945b 100644
13404 (unsigned long)(&__init_begin),
13405 (unsigned long)(&__init_end));
13406 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
13407 -index 11a5800..4bd9977 100644
13408 +index 745d66b..56bf568 100644
13409 --- a/arch/x86/mm/init_32.c
13410 +++ b/arch/x86/mm/init_32.c
13411 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
13412 @@ -26468,7 +27572,7 @@ index 11a5800..4bd9977 100644
13413 EXPORT_SYMBOL_GPL(__supported_pte_mask);
13414
13415 /* user-defined highmem size */
13416 -@@ -731,6 +730,12 @@ void __init mem_init(void)
13417 +@@ -728,6 +727,12 @@ void __init mem_init(void)
13418
13419 pci_iommu_alloc();
13420
13421 @@ -26481,7 +27585,7 @@ index 11a5800..4bd9977 100644
13422 #ifdef CONFIG_FLATMEM
13423 BUG_ON(!mem_map);
13424 #endif
13425 -@@ -757,7 +762,7 @@ void __init mem_init(void)
13426 +@@ -754,7 +759,7 @@ void __init mem_init(void)
13427 reservedpages++;
13428
13429 codesize = (unsigned long) &_etext - (unsigned long) &_text;
13430 @@ -26490,7 +27594,7 @@ index 11a5800..4bd9977 100644
13431 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
13432
13433 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
13434 -@@ -798,10 +803,10 @@ void __init mem_init(void)
13435 +@@ -795,10 +800,10 @@ void __init mem_init(void)
13436 ((unsigned long)&__init_end -
13437 (unsigned long)&__init_begin) >> 10,
13438
13439 @@ -26504,7 +27608,7 @@ index 11a5800..4bd9977 100644
13440 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
13441
13442 /*
13443 -@@ -879,6 +884,7 @@ void set_kernel_text_rw(void)
13444 +@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
13445 if (!kernel_set_to_readonly)
13446 return;
13447
13448 @@ -26512,7 +27616,7 @@ index 11a5800..4bd9977 100644
13449 pr_debug("Set kernel text: %lx - %lx for read write\n",
13450 start, start+size);
13451
13452 -@@ -893,6 +899,7 @@ void set_kernel_text_ro(void)
13453 +@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
13454 if (!kernel_set_to_readonly)
13455 return;
13456
13457 @@ -26520,7 +27624,7 @@ index 11a5800..4bd9977 100644
13458 pr_debug("Set kernel text: %lx - %lx for read only\n",
13459 start, start+size);
13460
13461 -@@ -921,6 +928,7 @@ void mark_rodata_ro(void)
13462 +@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
13463 unsigned long start = PFN_ALIGN(_text);
13464 unsigned long size = PFN_ALIGN(_etext) - start;
13465
13466 @@ -26529,7 +27633,7 @@ index 11a5800..4bd9977 100644
13467 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
13468 size >> 10);
13469 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
13470 -index ce42da7..678a54e 100644
13471 +index 75c9a6a..498d677 100644
13472 --- a/arch/x86/mm/init_64.c
13473 +++ b/arch/x86/mm/init_64.c
13474 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
13475 @@ -26664,7 +27768,7 @@ index ce42da7..678a54e 100644
13476 spin_unlock(&init_mm.page_table_lock);
13477 pgd_changed = true;
13478 }
13479 -@@ -691,6 +705,12 @@ void __init mem_init(void)
13480 +@@ -693,6 +707,12 @@ void __init mem_init(void)
13481
13482 pci_iommu_alloc();
13483
13484 @@ -26677,7 +27781,7 @@ index ce42da7..678a54e 100644
13485 /* clear_bss() already clear the empty_zero_page */
13486
13487 reservedpages = 0;
13488 -@@ -854,8 +874,8 @@ int kern_addr_valid(unsigned long addr)
13489 +@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
13490 static struct vm_area_struct gate_vma = {
13491 .vm_start = VSYSCALL_START,
13492 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
13493 @@ -26688,7 +27792,7 @@ index ce42da7..678a54e 100644
13494 };
13495
13496 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
13497 -@@ -889,7 +909,7 @@ int in_gate_area_no_mm(unsigned long addr)
13498 +@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
13499
13500 const char *arch_vma_name(struct vm_area_struct *vma)
13501 {
13502 @@ -27116,7 +28220,7 @@ index 9f0614d..92ae64a 100644
13503 p += get_opcode(p, &opcode);
13504 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
13505 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
13506 -index 8573b83..4f3ed7e 100644
13507 +index e27fbf8..8b56dc9 100644
13508 --- a/arch/x86/mm/pgtable.c
13509 +++ b/arch/x86/mm/pgtable.c
13510 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
13511 @@ -27195,7 +28299,7 @@ index 8573b83..4f3ed7e 100644
13512 /*
13513 * List of all pgd's needed for non-PAE so it can invalidate entries
13514 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
13515 - * -- wli
13516 + * -- nyc
13517 */
13518
13519 -#ifdef CONFIG_X86_PAE
13520 @@ -27428,7 +28532,7 @@ index 410531d..0f16030 100644
13521 }
13522
13523 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
13524 -index 60f926c..a710970 100644
13525 +index 13a6b29..c2fff23 100644
13526 --- a/arch/x86/mm/tlb.c
13527 +++ b/arch/x86/mm/tlb.c
13528 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
13529 @@ -27557,18 +28661,18 @@ index 877b9a1..a8ecf42 100644
13530 + pax_force_retaddr
13531 ret
13532 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
13533 -index 520d2bd..b895ef4 100644
13534 +index d11a470..3f9adff3 100644
13535 --- a/arch/x86/net/bpf_jit_comp.c
13536 +++ b/arch/x86/net/bpf_jit_comp.c
13537 -@@ -11,6 +11,7 @@
13538 - #include <asm/cacheflush.h>
13539 +@@ -12,6 +12,7 @@
13540 #include <linux/netdevice.h>
13541 #include <linux/filter.h>
13542 + #include <linux/if_vlan.h>
13543 +#include <linux/random.h>
13544
13545 /*
13546 * Conventions :
13547 -@@ -48,13 +49,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
13548 +@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
13549 return ptr + len;
13550 }
13551
13552 @@ -27656,7 +28760,7 @@ index 520d2bd..b895ef4 100644
13553
13554 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
13555 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
13556 -@@ -89,6 +164,24 @@ do { \
13557 +@@ -90,6 +165,24 @@ do { \
13558 #define X86_JBE 0x76
13559 #define X86_JA 0x77
13560
13561 @@ -27681,7 +28785,7 @@ index 520d2bd..b895ef4 100644
13562 #define EMIT_COND_JMP(op, offset) \
13563 do { \
13564 if (is_near(offset)) \
13565 -@@ -96,6 +189,7 @@ do { \
13566 +@@ -97,6 +190,7 @@ do { \
13567 else { \
13568 EMIT2(0x0f, op + 0x10); \
13569 EMIT(offset, 4); /* jxx .+off32 */ \
13570 @@ -27689,7 +28793,7 @@ index 520d2bd..b895ef4 100644
13571 } \
13572 } while (0)
13573
13574 -@@ -120,12 +214,17 @@ static inline void bpf_flush_icache(void *start, void *end)
13575 +@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
13576 set_fs(old_fs);
13577 }
13578
13579 @@ -27708,7 +28812,7 @@ index 520d2bd..b895ef4 100644
13580 u8 *prog;
13581 unsigned int proglen, oldproglen = 0;
13582 int ilen, i;
13583 -@@ -138,6 +237,9 @@ void bpf_jit_compile(struct sk_filter *fp)
13584 +@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
13585 unsigned int *addrs;
13586 const struct sock_filter *filter = fp->insns;
13587 int flen = fp->len;
13588 @@ -27718,7 +28822,7 @@ index 520d2bd..b895ef4 100644
13589
13590 if (!bpf_jit_enable)
13591 return;
13592 -@@ -146,11 +248,19 @@ void bpf_jit_compile(struct sk_filter *fp)
13593 +@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
13594 if (addrs == NULL)
13595 return;
13596
13597 @@ -27740,7 +28844,7 @@ index 520d2bd..b895ef4 100644
13598 addrs[i] = proglen;
13599 }
13600 cleanup_addr = proglen; /* epilogue address */
13601 -@@ -258,10 +368,8 @@ void bpf_jit_compile(struct sk_filter *fp)
13602 +@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
13603 case BPF_S_ALU_MUL_K: /* A *= K */
13604 if (is_imm8(K))
13605 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
13606 @@ -27753,7 +28857,7 @@ index 520d2bd..b895ef4 100644
13607 break;
13608 case BPF_S_ALU_DIV_X: /* A /= X; */
13609 seen |= SEEN_XREG;
13610 -@@ -301,13 +409,23 @@ void bpf_jit_compile(struct sk_filter *fp)
13611 +@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
13612 break;
13613 case BPF_S_ALU_MOD_K: /* A %= K; */
13614 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
13615 @@ -27777,7 +28881,7 @@ index 520d2bd..b895ef4 100644
13616 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
13617 break;
13618 case BPF_S_ALU_AND_X:
13619 -@@ -543,8 +661,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
13620 +@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
13621 if (is_imm8(K)) {
13622 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
13623 } else {
13624 @@ -27787,7 +28891,7 @@ index 520d2bd..b895ef4 100644
13625 }
13626 } else {
13627 EMIT2(0x89,0xde); /* mov %ebx,%esi */
13628 -@@ -627,17 +744,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13629 +@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13630 break;
13631 default:
13632 /* hmm, too complex filter, give up with jit compiler */
13633 @@ -27810,7 +28914,7 @@ index 520d2bd..b895ef4 100644
13634 }
13635 proglen += ilen;
13636 addrs[i] = proglen;
13637 -@@ -658,11 +776,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13638 +@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13639 break;
13640 }
13641 if (proglen == oldproglen) {
13642 @@ -27824,7 +28928,7 @@ index 520d2bd..b895ef4 100644
13643 }
13644 oldproglen = proglen;
13645 }
13646 -@@ -678,7 +794,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13647 +@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
13648 bpf_flush_icache(image, image + proglen);
13649
13650 fp->bpf_func = (void *)image;
13651 @@ -27836,7 +28940,7 @@ index 520d2bd..b895ef4 100644
13652 out:
13653 kfree(addrs);
13654 return;
13655 -@@ -686,18 +805,20 @@ out:
13656 +@@ -707,18 +826,20 @@ out:
13657
13658 static void jit_free_defer(struct work_struct *arg)
13659 {
13660 @@ -27896,8 +29000,21 @@ index d6aa6e8..266395a 100644
13661 unsigned long stack = kernel_stack_pointer(regs);
13662 if (depth)
13663 dump_trace(NULL, regs, (unsigned long *)stack, 0,
13664 +diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
13665 +index e9e6ed5..e47ae67 100644
13666 +--- a/arch/x86/pci/amd_bus.c
13667 ++++ b/arch/x86/pci/amd_bus.c
13668 +@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
13669 + return NOTIFY_OK;
13670 + }
13671 +
13672 +-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
13673 ++static struct notifier_block amd_cpu_notifier = {
13674 + .notifier_call = amd_cpu_notify,
13675 + };
13676 +
13677 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
13678 -index e14a2ff..3fd6b58 100644
13679 +index 6eb18c4..20d83de 100644
13680 --- a/arch/x86/pci/mrst.c
13681 +++ b/arch/x86/pci/mrst.c
13682 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
13683 @@ -27912,10 +29029,10 @@ index e14a2ff..3fd6b58 100644
13684 /* Continue with standard init */
13685 return 1;
13686 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
13687 -index da8fe05..7ee6704 100644
13688 +index c77b24a..c979855 100644
13689 --- a/arch/x86/pci/pcbios.c
13690 +++ b/arch/x86/pci/pcbios.c
13691 -@@ -79,50 +79,93 @@ union bios32 {
13692 +@@ -79,7 +79,7 @@ union bios32 {
13693 static struct {
13694 unsigned long address;
13695 unsigned short segment;
13696 @@ -27924,13 +29041,7 @@ index da8fe05..7ee6704 100644
13697
13698 /*
13699 * Returns the entry point for the given service, NULL on error
13700 - */
13701 -
13702 --static unsigned long bios32_service(unsigned long service)
13703 -+static unsigned long __devinit bios32_service(unsigned long service)
13704 - {
13705 - unsigned char return_code; /* %al */
13706 - unsigned long address; /* %ebx */
13707 +@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
13708 unsigned long length; /* %ecx */
13709 unsigned long entry; /* %edx */
13710 unsigned long flags;
13711 @@ -28021,9 +29132,9 @@ index da8fe05..7ee6704 100644
13712 -static int pci_bios_present;
13713 +static int pci_bios_present __read_only;
13714
13715 - static int __devinit check_pcibios(void)
13716 + static int check_pcibios(void)
13717 {
13718 -@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
13719 +@@ -131,11 +174,13 @@ static int check_pcibios(void)
13720 unsigned long flags, pcibios_entry;
13721
13722 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
13723 @@ -28040,7 +29151,7 @@ index da8fe05..7ee6704 100644
13724 "jc 1f\n\t"
13725 "xor %%ah, %%ah\n"
13726 "1:"
13727 -@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
13728 +@@ -144,7 +189,8 @@ static int check_pcibios(void)
13729 "=b" (ebx),
13730 "=c" (ecx)
13731 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
13732 @@ -28458,7 +29569,7 @@ index 4c07cca..2c8427d 100644
13733 ret
13734 ENDPROC(efi_call6)
13735 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
13736 -index fd41a92..9c33628 100644
13737 +index e31bcd8..f12dc46 100644
13738 --- a/arch/x86/platform/mrst/mrst.c
13739 +++ b/arch/x86/platform/mrst/mrst.c
13740 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
13741 @@ -28493,10 +29604,10 @@ index d6ee929..3637cb5 100644
13742 .getproplen = olpc_dt_getproplen,
13743 .getproperty = olpc_dt_getproperty,
13744 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
13745 -index 218cdb1..c1178eb 100644
13746 +index 120cee1..b2db75a 100644
13747 --- a/arch/x86/power/cpu.c
13748 +++ b/arch/x86/power/cpu.c
13749 -@@ -132,7 +132,7 @@ static void do_fpu_end(void)
13750 +@@ -133,7 +133,7 @@ static void do_fpu_end(void)
13751 static void fix_processor_context(void)
13752 {
13753 int cpu = smp_processor_id();
13754 @@ -28505,7 +29616,7 @@ index 218cdb1..c1178eb 100644
13755
13756 set_tss_desc(cpu, t); /*
13757 * This just modifies memory; should not be
13758 -@@ -142,8 +142,6 @@ static void fix_processor_context(void)
13759 +@@ -143,8 +143,6 @@ static void fix_processor_context(void)
13760 */
13761
13762 #ifdef CONFIG_X86_64
13763 @@ -28619,7 +29730,7 @@ index bb360dc..3e5945f 100644
13764
13765 /*
13766 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
13767 -index 5a1847d..deccb30 100644
13768 +index 79d67bd..c7e1b90 100644
13769 --- a/arch/x86/tools/relocs.c
13770 +++ b/arch/x86/tools/relocs.c
13771 @@ -12,10 +12,13 @@
13772 @@ -28828,7 +29939,7 @@ index 5a1847d..deccb30 100644
13773 + read_relocs(fp, use_real_mode);
13774 if (show_absolute_syms) {
13775 print_absolute_symbols();
13776 - return 0;
13777 + goto out;
13778 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
13779 index fd14be1..e3c79c0 100644
13780 --- a/arch/x86/vdso/Makefile
13781 @@ -28929,7 +30040,7 @@ index 0faad64..39ef157 100644
13782 return NULL;
13783 }
13784 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
13785 -index 00aaf04..4a26505 100644
13786 +index 431e875..cbb23f3 100644
13787 --- a/arch/x86/vdso/vma.c
13788 +++ b/arch/x86/vdso/vma.c
13789 @@ -16,8 +16,6 @@
13790 @@ -28945,7 +30056,7 @@ index 00aaf04..4a26505 100644
13791 * unaligned here as a result of stack start randomization.
13792 */
13793 addr = PAGE_ALIGN(addr);
13794 -- addr = align_addr(addr, NULL, ALIGN_VDSO);
13795 +- addr = align_vdso_addr(addr);
13796
13797 return addr;
13798 }
13799 @@ -28967,7 +30078,7 @@ index 00aaf04..4a26505 100644
13800 +#endif
13801 +
13802 addr = vdso_addr(mm->start_stack, size);
13803 -+ addr = align_addr(addr, NULL, ALIGN_VDSO);
13804 ++ addr = align_vdso_addr(addr);
13805 addr = get_unmapped_area(NULL, addr, size, 0, 0);
13806 if (IS_ERR_VALUE(addr)) {
13807 ret = addr;
13808 @@ -29002,7 +30113,7 @@ index 00aaf04..4a26505 100644
13809 -}
13810 -__setup("vdso=", vdso_setup);
13811 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
13812 -index 586d838..e883209 100644
13813 +index e014092..c76ab69 100644
13814 --- a/arch/x86/xen/enlighten.c
13815 +++ b/arch/x86/xen/enlighten.c
13816 @@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
13817 @@ -29014,7 +30125,7 @@ index 586d838..e883209 100644
13818 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
13819 __read_mostly int xen_have_vector_callback;
13820 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
13821 -@@ -473,8 +471,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
13822 +@@ -495,8 +493,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
13823 {
13824 unsigned long va = dtr->address;
13825 unsigned int size = dtr->size + 1;
13826 @@ -29024,7 +30135,7 @@ index 586d838..e883209 100644
13827 int f;
13828
13829 /*
13830 -@@ -522,8 +519,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
13831 +@@ -544,8 +541,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
13832 {
13833 unsigned long va = dtr->address;
13834 unsigned int size = dtr->size + 1;
13835 @@ -29034,7 +30145,7 @@ index 586d838..e883209 100644
13836 int f;
13837
13838 /*
13839 -@@ -916,7 +912,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
13840 +@@ -938,7 +934,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
13841 return 0;
13842 }
13843
13844 @@ -29043,7 +30154,7 @@ index 586d838..e883209 100644
13845 {
13846 apic->read = xen_apic_read;
13847 apic->write = xen_apic_write;
13848 -@@ -1222,30 +1218,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
13849 +@@ -1244,30 +1240,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
13850 #endif
13851 };
13852
13853 @@ -29081,7 +30192,7 @@ index 586d838..e883209 100644
13854 {
13855 if (pm_power_off)
13856 pm_power_off();
13857 -@@ -1347,7 +1343,17 @@ asmlinkage void __init xen_start_kernel(void)
13858 +@@ -1369,7 +1365,17 @@ asmlinkage void __init xen_start_kernel(void)
13859 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
13860
13861 /* Work out if we support NX */
13862 @@ -29100,7 +30211,7 @@ index 586d838..e883209 100644
13863
13864 xen_setup_features();
13865
13866 -@@ -1376,14 +1382,7 @@ asmlinkage void __init xen_start_kernel(void)
13867 +@@ -1398,14 +1404,7 @@ asmlinkage void __init xen_start_kernel(void)
13868 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
13869 }
13870
13871 @@ -29116,8 +30227,17 @@ index 586d838..e883209 100644
13872
13873 xen_smp_init();
13874
13875 +@@ -1590,7 +1589,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
13876 + return NOTIFY_OK;
13877 + }
13878 +
13879 +-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
13880 ++static struct notifier_block xen_hvm_cpu_notifier = {
13881 + .notifier_call = xen_hvm_cpu_notify,
13882 + };
13883 +
13884 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
13885 -index dcf5f2d..d804c25 100644
13886 +index 01de35c..0bda07b 100644
13887 --- a/arch/x86/xen/mmu.c
13888 +++ b/arch/x86/xen/mmu.c
13889 @@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
13890 @@ -29160,7 +30280,7 @@ index dcf5f2d..d804c25 100644
13891 .alloc_pud = xen_alloc_pmd_init,
13892 .release_pud = xen_release_pmd_init,
13893 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
13894 -index 353c50f..8f3c179 100644
13895 +index 34bc4ce..c34aa24 100644
13896 --- a/arch/x86/xen/smp.c
13897 +++ b/arch/x86/xen/smp.c
13898 @@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
13899 @@ -29207,7 +30327,7 @@ index 353c50f..8f3c179 100644
13900 #endif
13901 xen_setup_runstate_info(cpu);
13902 xen_setup_timer(cpu);
13903 -@@ -637,7 +631,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
13904 +@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
13905
13906 void __init xen_smp_init(void)
13907 {
13908 @@ -29338,7 +30458,7 @@ index af00795..2bb8105 100644
13909 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
13910 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
13911 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
13912 -index 58916af..9cb880b 100644
13913 +index 58916af..eb9dbcf6 100644
13914 --- a/block/blk-iopoll.c
13915 +++ b/block/blk-iopoll.c
13916 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
13917 @@ -29350,6 +30470,15 @@ index 58916af..9cb880b 100644
13918 {
13919 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
13920 int rearm = 0, budget = blk_iopoll_budget;
13921 +@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
13922 + return NOTIFY_OK;
13923 + }
13924 +
13925 +-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
13926 ++static struct notifier_block blk_iopoll_cpu_notifier = {
13927 + .notifier_call = blk_iopoll_cpu_notify,
13928 + };
13929 +
13930 diff --git a/block/blk-map.c b/block/blk-map.c
13931 index 623e1cd..ca1e109 100644
13932 --- a/block/blk-map.c
13933 @@ -29364,7 +30493,7 @@ index 623e1cd..ca1e109 100644
13934 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
13935 else
13936 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
13937 -index 467c8de..4bddc6d 100644
13938 +index 467c8de..f3628c5 100644
13939 --- a/block/blk-softirq.c
13940 +++ b/block/blk-softirq.c
13941 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
13942 @@ -29376,6 +30505,15 @@ index 467c8de..4bddc6d 100644
13943 {
13944 struct list_head *cpu_list, local_list;
13945
13946 +@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
13947 + return NOTIFY_OK;
13948 + }
13949 +
13950 +-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
13951 ++static struct notifier_block blk_cpu_notifier = {
13952 + .notifier_call = blk_cpu_notify,
13953 + };
13954 +
13955 diff --git a/block/bsg.c b/block/bsg.c
13956 index ff64ae3..593560c 100644
13957 --- a/block/bsg.c
13958 @@ -29421,7 +30559,7 @@ index 7c668c8..db3521c 100644
13959 err = -EFAULT;
13960 goto out;
13961 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
13962 -index 6296b40..417c00f 100644
13963 +index b62fb88..bdab4c4 100644
13964 --- a/block/partitions/efi.c
13965 +++ b/block/partitions/efi.c
13966 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
13967 @@ -29828,36 +30966,11 @@ index 7586544..636a2f0 100644
13968 err = ec_write(*off, byte_write);
13969 if (err)
13970 return err;
13971 -diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
13972 -index 27adb09..ef98796b 100644
13973 ---- a/drivers/acpi/proc.c
13974 -+++ b/drivers/acpi/proc.c
13975 -@@ -362,16 +362,13 @@ acpi_system_write_wakeup_device(struct file *file,
13976 - struct list_head *node, *next;
13977 - char strbuf[5];
13978 - char str[5] = "";
13979 -- unsigned int len = count;
13980 -
13981 -- if (len > 4)
13982 -- len = 4;
13983 -- if (len < 0)
13984 -- return -EFAULT;
13985 -+ if (count > 4)
13986 -+ count = 4;
13987 -
13988 -- if (copy_from_user(strbuf, buffer, len))
13989 -+ if (copy_from_user(strbuf, buffer, count))
13990 - return -EFAULT;
13991 -- strbuf[len] = '\0';
13992 -+ strbuf[count] = '\0';
13993 - sscanf(strbuf, "%s", str);
13994 -
13995 - mutex_lock(&acpi_device_lock);
13996 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
13997 -index bd4e5dc..0497b66 100644
13998 +index e83311b..142b5cc 100644
13999 --- a/drivers/acpi/processor_driver.c
14000 +++ b/drivers/acpi/processor_driver.c
14001 -@@ -552,7 +552,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
14002 +@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
14003 return 0;
14004 #endif
14005
14006 @@ -29867,10 +30980,10 @@ index bd4e5dc..0497b66 100644
14007 /*
14008 * Buggy BIOS check
14009 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
14010 -index c8ac4fe..631818e 100644
14011 +index 46cd3f4..0871ad0 100644
14012 --- a/drivers/ata/libata-core.c
14013 +++ b/drivers/ata/libata-core.c
14014 -@@ -4779,7 +4779,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
14015 +@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
14016 struct ata_port *ap;
14017 unsigned int tag;
14018
14019 @@ -29879,7 +30992,7 @@ index c8ac4fe..631818e 100644
14020 ap = qc->ap;
14021
14022 qc->flags = 0;
14023 -@@ -4795,7 +4795,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
14024 +@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
14025 struct ata_port *ap;
14026 struct ata_link *link;
14027
14028 @@ -29888,7 +31001,7 @@ index c8ac4fe..631818e 100644
14029 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
14030 ap = qc->ap;
14031 link = qc->dev->link;
14032 -@@ -5891,6 +5891,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
14033 +@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
14034 return;
14035
14036 spin_lock(&lock);
14037 @@ -29896,7 +31009,7 @@ index c8ac4fe..631818e 100644
14038
14039 for (cur = ops->inherits; cur; cur = cur->inherits) {
14040 void **inherit = (void **)cur;
14041 -@@ -5904,8 +5905,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
14042 +@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
14043 if (IS_ERR(*pp))
14044 *pp = NULL;
14045
14046 @@ -29908,10 +31021,10 @@ index c8ac4fe..631818e 100644
14047 }
14048
14049 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
14050 -index 371fd2c..0836c78 100644
14051 +index 405022d..fb70e53 100644
14052 --- a/drivers/ata/pata_arasan_cf.c
14053 +++ b/drivers/ata/pata_arasan_cf.c
14054 -@@ -861,7 +861,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
14055 +@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
14056 /* Handle platform specific quirks */
14057 if (pdata->quirk) {
14058 if (pdata->quirk & CF_BROKEN_PIO) {
14059 @@ -29936,7 +31049,7 @@ index f9b983a..887b9d8 100644
14060 return 0;
14061 }
14062 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
14063 -index ff7bb8a..568fc0b 100644
14064 +index 77a7480..05cde58 100644
14065 --- a/drivers/atm/ambassador.c
14066 +++ b/drivers/atm/ambassador.c
14067 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
14068 @@ -30029,7 +31142,7 @@ index b22d71c..d6e1049 100644
14069 if (vcc->pop) vcc->pop(vcc,skb);
14070 else dev_kfree_skb(skb);
14071 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
14072 -index 81e44f7..498ea36 100644
14073 +index c1eb6fa..4c71be9 100644
14074 --- a/drivers/atm/eni.c
14075 +++ b/drivers/atm/eni.c
14076 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
14077 @@ -30078,7 +31191,7 @@ index 81e44f7..498ea36 100644
14078 dma_complete++;
14079 }
14080 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
14081 -index 86fed1b..6dc4721 100644
14082 +index b41c948..a002b17 100644
14083 --- a/drivers/atm/firestream.c
14084 +++ b/drivers/atm/firestream.c
14085 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
14086 @@ -30115,10 +31228,10 @@ index 86fed1b..6dc4721 100644
14087 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
14088 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
14089 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
14090 -index 361f5ae..7fc552d 100644
14091 +index 204814e..cede831 100644
14092 --- a/drivers/atm/fore200e.c
14093 +++ b/drivers/atm/fore200e.c
14094 -@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
14095 +@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
14096 #endif
14097 /* check error condition */
14098 if (*entry->status & STATUS_ERROR)
14099 @@ -30130,7 +31243,7 @@ index 361f5ae..7fc552d 100644
14100 }
14101 }
14102
14103 -@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
14104 +@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
14105 if (skb == NULL) {
14106 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
14107
14108 @@ -30139,7 +31252,7 @@ index 361f5ae..7fc552d 100644
14109 return -ENOMEM;
14110 }
14111
14112 -@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
14113 +@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
14114
14115 dev_kfree_skb_any(skb);
14116
14117 @@ -30156,7 +31269,7 @@ index 361f5ae..7fc552d 100644
14118
14119 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
14120
14121 -@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
14122 +@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
14123 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
14124 fore200e->atm_dev->number,
14125 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
14126 @@ -30165,7 +31278,7 @@ index 361f5ae..7fc552d 100644
14127 }
14128 }
14129
14130 -@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
14131 +@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
14132 goto retry_here;
14133 }
14134
14135 @@ -30175,10 +31288,10 @@ index 361f5ae..7fc552d 100644
14136 fore200e->tx_sat++;
14137 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
14138 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
14139 -index b182c2f..1c6fa8a 100644
14140 +index 72b6960..cf9167a 100644
14141 --- a/drivers/atm/he.c
14142 +++ b/drivers/atm/he.c
14143 -@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14144 +@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14145
14146 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
14147 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
14148 @@ -30187,7 +31300,7 @@ index b182c2f..1c6fa8a 100644
14149 goto return_host_buffers;
14150 }
14151
14152 -@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14153 +@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14154 RBRQ_LEN_ERR(he_dev->rbrq_head)
14155 ? "LEN_ERR" : "",
14156 vcc->vpi, vcc->vci);
14157 @@ -30196,7 +31309,7 @@ index b182c2f..1c6fa8a 100644
14158 goto return_host_buffers;
14159 }
14160
14161 -@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14162 +@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
14163 vcc->push(vcc, skb);
14164 spin_lock(&he_dev->global_lock);
14165
14166 @@ -30205,7 +31318,7 @@ index b182c2f..1c6fa8a 100644
14167
14168 return_host_buffers:
14169 ++pdus_assembled;
14170 -@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
14171 +@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
14172 tpd->vcc->pop(tpd->vcc, tpd->skb);
14173 else
14174 dev_kfree_skb_any(tpd->skb);
14175 @@ -30214,7 +31327,7 @@ index b182c2f..1c6fa8a 100644
14176 }
14177 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
14178 return;
14179 -@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14180 +@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14181 vcc->pop(vcc, skb);
14182 else
14183 dev_kfree_skb_any(skb);
14184 @@ -30223,7 +31336,7 @@ index b182c2f..1c6fa8a 100644
14185 return -EINVAL;
14186 }
14187
14188 -@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14189 +@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14190 vcc->pop(vcc, skb);
14191 else
14192 dev_kfree_skb_any(skb);
14193 @@ -30232,7 +31345,7 @@ index b182c2f..1c6fa8a 100644
14194 return -EINVAL;
14195 }
14196 #endif
14197 -@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14198 +@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14199 vcc->pop(vcc, skb);
14200 else
14201 dev_kfree_skb_any(skb);
14202 @@ -30241,7 +31354,7 @@ index b182c2f..1c6fa8a 100644
14203 spin_unlock_irqrestore(&he_dev->global_lock, flags);
14204 return -ENOMEM;
14205 }
14206 -@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14207 +@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14208 vcc->pop(vcc, skb);
14209 else
14210 dev_kfree_skb_any(skb);
14211 @@ -30250,7 +31363,7 @@ index b182c2f..1c6fa8a 100644
14212 spin_unlock_irqrestore(&he_dev->global_lock, flags);
14213 return -ENOMEM;
14214 }
14215 -@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14216 +@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
14217 __enqueue_tpd(he_dev, tpd, cid);
14218 spin_unlock_irqrestore(&he_dev->global_lock, flags);
14219
14220 @@ -30260,7 +31373,7 @@ index b182c2f..1c6fa8a 100644
14221 return 0;
14222 }
14223 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
14224 -index 7d01c2a..4e3ac01 100644
14225 +index 1dc0519..1aadaf7 100644
14226 --- a/drivers/atm/horizon.c
14227 +++ b/drivers/atm/horizon.c
14228 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
14229 @@ -30282,7 +31395,7 @@ index 7d01c2a..4e3ac01 100644
14230 // free the skb
14231 hrz_kfree_skb (skb);
14232 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
14233 -index 8974bd2..b856f85 100644
14234 +index 272f009..a18ba55 100644
14235 --- a/drivers/atm/idt77252.c
14236 +++ b/drivers/atm/idt77252.c
14237 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
14238 @@ -30440,7 +31553,7 @@ index 8974bd2..b856f85 100644
14239 }
14240 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
14241 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
14242 -index 96cce6d..62c3ec5 100644
14243 +index 4217f29..88f547a 100644
14244 --- a/drivers/atm/iphase.c
14245 +++ b/drivers/atm/iphase.c
14246 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
14247 @@ -30540,7 +31653,7 @@ index 96cce6d..62c3ec5 100644
14248 vcc->tx_quota = vcc->tx_quota * 3 / 4;
14249 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
14250 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
14251 -index 68c7588..7036683 100644
14252 +index fa7d701..1e404c7 100644
14253 --- a/drivers/atm/lanai.c
14254 +++ b/drivers/atm/lanai.c
14255 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
14256 @@ -30598,7 +31711,7 @@ index 68c7588..7036683 100644
14257 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
14258 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
14259 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
14260 -index 1c70c45..300718d 100644
14261 +index ed1d2b7..8cffc1f 100644
14262 --- a/drivers/atm/nicstar.c
14263 +++ b/drivers/atm/nicstar.c
14264 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
14265 @@ -30803,10 +31916,10 @@ index 1c70c45..300718d 100644
14266 }
14267
14268 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
14269 -index 1853a45..cf2426d 100644
14270 +index 0474a89..06ea4a1 100644
14271 --- a/drivers/atm/solos-pci.c
14272 +++ b/drivers/atm/solos-pci.c
14273 -@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
14274 +@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
14275 }
14276 atm_charge(vcc, skb->truesize);
14277 vcc->push(vcc, skb);
14278 @@ -30815,14 +31928,14 @@ index 1853a45..cf2426d 100644
14279 break;
14280
14281 case PKT_STATUS:
14282 -@@ -1010,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
14283 +@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
14284 vcc = SKB_CB(oldskb)->vcc;
14285
14286 if (vcc) {
14287 - atomic_inc(&vcc->stats->tx);
14288 + atomic_inc_unchecked(&vcc->stats->tx);
14289 solos_pop(vcc, oldskb);
14290 - } else
14291 + } else {
14292 dev_kfree_skb_irq(oldskb);
14293 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
14294 index 0215934..ce9f5b1 100644
14295 @@ -30888,7 +32001,7 @@ index 5120a96..e2572bd 100644
14296 }
14297
14298 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
14299 -index abe4e20..83c4727 100644
14300 +index 969c3c2..9b72956 100644
14301 --- a/drivers/atm/zatm.c
14302 +++ b/drivers/atm/zatm.c
14303 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
14304 @@ -30919,7 +32032,7 @@ index abe4e20..83c4727 100644
14305 }
14306
14307 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
14308 -index 147d1a4..d0fd4b0 100644
14309 +index 17cf7ca..7e553e1 100644
14310 --- a/drivers/base/devtmpfs.c
14311 +++ b/drivers/base/devtmpfs.c
14312 @@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
14313 @@ -30971,10 +32084,10 @@ index e6ee5e8..98ad7fc 100644
14314
14315 split_counters(&cnt, &inpr);
14316 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
14317 -index ca83f96..69d4ea9 100644
14318 +index ade58bc..867143d 100644
14319 --- a/drivers/block/cciss.c
14320 +++ b/drivers/block/cciss.c
14321 -@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
14322 +@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
14323 int err;
14324 u32 cp;
14325
14326 @@ -30983,7 +32096,7 @@ index ca83f96..69d4ea9 100644
14327 err = 0;
14328 err |=
14329 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
14330 -@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
14331 +@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
14332 while (!list_empty(&h->reqQ)) {
14333 c = list_entry(h->reqQ.next, CommandList_struct, list);
14334 /* can't do anything if fifo is full */
14335 @@ -30992,7 +32105,7 @@ index ca83f96..69d4ea9 100644
14336 dev_warn(&h->pdev->dev, "fifo full\n");
14337 break;
14338 }
14339 -@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
14340 +@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
14341 h->Qdepth--;
14342
14343 /* Tell the controller execute command */
14344 @@ -31001,7 +32114,7 @@ index ca83f96..69d4ea9 100644
14345
14346 /* Put job onto the completed Q */
14347 addQ(&h->cmpQ, c);
14348 -@@ -3443,17 +3445,17 @@ startio:
14349 +@@ -3441,17 +3443,17 @@ startio:
14350
14351 static inline unsigned long get_next_completion(ctlr_info_t *h)
14352 {
14353 @@ -31022,7 +32135,7 @@ index ca83f96..69d4ea9 100644
14354 (h->interrupts_enabled == 0));
14355 }
14356
14357 -@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
14358 +@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
14359 u32 a;
14360
14361 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
14362 @@ -31031,7 +32144,7 @@ index ca83f96..69d4ea9 100644
14363
14364 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
14365 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
14366 -@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
14367 +@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
14368 trans_support & CFGTBL_Trans_use_short_tags);
14369
14370 /* Change the access methods to the performant access methods */
14371 @@ -31040,7 +32153,7 @@ index ca83f96..69d4ea9 100644
14372 h->transMethod = CFGTBL_Trans_Performant;
14373
14374 return;
14375 -@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
14376 +@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
14377 if (prod_index < 0)
14378 return -ENODEV;
14379 h->product_name = products[prod_index].product_name;
14380 @@ -31049,7 +32162,7 @@ index ca83f96..69d4ea9 100644
14381
14382 if (cciss_board_disabled(h)) {
14383 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
14384 -@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
14385 +@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
14386 }
14387
14388 /* make sure the board interrupts are off */
14389 @@ -31058,7 +32171,7 @@ index ca83f96..69d4ea9 100644
14390 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
14391 if (rc)
14392 goto clean2;
14393 -@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
14394 +@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
14395 * fake ones to scoop up any residual completions.
14396 */
14397 spin_lock_irqsave(&h->lock, flags);
14398 @@ -31067,7 +32180,7 @@ index ca83f96..69d4ea9 100644
14399 spin_unlock_irqrestore(&h->lock, flags);
14400 free_irq(h->intr[h->intr_mode], h);
14401 rc = cciss_request_irq(h, cciss_msix_discard_completions,
14402 -@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
14403 +@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
14404 dev_info(&h->pdev->dev, "Board READY.\n");
14405 dev_info(&h->pdev->dev,
14406 "Waiting for stale completions to drain.\n");
14407 @@ -31079,7 +32192,7 @@ index ca83f96..69d4ea9 100644
14408
14409 rc = controller_reset_failed(h->cfgtable);
14410 if (rc)
14411 -@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
14412 +@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
14413 cciss_scsi_setup(h);
14414
14415 /* Turn the interrupts on so we can service requests */
14416 @@ -31088,7 +32201,7 @@ index ca83f96..69d4ea9 100644
14417
14418 /* Get the firmware version */
14419 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
14420 -@@ -5210,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
14421 +@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
14422 kfree(flush_buf);
14423 if (return_code != IO_OK)
14424 dev_warn(&h->pdev->dev, "Error flushing cache\n");
14425 @@ -31111,10 +32224,10 @@ index 7fda30e..eb5dfe0 100644
14426 /* queue and queue Info */
14427 struct list_head reqQ;
14428 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
14429 -index 9125bbe..eede5c8 100644
14430 +index 3f08713..56a586a 100644
14431 --- a/drivers/block/cpqarray.c
14432 +++ b/drivers/block/cpqarray.c
14433 -@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
14434 +@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
14435 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
14436 goto Enomem4;
14437 }
14438 @@ -31123,7 +32236,7 @@ index 9125bbe..eede5c8 100644
14439 if (request_irq(hba[i]->intr, do_ida_intr,
14440 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
14441 {
14442 -@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
14443 +@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
14444 add_timer(&hba[i]->timer);
14445
14446 /* Enable IRQ now that spinlock and rate limit timer are set up */
14447 @@ -31141,7 +32254,7 @@ index 9125bbe..eede5c8 100644
14448 break;
14449 }
14450 }
14451 -@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
14452 +@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
14453 hba[ctlr]->intr = intr;
14454 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
14455 hba[ctlr]->product_name = products[j].product_name;
14456 @@ -31244,11 +32357,11 @@ index be73e9d..7fbf140 100644
14457 cmdlist_t *reqQ;
14458 cmdlist_t *cmpQ;
14459 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
14460 -index b953cc7..e3dc580 100644
14461 +index 6b51afa..17e1191 100644
14462 --- a/drivers/block/drbd/drbd_int.h
14463 +++ b/drivers/block/drbd/drbd_int.h
14464 -@@ -735,7 +735,7 @@ struct drbd_request;
14465 - struct drbd_epoch {
14466 +@@ -582,7 +582,7 @@ struct drbd_epoch {
14467 + struct drbd_tconn *tconn;
14468 struct list_head list;
14469 unsigned int barrier_nr;
14470 - atomic_t epoch_size; /* increased on every request added. */
14471 @@ -31256,159 +32369,71 @@ index b953cc7..e3dc580 100644
14472 atomic_t active; /* increased on every req. added, and dec on every finished. */
14473 unsigned long flags;
14474 };
14475 -@@ -1116,7 +1116,7 @@ struct drbd_conf {
14476 - void *int_dig_in;
14477 - void *int_dig_vv;
14478 +@@ -1011,7 +1011,7 @@ struct drbd_conf {
14479 + int al_tr_cycle;
14480 + int al_tr_pos; /* position of the next transaction in the journal */
14481 wait_queue_head_t seq_wait;
14482 - atomic_t packet_seq;
14483 + atomic_unchecked_t packet_seq;
14484 unsigned int peer_seq;
14485 spinlock_t peer_seq_lock;
14486 unsigned int minor;
14487 -@@ -1658,30 +1658,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
14488 -
14489 - static inline void drbd_tcp_cork(struct socket *sock)
14490 - {
14491 -- int __user val = 1;
14492 -+ int val = 1;
14493 - (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
14494 -- (char __user *)&val, sizeof(val));
14495 -+ (char __force_user *)&val, sizeof(val));
14496 - }
14497 -
14498 - static inline void drbd_tcp_uncork(struct socket *sock)
14499 - {
14500 -- int __user val = 0;
14501 -+ int val = 0;
14502 - (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
14503 -- (char __user *)&val, sizeof(val));
14504 -+ (char __force_user *)&val, sizeof(val));
14505 - }
14506 -
14507 - static inline void drbd_tcp_nodelay(struct socket *sock)
14508 - {
14509 -- int __user val = 1;
14510 -+ int val = 1;
14511 - (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
14512 -- (char __user *)&val, sizeof(val));
14513 -+ (char __force_user *)&val, sizeof(val));
14514 - }
14515 +@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
14516 + char __user *uoptval;
14517 + int err;
14518
14519 - static inline void drbd_tcp_quickack(struct socket *sock)
14520 - {
14521 -- int __user val = 2;
14522 -+ int val = 2;
14523 - (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
14524 -- (char __user *)&val, sizeof(val));
14525 -+ (char __force_user *)&val, sizeof(val));
14526 - }
14527 +- uoptval = (char __user __force *)optval;
14528 ++ uoptval = (char __force_user *)optval;
14529
14530 - void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
14531 + set_fs(KERNEL_DS);
14532 + if (level == SOL_SOCKET)
14533 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
14534 -index f55683a..2101b96 100644
14535 +index 8c13eeb..217adee 100644
14536 --- a/drivers/block/drbd/drbd_main.c
14537 +++ b/drivers/block/drbd/drbd_main.c
14538 -@@ -2556,7 +2556,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
14539 - p.sector = sector;
14540 - p.block_id = block_id;
14541 - p.blksize = blksize;
14542 -- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
14543 -+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
14544 -
14545 - if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
14546 - return false;
14547 -@@ -2854,7 +2854,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
14548 -
14549 - p.sector = cpu_to_be64(req->sector);
14550 - p.block_id = (unsigned long)req;
14551 -- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
14552 -+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
14553 +@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
14554 + p->sector = sector;
14555 + p->block_id = block_id;
14556 + p->blksize = blksize;
14557 +- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
14558 ++ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
14559 + return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
14560 + }
14561
14562 +@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
14563 + return -EIO;
14564 + p->sector = cpu_to_be64(req->i.sector);
14565 + p->block_id = (unsigned long)req;
14566 +- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
14567 ++ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
14568 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
14569 + if (mdev->state.conn >= C_SYNC_SOURCE &&
14570 + mdev->state.conn <= C_PAUSED_SYNC_T)
14571 +@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
14572 + {
14573 + struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
14574
14575 -@@ -3139,7 +3139,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
14576 - atomic_set(&mdev->unacked_cnt, 0);
14577 - atomic_set(&mdev->local_cnt, 0);
14578 - atomic_set(&mdev->net_cnt, 0);
14579 -- atomic_set(&mdev->packet_seq, 0);
14580 -+ atomic_set_unchecked(&mdev->packet_seq, 0);
14581 - atomic_set(&mdev->pp_in_use, 0);
14582 - atomic_set(&mdev->pp_in_use_by_net, 0);
14583 - atomic_set(&mdev->rs_sect_in, 0);
14584 -@@ -3221,8 +3221,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
14585 - mdev->receiver.t_state);
14586 -
14587 - /* no need to lock it, I'm the only thread alive */
14588 -- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
14589 -- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
14590 -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
14591 -+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
14592 - mdev->al_writ_cnt =
14593 - mdev->bm_writ_cnt =
14594 - mdev->read_cnt =
14595 -diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
14596 -index edb490a..ecd69da 100644
14597 ---- a/drivers/block/drbd/drbd_nl.c
14598 -+++ b/drivers/block/drbd/drbd_nl.c
14599 -@@ -2407,7 +2407,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
14600 - module_put(THIS_MODULE);
14601 - }
14602 -
14603 --static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
14604 -+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
14605 -
14606 - static unsigned short *
14607 - __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
14608 -@@ -2478,7 +2478,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
14609 - cn_reply->id.idx = CN_IDX_DRBD;
14610 - cn_reply->id.val = CN_VAL_DRBD;
14611 -
14612 -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
14613 -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
14614 - cn_reply->ack = 0; /* not used here. */
14615 - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
14616 - (int)((char *)tl - (char *)reply->tag_list);
14617 -@@ -2510,7 +2510,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
14618 - cn_reply->id.idx = CN_IDX_DRBD;
14619 - cn_reply->id.val = CN_VAL_DRBD;
14620 -
14621 -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
14622 -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
14623 - cn_reply->ack = 0; /* not used here. */
14624 - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
14625 - (int)((char *)tl - (char *)reply->tag_list);
14626 -@@ -2588,7 +2588,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
14627 - cn_reply->id.idx = CN_IDX_DRBD;
14628 - cn_reply->id.val = CN_VAL_DRBD;
14629 -
14630 -- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
14631 -+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
14632 - cn_reply->ack = 0; // not used here.
14633 - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
14634 - (int)((char*)tl - (char*)reply->tag_list);
14635 -@@ -2627,7 +2627,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
14636 - cn_reply->id.idx = CN_IDX_DRBD;
14637 - cn_reply->id.val = CN_VAL_DRBD;
14638 -
14639 -- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
14640 -+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
14641 - cn_reply->ack = 0; /* not used here. */
14642 - cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
14643 - (int)((char *)tl - (char *)reply->tag_list);
14644 +- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
14645 +- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
14646 ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
14647 ++ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
14648 + kfree(tconn->current_epoch);
14649 +
14650 + idr_destroy(&tconn->volumes);
14651 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
14652 -index c74ca2d..860c819 100644
14653 +index a9eccfc..68e4533 100644
14654 --- a/drivers/block/drbd/drbd_receiver.c
14655 +++ b/drivers/block/drbd/drbd_receiver.c
14656 -@@ -898,7 +898,7 @@ retry:
14657 - sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
14658 - sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
14659 +@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
14660 + {
14661 + int err;
14662
14663 - atomic_set(&mdev->packet_seq, 0);
14664 + atomic_set_unchecked(&mdev->packet_seq, 0);
14665 mdev->peer_seq = 0;
14666
14667 - if (drbd_send_protocol(mdev) == -1)
14668 -@@ -999,7 +999,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
14669 + mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
14670 +@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
14671 do {
14672 next_epoch = NULL;
14673
14674 @@ -31417,7 +32442,7 @@ index c74ca2d..860c819 100644
14675
14676 switch (ev & ~EV_CLEANUP) {
14677 case EV_PUT:
14678 -@@ -1035,7 +1035,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
14679 +@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
14680 rv = FE_DESTROYED;
14681 } else {
14682 epoch->flags = 0;
14683 @@ -31426,24 +32451,16 @@ index c74ca2d..860c819 100644
14684 /* atomic_set(&epoch->active, 0); is already zero */
14685 if (rv == FE_STILL_LIVE)
14686 rv = FE_RECYCLED;
14687 -@@ -1210,14 +1210,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
14688 - drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
14689 - drbd_flush(mdev);
14690 +@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
14691 + conn_wait_active_ee_empty(tconn);
14692 + drbd_flush(tconn);
14693
14694 -- if (atomic_read(&mdev->current_epoch->epoch_size)) {
14695 -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
14696 +- if (atomic_read(&tconn->current_epoch->epoch_size)) {
14697 ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
14698 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
14699 if (epoch)
14700 break;
14701 - }
14702 -
14703 - epoch = mdev->current_epoch;
14704 -- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
14705 -+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
14706 -
14707 - D_ASSERT(atomic_read(&epoch->active) == 0);
14708 - D_ASSERT(epoch->flags == 0);
14709 -@@ -1229,11 +1229,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
14710 +@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
14711 }
14712
14713 epoch->flags = 0;
14714 @@ -31451,41 +32468,41 @@ index c74ca2d..860c819 100644
14715 + atomic_set_unchecked(&epoch->epoch_size, 0);
14716 atomic_set(&epoch->active, 0);
14717
14718 - spin_lock(&mdev->epoch_lock);
14719 -- if (atomic_read(&mdev->current_epoch->epoch_size)) {
14720 -+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
14721 - list_add(&epoch->list, &mdev->current_epoch->list);
14722 - mdev->current_epoch = epoch;
14723 - mdev->epochs++;
14724 -@@ -1702,7 +1702,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
14725 - spin_unlock(&mdev->peer_seq_lock);
14726 -
14727 - drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
14728 -- atomic_inc(&mdev->current_epoch->epoch_size);
14729 -+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
14730 - return drbd_drain_block(mdev, data_size);
14731 - }
14732 -
14733 -@@ -1732,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
14734 -
14735 - spin_lock(&mdev->epoch_lock);
14736 - e->epoch = mdev->current_epoch;
14737 -- atomic_inc(&e->epoch->epoch_size);
14738 -+ atomic_inc_unchecked(&e->epoch->epoch_size);
14739 - atomic_inc(&e->epoch->active);
14740 - spin_unlock(&mdev->epoch_lock);
14741 -
14742 -@@ -3954,7 +3954,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
14743 - D_ASSERT(list_empty(&mdev->done_ee));
14744 -
14745 + spin_lock(&tconn->epoch_lock);
14746 +- if (atomic_read(&tconn->current_epoch->epoch_size)) {
14747 ++ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
14748 + list_add(&epoch->list, &tconn->current_epoch->list);
14749 + tconn->current_epoch = epoch;
14750 + tconn->epochs++;
14751 +@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
14752 +
14753 + err = wait_for_and_update_peer_seq(mdev, peer_seq);
14754 + drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
14755 +- atomic_inc(&tconn->current_epoch->epoch_size);
14756 ++ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
14757 + err2 = drbd_drain_block(mdev, pi->size);
14758 + if (!err)
14759 + err = err2;
14760 +@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
14761 +
14762 + spin_lock(&tconn->epoch_lock);
14763 + peer_req->epoch = tconn->current_epoch;
14764 +- atomic_inc(&peer_req->epoch->epoch_size);
14765 ++ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
14766 + atomic_inc(&peer_req->epoch->active);
14767 + spin_unlock(&tconn->epoch_lock);
14768 +
14769 +@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
14770 + if (!list_empty(&tconn->current_epoch->list))
14771 + conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
14772 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
14773 -- atomic_set(&mdev->current_epoch->epoch_size, 0);
14774 -+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
14775 - D_ASSERT(list_empty(&mdev->current_epoch->list));
14776 - }
14777 +- atomic_set(&tconn->current_epoch->epoch_size, 0);
14778 ++ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
14779 + tconn->send.seen_any_write_yet = false;
14780
14781 + conn_info(tconn, "Connection closed\n");
14782 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
14783 -index 54046e5..7759c55 100644
14784 +index ae12512..37fa397 100644
14785 --- a/drivers/block/loop.c
14786 +++ b/drivers/block/loop.c
14787 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
14788 @@ -31532,7 +32549,7 @@ index d620b44..587561e 100644
14789 }
14790
14791 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
14792 -index 75d485a..2809958 100644
14793 +index d59cdcb..11afddf 100644
14794 --- a/drivers/cdrom/gdrom.c
14795 +++ b/drivers/cdrom/gdrom.c
14796 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
14797 @@ -31591,7 +32608,7 @@ index 21cb980..f15107c 100644
14798 return -EINVAL;
14799 else
14800 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
14801 -index dfd7876..c0b0885 100644
14802 +index fe6d4be..89f32100 100644
14803 --- a/drivers/char/hpet.c
14804 +++ b/drivers/char/hpet.c
14805 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
14806 @@ -31604,7 +32621,7 @@ index dfd7876..c0b0885 100644
14807 {
14808 struct hpet_timer __iomem *timer;
14809 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
14810 -index a0c84bb..9edcf60 100644
14811 +index 053201b0..8335cce 100644
14812 --- a/drivers/char/ipmi/ipmi_msghandler.c
14813 +++ b/drivers/char/ipmi/ipmi_msghandler.c
14814 @@ -420,7 +420,7 @@ struct ipmi_smi {
14815 @@ -31638,7 +32655,7 @@ index a0c84bb..9edcf60 100644
14816 intf->proc_dir = NULL;
14817
14818 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
14819 -index 32a6c7e..f6966a9 100644
14820 +index 1c7fdcd..4899100 100644
14821 --- a/drivers/char/ipmi/ipmi_si_intf.c
14822 +++ b/drivers/char/ipmi/ipmi_si_intf.c
14823 @@ -275,7 +275,7 @@ struct smi_info {
14824 @@ -31672,7 +32689,7 @@ index 32a6c7e..f6966a9 100644
14825 new_smi->interrupt_disabled = 1;
14826 atomic_set(&new_smi->stop_operation, 0);
14827 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
14828 -index 0537903..121c699 100644
14829 +index c6fa3bc..4ca3e42 100644
14830 --- a/drivers/char/mem.c
14831 +++ b/drivers/char/mem.c
14832 @@ -18,6 +18,7 @@
14833 @@ -31821,10 +32838,10 @@ index 9df78e2..01ba9ae 100644
14834
14835 *ppos = i;
14836 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
14837 -index 21721d2..4e98777 100644
14838 +index b66eaa0..2619d1b 100644
14839 --- a/drivers/char/pcmcia/synclink_cs.c
14840 +++ b/drivers/char/pcmcia/synclink_cs.c
14841 -@@ -2346,9 +2346,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
14842 +@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
14843
14844 if (debug_level >= DEBUG_LEVEL_INFO)
14845 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
14846 @@ -31836,7 +32853,7 @@ index 21721d2..4e98777 100644
14847
14848 if (tty_port_close_start(port, tty, filp) == 0)
14849 goto cleanup;
14850 -@@ -2366,7 +2366,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
14851 +@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
14852 cleanup:
14853 if (debug_level >= DEBUG_LEVEL_INFO)
14854 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
14855 @@ -31845,7 +32862,7 @@ index 21721d2..4e98777 100644
14856 }
14857
14858 /* Wait until the transmitter is empty.
14859 -@@ -2508,7 +2508,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
14860 +@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
14861
14862 if (debug_level >= DEBUG_LEVEL_INFO)
14863 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
14864 @@ -31854,7 +32871,7 @@ index 21721d2..4e98777 100644
14865
14866 /* If port is closing, signal caller to try again */
14867 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
14868 -@@ -2528,11 +2528,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
14869 +@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
14870 goto cleanup;
14871 }
14872 spin_lock(&port->lock);
14873 @@ -31868,7 +32885,7 @@ index 21721d2..4e98777 100644
14874 /* 1st open on this device, init hardware */
14875 retval = startup(info, tty);
14876 if (retval < 0)
14877 -@@ -3886,7 +3886,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
14878 +@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
14879 unsigned short new_crctype;
14880
14881 /* return error if TTY interface open */
14882 @@ -31877,7 +32894,7 @@ index 21721d2..4e98777 100644
14883 return -EBUSY;
14884
14885 switch (encoding)
14886 -@@ -3989,7 +3989,7 @@ static int hdlcdev_open(struct net_device *dev)
14887 +@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
14888
14889 /* arbitrate between network and tty opens */
14890 spin_lock_irqsave(&info->netlock, flags);
14891 @@ -31886,7 +32903,7 @@ index 21721d2..4e98777 100644
14892 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
14893 spin_unlock_irqrestore(&info->netlock, flags);
14894 return -EBUSY;
14895 -@@ -4078,7 +4078,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14896 +@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14897 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
14898
14899 /* return error if TTY interface open */
14900 @@ -31896,7 +32913,7 @@ index 21721d2..4e98777 100644
14901
14902 if (cmd != SIOCWANDEV)
14903 diff --git a/drivers/char/random.c b/drivers/char/random.c
14904 -index b86eae9..b9c2ed7 100644
14905 +index 85e81ec..bce8b97 100644
14906 --- a/drivers/char/random.c
14907 +++ b/drivers/char/random.c
14908 @@ -272,8 +272,13 @@
14909 @@ -31931,15 +32948,7 @@ index b86eae9..b9c2ed7 100644
14910 #if 0
14911 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
14912 { 2048, 1638, 1231, 819, 411, 1 },
14913 -@@ -437,6 +449,7 @@ struct entropy_store {
14914 - int entropy_count;
14915 - int entropy_total;
14916 - unsigned int initialized:1;
14917 -+ bool last_data_init;
14918 - __u8 last_data[EXTRACT_SIZE];
14919 - };
14920 -
14921 -@@ -527,8 +540,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
14922 +@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
14923 input_rotate += i ? 7 : 14;
14924 }
14925
14926 @@ -31950,36 +32959,7 @@ index b86eae9..b9c2ed7 100644
14927 smp_wmb();
14928
14929 if (out)
14930 -@@ -957,6 +970,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
14931 - ssize_t ret = 0, i;
14932 - __u8 tmp[EXTRACT_SIZE];
14933 -
14934 -+ /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
14935 -+ if (fips_enabled && !r->last_data_init)
14936 -+ nbytes += EXTRACT_SIZE;
14937 -+
14938 - trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
14939 - xfer_secondary_pool(r, nbytes);
14940 - nbytes = account(r, nbytes, min, reserved);
14941 -@@ -967,6 +984,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
14942 - if (fips_enabled) {
14943 - unsigned long flags;
14944 -
14945 -+
14946 -+ /* prime last_data value if need be, per fips 140-2 */
14947 -+ if (!r->last_data_init) {
14948 -+ spin_lock_irqsave(&r->lock, flags);
14949 -+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
14950 -+ r->last_data_init = true;
14951 -+ nbytes -= EXTRACT_SIZE;
14952 -+ spin_unlock_irqrestore(&r->lock, flags);
14953 -+ extract_buf(r, tmp);
14954 -+ }
14955 -+
14956 - spin_lock_irqsave(&r->lock, flags);
14957 - if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
14958 - panic("Hardware RNG duplicated output!\n");
14959 -@@ -1008,7 +1036,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
14960 +@@ -1020,7 +1032,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
14961
14962 extract_buf(r, tmp);
14963 i = min_t(int, nbytes, EXTRACT_SIZE);
14964 @@ -31988,15 +32968,7 @@ index b86eae9..b9c2ed7 100644
14965 ret = -EFAULT;
14966 break;
14967 }
14968 -@@ -1086,6 +1114,7 @@ static void init_std_data(struct entropy_store *r)
14969 -
14970 - r->entropy_count = 0;
14971 - r->entropy_total = 0;
14972 -+ r->last_data_init = false;
14973 - mix_pool_bytes(r, &now, sizeof(now), NULL);
14974 - for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
14975 - if (!arch_get_random_long(&rv))
14976 -@@ -1342,7 +1371,7 @@ EXPORT_SYMBOL(generate_random_uuid);
14977 +@@ -1356,7 +1368,7 @@ EXPORT_SYMBOL(generate_random_uuid);
14978 #include <linux/sysctl.h>
14979
14980 static int min_read_thresh = 8, min_write_thresh;
14981 @@ -32006,7 +32978,7 @@ index b86eae9..b9c2ed7 100644
14982 static char sysctl_bootid[16];
14983
14984 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
14985 -index 9b4f011..b7e0a1a 100644
14986 +index d780295..b29f3a8 100644
14987 --- a/drivers/char/sonypi.c
14988 +++ b/drivers/char/sonypi.c
14989 @@ -54,6 +54,7 @@
14990 @@ -32111,10 +33083,10 @@ index 84ddc55..1d32f1e 100644
14991 return 0;
14992 }
14993 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
14994 -index 088c8fd..774c5a5 100644
14995 +index ee4dbea..69c817b 100644
14996 --- a/drivers/char/virtio_console.c
14997 +++ b/drivers/char/virtio_console.c
14998 -@@ -622,7 +622,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
14999 +@@ -681,7 +681,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
15000 if (to_user) {
15001 ssize_t ret;
15002
15003 @@ -32123,7 +33095,7 @@ index 088c8fd..774c5a5 100644
15004 if (ret)
15005 return -EFAULT;
15006 } else {
15007 -@@ -721,7 +721,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
15008 +@@ -780,7 +780,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
15009 if (!port_has_data(port) && !port->host_connected)
15010 return 0;
15011
15012 @@ -32132,39 +33104,60 @@ index 088c8fd..774c5a5 100644
15013 }
15014
15015 static int wait_port_writable(struct port *port, bool nonblock)
15016 -diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
15017 -index 75c0a1a..96ba8f6 100644
15018 ---- a/drivers/edac/edac_mc.c
15019 -+++ b/drivers/edac/edac_mc.c
15020 -@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
15021 - /*
15022 - * Alocate and fill the csrow/channels structs
15023 - */
15024 -- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
15025 -+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
15026 - if (!mci->csrows)
15027 - goto error;
15028 - for (row = 0; row < tot_csrows; row++) {
15029 -@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
15030 - csr->csrow_idx = row;
15031 - csr->mci = mci;
15032 - csr->nr_channels = tot_channels;
15033 -- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
15034 -+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
15035 - GFP_KERNEL);
15036 - if (!csr->channels)
15037 - goto error;
15038 -@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
15039 - /*
15040 - * Allocate and fill the dimm structs
15041 - */
15042 -- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
15043 -+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
15044 - if (!mci->dimms)
15045 - goto error;
15046 +diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
15047 +index 8ae1a61..9c00613 100644
15048 +--- a/drivers/clocksource/arm_generic.c
15049 ++++ b/drivers/clocksource/arm_generic.c
15050 +@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
15051 + return NOTIFY_OK;
15052 + }
15053 +
15054 +-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
15055 ++static struct notifier_block arch_timer_cpu_nb = {
15056 + .notifier_call = arch_timer_cpu_notify,
15057 + };
15058
15059 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
15060 +index 1f93dbd..edf95ff 100644
15061 +--- a/drivers/cpufreq/cpufreq.c
15062 ++++ b/drivers/cpufreq/cpufreq.c
15063 +@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
15064 + return NOTIFY_OK;
15065 + }
15066 +
15067 +-static struct notifier_block __refdata cpufreq_cpu_notifier = {
15068 ++static struct notifier_block cpufreq_cpu_notifier = {
15069 + .notifier_call = cpufreq_cpu_callback,
15070 + };
15071 +
15072 +diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
15073 +index 9d7732b..0b1a793 100644
15074 +--- a/drivers/cpufreq/cpufreq_stats.c
15075 ++++ b/drivers/cpufreq/cpufreq_stats.c
15076 +@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
15077 + }
15078 +
15079 + /* priority=1 so this will get called before cpufreq_remove_dev */
15080 +-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
15081 ++static struct notifier_block cpufreq_stat_cpu_notifier = {
15082 + .notifier_call = cpufreq_stat_cpu_callback,
15083 + .priority = 1,
15084 + };
15085 +diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
15086 +index 3315e4b..fc38316 100644
15087 +--- a/drivers/dma/sh/shdma.c
15088 ++++ b/drivers/dma/sh/shdma.c
15089 +@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
15090 + return ret;
15091 + }
15092 +
15093 +-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
15094 ++static struct notifier_block sh_dmae_nmi_notifier = {
15095 + .notifier_call = sh_dmae_nmi_handler,
15096 +
15097 + /* Run before NMI debug handler and KGDB */
15098 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
15099 -index 1bfb207..0d059c2 100644
15100 +index 0056c4d..725934f 100644
15101 --- a/drivers/edac/edac_pci_sysfs.c
15102 +++ b/drivers/edac/edac_pci_sysfs.c
15103 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
15104 @@ -32232,7 +33225,7 @@ index 1bfb207..0d059c2 100644
15105 }
15106 }
15107 }
15108 -@@ -676,7 +676,7 @@ void edac_pci_do_parity_check(void)
15109 +@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
15110 if (!check_pci_errors)
15111 return;
15112
15113 @@ -32241,7 +33234,7 @@ index 1bfb207..0d059c2 100644
15114
15115 /* scan all PCI devices looking for a Parity Error on devices and
15116 * bridges.
15117 -@@ -688,7 +688,7 @@ void edac_pci_do_parity_check(void)
15118 +@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
15119 /* Only if operator has selected panic on PCI Error */
15120 if (edac_pci_get_panic_on_pe()) {
15121 /* If the count is different 'after' from 'before' */
15122 @@ -32251,13 +33244,13 @@ index 1bfb207..0d059c2 100644
15123 }
15124 }
15125 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
15126 -index 8c87a5e..a19cbd7 100644
15127 +index 6796799..99e8377 100644
15128 --- a/drivers/edac/mce_amd.h
15129 +++ b/drivers/edac/mce_amd.h
15130 -@@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
15131 +@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
15132 struct amd_decoder_ops {
15133 - bool (*dc_mce)(u16, u8);
15134 - bool (*ic_mce)(u16, u8);
15135 + bool (*mc0_mce)(u16, u8);
15136 + bool (*mc1_mce)(u16, u8);
15137 -};
15138 +} __no_const;
15139
15140 @@ -32340,20 +33333,20 @@ index 982f1f5..d21e5da 100644
15141 iounmap(buf);
15142 return 0;
15143 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
15144 -index bfd8f43..b1fe1f8 100644
15145 +index f5596db..9355ce6 100644
15146 --- a/drivers/firmware/efivars.c
15147 +++ b/drivers/firmware/efivars.c
15148 -@@ -1206,7 +1206,7 @@ out:
15149 - EXPORT_SYMBOL_GPL(register_efivars);
15150 +@@ -132,7 +132,7 @@ struct efivar_attribute {
15151 + };
15152
15153 static struct efivars __efivars;
15154 -static struct efivar_operations ops;
15155 +static efivar_operations_no_const ops __read_only;
15156
15157 - /*
15158 - * For now we register the efi subsystem with the firmware subsystem
15159 + #define PSTORE_EFI_ATTRIBUTES \
15160 + (EFI_VARIABLE_NON_VOLATILE | \
15161 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
15162 -index 82d5c20..44a7177 100644
15163 +index 9902732..64b62dd 100644
15164 --- a/drivers/gpio/gpio-vr41xx.c
15165 +++ b/drivers/gpio/gpio-vr41xx.c
15166 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
15167 @@ -32366,10 +33359,10 @@ index 82d5c20..44a7177 100644
15168 return -EINVAL;
15169 }
15170 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
15171 -index 1227adf..f2301c2 100644
15172 +index 7b2d378..cc947ea 100644
15173 --- a/drivers/gpu/drm/drm_crtc_helper.c
15174 +++ b/drivers/gpu/drm/drm_crtc_helper.c
15175 -@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
15176 +@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
15177 struct drm_crtc *tmp;
15178 int crtc_mask = 1;
15179
15180 @@ -32601,7 +33594,7 @@ index 2f4c434..764794b 100644
15181 if (__put_user(count, &request->count)
15182 || __put_user(list, &request->list))
15183 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
15184 -index 23dd975..63e9801 100644
15185 +index e77bd8b..1571b85 100644
15186 --- a/drivers/gpu/drm/drm_ioctl.c
15187 +++ b/drivers/gpu/drm/drm_ioctl.c
15188 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
15189 @@ -32636,10 +33629,10 @@ index d752c96..fe08455 100644
15190 if (drm_lock_free(&master->lock, lock->context)) {
15191 /* FIXME: Should really bail out here. */
15192 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
15193 -index c236fd2..6b5f2e7 100644
15194 +index 200e104..59facda 100644
15195 --- a/drivers/gpu/drm/drm_stub.c
15196 +++ b/drivers/gpu/drm/drm_stub.c
15197 -@@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
15198 +@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
15199
15200 drm_device_set_unplugged(dev);
15201
15202 @@ -32690,7 +33683,7 @@ index 6e0acad..93c8289 100644
15203 int front_offset;
15204 } drm_i810_private_t;
15205 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
15206 -index 3a1a495..995c093 100644
15207 +index 9d4a2c2..32a119f 100644
15208 --- a/drivers/gpu/drm/i915/i915_debugfs.c
15209 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
15210 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
15211 @@ -32703,10 +33696,10 @@ index 3a1a495..995c093 100644
15212 if (IS_GEN6(dev) || IS_GEN7(dev)) {
15213 seq_printf(m,
15214 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
15215 -index 61ae104..f8a4bc1 100644
15216 +index 99daa89..84ebd44 100644
15217 --- a/drivers/gpu/drm/i915/i915_dma.c
15218 +++ b/drivers/gpu/drm/i915/i915_dma.c
15219 -@@ -1274,7 +1274,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
15220 +@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
15221 bool can_switch;
15222
15223 spin_lock(&dev->count_lock);
15224 @@ -32716,11 +33709,11 @@ index 61ae104..f8a4bc1 100644
15225 return can_switch;
15226 }
15227 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
15228 -index 92f1750..3beba74 100644
15229 +index 12ab3bd..b3bed3b 100644
15230 --- a/drivers/gpu/drm/i915/i915_drv.h
15231 +++ b/drivers/gpu/drm/i915/i915_drv.h
15232 -@@ -430,7 +430,7 @@ typedef struct drm_i915_private {
15233 -
15234 +@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
15235 + drm_dma_handle_t *status_page_dmah;
15236 struct resource mch_res;
15237
15238 - atomic_t irq_received;
15239 @@ -32728,16 +33721,16 @@ index 92f1750..3beba74 100644
15240
15241 /* protects the irq masks */
15242 spinlock_t irq_lock;
15243 -@@ -1055,7 +1055,7 @@ struct drm_i915_gem_object {
15244 +@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
15245 * will be page flipped away on the next vblank. When it
15246 * reaches 0, dev_priv->pending_flip_queue will be woken up.
15247 */
15248 - atomic_t pending_flip;
15249 + atomic_unchecked_t pending_flip;
15250 };
15251 + #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
15252
15253 - #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
15254 -@@ -1558,7 +1558,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
15255 +@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
15256 struct drm_i915_private *dev_priv, unsigned port);
15257 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
15258 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
15259 @@ -32747,10 +33740,10 @@ index 92f1750..3beba74 100644
15260 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
15261 }
15262 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
15263 -index 67036e9..b9f1357 100644
15264 +index 26d08bb..fccb984 100644
15265 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
15266 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
15267 -@@ -681,7 +681,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
15268 +@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
15269 i915_gem_clflush_object(obj);
15270
15271 if (obj->base.pending_write_domain)
15272 @@ -32759,7 +33752,7 @@ index 67036e9..b9f1357 100644
15273
15274 flush_domains |= obj->base.write_domain;
15275 }
15276 -@@ -712,9 +712,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
15277 +@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
15278
15279 static int
15280 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
15281 @@ -32772,10 +33765,10 @@ index 67036e9..b9f1357 100644
15282 for (i = 0; i < count; i++) {
15283 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
15284 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
15285 -index dc29ace..137d83a 100644
15286 +index fe84338..a863190 100644
15287 --- a/drivers/gpu/drm/i915/i915_irq.c
15288 +++ b/drivers/gpu/drm/i915/i915_irq.c
15289 -@@ -531,7 +531,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
15290 +@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
15291 u32 pipe_stats[I915_MAX_PIPES];
15292 bool blc_event;
15293
15294 @@ -32784,7 +33777,7 @@ index dc29ace..137d83a 100644
15295
15296 while (true) {
15297 iir = I915_READ(VLV_IIR);
15298 -@@ -678,7 +678,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
15299 +@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
15300 irqreturn_t ret = IRQ_NONE;
15301 int i;
15302
15303 @@ -32793,16 +33786,16 @@ index dc29ace..137d83a 100644
15304
15305 /* disable master interrupt before clearing iir */
15306 de_ier = I915_READ(DEIER);
15307 -@@ -753,7 +753,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
15308 +@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
15309 + int ret = IRQ_NONE;
15310 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
15311 - u32 hotplug_mask;
15312
15313 - atomic_inc(&dev_priv->irq_received);
15314 + atomic_inc_unchecked(&dev_priv->irq_received);
15315
15316 /* disable master interrupt before clearing iir */
15317 de_ier = I915_READ(DEIER);
15318 -@@ -1762,7 +1762,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
15319 +@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
15320 {
15321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
15322
15323 @@ -32811,7 +33804,7 @@ index dc29ace..137d83a 100644
15324
15325 I915_WRITE(HWSTAM, 0xeffe);
15326
15327 -@@ -1788,7 +1788,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
15328 +@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
15329 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
15330 int pipe;
15331
15332 @@ -32820,7 +33813,7 @@ index dc29ace..137d83a 100644
15333
15334 /* VLV magic */
15335 I915_WRITE(VLV_IMR, 0);
15336 -@@ -2093,7 +2093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
15337 +@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
15338 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
15339 int pipe;
15340
15341 @@ -32829,7 +33822,7 @@ index dc29ace..137d83a 100644
15342
15343 for_each_pipe(pipe)
15344 I915_WRITE(PIPESTAT(pipe), 0);
15345 -@@ -2144,7 +2144,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
15346 +@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
15347 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
15348 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
15349
15350 @@ -32838,7 +33831,7 @@ index dc29ace..137d83a 100644
15351
15352 iir = I915_READ16(IIR);
15353 if (iir == 0)
15354 -@@ -2229,7 +2229,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
15355 +@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
15356 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
15357 int pipe;
15358
15359 @@ -32847,7 +33840,7 @@ index dc29ace..137d83a 100644
15360
15361 if (I915_HAS_HOTPLUG(dev)) {
15362 I915_WRITE(PORT_HOTPLUG_EN, 0);
15363 -@@ -2324,7 +2324,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
15364 +@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
15365 };
15366 int pipe, ret = IRQ_NONE;
15367
15368 @@ -32856,7 +33849,7 @@ index dc29ace..137d83a 100644
15369
15370 iir = I915_READ(IIR);
15371 do {
15372 -@@ -2450,7 +2450,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
15373 +@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
15374 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
15375 int pipe;
15376
15377 @@ -32865,7 +33858,7 @@ index dc29ace..137d83a 100644
15378
15379 I915_WRITE(PORT_HOTPLUG_EN, 0);
15380 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
15381 -@@ -2557,7 +2557,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
15382 +@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
15383 int irq_received;
15384 int ret = IRQ_NONE, pipe;
15385
15386 @@ -32875,10 +33868,10 @@ index dc29ace..137d83a 100644
15387 iir = I915_READ(IIR);
15388
15389 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
15390 -index 4d3c7c6..eaac87b 100644
15391 +index da1ad9c..10d368b 100644
15392 --- a/drivers/gpu/drm/i915/intel_display.c
15393 +++ b/drivers/gpu/drm/i915/intel_display.c
15394 -@@ -2131,7 +2131,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
15395 +@@ -2244,7 +2244,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
15396
15397 wait_event(dev_priv->pending_flip_queue,
15398 atomic_read(&dev_priv->mm.wedged) ||
15399 @@ -32887,7 +33880,7 @@ index 4d3c7c6..eaac87b 100644
15400
15401 /* Big Hammer, we also need to ensure that any pending
15402 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15403 -@@ -6221,8 +6221,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
15404 +@@ -7109,8 +7109,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
15405
15406 obj = work->old_fb_obj;
15407
15408 @@ -32897,7 +33890,7 @@ index 4d3c7c6..eaac87b 100644
15409 wake_up(&dev_priv->pending_flip_queue);
15410
15411 queue_work(dev_priv->wq, &work->work);
15412 -@@ -6589,7 +6588,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15413 +@@ -7477,7 +7476,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15414 /* Block clients from rendering to the new back buffer until
15415 * the flip occurs and the object is no longer visible.
15416 */
15417 @@ -32906,7 +33899,7 @@ index 4d3c7c6..eaac87b 100644
15418 atomic_inc(&intel_crtc->unpin_work_count);
15419
15420 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
15421 -@@ -6606,7 +6605,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15422 +@@ -7494,7 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15423
15424 cleanup_pending:
15425 atomic_dec(&intel_crtc->unpin_work_count);
15426 @@ -32972,10 +33965,10 @@ index 598c281..60d590e 100644
15427
15428 *sequence = cur_fence;
15429 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
15430 -index 09fdef2..57f5c3b 100644
15431 +index 865eddf..62c4cc3 100644
15432 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
15433 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
15434 -@@ -1240,7 +1240,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
15435 +@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
15436 struct bit_table {
15437 const char id;
15438 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
15439 @@ -32985,7 +33978,7 @@ index 09fdef2..57f5c3b 100644
15440 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
15441
15442 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
15443 -index a101699..a163f0a 100644
15444 +index aa89eb9..d45d38b 100644
15445 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
15446 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
15447 @@ -80,7 +80,7 @@ struct nouveau_drm {
15448 @@ -33011,7 +34004,7 @@ index cdb83ac..27f0a16 100644
15449 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
15450
15451 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
15452 -index 5e2f521..0d21436 100644
15453 +index 8bf695c..9fbc90a 100644
15454 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
15455 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
15456 @@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
15457 @@ -33024,10 +34017,10 @@ index 5e2f521..0d21436 100644
15458 if (++trycnt > 100000) {
15459 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
15460 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
15461 -index 6f0ac64..9c2dfb4 100644
15462 +index 25d3495..d81aaf6 100644
15463 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
15464 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
15465 -@@ -63,7 +63,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
15466 +@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
15467 bool can_switch;
15468
15469 spin_lock(&dev->count_lock);
15470 @@ -33036,49 +34029,6 @@ index 6f0ac64..9c2dfb4 100644
15471 spin_unlock(&dev->count_lock);
15472 return can_switch;
15473 }
15474 -diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
15475 -index 9f6f55c..30e3a29 100644
15476 ---- a/drivers/gpu/drm/nouveau/nv50_evo.c
15477 -+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
15478 -@@ -152,9 +152,9 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
15479 - kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
15480 - evo->object->oclass->ofuncs =
15481 - kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
15482 -- evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
15483 -- evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
15484 -- evo->object->oclass->ofuncs->rd08 =
15485 -+ *(void**)&evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
15486 -+ *(void**)&evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
15487 -+ *(void**)&evo->object->oclass->ofuncs->rd08 =
15488 - ioremap(pci_resource_start(dev->pdev, 0) +
15489 - NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
15490 - return 0;
15491 -diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
15492 -index b562b59..9d725a8 100644
15493 ---- a/drivers/gpu/drm/nouveau/nv50_sor.c
15494 -+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
15495 -@@ -317,7 +317,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
15496 - }
15497 -
15498 - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
15499 -- struct dp_train_func func = {
15500 -+ static struct dp_train_func func = {
15501 - .link_set = nv50_sor_dp_link_set,
15502 - .train_set = nv50_sor_dp_train_set,
15503 - .train_adj = nv50_sor_dp_train_adj
15504 -diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
15505 -index c402fca..f1d694b 100644
15506 ---- a/drivers/gpu/drm/nouveau/nvd0_display.c
15507 -+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
15508 -@@ -1389,7 +1389,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
15509 - nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
15510 -
15511 - if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
15512 -- struct dp_train_func func = {
15513 -+ static struct dp_train_func func = {
15514 - .link_set = nvd0_sor_dp_link_set,
15515 - .train_set = nvd0_sor_dp_train_set,
15516 - .train_adj = nvd0_sor_dp_train_adj
15517 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
15518 index d4660cf..70dbe65 100644
15519 --- a/drivers/gpu/drm/r128/r128_cce.c
15520 @@ -33174,10 +34124,10 @@ index 5a82b6b..9e69c73 100644
15521 if (regcomp
15522 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
15523 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
15524 -index 008d645..de03849 100644
15525 +index 0d6562b..a154330 100644
15526 --- a/drivers/gpu/drm/radeon/radeon_device.c
15527 +++ b/drivers/gpu/drm/radeon/radeon_device.c
15528 -@@ -941,7 +941,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
15529 +@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
15530 bool can_switch;
15531
15532 spin_lock(&dev->count_lock);
15533 @@ -33187,7 +34137,7 @@ index 008d645..de03849 100644
15534 return can_switch;
15535 }
15536 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
15537 -index a1b59ca..86f2d44 100644
15538 +index e7fdf16..f4f6490 100644
15539 --- a/drivers/gpu/drm/radeon/radeon_drv.h
15540 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
15541 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
15542 @@ -33259,10 +34209,10 @@ index 8e9057b..af6dacb 100644
15543 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
15544
15545 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
15546 -index 5ebe1b3..cf69ba0 100644
15547 +index 93f760e..33d9839 100644
15548 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
15549 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
15550 -@@ -781,7 +781,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
15551 +@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
15552 man->size = size >> PAGE_SHIFT;
15553 }
15554
15555 @@ -33271,7 +34221,7 @@ index 5ebe1b3..cf69ba0 100644
15556 static const struct vm_operations_struct *ttm_vm_ops = NULL;
15557
15558 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
15559 -@@ -822,8 +822,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
15560 +@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
15561 }
15562 if (unlikely(ttm_vm_ops == NULL)) {
15563 ttm_vm_ops = vma->vm_ops;
15564 @@ -33412,10 +34362,10 @@ index ac98964..5dbf512 100644
15565 case VIA_IRQ_ABSOLUTE:
15566 break;
15567 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
15568 -index 88a179e..57fe50481c 100644
15569 +index 13aeda7..4a952d1 100644
15570 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
15571 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
15572 -@@ -263,7 +263,7 @@ struct vmw_private {
15573 +@@ -290,7 +290,7 @@ struct vmw_private {
15574 * Fencing and IRQs.
15575 */
15576
15577 @@ -33500,10 +34450,10 @@ index 8a8725c..afed796 100644
15578 marker = list_first_entry(&queue->head,
15579 struct vmw_marker, head);
15580 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
15581 -index 52146db..ae33762 100644
15582 +index eb2ee11..6cc50ab 100644
15583 --- a/drivers/hid/hid-core.c
15584 +++ b/drivers/hid/hid-core.c
15585 -@@ -2201,7 +2201,7 @@ static bool hid_ignore(struct hid_device *hdev)
15586 +@@ -2240,7 +2240,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
15587
15588 int hid_add_device(struct hid_device *hdev)
15589 {
15590 @@ -33512,7 +34462,7 @@ index 52146db..ae33762 100644
15591 int ret;
15592
15593 if (WARN_ON(hdev->status & HID_STAT_ADDED))
15594 -@@ -2236,7 +2236,7 @@ int hid_add_device(struct hid_device *hdev)
15595 +@@ -2274,7 +2274,7 @@ int hid_add_device(struct hid_device *hdev)
15596 /* XXX hack, any other cleaner solution after the driver core
15597 * is converted to allow more than 20 bytes as the device name? */
15598 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
15599 @@ -33534,24 +34484,11 @@ index eec3291..8ed706b 100644
15600 return -EFAULT;
15601
15602 *off += size;
15603 -diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
15604 -index 14599e2..711c965 100644
15605 ---- a/drivers/hid/usbhid/hiddev.c
15606 -+++ b/drivers/hid/usbhid/hiddev.c
15607 -@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
15608 - break;
15609 -
15610 - case HIDIOCAPPLICATION:
15611 -- if (arg < 0 || arg >= hid->maxapplication)
15612 -+ if (arg >= hid->maxapplication)
15613 - break;
15614 -
15615 - for (i = 0; i < hid->maxcollection; i++)
15616 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
15617 -index f4c3d28..82f45a9 100644
15618 +index 773a2f2..7ce08bc 100644
15619 --- a/drivers/hv/channel.c
15620 +++ b/drivers/hv/channel.c
15621 -@@ -402,8 +402,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
15622 +@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
15623 int ret = 0;
15624 int t;
15625
15626 @@ -33605,8 +34542,21 @@ index 8e1a9ec..4687821 100644
15627
15628 child_device_obj->device.bus = &hv_bus;
15629 child_device_obj->device.parent = &hv_acpi_dev->dev;
15630 +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
15631 +index d64923d..72591e8 100644
15632 +--- a/drivers/hwmon/coretemp.c
15633 ++++ b/drivers/hwmon/coretemp.c
15634 +@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
15635 + return NOTIFY_OK;
15636 + }
15637 +
15638 +-static struct notifier_block coretemp_cpu_notifier __refdata = {
15639 ++static struct notifier_block coretemp_cpu_notifier = {
15640 + .notifier_call = coretemp_cpu_callback,
15641 + };
15642 +
15643 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
15644 -index 07a0c1a..0cac334 100644
15645 +index 1c85d39..55ed3cf 100644
15646 --- a/drivers/hwmon/sht15.c
15647 +++ b/drivers/hwmon/sht15.c
15648 @@ -169,7 +169,7 @@ struct sht15_data {
15649 @@ -33657,6 +34607,19 @@ index 07a0c1a..0cac334 100644
15650 return;
15651 }
15652
15653 +diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
15654 +index 76f157b..9c0db1b 100644
15655 +--- a/drivers/hwmon/via-cputemp.c
15656 ++++ b/drivers/hwmon/via-cputemp.c
15657 +@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
15658 + return NOTIFY_OK;
15659 + }
15660 +
15661 +-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
15662 ++static struct notifier_block via_cputemp_cpu_notifier = {
15663 + .notifier_call = via_cputemp_cpu_callback,
15664 + };
15665 +
15666 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
15667 index 378fcb5..5e91fa8 100644
15668 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
15669 @@ -33987,7 +34950,7 @@ index 1f95bba..9530f87 100644
15670 sdata, wqe->wr.wr.atomic.swap);
15671 goto send_comp;
15672 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
15673 -index 748db2d..5f75cc3 100644
15674 +index 5b152a3..c1f3e83 100644
15675 --- a/drivers/infiniband/hw/nes/nes.c
15676 +++ b/drivers/infiniband/hw/nes/nes.c
15677 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
15678 @@ -34068,7 +35031,7 @@ index 33cc589..3bd6538 100644
15679 extern u32 int_mod_timer_init;
15680 extern u32 int_mod_cq_depth_256;
15681 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
15682 -index cfaacaf..fa0722e 100644
15683 +index 22ea67e..dcbe3bc 100644
15684 --- a/drivers/infiniband/hw/nes/nes_cm.c
15685 +++ b/drivers/infiniband/hw/nes/nes_cm.c
15686 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
15687 @@ -34114,7 +35077,7 @@ index cfaacaf..fa0722e 100644
15688
15689 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
15690 {
15691 -@@ -1281,7 +1281,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
15692 +@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
15693 kfree(listener);
15694 listener = NULL;
15695 ret = 0;
15696 @@ -34123,7 +35086,7 @@ index cfaacaf..fa0722e 100644
15697 } else {
15698 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
15699 }
15700 -@@ -1480,7 +1480,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
15701 +@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
15702 cm_node->rem_mac);
15703
15704 add_hte_node(cm_core, cm_node);
15705 @@ -34132,7 +35095,7 @@ index cfaacaf..fa0722e 100644
15706
15707 return cm_node;
15708 }
15709 -@@ -1538,7 +1538,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
15710 +@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
15711 }
15712
15713 atomic_dec(&cm_core->node_cnt);
15714 @@ -34141,7 +35104,7 @@ index cfaacaf..fa0722e 100644
15715 nesqp = cm_node->nesqp;
15716 if (nesqp) {
15717 nesqp->cm_node = NULL;
15718 -@@ -1602,7 +1602,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
15719 +@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
15720
15721 static void drop_packet(struct sk_buff *skb)
15722 {
15723 @@ -34150,7 +35113,7 @@ index cfaacaf..fa0722e 100644
15724 dev_kfree_skb_any(skb);
15725 }
15726
15727 -@@ -1665,7 +1665,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
15728 +@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
15729 {
15730
15731 int reset = 0; /* whether to send reset in case of err.. */
15732 @@ -34159,7 +35122,7 @@ index cfaacaf..fa0722e 100644
15733 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
15734 " refcnt=%d\n", cm_node, cm_node->state,
15735 atomic_read(&cm_node->ref_count));
15736 -@@ -2306,7 +2306,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
15737 +@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
15738 rem_ref_cm_node(cm_node->cm_core, cm_node);
15739 return NULL;
15740 }
15741 @@ -34168,7 +35131,7 @@ index cfaacaf..fa0722e 100644
15742 loopbackremotenode->loopbackpartner = cm_node;
15743 loopbackremotenode->tcp_cntxt.rcv_wscale =
15744 NES_CM_DEFAULT_RCV_WND_SCALE;
15745 -@@ -2581,7 +2581,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
15746 +@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
15747 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
15748 else {
15749 rem_ref_cm_node(cm_core, cm_node);
15750 @@ -34177,7 +35140,7 @@ index cfaacaf..fa0722e 100644
15751 dev_kfree_skb_any(skb);
15752 }
15753 break;
15754 -@@ -2889,7 +2889,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
15755 +@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
15756
15757 if ((cm_id) && (cm_id->event_handler)) {
15758 if (issue_disconn) {
15759 @@ -34186,7 +35149,7 @@ index cfaacaf..fa0722e 100644
15760 cm_event.event = IW_CM_EVENT_DISCONNECT;
15761 cm_event.status = disconn_status;
15762 cm_event.local_addr = cm_id->local_addr;
15763 -@@ -2911,7 +2911,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
15764 +@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
15765 }
15766
15767 if (issue_close) {
15768 @@ -34195,7 +35158,7 @@ index cfaacaf..fa0722e 100644
15769 nes_disconnect(nesqp, 1);
15770
15771 cm_id->provider_data = nesqp;
15772 -@@ -3047,7 +3047,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
15773 +@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
15774
15775 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
15776 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
15777 @@ -34204,7 +35167,7 @@ index cfaacaf..fa0722e 100644
15778
15779 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
15780 netdev_refcnt_read(nesvnic->netdev));
15781 -@@ -3242,7 +3242,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
15782 +@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
15783 struct nes_cm_core *cm_core;
15784 u8 *start_buff;
15785
15786 @@ -34213,7 +35176,7 @@ index cfaacaf..fa0722e 100644
15787 cm_node = (struct nes_cm_node *)cm_id->provider_data;
15788 loopback = cm_node->loopbackpartner;
15789 cm_core = cm_node->cm_core;
15790 -@@ -3302,7 +3302,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
15791 +@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
15792 ntohl(cm_id->local_addr.sin_addr.s_addr),
15793 ntohs(cm_id->local_addr.sin_port));
15794
15795 @@ -34222,7 +35185,7 @@ index cfaacaf..fa0722e 100644
15796 nesqp->active_conn = 1;
15797
15798 /* cache the cm_id in the qp */
15799 -@@ -3412,7 +3412,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
15800 +@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
15801 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
15802 return err;
15803 }
15804 @@ -34231,7 +35194,7 @@ index cfaacaf..fa0722e 100644
15805 }
15806
15807 cm_id->add_ref(cm_id);
15808 -@@ -3513,7 +3513,7 @@ static void cm_event_connected(struct nes_cm_event *event)
15809 +@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
15810
15811 if (nesqp->destroyed)
15812 return;
15813 @@ -34240,7 +35203,7 @@ index cfaacaf..fa0722e 100644
15814 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
15815 " local port 0x%04X. jiffies = %lu.\n",
15816 nesqp->hwqp.qp_id,
15817 -@@ -3693,7 +3693,7 @@ static void cm_event_reset(struct nes_cm_event *event)
15818 +@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
15819
15820 cm_id->add_ref(cm_id);
15821 ret = cm_id->event_handler(cm_id, &cm_event);
15822 @@ -34249,7 +35212,7 @@ index cfaacaf..fa0722e 100644
15823 cm_event.event = IW_CM_EVENT_CLOSE;
15824 cm_event.status = 0;
15825 cm_event.provider_data = cm_id->provider_data;
15826 -@@ -3729,7 +3729,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
15827 +@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
15828 return;
15829 cm_id = cm_node->cm_id;
15830
15831 @@ -34258,7 +35221,7 @@ index cfaacaf..fa0722e 100644
15832 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
15833 cm_node, cm_id, jiffies);
15834
15835 -@@ -3769,7 +3769,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
15836 +@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
15837 return;
15838 cm_id = cm_node->cm_id;
15839
15840 @@ -34268,7 +35231,7 @@ index cfaacaf..fa0722e 100644
15841 cm_node, cm_id, jiffies);
15842
15843 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
15844 -index 3ba7be3..c81f6ff 100644
15845 +index 4166452..fc952c3 100644
15846 --- a/drivers/infiniband/hw/nes/nes_mgt.c
15847 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
15848 @@ -40,8 +40,8 @@
15849 @@ -34291,7 +35254,7 @@ index 3ba7be3..c81f6ff 100644
15850
15851 /* Free packets that have not yet been forwarded */
15852 /* Lock is acquired by skb_dequeue when removing the skb */
15853 -@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
15854 +@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
15855 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
15856 skb_queue_head_init(&nesqp->pau_list);
15857 spin_lock_init(&nesqp->pau_lock);
15858 @@ -34301,10 +35264,10 @@ index 3ba7be3..c81f6ff 100644
15859 }
15860
15861 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
15862 -index 0564be7..f68b0f1 100644
15863 +index 9542e16..a008c40 100644
15864 --- a/drivers/infiniband/hw/nes/nes_nic.c
15865 +++ b/drivers/infiniband/hw/nes/nes_nic.c
15866 -@@ -1272,39 +1272,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
15867 +@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
15868 target_stat_values[++index] = mh_detected;
15869 target_stat_values[++index] = mh_pauses_sent;
15870 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
15871 @@ -34433,19 +35396,19 @@ index da739d9..da1c7f4 100644
15872 gameport->dev.release = gameport_release_port;
15873 if (gameport->parent)
15874 diff --git a/drivers/input/input.c b/drivers/input/input.c
15875 -index 53a0dde..abffda7 100644
15876 +index c044699..174d71a 100644
15877 --- a/drivers/input/input.c
15878 +++ b/drivers/input/input.c
15879 -@@ -1902,7 +1902,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
15880 +@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
15881 */
15882 int input_register_device(struct input_dev *dev)
15883 {
15884 - static atomic_t input_no = ATOMIC_INIT(0);
15885 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
15886 + struct input_devres *devres = NULL;
15887 struct input_handler *handler;
15888 unsigned int packet_size;
15889 - const char *path;
15890 -@@ -1945,7 +1945,7 @@ int input_register_device(struct input_dev *dev)
15891 +@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
15892 dev->setkeycode = input_default_setkeycode;
15893
15894 dev_set_name(&dev->dev, "input%ld",
15895 @@ -34467,10 +35430,10 @@ index 04c69af..5f92d00 100644
15896 #include <linux/input.h>
15897 #include <linux/gameport.h>
15898 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
15899 -index 83811e4..0822b90 100644
15900 +index d6cbfe9..6225402 100644
15901 --- a/drivers/input/joystick/xpad.c
15902 +++ b/drivers/input/joystick/xpad.c
15903 -@@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
15904 +@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
15905
15906 static int xpad_led_probe(struct usb_xpad *xpad)
15907 {
15908 @@ -34479,7 +35442,7 @@ index 83811e4..0822b90 100644
15909 long led_no;
15910 struct xpad_led *led;
15911 struct led_classdev *led_cdev;
15912 -@@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
15913 +@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
15914 if (!led)
15915 return -ENOMEM;
15916
15917 @@ -34502,7 +35465,7 @@ index 4c842c3..590b0bf 100644
15918
15919 return count;
15920 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
15921 -index d0f7533..fb8215b 100644
15922 +index 25fc597..558bf3b 100644
15923 --- a/drivers/input/serio/serio.c
15924 +++ b/drivers/input/serio/serio.c
15925 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
15926 @@ -34524,10 +35487,10 @@ index d0f7533..fb8215b 100644
15927 serio->dev.release = serio_release_port;
15928 serio->dev.groups = serio_device_attr_groups;
15929 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
15930 -index c679867..6e2e34d 100644
15931 +index 89562a8..218999b 100644
15932 --- a/drivers/isdn/capi/capi.c
15933 +++ b/drivers/isdn/capi/capi.c
15934 -@@ -83,8 +83,8 @@ struct capiminor {
15935 +@@ -81,8 +81,8 @@ struct capiminor {
15936
15937 struct capi20_appl *ap;
15938 u32 ncci;
15939 @@ -34538,7 +35501,7 @@ index c679867..6e2e34d 100644
15940
15941 struct tty_port port;
15942 int ttyinstop;
15943 -@@ -393,7 +393,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
15944 +@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
15945 capimsg_setu16(s, 2, mp->ap->applid);
15946 capimsg_setu8 (s, 4, CAPI_DATA_B3);
15947 capimsg_setu8 (s, 5, CAPI_RESP);
15948 @@ -34547,7 +35510,7 @@ index c679867..6e2e34d 100644
15949 capimsg_setu32(s, 8, mp->ncci);
15950 capimsg_setu16(s, 12, datahandle);
15951 }
15952 -@@ -514,14 +514,14 @@ static void handle_minor_send(struct capiminor *mp)
15953 +@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
15954 mp->outbytes -= len;
15955 spin_unlock_bh(&mp->outlock);
15956
15957 @@ -34615,7 +35578,7 @@ index 821f7ac..28d4030 100644
15958 } else {
15959 memcpy(buf, dp, left);
15960 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
15961 -index b817809..409caff 100644
15962 +index e09dc8a..15e2efb 100644
15963 --- a/drivers/isdn/i4l/isdn_tty.c
15964 +++ b/drivers/isdn/i4l/isdn_tty.c
15965 @@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
15966 @@ -34670,7 +35633,7 @@ index b817809..409caff 100644
15967 port->flags &= ~ASYNC_NORMAL_ACTIVE;
15968 port->tty = NULL;
15969 wake_up_interruptible(&port->open_wait);
15970 -@@ -1971,7 +1971,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
15971 +@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
15972 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
15973 modem_info *info = &dev->mdm.info[i];
15974
15975 @@ -34693,7 +35656,7 @@ index e74df7c..03a03ba 100644
15976 } else
15977 memcpy(msg, buf, count);
15978 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
15979 -index b5fdcb7..5b6c59f 100644
15980 +index a5ebc00..982886f 100644
15981 --- a/drivers/lguest/core.c
15982 +++ b/drivers/lguest/core.c
15983 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
15984 @@ -34844,10 +35807,10 @@ index 7155945..4bcc562 100644
15985
15986 seq_printf(seq, "\n");
15987 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
15988 -index a651d52..82f8a95 100644
15989 +index 0666b5d..ed82cb4 100644
15990 --- a/drivers/md/dm-ioctl.c
15991 +++ b/drivers/md/dm-ioctl.c
15992 -@@ -1601,7 +1601,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
15993 +@@ -1628,7 +1628,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
15994 cmd == DM_LIST_VERSIONS_CMD)
15995 return 0;
15996
15997 @@ -34857,7 +35820,7 @@ index a651d52..82f8a95 100644
15998 DMWARN("name not supplied when creating device");
15999 return -EINVAL;
16000 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
16001 -index fd61f98..8050783 100644
16002 +index fa51918..c26253c 100644
16003 --- a/drivers/md/dm-raid1.c
16004 +++ b/drivers/md/dm-raid1.c
16005 @@ -40,7 +40,7 @@ enum dm_raid1_error {
16006 @@ -34869,7 +35832,7 @@ index fd61f98..8050783 100644
16007 unsigned long error_type;
16008 struct dm_dev *dev;
16009 sector_t offset;
16010 -@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
16011 +@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
16012 struct mirror *m;
16013
16014 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
16015 @@ -34878,7 +35841,7 @@ index fd61f98..8050783 100644
16016 return m;
16017
16018 return NULL;
16019 -@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
16020 +@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
16021 * simple way to tell if a device has encountered
16022 * errors.
16023 */
16024 @@ -34887,7 +35850,7 @@ index fd61f98..8050783 100644
16025
16026 if (test_and_set_bit(error_type, &m->error_type))
16027 return;
16028 -@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
16029 +@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
16030 struct mirror *m = get_default_mirror(ms);
16031
16032 do {
16033 @@ -34896,7 +35859,7 @@ index fd61f98..8050783 100644
16034 return m;
16035
16036 if (m-- == ms->mirror)
16037 -@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
16038 +@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
16039 {
16040 struct mirror *default_mirror = get_default_mirror(m->ms);
16041
16042 @@ -34905,7 +35868,7 @@ index fd61f98..8050783 100644
16043 }
16044
16045 static int mirror_available(struct mirror_set *ms, struct bio *bio)
16046 -@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
16047 +@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
16048 */
16049 if (likely(region_in_sync(ms, region, 1)))
16050 m = choose_mirror(ms, bio->bi_sector);
16051 @@ -34914,7 +35877,7 @@ index fd61f98..8050783 100644
16052 m = NULL;
16053
16054 if (likely(m))
16055 -@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
16056 +@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
16057 }
16058
16059 ms->mirror[mirror].ms = ms;
16060 @@ -34923,7 +35886,7 @@ index fd61f98..8050783 100644
16061 ms->mirror[mirror].error_type = 0;
16062 ms->mirror[mirror].offset = offset;
16063
16064 -@@ -1356,7 +1356,7 @@ static void mirror_resume(struct dm_target *ti)
16065 +@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
16066 */
16067 static char device_status_char(struct mirror *m)
16068 {
16069 @@ -34933,7 +35896,7 @@ index fd61f98..8050783 100644
16070
16071 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
16072 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
16073 -index e2f87653..f279abe 100644
16074 +index c89cde8..9d184cf 100644
16075 --- a/drivers/md/dm-stripe.c
16076 +++ b/drivers/md/dm-stripe.c
16077 @@ -20,7 +20,7 @@ struct stripe {
16078 @@ -34945,7 +35908,7 @@ index e2f87653..f279abe 100644
16079 };
16080
16081 struct stripe_c {
16082 -@@ -183,7 +183,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
16083 +@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
16084 kfree(sc);
16085 return r;
16086 }
16087 @@ -34954,7 +35917,7 @@ index e2f87653..f279abe 100644
16088 }
16089
16090 ti->private = sc;
16091 -@@ -324,7 +324,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
16092 +@@ -325,7 +325,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
16093 DMEMIT("%d ", sc->stripes);
16094 for (i = 0; i < sc->stripes; i++) {
16095 DMEMIT("%s ", sc->stripe[i].dev->name);
16096 @@ -34963,7 +35926,7 @@ index e2f87653..f279abe 100644
16097 'D' : 'A';
16098 }
16099 buffer[i] = '\0';
16100 -@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
16101 +@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
16102 */
16103 for (i = 0; i < sc->stripes; i++)
16104 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
16105 @@ -34975,7 +35938,7 @@ index e2f87653..f279abe 100644
16106 schedule_work(&sc->trigger_event);
16107 }
16108 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
16109 -index fa29557..d24a5b7 100644
16110 +index daf25d0..d74f49f 100644
16111 --- a/drivers/md/dm-table.c
16112 +++ b/drivers/md/dm-table.c
16113 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
16114 @@ -34988,7 +35951,7 @@ index fa29557..d24a5b7 100644
16115 "start=%llu, len=%llu, dev_size=%llu",
16116 dm_device_name(ti->table->md), bdevname(bdev, b),
16117 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
16118 -index 693e149..b7e0fde 100644
16119 +index 4d6e853..a234157 100644
16120 --- a/drivers/md/dm-thin-metadata.c
16121 +++ b/drivers/md/dm-thin-metadata.c
16122 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
16123 @@ -35010,10 +35973,10 @@ index 693e149..b7e0fde 100644
16124 pmd->bl_info.value_type.inc = data_block_inc;
16125 pmd->bl_info.value_type.dec = data_block_dec;
16126 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
16127 -index 77e6eff..913d695 100644
16128 +index 314a0e2..1376406 100644
16129 --- a/drivers/md/dm.c
16130 +++ b/drivers/md/dm.c
16131 -@@ -182,9 +182,9 @@ struct mapped_device {
16132 +@@ -170,9 +170,9 @@ struct mapped_device {
16133 /*
16134 * Event handling.
16135 */
16136 @@ -35025,7 +35988,7 @@ index 77e6eff..913d695 100644
16137 struct list_head uevent_list;
16138 spinlock_t uevent_lock; /* Protect access to uevent_list */
16139
16140 -@@ -1847,8 +1847,8 @@ static struct mapped_device *alloc_dev(int minor)
16141 +@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
16142 rwlock_init(&md->map_lock);
16143 atomic_set(&md->holders, 1);
16144 atomic_set(&md->open_count, 0);
16145 @@ -35036,7 +35999,7 @@ index 77e6eff..913d695 100644
16146 INIT_LIST_HEAD(&md->uevent_list);
16147 spin_lock_init(&md->uevent_lock);
16148
16149 -@@ -1982,7 +1982,7 @@ static void event_callback(void *context)
16150 +@@ -2014,7 +2014,7 @@ static void event_callback(void *context)
16151
16152 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
16153
16154 @@ -35045,7 +36008,7 @@ index 77e6eff..913d695 100644
16155 wake_up(&md->eventq);
16156 }
16157
16158 -@@ -2637,18 +2637,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
16159 +@@ -2669,18 +2669,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
16160
16161 uint32_t dm_next_uevent_seq(struct mapped_device *md)
16162 {
16163 @@ -35068,7 +36031,7 @@ index 77e6eff..913d695 100644
16164
16165 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
16166 diff --git a/drivers/md/md.c b/drivers/md/md.c
16167 -index 6120071..31d9be2 100644
16168 +index 3db3d1b..9487468 100644
16169 --- a/drivers/md/md.c
16170 +++ b/drivers/md/md.c
16171 @@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
16172 @@ -35093,7 +36056,7 @@ index 6120071..31d9be2 100644
16173 wake_up(&md_event_waiters);
16174 }
16175
16176 -@@ -1504,7 +1504,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
16177 +@@ -1503,7 +1503,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
16178 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
16179 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
16180 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
16181 @@ -35102,7 +36065,7 @@ index 6120071..31d9be2 100644
16182
16183 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
16184 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
16185 -@@ -1748,7 +1748,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
16186 +@@ -1747,7 +1747,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
16187 else
16188 sb->resync_offset = cpu_to_le64(0);
16189
16190 @@ -35111,7 +36074,7 @@ index 6120071..31d9be2 100644
16191
16192 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
16193 sb->size = cpu_to_le64(mddev->dev_sectors);
16194 -@@ -2748,7 +2748,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
16195 +@@ -2747,7 +2747,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
16196 static ssize_t
16197 errors_show(struct md_rdev *rdev, char *page)
16198 {
16199 @@ -35120,7 +36083,7 @@ index 6120071..31d9be2 100644
16200 }
16201
16202 static ssize_t
16203 -@@ -2757,7 +2757,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
16204 +@@ -2756,7 +2756,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
16205 char *e;
16206 unsigned long n = simple_strtoul(buf, &e, 10);
16207 if (*buf && (*e == 0 || *e == '\n')) {
16208 @@ -35129,7 +36092,7 @@ index 6120071..31d9be2 100644
16209 return len;
16210 }
16211 return -EINVAL;
16212 -@@ -3204,8 +3204,8 @@ int md_rdev_init(struct md_rdev *rdev)
16213 +@@ -3203,8 +3203,8 @@ int md_rdev_init(struct md_rdev *rdev)
16214 rdev->sb_loaded = 0;
16215 rdev->bb_page = NULL;
16216 atomic_set(&rdev->nr_pending, 0);
16217 @@ -35140,7 +36103,7 @@ index 6120071..31d9be2 100644
16218
16219 INIT_LIST_HEAD(&rdev->same_set);
16220 init_waitqueue_head(&rdev->blocked_wait);
16221 -@@ -6984,7 +6984,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
16222 +@@ -6980,7 +6980,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
16223
16224 spin_unlock(&pers_lock);
16225 seq_printf(seq, "\n");
16226 @@ -35149,7 +36112,7 @@ index 6120071..31d9be2 100644
16227 return 0;
16228 }
16229 if (v == (void*)2) {
16230 -@@ -7087,7 +7087,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
16231 +@@ -7083,7 +7083,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
16232 return error;
16233
16234 seq = file->private_data;
16235 @@ -35158,7 +36121,7 @@ index 6120071..31d9be2 100644
16236 return error;
16237 }
16238
16239 -@@ -7101,7 +7101,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
16240 +@@ -7097,7 +7097,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
16241 /* always allow read */
16242 mask = POLLIN | POLLRDNORM;
16243
16244 @@ -35167,7 +36130,7 @@ index 6120071..31d9be2 100644
16245 mask |= POLLERR | POLLPRI;
16246 return mask;
16247 }
16248 -@@ -7145,7 +7145,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
16249 +@@ -7141,7 +7141,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
16250 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
16251 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
16252 (int)part_stat_read(&disk->part0, sectors[1]) -
16253 @@ -35177,7 +36140,7 @@ index 6120071..31d9be2 100644
16254 * as sync_io is counted when a request starts, and
16255 * disk_stats is counted when it completes.
16256 diff --git a/drivers/md/md.h b/drivers/md/md.h
16257 -index af443ab..0f93be3 100644
16258 +index eca59c3..7c42285 100644
16259 --- a/drivers/md/md.h
16260 +++ b/drivers/md/md.h
16261 @@ -94,13 +94,13 @@ struct md_rdev {
16262 @@ -35196,7 +36159,7 @@ index af443ab..0f93be3 100644
16263 * for reporting to userspace and storing
16264 * in superblock.
16265 */
16266 -@@ -432,7 +432,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
16267 +@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
16268
16269 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
16270 {
16271 @@ -35218,10 +36181,10 @@ index 1cbfc6b..56e1dbb 100644
16272 /*----------------------------------------------------------------*/
16273
16274 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
16275 -index a0f7309..5599dbc 100644
16276 +index d5bddfc..b079b4b 100644
16277 --- a/drivers/md/raid1.c
16278 +++ b/drivers/md/raid1.c
16279 -@@ -1819,7 +1819,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
16280 +@@ -1818,7 +1818,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
16281 if (r1_sync_page_io(rdev, sect, s,
16282 bio->bi_io_vec[idx].bv_page,
16283 READ) != 0)
16284 @@ -35230,7 +36193,7 @@ index a0f7309..5599dbc 100644
16285 }
16286 sectors -= s;
16287 sect += s;
16288 -@@ -2041,7 +2041,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
16289 +@@ -2040,7 +2040,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
16290 test_bit(In_sync, &rdev->flags)) {
16291 if (r1_sync_page_io(rdev, sect, s,
16292 conf->tmppage, READ)) {
16293 @@ -35240,10 +36203,10 @@ index a0f7309..5599dbc 100644
16294 "md/raid1:%s: read error corrected "
16295 "(%d sectors at %llu on %s)\n",
16296 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
16297 -index c9acbd7..386cd3e 100644
16298 +index 64d4824..8b9ea57 100644
16299 --- a/drivers/md/raid10.c
16300 +++ b/drivers/md/raid10.c
16301 -@@ -1878,7 +1878,7 @@ static void end_sync_read(struct bio *bio, int error)
16302 +@@ -1877,7 +1877,7 @@ static void end_sync_read(struct bio *bio, int error)
16303 /* The write handler will notice the lack of
16304 * R10BIO_Uptodate and record any errors etc
16305 */
16306 @@ -35252,7 +36215,7 @@ index c9acbd7..386cd3e 100644
16307 &conf->mirrors[d].rdev->corrected_errors);
16308
16309 /* for reconstruct, we always reschedule after a read.
16310 -@@ -2227,7 +2227,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
16311 +@@ -2226,7 +2226,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
16312 {
16313 struct timespec cur_time_mon;
16314 unsigned long hours_since_last;
16315 @@ -35261,7 +36224,7 @@ index c9acbd7..386cd3e 100644
16316
16317 ktime_get_ts(&cur_time_mon);
16318
16319 -@@ -2249,9 +2249,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
16320 +@@ -2248,9 +2248,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
16321 * overflowing the shift of read_errors by hours_since_last.
16322 */
16323 if (hours_since_last >= 8 * sizeof(read_errors))
16324 @@ -35273,7 +36236,7 @@ index c9acbd7..386cd3e 100644
16325 }
16326
16327 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
16328 -@@ -2305,8 +2305,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16329 +@@ -2304,8 +2304,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16330 return;
16331
16332 check_decay_read_errors(mddev, rdev);
16333 @@ -35284,7 +36247,7 @@ index c9acbd7..386cd3e 100644
16334 char b[BDEVNAME_SIZE];
16335 bdevname(rdev->bdev, b);
16336
16337 -@@ -2314,7 +2314,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16338 +@@ -2313,7 +2313,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16339 "md/raid10:%s: %s: Raid device exceeded "
16340 "read_error threshold [cur %d:max %d]\n",
16341 mdname(mddev), b,
16342 @@ -35293,7 +36256,7 @@ index c9acbd7..386cd3e 100644
16343 printk(KERN_NOTICE
16344 "md/raid10:%s: %s: Failing raid device\n",
16345 mdname(mddev), b);
16346 -@@ -2469,7 +2469,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16347 +@@ -2468,7 +2468,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
16348 sect +
16349 choose_data_offset(r10_bio, rdev)),
16350 bdevname(rdev->bdev, b));
16351 @@ -35303,10 +36266,10 @@ index c9acbd7..386cd3e 100644
16352
16353 rdev_dec_pending(rdev, mddev);
16354 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
16355 -index a450268..c4168a9 100644
16356 +index 19d77a0..56051b92 100644
16357 --- a/drivers/md/raid5.c
16358 +++ b/drivers/md/raid5.c
16359 -@@ -1789,21 +1789,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
16360 +@@ -1797,21 +1797,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
16361 mdname(conf->mddev), STRIPE_SECTORS,
16362 (unsigned long long)s,
16363 bdevname(rdev->bdev, b));
16364 @@ -35332,7 +36295,7 @@ index a450268..c4168a9 100644
16365 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
16366 printk_ratelimited(
16367 KERN_WARNING
16368 -@@ -1831,7 +1831,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
16369 +@@ -1839,7 +1839,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
16370 mdname(conf->mddev),
16371 (unsigned long long)s,
16372 bdn);
16373 @@ -35367,50 +36330,11 @@ index 404f63a..4796533 100644
16374
16375 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
16376 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
16377 -diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
16378 -index 3aa6856..435ad25 100644
16379 ---- a/drivers/media/pci/cx88/cx88-alsa.c
16380 -+++ b/drivers/media/pci/cx88/cx88-alsa.c
16381 -@@ -749,7 +749,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
16382 - * Only boards with eeprom and byte 1 at eeprom=1 have it
16383 - */
16384 -
16385 --static const struct pci_device_id cx88_audio_pci_tbl[] __devinitdata = {
16386 -+static const struct pci_device_id cx88_audio_pci_tbl[] __devinitconst = {
16387 - {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
16388 - {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
16389 - {0, }
16390 -diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
16391 -index feff57e..66a2c67 100644
16392 ---- a/drivers/media/pci/ddbridge/ddbridge-core.c
16393 -+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
16394 -@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
16395 - .subvendor = _subvend, .subdevice = _subdev, \
16396 - .driver_data = (unsigned long)&_driverdata }
16397 -
16398 --static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
16399 -+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
16400 - DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
16401 - DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
16402 - DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
16403 -diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c
16404 -index 96a13ed..6df45b4 100644
16405 ---- a/drivers/media/pci/ngene/ngene-cards.c
16406 -+++ b/drivers/media/pci/ngene/ngene-cards.c
16407 -@@ -741,7 +741,7 @@ static struct ngene_info ngene_info_terratec = {
16408 -
16409 - /****************************************************************************/
16410 -
16411 --static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
16412 -+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
16413 - NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
16414 - NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
16415 - NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
16416 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
16417 -index a3b1a34..71ce0e3 100644
16418 +index 35cc526..9d90d83 100644
16419 --- a/drivers/media/platform/omap/omap_vout.c
16420 +++ b/drivers/media/platform/omap/omap_vout.c
16421 -@@ -65,7 +65,6 @@ enum omap_vout_channels {
16422 +@@ -63,7 +63,6 @@ enum omap_vout_channels {
16423 OMAP_VIDEO2,
16424 };
16425
16426 @@ -35418,7 +36342,7 @@ index a3b1a34..71ce0e3 100644
16427 /* Variables configurable through module params*/
16428 static u32 video1_numbuffers = 3;
16429 static u32 video2_numbuffers = 3;
16430 -@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
16431 +@@ -1010,6 +1009,12 @@ static int omap_vout_open(struct file *file)
16432 {
16433 struct videobuf_queue *q;
16434 struct omap_vout_device *vout = NULL;
16435 @@ -35431,7 +36355,7 @@ index a3b1a34..71ce0e3 100644
16436
16437 vout = video_drvdata(file);
16438 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
16439 -@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
16440 +@@ -1027,10 +1032,6 @@ static int omap_vout_open(struct file *file)
16441 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
16442
16443 q = &vout->vbq;
16444 @@ -35443,7 +36367,7 @@ index a3b1a34..71ce0e3 100644
16445
16446 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
16447 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
16448 -index ddb422e..8cf008e 100644
16449 +index b671e20..34088b7 100644
16450 --- a/drivers/media/platform/s5p-tv/mixer.h
16451 +++ b/drivers/media/platform/s5p-tv/mixer.h
16452 @@ -155,7 +155,7 @@ struct mxr_layer {
16453 @@ -35482,10 +36406,10 @@ index 3b1670a..595c939 100644
16454 if (done && done != layer->shadow_buf)
16455 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
16456 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
16457 -index 0c1cd89..6574647 100644
16458 +index 1f3b743..e839271 100644
16459 --- a/drivers/media/platform/s5p-tv/mixer_video.c
16460 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
16461 -@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
16462 +@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
16463 layer->geo.src.height = layer->geo.src.full_height;
16464
16465 mxr_geometry_dump(mdev, &layer->geo);
16466 @@ -35494,7 +36418,7 @@ index 0c1cd89..6574647 100644
16467 mxr_geometry_dump(mdev, &layer->geo);
16468 }
16469
16470 -@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
16471 +@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
16472 layer->geo.dst.full_width = mbus_fmt.width;
16473 layer->geo.dst.full_height = mbus_fmt.height;
16474 layer->geo.dst.field = mbus_fmt.field;
16475 @@ -35503,7 +36427,7 @@ index 0c1cd89..6574647 100644
16476
16477 mxr_geometry_dump(mdev, &layer->geo);
16478 }
16479 -@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
16480 +@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
16481 /* set source size to highest accepted value */
16482 geo->src.full_width = max(geo->dst.full_width, pix->width);
16483 geo->src.full_height = max(geo->dst.full_height, pix->height);
16484 @@ -35512,7 +36436,7 @@ index 0c1cd89..6574647 100644
16485 mxr_geometry_dump(mdev, &layer->geo);
16486 /* set cropping to total visible screen */
16487 geo->src.width = pix->width;
16488 -@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
16489 +@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
16490 geo->src.x_offset = 0;
16491 geo->src.y_offset = 0;
16492 /* assure consistency of geometry */
16493 @@ -35527,7 +36451,7 @@ index 0c1cd89..6574647 100644
16494 mxr_geometry_dump(mdev, &layer->geo);
16495
16496 /* returning results */
16497 -@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
16498 +@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
16499 target->width = s->r.width;
16500 target->height = s->r.height;
16501
16502 @@ -35536,7 +36460,7 @@ index 0c1cd89..6574647 100644
16503
16504 /* retrieve update selection rectangle */
16505 res.left = target->x_offset;
16506 -@@ -928,13 +928,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
16507 +@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
16508 mxr_output_get(mdev);
16509
16510 mxr_layer_update_output(layer);
16511 @@ -35552,7 +36476,7 @@ index 0c1cd89..6574647 100644
16512 mxr_streamer_get(mdev);
16513
16514 return 0;
16515 -@@ -1004,7 +1004,7 @@ static int stop_streaming(struct vb2_queue *vq)
16516 +@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
16517 spin_unlock_irqrestore(&layer->enq_slock, flags);
16518
16519 /* disabling layer in hardware */
16520 @@ -35561,7 +36485,7 @@ index 0c1cd89..6574647 100644
16521 /* remove one streamer */
16522 mxr_streamer_put(mdev);
16523 /* allow changes in output configuration */
16524 -@@ -1043,8 +1043,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
16525 +@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
16526
16527 void mxr_layer_release(struct mxr_layer *layer)
16528 {
16529 @@ -35572,7 +36496,7 @@ index 0c1cd89..6574647 100644
16530 }
16531
16532 void mxr_base_layer_release(struct mxr_layer *layer)
16533 -@@ -1070,7 +1070,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
16534 +@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
16535
16536 layer->mdev = mdev;
16537 layer->idx = idx;
16538 @@ -35594,30 +36518,8 @@ index 3d13a63..da31bf1 100644
16539 .release = mxr_vp_layer_release,
16540 .buffer_set = mxr_vp_buffer_set,
16541 .stream_set = mxr_vp_stream_set,
16542 -diff --git a/drivers/media/platform/timblogiw.c b/drivers/media/platform/timblogiw.c
16543 -index 02194c0..36d69c1 100644
16544 ---- a/drivers/media/platform/timblogiw.c
16545 -+++ b/drivers/media/platform/timblogiw.c
16546 -@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
16547 -
16548 - /* Platform device functions */
16549 -
16550 --static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
16551 -+static struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
16552 - .vidioc_querycap = timblogiw_querycap,
16553 - .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
16554 - .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
16555 -@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
16556 - .vidioc_enum_framesizes = timblogiw_enum_framesizes,
16557 - };
16558 -
16559 --static __devinitconst struct v4l2_file_operations timblogiw_fops = {
16560 -+static struct v4l2_file_operations timblogiw_fops = {
16561 - .owner = THIS_MODULE,
16562 - .open = timblogiw_open,
16563 - .release = timblogiw_close,
16564 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
16565 -index 697a421..16c5a5f 100644
16566 +index 643d80a..56bb96b 100644
16567 --- a/drivers/media/radio/radio-cadet.c
16568 +++ b/drivers/media/radio/radio-cadet.c
16569 @@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
16570 @@ -35683,7 +36585,7 @@ index fb69baa..cf7ad22 100644
16571 * Rounding UP to nearest 4-kB boundary here...
16572 */
16573 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
16574 -index 551262e..7551198 100644
16575 +index fa43c39..daeb158 100644
16576 --- a/drivers/message/fusion/mptsas.c
16577 +++ b/drivers/message/fusion/mptsas.c
16578 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
16579 @@ -35735,10 +36637,10 @@ index 551262e..7551198 100644
16580 mptsas_get_port(struct mptsas_phyinfo *phy_info)
16581 {
16582 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
16583 -index 0c3ced7..1fe34ec 100644
16584 +index 164afa7..b6b2e74 100644
16585 --- a/drivers/message/fusion/mptscsih.c
16586 +++ b/drivers/message/fusion/mptscsih.c
16587 -@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
16588 +@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
16589
16590 h = shost_priv(SChost);
16591
16592 @@ -35923,7 +36825,7 @@ index a8c08f3..155fe3d 100644
16593 #endif
16594
16595 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
16596 -index 965c480..71f2db9 100644
16597 +index 45ece11..8efa218 100644
16598 --- a/drivers/mfd/janz-cmodio.c
16599 +++ b/drivers/mfd/janz-cmodio.c
16600 @@ -13,6 +13,7 @@
16601 @@ -36289,10 +37191,10 @@ index d971817..33bdca5 100644
16602
16603 break;
16604 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
16605 -index a0e1720..ee63d0b 100644
16606 +index 6d8f701..35b6369 100644
16607 --- a/drivers/mmc/core/mmc_ops.c
16608 +++ b/drivers/mmc/core/mmc_ops.c
16609 -@@ -245,7 +245,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
16610 +@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
16611 void *data_buf;
16612 int is_on_stack;
16613
16614 @@ -36313,10 +37215,10 @@ index 53b8fd9..615b462 100644
16615 +} __do_const;
16616 #endif /* _DW_MMC_H_ */
16617 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
16618 -index c9ec725..178e79a 100644
16619 +index 82a8de1..3c56ccb 100644
16620 --- a/drivers/mmc/host/sdhci-s3c.c
16621 +++ b/drivers/mmc/host/sdhci-s3c.c
16622 -@@ -719,9 +719,11 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
16623 +@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
16624 * we can use overriding functions instead of default.
16625 */
16626 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
16627 @@ -36345,11 +37247,11 @@ index a4eb8b5..8c0628f 100644
16628 "ECC needs a full sector write (adr: %lx size %lx)\n",
16629 (long) to, (long) len);
16630 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
16631 -index e706a23..b3d262f 100644
16632 +index 0c8bb6b..6f35deb 100644
16633 --- a/drivers/mtd/nand/denali.c
16634 +++ b/drivers/mtd/nand/denali.c
16635 -@@ -26,6 +26,7 @@
16636 - #include <linux/pci.h>
16637 +@@ -24,6 +24,7 @@
16638 + #include <linux/slab.h>
16639 #include <linux/mtd/mtd.h>
16640 #include <linux/module.h>
16641 +#include <linux/slab.h>
16642 @@ -36369,7 +37271,7 @@ index 51b9d6a..52af9a7 100644
16643 #include <linux/mtd/nand.h>
16644 #include <linux/mtd/nftl.h>
16645 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
16646 -index 203ff9d..0968ca8 100644
16647 +index 70dba5d..11a0919 100644
16648 --- a/drivers/net/ethernet/8390/ax88796.c
16649 +++ b/drivers/net/ethernet/8390/ax88796.c
16650 @@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
16651 @@ -36386,10 +37288,10 @@ index 203ff9d..0968ca8 100644
16652
16653 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
16654 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
16655 -index 9c5ea6c..eaad276 100644
16656 +index 0991534..8098e92 100644
16657 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
16658 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
16659 -@@ -1046,7 +1046,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
16660 +@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
16661 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
16662 {
16663 /* RX_MODE controlling object */
16664 @@ -36399,7 +37301,7 @@ index 9c5ea6c..eaad276 100644
16665 /* multicast configuration controlling object */
16666 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
16667 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
16668 -index 614981c..11216c7 100644
16669 +index 09b625e..15b16fe 100644
16670 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
16671 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
16672 @@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
16673 @@ -36424,10 +37326,10 @@ index 614981c..11216c7 100644
16674 }
16675
16676 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
16677 -index acf2fe4..efb96df 100644
16678 +index adbd91b..58ec94a 100644
16679 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
16680 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
16681 -@@ -1281,8 +1281,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
16682 +@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
16683
16684 /********************* RX MODE ****************/
16685
16686 @@ -36438,10 +37340,10 @@ index acf2fe4..efb96df 100644
16687 /**
16688 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
16689 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
16690 -index d9308c32..d87b824 100644
16691 +index d330e81..ce1fb9a 100644
16692 --- a/drivers/net/ethernet/broadcom/tg3.h
16693 +++ b/drivers/net/ethernet/broadcom/tg3.h
16694 -@@ -140,6 +140,7 @@
16695 +@@ -146,6 +146,7 @@
16696 #define CHIPREV_ID_5750_A0 0x4000
16697 #define CHIPREV_ID_5750_A1 0x4001
16698 #define CHIPREV_ID_5750_A3 0x4003
16699 @@ -36463,7 +37365,7 @@ index 8cffcdf..aadf043 100644
16700 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
16701
16702 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
16703 -index f879e92..726f20f 100644
16704 +index 4c83003..2a2a5b9 100644
16705 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
16706 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
16707 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
16708 @@ -36485,10 +37387,10 @@ index f879e92..726f20f 100644
16709 break;
16710 }
16711 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
16712 -index d1b6cc5..cde0d97 100644
16713 +index 4d6f3c5..6169e60 100644
16714 --- a/drivers/net/ethernet/emulex/benet/be_main.c
16715 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
16716 -@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
16717 +@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
16718
16719 if (wrapped)
16720 newacc += 65536;
16721 @@ -36524,20 +37426,20 @@ index b901a01..1ff32ee 100644
16722 #include "ftmac100.h"
16723
16724 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
16725 -index d929131..aed108f 100644
16726 +index bb9256a..56d8752 100644
16727 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
16728 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
16729 -@@ -865,7 +865,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
16730 - /* store the new cycle speed */
16731 - adapter->cycle_speed = cycle_speed;
16732 +@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
16733 + }
16734
16735 + /* update the base incval used to calculate frequency adjustment */
16736 - ACCESS_ONCE(adapter->base_incval) = incval;
16737 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
16738 smp_mb();
16739
16740 - /* grab the ptp lock */
16741 + /* need lock to prevent incorrect read while modifying cyclecounter */
16742 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
16743 -index c2e420a..26a75e0 100644
16744 +index fbe5363..266b4e3 100644
16745 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
16746 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
16747 @@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
16748 @@ -36562,10 +37464,10 @@ index c2e420a..26a75e0 100644
16749 __vxge_hw_mempool_create(vpath->hldev,
16750 fifo->config->memblock_size,
16751 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
16752 -index 6afe74e..2e2950f 100644
16753 +index 998974f..ecd26db 100644
16754 --- a/drivers/net/ethernet/realtek/r8169.c
16755 +++ b/drivers/net/ethernet/realtek/r8169.c
16756 -@@ -747,22 +747,22 @@ struct rtl8169_private {
16757 +@@ -741,22 +741,22 @@ struct rtl8169_private {
16758 struct mdio_ops {
16759 void (*write)(struct rtl8169_private *, int, int);
16760 int (*read)(struct rtl8169_private *, int);
16761 @@ -36621,7 +37523,7 @@ index 0c74a70..3bc6f68 100644
16762
16763 /* To mask all all interrupts.*/
16764 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
16765 -index 5fd6f46..ee1f265 100644
16766 +index e6fe0d8..2b7d752 100644
16767 --- a/drivers/net/hyperv/hyperv_net.h
16768 +++ b/drivers/net/hyperv/hyperv_net.h
16769 @@ -101,7 +101,7 @@ struct rndis_device {
16770 @@ -36634,7 +37536,7 @@ index 5fd6f46..ee1f265 100644
16771 spinlock_t request_lock;
16772 struct list_head req_list;
16773 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
16774 -index 928148c..d83298e 100644
16775 +index 2b657d4..9903bc0 100644
16776 --- a/drivers/net/hyperv/rndis_filter.c
16777 +++ b/drivers/net/hyperv/rndis_filter.c
16778 @@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
16779 @@ -36646,7 +37548,7 @@ index 928148c..d83298e 100644
16780
16781 /* Add to the request list */
16782 spin_lock_irqsave(&dev->request_lock, flags);
16783 -@@ -760,7 +760,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
16784 +@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
16785
16786 /* Setup the rndis set */
16787 halt = &request->request_msg.msg.halt_req;
16788 @@ -36656,10 +37558,10 @@ index 928148c..d83298e 100644
16789 /* Ignore return since this msg is optional. */
16790 rndis_filter_send_request(dev, request);
16791 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
16792 -index 7d39add..037e1da 100644
16793 +index 1e9cb0b..7839125 100644
16794 --- a/drivers/net/ieee802154/fakehard.c
16795 +++ b/drivers/net/ieee802154/fakehard.c
16796 -@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
16797 +@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
16798 phy->transmit_power = 0xbf;
16799
16800 dev->netdev_ops = &fake_ops;
16801 @@ -36668,6 +37570,19 @@ index 7d39add..037e1da 100644
16802
16803 priv = netdev_priv(dev);
16804 priv->phy = phy;
16805 +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
16806 +index d3fb97d..e229d3e 100644
16807 +--- a/drivers/net/macvlan.c
16808 ++++ b/drivers/net/macvlan.c
16809 +@@ -913,7 +913,7 @@ static int macvlan_device_event(struct notifier_block *unused,
16810 + return NOTIFY_DONE;
16811 + }
16812 +
16813 +-static struct notifier_block macvlan_notifier_block __read_mostly = {
16814 ++static struct notifier_block macvlan_notifier_block = {
16815 + .notifier_call = macvlan_device_event,
16816 + };
16817 +
16818 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
16819 index 0f0f9ce..0ca5819 100644
16820 --- a/drivers/net/macvtap.c
16821 @@ -36694,7 +37609,7 @@ index daec9b0..6428fcb 100644
16822 }
16823 EXPORT_SYMBOL(free_mdio_bitbang);
16824 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
16825 -index eb3f5ce..d773730 100644
16826 +index 0b2706a..ba1430d 100644
16827 --- a/drivers/net/ppp/ppp_generic.c
16828 +++ b/drivers/net/ppp/ppp_generic.c
16829 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
16830 @@ -36729,10 +37644,10 @@ index ad86660..9fd0884 100644
16831 };
16832
16833 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
16834 -index 0873cdc..ddb178e 100644
16835 +index 2917a86..edd463f 100644
16836 --- a/drivers/net/tun.c
16837 +++ b/drivers/net/tun.c
16838 -@@ -1374,7 +1374,7 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
16839 +@@ -1836,7 +1836,7 @@ unlock:
16840 }
16841
16842 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
16843 @@ -36741,18 +37656,18 @@ index 0873cdc..ddb178e 100644
16844 {
16845 struct tun_file *tfile = file->private_data;
16846 struct tun_struct *tun;
16847 -@@ -1387,6 +1387,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
16848 +@@ -1848,6 +1848,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
16849 int vnet_hdr_sz;
16850 int ret;
16851
16852 + if (ifreq_len > sizeof ifr)
16853 + return -EFAULT;
16854 +
16855 - if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
16856 + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
16857 if (copy_from_user(&ifr, argp, ifreq_len))
16858 return -EFAULT;
16859 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
16860 -index 605a4ba..a883dd1 100644
16861 +index cd8ccb2..cff5144 100644
16862 --- a/drivers/net/usb/hso.c
16863 +++ b/drivers/net/usb/hso.c
16864 @@ -71,7 +71,7 @@
16865 @@ -36833,7 +37748,7 @@ index 605a4ba..a883dd1 100644
16866 /* Setup and send a ctrl req read on
16867 * port i */
16868 if (!serial->rx_urb_filled[0]) {
16869 -@@ -3078,7 +3077,7 @@ static int hso_resume(struct usb_interface *iface)
16870 +@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
16871 /* Start all serial ports */
16872 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
16873 if (serial_table[i] && (serial_table[i]->interface == iface)) {
16874 @@ -36842,63 +37757,6 @@ index 605a4ba..a883dd1 100644
16875 result =
16876 hso_start_serial_device(serial_table[i], GFP_NOIO);
16877 hso_kick_transmit(dev2ser(serial_table[i]));
16878 -diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
16879 -index edb81ed..ab8931c 100644
16880 ---- a/drivers/net/usb/usbnet.c
16881 -+++ b/drivers/net/usb/usbnet.c
16882 -@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
16883 - unsigned long lockflags;
16884 - size_t size = dev->rx_urb_size;
16885 -
16886 -+ /* prevent rx skb allocation when error ratio is high */
16887 -+ if (test_bit(EVENT_RX_KILL, &dev->flags)) {
16888 -+ usb_free_urb(urb);
16889 -+ return -ENOLINK;
16890 -+ }
16891 -+
16892 - skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
16893 - if (!skb) {
16894 - netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
16895 -@@ -539,6 +545,17 @@ block:
16896 - break;
16897 - }
16898 -
16899 -+ /* stop rx if packet error rate is high */
16900 -+ if (++dev->pkt_cnt > 30) {
16901 -+ dev->pkt_cnt = 0;
16902 -+ dev->pkt_err = 0;
16903 -+ } else {
16904 -+ if (state == rx_cleanup)
16905 -+ dev->pkt_err++;
16906 -+ if (dev->pkt_err > 20)
16907 -+ set_bit(EVENT_RX_KILL, &dev->flags);
16908 -+ }
16909 -+
16910 - state = defer_bh(dev, skb, &dev->rxq, state);
16911 -
16912 - if (urb) {
16913 -@@ -790,6 +807,11 @@ int usbnet_open (struct net_device *net)
16914 - (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
16915 - "simple");
16916 -
16917 -+ /* reset rx error state */
16918 -+ dev->pkt_cnt = 0;
16919 -+ dev->pkt_err = 0;
16920 -+ clear_bit(EVENT_RX_KILL, &dev->flags);
16921 -+
16922 - // delay posting reads until we're fully open
16923 - tasklet_schedule (&dev->bh);
16924 - if (info->manage_power) {
16925 -@@ -1253,6 +1275,9 @@ static void usbnet_bh (unsigned long param)
16926 - }
16927 - }
16928 -
16929 -+ /* restart RX again after disabling due to high error rate */
16930 -+ clear_bit(EVENT_RX_KILL, &dev->flags);
16931 -+
16932 - // waiting for all pending urbs to complete?
16933 - if (dev->wait) {
16934 - if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
16935 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
16936 index 8d78253..bebbb68 100644
16937 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
16938 @@ -37105,10 +37963,10 @@ index 301bf72..3f5654f 100644
16939
16940 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
16941 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
16942 -index b68aaf5..fb20845 100644
16943 +index 9d26fc5..60d9f14 100644
16944 --- a/drivers/net/wireless/ath/ath9k/hw.h
16945 +++ b/drivers/net/wireless/ath/ath9k/hw.h
16946 -@@ -657,7 +657,7 @@ struct ath_hw_private_ops {
16947 +@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
16948
16949 /* ANI */
16950 void (*ani_cache_ini_regs)(struct ath_hw *ah);
16951 @@ -37117,7 +37975,7 @@ index b68aaf5..fb20845 100644
16952
16953 /**
16954 * struct ath_hw_ops - callbacks used by hardware code and driver code
16955 -@@ -687,7 +687,7 @@ struct ath_hw_ops {
16956 +@@ -688,7 +688,7 @@ struct ath_hw_ops {
16957 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
16958 struct ath_hw_antcomb_conf *antconf);
16959 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
16960 @@ -37126,30 +37984,8 @@ index b68aaf5..fb20845 100644
16961
16962 struct ath_nf_limits {
16963 s16 max;
16964 -diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
16965 -index 71ced17..cd82b12 100644
16966 ---- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
16967 -+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
16968 -@@ -184,7 +184,7 @@ struct brcmf_cfg80211_event_loop {
16969 - struct net_device *ndev,
16970 - const struct brcmf_event_msg *e,
16971 - void *data);
16972 --};
16973 -+} __no_const;
16974 -
16975 - /* basic structure of scan request */
16976 - struct brcmf_cfg80211_scan_req {
16977 -@@ -239,7 +239,7 @@ struct brcmf_cfg80211_profile {
16978 - struct brcmf_cfg80211_iscan_eloop {
16979 - s32 (*handler[WL_SCAN_ERSULTS_LAST])
16980 - (struct brcmf_cfg80211_info *cfg);
16981 --};
16982 -+} __no_const;
16983 -
16984 - /* dongle iscan controller */
16985 - struct brcmf_cfg80211_iscan_ctrl {
16986 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
16987 -index e252acb..6ad1e65 100644
16988 +index 3726cd6..b655808 100644
16989 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
16990 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
16991 @@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
16992 @@ -37164,7 +38000,7 @@ index e252acb..6ad1e65 100644
16993
16994 D_INFO("*** LOAD DRIVER ***\n");
16995 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
16996 -index 1a98fa3..51e6661 100644
16997 +index 5b9533e..7733880 100644
16998 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
16999 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
17000 @@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
17001 @@ -37285,10 +38121,10 @@ index 1a98fa3..51e6661 100644
17002 memset(buf, 0, sizeof(buf));
17003 buf_size = min(count, sizeof(buf) - 1);
17004 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
17005 -index fe0fffd..b4c5724 100644
17006 +index 35708b9..31f7754 100644
17007 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
17008 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
17009 -@@ -1967,7 +1967,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
17010 +@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
17011 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
17012
17013 char buf[8];
17014 @@ -37297,7 +38133,7 @@ index fe0fffd..b4c5724 100644
17015 u32 reset_flag;
17016
17017 memset(buf, 0, sizeof(buf));
17018 -@@ -1988,7 +1988,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
17019 +@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
17020 {
17021 struct iwl_trans *trans = file->private_data;
17022 char buf[8];
17023 @@ -37307,29 +38143,53 @@ index fe0fffd..b4c5724 100644
17024
17025 memset(buf, 0, sizeof(buf));
17026 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
17027 -index 429ca32..f86236b 100644
17028 +index ff90855..e46d223 100644
17029 --- a/drivers/net/wireless/mac80211_hwsim.c
17030 +++ b/drivers/net/wireless/mac80211_hwsim.c
17031 -@@ -1751,9 +1751,11 @@ static int __init init_mac80211_hwsim(void)
17032 - return -EINVAL;
17033 +@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
17034
17035 - if (fake_hw_scan) {
17036 + if (channels > 1) {
17037 + hwsim_if_comb.num_different_channels = channels;
17038 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
17039 +- mac80211_hwsim_ops.cancel_hw_scan =
17040 +- mac80211_hwsim_cancel_hw_scan;
17041 - mac80211_hwsim_ops.sw_scan_start = NULL;
17042 - mac80211_hwsim_ops.sw_scan_complete = NULL;
17043 +- mac80211_hwsim_ops.remain_on_channel =
17044 +- mac80211_hwsim_roc;
17045 +- mac80211_hwsim_ops.cancel_remain_on_channel =
17046 +- mac80211_hwsim_croc;
17047 +- mac80211_hwsim_ops.add_chanctx =
17048 +- mac80211_hwsim_add_chanctx;
17049 +- mac80211_hwsim_ops.remove_chanctx =
17050 +- mac80211_hwsim_remove_chanctx;
17051 +- mac80211_hwsim_ops.change_chanctx =
17052 +- mac80211_hwsim_change_chanctx;
17053 +- mac80211_hwsim_ops.assign_vif_chanctx =
17054 +- mac80211_hwsim_assign_vif_chanctx;
17055 +- mac80211_hwsim_ops.unassign_vif_chanctx =
17056 +- mac80211_hwsim_unassign_vif_chanctx;
17057 + pax_open_kernel();
17058 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
17059 ++ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
17060 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
17061 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
17062 ++ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
17063 ++ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
17064 ++ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
17065 ++ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
17066 ++ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
17067 ++ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
17068 ++ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
17069 + pax_close_kernel();
17070 }
17071
17072 spin_lock_init(&hwsim_radio_lock);
17073 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
17074 -index bd1f0cb..db85ab0 100644
17075 +index abe1d03..fb02c22 100644
17076 --- a/drivers/net/wireless/rndis_wlan.c
17077 +++ b/drivers/net/wireless/rndis_wlan.c
17078 -@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
17079 +@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
17080
17081 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
17082
17083 @@ -37368,7 +38228,7 @@ index e488b94..14b6a0c 100644
17084 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
17085 hdr->seq_ctrl |= cpu_to_le16(seqno);
17086 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
17087 -index e2750a1..797e179 100644
17088 +index e57ee48..541cf6c 100644
17089 --- a/drivers/net/wireless/ti/wl1251/sdio.c
17090 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
17091 @@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
17092 @@ -37394,7 +38254,7 @@ index e2750a1..797e179 100644
17093 wl1251_info("using SDIO interrupt");
17094 }
17095 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
17096 -index dadf1db..d9db7a7 100644
17097 +index e5f5f8f..fdf15b7 100644
17098 --- a/drivers/net/wireless/ti/wl12xx/main.c
17099 +++ b/drivers/net/wireless/ti/wl12xx/main.c
17100 @@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
17101 @@ -37420,7 +38280,7 @@ index dadf1db..d9db7a7 100644
17102 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
17103 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
17104 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
17105 -index a39682a..1e8220c 100644
17106 +index 8d8c1f8..e754844 100644
17107 --- a/drivers/net/wireless/ti/wl18xx/main.c
17108 +++ b/drivers/net/wireless/ti/wl18xx/main.c
17109 @@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
17110 @@ -37556,6 +38416,19 @@ index 849357c..b83c1e0 100644
17111 {
17112 return __oprofilefs_create_file(sb, root, name,
17113 &atomic_ro_fops, 0444, val);
17114 +diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
17115 +index 93404f7..4a313d8 100644
17116 +--- a/drivers/oprofile/timer_int.c
17117 ++++ b/drivers/oprofile/timer_int.c
17118 +@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
17119 + return NOTIFY_OK;
17120 + }
17121 +
17122 +-static struct notifier_block __refdata oprofile_cpu_notifier = {
17123 ++static struct notifier_block oprofile_cpu_notifier = {
17124 + .notifier_call = oprofile_cpu_notify,
17125 + };
17126 +
17127 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
17128 index 3f56bc0..707d642 100644
17129 --- a/drivers/parport/procfs.c
17130 @@ -37610,7 +38483,7 @@ index a6a71c4..c91097b 100644
17131
17132 status = cpci_hp_register_controller(&generic_hpc);
17133 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
17134 -index 6bf8d2a..9711ce0 100644
17135 +index 449b4bb..257e2e8 100644
17136 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
17137 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
17138 @@ -59,7 +59,6 @@
17139 @@ -37673,7 +38546,7 @@ index 76ba8a1..20ca857 100644
17140
17141 /* initialize our int15 lock */
17142 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
17143 -index 449f257..0731e96 100644
17144 +index 8474b6a..ee81993 100644
17145 --- a/drivers/pci/pcie/aspm.c
17146 +++ b/drivers/pci/pcie/aspm.c
17147 @@ -27,9 +27,9 @@
17148 @@ -37690,7 +38563,7 @@ index 449f257..0731e96 100644
17149 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
17150
17151 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
17152 -index ec909af..e7517f3 100644
17153 +index 6186f03..1a78714 100644
17154 --- a/drivers/pci/probe.c
17155 +++ b/drivers/pci/probe.c
17156 @@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
17157 @@ -37724,7 +38597,7 @@ index 9b8505c..f00870a 100644
17158 &proc_bus_pci_dev_operations);
17159 proc_initialized = 1;
17160 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
17161 -index 75dd651..2af4c9a 100644
17162 +index f946ca7..f25c833 100644
17163 --- a/drivers/platform/x86/thinkpad_acpi.c
17164 +++ b/drivers/platform/x86/thinkpad_acpi.c
17165 @@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
17166 @@ -37916,7 +38789,7 @@ index 769d265..a3a05ca 100644
17167 + pax_close_kernel();
17168 }
17169 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
17170 -index b0ecacb..7c9da2e 100644
17171 +index 3e6db1c..1fbbdae 100644
17172 --- a/drivers/pnp/resource.c
17173 +++ b/drivers/pnp/resource.c
17174 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
17175 @@ -37963,10 +38836,10 @@ index 7df7c5f..bd48c47 100644
17176 if (ret) {
17177 dev_err(dev, "failure to register otg notifier\n");
17178 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
17179 -index 8d53174..04c65de 100644
17180 +index 4d7c635..9860196 100644
17181 --- a/drivers/regulator/max8660.c
17182 +++ b/drivers/regulator/max8660.c
17183 -@@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
17184 +@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
17185 max8660->shadow_regs[MAX8660_OVER1] = 5;
17186 } else {
17187 /* Otherwise devices can be toggled via software */
17188 @@ -37979,11 +38852,30 @@ index 8d53174..04c65de 100644
17189 }
17190
17191 /*
17192 +diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
17193 +index 9a8ea91..c483dd9 100644
17194 +--- a/drivers/regulator/max8973-regulator.c
17195 ++++ b/drivers/regulator/max8973-regulator.c
17196 +@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
17197 + if (!pdata->enable_ext_control) {
17198 + max->desc.enable_reg = MAX8973_VOUT;
17199 + max->desc.enable_mask = MAX8973_VOUT_ENABLE;
17200 +- max8973_dcdc_ops.enable = regulator_enable_regmap;
17201 +- max8973_dcdc_ops.disable = regulator_disable_regmap;
17202 +- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
17203 ++ pax_open_kernel();
17204 ++ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
17205 ++ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
17206 ++ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
17207 ++ pax_close_kernel();
17208 + }
17209 +
17210 + max->enable_external_control = pdata->enable_ext_control;
17211 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
17212 -index 1fa6381..f58834e 100644
17213 +index 0d84b1f..c2da6ac 100644
17214 --- a/drivers/regulator/mc13892-regulator.c
17215 +++ b/drivers/regulator/mc13892-regulator.c
17216 -@@ -540,10 +540,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
17217 +@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
17218 }
17219 mc13xxx_unlock(mc13892);
17220
17221 @@ -37999,7 +38891,7 @@ index 1fa6381..f58834e 100644
17222 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
17223 ARRAY_SIZE(mc13892_regulators));
17224 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
17225 -index cace6d3..f623fda 100644
17226 +index 9a86b4b..3a383dc 100644
17227 --- a/drivers/rtc/rtc-dev.c
17228 +++ b/drivers/rtc/rtc-dev.c
17229 @@ -14,6 +14,7 @@
17230 @@ -38090,7 +38982,7 @@ index 593085a..47aa999 100644
17231
17232 /* These three are default values which can be overridden */
17233 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
17234 -index 4217e49..9c77e3e 100644
17235 +index 4f33806..afd6f60 100644
17236 --- a/drivers/scsi/hpsa.c
17237 +++ b/drivers/scsi/hpsa.c
17238 @@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
17239 @@ -38141,7 +39033,7 @@ index 4217e49..9c77e3e 100644
17240 (h->interrupts_enabled == 0);
17241 }
17242
17243 -@@ -4318,7 +4318,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
17244 +@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
17245 if (prod_index < 0)
17246 return -ENODEV;
17247 h->product_name = products[prod_index].product_name;
17248 @@ -38150,7 +39042,7 @@ index 4217e49..9c77e3e 100644
17249
17250 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
17251 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
17252 -@@ -4600,7 +4600,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
17253 +@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
17254
17255 assert_spin_locked(&lockup_detector_lock);
17256 remove_ctlr_from_lockup_detector_list(h);
17257 @@ -38159,7 +39051,7 @@ index 4217e49..9c77e3e 100644
17258 spin_lock_irqsave(&h->lock, flags);
17259 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
17260 spin_unlock_irqrestore(&h->lock, flags);
17261 -@@ -4778,7 +4778,7 @@ reinit_after_soft_reset:
17262 +@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
17263 }
17264
17265 /* make sure the board interrupts are off */
17266 @@ -38168,7 +39060,7 @@ index 4217e49..9c77e3e 100644
17267
17268 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
17269 goto clean2;
17270 -@@ -4812,7 +4812,7 @@ reinit_after_soft_reset:
17271 +@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
17272 * fake ones to scoop up any residual completions.
17273 */
17274 spin_lock_irqsave(&h->lock, flags);
17275 @@ -38177,7 +39069,7 @@ index 4217e49..9c77e3e 100644
17276 spin_unlock_irqrestore(&h->lock, flags);
17277 free_irqs(h);
17278 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
17279 -@@ -4831,9 +4831,9 @@ reinit_after_soft_reset:
17280 +@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
17281 dev_info(&h->pdev->dev, "Board READY.\n");
17282 dev_info(&h->pdev->dev,
17283 "Waiting for stale completions to drain.\n");
17284 @@ -38189,7 +39081,7 @@ index 4217e49..9c77e3e 100644
17285
17286 rc = controller_reset_failed(h->cfgtable);
17287 if (rc)
17288 -@@ -4854,7 +4854,7 @@ reinit_after_soft_reset:
17289 +@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
17290 }
17291
17292 /* Turn the interrupts on so we can service requests */
17293 @@ -38198,7 +39090,7 @@ index 4217e49..9c77e3e 100644
17294
17295 hpsa_hba_inquiry(h);
17296 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
17297 -@@ -4906,7 +4906,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
17298 +@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
17299 * To write all data in the battery backed cache to disks
17300 */
17301 hpsa_flush_cache(h);
17302 @@ -38207,7 +39099,7 @@ index 4217e49..9c77e3e 100644
17303 hpsa_free_irqs_and_disable_msix(h);
17304 }
17305
17306 -@@ -5075,7 +5075,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
17307 +@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
17308 return;
17309 }
17310 /* Change the access methods to the performant access methods */
17311 @@ -38388,7 +39280,7 @@ index bdb81cd..d3c7c2c 100644
17312 .qc_issue = sas_ata_qc_issue,
17313 .qc_fill_rtf = sas_ata_qc_fill_rtf,
17314 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
17315 -index 69b5993..1ac9dce 100644
17316 +index df4c13a..a51e90c 100644
17317 --- a/drivers/scsi/lpfc/lpfc.h
17318 +++ b/drivers/scsi/lpfc/lpfc.h
17319 @@ -424,7 +424,7 @@ struct lpfc_vport {
17320 @@ -38400,7 +39292,7 @@ index 69b5993..1ac9dce 100644
17321 #endif
17322 uint8_t stat_data_enabled;
17323 uint8_t stat_data_blocked;
17324 -@@ -840,8 +840,8 @@ struct lpfc_hba {
17325 +@@ -842,8 +842,8 @@ struct lpfc_hba {
17326 struct timer_list fabric_block_timer;
17327 unsigned long bit_flags;
17328 #define FABRIC_COMANDS_BLOCKED 0
17329 @@ -38411,7 +39303,7 @@ index 69b5993..1ac9dce 100644
17330 unsigned long last_rsrc_error_time;
17331 unsigned long last_ramp_down_time;
17332 unsigned long last_ramp_up_time;
17333 -@@ -877,7 +877,7 @@ struct lpfc_hba {
17334 +@@ -879,7 +879,7 @@ struct lpfc_hba {
17335
17336 struct dentry *debug_slow_ring_trc;
17337 struct lpfc_debugfs_trc *slow_ring_trc;
17338 @@ -38504,10 +39396,10 @@ index f63f5ff..de29189 100644
17339 snprintf(name, sizeof(name), "discovery_trace");
17340 vport->debug_disc_trc =
17341 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
17342 -index 7dc4218..3436f08 100644
17343 +index 89ad558..76956c4 100644
17344 --- a/drivers/scsi/lpfc/lpfc_init.c
17345 +++ b/drivers/scsi/lpfc/lpfc_init.c
17346 -@@ -10589,8 +10589,10 @@ lpfc_init(void)
17347 +@@ -10618,8 +10618,10 @@ lpfc_init(void)
17348 "misc_register returned with status %d", error);
17349
17350 if (lpfc_enable_npiv) {
17351 @@ -38521,7 +39413,7 @@ index 7dc4218..3436f08 100644
17352 lpfc_transport_template =
17353 fc_attach_transport(&lpfc_transport_functions);
17354 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
17355 -index 7f45ac9..cf62eda 100644
17356 +index 60e5a17..ff7a793 100644
17357 --- a/drivers/scsi/lpfc/lpfc_scsi.c
17358 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
17359 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
17360 @@ -38576,7 +39468,7 @@ index 7f45ac9..cf62eda 100644
17361
17362 /**
17363 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
17364 -index af763ea..41904f7 100644
17365 +index b46f5e9..c4c4ccb 100644
17366 --- a/drivers/scsi/pmcraid.c
17367 +++ b/drivers/scsi/pmcraid.c
17368 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
17369 @@ -38629,7 +39521,7 @@ index af763ea..41904f7 100644
17370 return;
17371
17372 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
17373 -@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
17374 +@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
17375 init_waitqueue_head(&pinstance->reset_wait_q);
17376
17377 atomic_set(&pinstance->outstanding_cmds, 0);
17378 @@ -38640,7 +39532,7 @@ index af763ea..41904f7 100644
17379
17380 INIT_LIST_HEAD(&pinstance->free_res_q);
17381 INIT_LIST_HEAD(&pinstance->used_res_q);
17382 -@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
17383 +@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
17384 /* Schedule worker thread to handle CCN and take care of adding and
17385 * removing devices to OS
17386 */
17387 @@ -38705,7 +39597,7 @@ index 83d7984..a27d947 100644
17388 .show_host_node_name = 1,
17389 .show_host_port_name = 1,
17390 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
17391 -index 6acb397..d86e3e0 100644
17392 +index 2411d1a..4673766 100644
17393 --- a/drivers/scsi/qla2xxx/qla_gbl.h
17394 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
17395 @@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
17396 @@ -38720,10 +39612,10 @@ index 6acb397..d86e3e0 100644
17397 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
17398 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
17399 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
17400 -index f4b1fc8..a1ce4dd 100644
17401 +index 10d23f8..a7d5d4c 100644
17402 --- a/drivers/scsi/qla2xxx/qla_os.c
17403 +++ b/drivers/scsi/qla2xxx/qla_os.c
17404 -@@ -1462,8 +1462,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
17405 +@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
17406 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
17407 /* Ok, a 64bit DMA mask is applicable. */
17408 ha->flags.enable_64bit_addressing = 1;
17409 @@ -38750,7 +39642,7 @@ index 329d553..f20d31d 100644
17410 uint32_t default_time2wait; /* Default Min time between
17411 * relogins (+aens) */
17412 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
17413 -index fbc546e..c7d1b48 100644
17414 +index 4cec123..7c1329f 100644
17415 --- a/drivers/scsi/qla4xxx/ql4_os.c
17416 +++ b/drivers/scsi/qla4xxx/ql4_os.c
17417 @@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
17418 @@ -38791,7 +39683,7 @@ index 2c0d0ec..4e8681a 100644
17419 /* check if the device is still usable */
17420 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
17421 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
17422 -index 9032e91..7a805d0 100644
17423 +index f1bf5af..f67e943 100644
17424 --- a/drivers/scsi/scsi_lib.c
17425 +++ b/drivers/scsi/scsi_lib.c
17426 @@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
17427 @@ -38913,7 +39805,7 @@ index 31969f2..2b348f0 100644
17428 err = class_register(&iscsi_transport_class);
17429 if (err)
17430 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
17431 -index 21a045e..ec89e03 100644
17432 +index f379c7f..e8fc69c 100644
17433 --- a/drivers/scsi/scsi_transport_srp.c
17434 +++ b/drivers/scsi/scsi_transport_srp.c
17435 @@ -33,7 +33,7 @@
17436 @@ -38925,7 +39817,7 @@ index 21a045e..ec89e03 100644
17437 };
17438 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
17439
17440 -@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
17441 +@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
17442 struct Scsi_Host *shost = dev_to_shost(dev);
17443 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
17444
17445 @@ -38934,7 +39826,7 @@ index 21a045e..ec89e03 100644
17446 return 0;
17447 }
17448
17449 -@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
17450 +@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
17451 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
17452 rport->roles = ids->roles;
17453
17454 @@ -38944,10 +39836,10 @@ index 21a045e..ec89e03 100644
17455
17456 transport_setup_device(&rport->dev);
17457 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
17458 -index a45e12a..d9120cb 100644
17459 +index 7992635..609faf8 100644
17460 --- a/drivers/scsi/sd.c
17461 +++ b/drivers/scsi/sd.c
17462 -@@ -2899,7 +2899,7 @@ static int sd_probe(struct device *dev)
17463 +@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
17464 sdkp->disk = gd;
17465 sdkp->index = index;
17466 atomic_set(&sdkp->openers, 0);
17467 @@ -38970,10 +39862,10 @@ index be2c9a6..275525c 100644
17468 return blk_trace_startstop(sdp->device->request_queue, 1);
17469 case BLKTRACESTOP:
17470 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
17471 -index 84c2861..ece0a31 100644
17472 +index 19ee901..6e8c2ef 100644
17473 --- a/drivers/spi/spi.c
17474 +++ b/drivers/spi/spi.c
17475 -@@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
17476 +@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
17477 EXPORT_SYMBOL_GPL(spi_bus_unlock);
17478
17479 /* portable code must never pass more than 32 bytes */
17480 @@ -39015,7 +39907,7 @@ index 34afc16..ffe44dd 100644
17481 dev_kfree_skb_irq(skb);
17482 }
17483 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
17484 -index 683bedc..86dba9a 100644
17485 +index ef32dc1..a159d68 100644
17486 --- a/drivers/staging/octeon/ethernet.c
17487 +++ b/drivers/staging/octeon/ethernet.c
17488 @@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
17489 @@ -39269,7 +40161,7 @@ index dc23395..cf7e9b1 100644
17490 struct io_req {
17491 struct list_head list;
17492 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
17493 -index 180c963..1f18377 100644
17494 +index 1f5088b..0e59820 100644
17495 --- a/drivers/staging/sbe-2t3e3/netdev.c
17496 +++ b/drivers/staging/sbe-2t3e3/netdev.c
17497 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
17498 @@ -39282,7 +40174,7 @@ index 180c963..1f18377 100644
17499
17500 return 0;
17501 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
17502 -index c66b8b3..a4a035b 100644
17503 +index 5dddc4d..34fcb2f 100644
17504 --- a/drivers/staging/usbip/vhci.h
17505 +++ b/drivers/staging/usbip/vhci.h
17506 @@ -83,7 +83,7 @@ struct vhci_hcd {
17507 @@ -39295,10 +40187,10 @@ index c66b8b3..a4a035b 100644
17508 /*
17509 * NOTE:
17510 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
17511 -index 620d1be..1cd6711 100644
17512 +index c3aa219..bf8b3de 100644
17513 --- a/drivers/staging/usbip/vhci_hcd.c
17514 +++ b/drivers/staging/usbip/vhci_hcd.c
17515 -@@ -471,7 +471,7 @@ static void vhci_tx_urb(struct urb *urb)
17516 +@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
17517 return;
17518 }
17519
17520 @@ -39307,7 +40199,7 @@ index 620d1be..1cd6711 100644
17521 if (priv->seqnum == 0xffff)
17522 dev_info(&urb->dev->dev, "seqnum max\n");
17523
17524 -@@ -723,7 +723,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
17525 +@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
17526 return -ENOMEM;
17527 }
17528
17529 @@ -39316,7 +40208,7 @@ index 620d1be..1cd6711 100644
17530 if (unlink->seqnum == 0xffff)
17531 pr_info("seqnum max\n");
17532
17533 -@@ -924,7 +924,7 @@ static int vhci_start(struct usb_hcd *hcd)
17534 +@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
17535 vdev->rhport = rhport;
17536 }
17537
17538 @@ -39326,7 +40218,7 @@ index 620d1be..1cd6711 100644
17539
17540 hcd->power_budget = 0; /* no limit */
17541 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
17542 -index f0eaf04..5a82e06 100644
17543 +index ba5f1c0..11d8122 100644
17544 --- a/drivers/staging/usbip/vhci_rx.c
17545 +++ b/drivers/staging/usbip/vhci_rx.c
17546 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
17547 @@ -39339,7 +40231,7 @@ index f0eaf04..5a82e06 100644
17548 return;
17549 }
17550 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
17551 -index 67b1b88..6392fe9 100644
17552 +index 5f13890..36a044b 100644
17553 --- a/drivers/staging/vt6655/hostap.c
17554 +++ b/drivers/staging/vt6655/hostap.c
17555 @@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
17556 @@ -39369,7 +40261,7 @@ index 67b1b88..6392fe9 100644
17557
17558 pDevice->apdev->type = ARPHRD_IEEE80211;
17559 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
17560 -index 0a73d40..6fda560 100644
17561 +index 26a7d0e..897b083 100644
17562 --- a/drivers/staging/vt6656/hostap.c
17563 +++ b/drivers/staging/vt6656/hostap.c
17564 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
17565 @@ -39440,30 +40332,34 @@ index 0d4aa82..f7832d4 100644
17566 extern void tmem_register_hostops(struct tmem_hostops *m);
17567
17568 /* core tmem accessor functions */
17569 -diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
17570 -index 13fe16c..cbdc39a 100644
17571 ---- a/drivers/target/target_core_transport.c
17572 -+++ b/drivers/target/target_core_transport.c
17573 -@@ -1085,7 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
17574 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
17575 +index f2aa754..11337b1 100644
17576 +--- a/drivers/target/target_core_device.c
17577 ++++ b/drivers/target/target_core_device.c
17578 +@@ -1375,7 +1375,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
17579 spin_lock_init(&dev->se_port_lock);
17580 spin_lock_init(&dev->se_tmr_lock);
17581 spin_lock_init(&dev->qf_cmd_lock);
17582 - atomic_set(&dev->dev_ordered_id, 0);
17583 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
17584 -
17585 - se_dev_set_default_attribs(dev, dev_limits);
17586 -
17587 -@@ -1275,7 +1275,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
17588 + INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
17589 + spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
17590 + INIT_LIST_HEAD(&dev->t10_pr.registration_list);
17591 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
17592 +index bd587b7..173daf3 100644
17593 +--- a/drivers/target/target_core_transport.c
17594 ++++ b/drivers/target/target_core_transport.c
17595 +@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
17596 * Used to determine when ORDERED commands should go from
17597 * Dormant to Active status.
17598 */
17599 -- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
17600 -+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
17601 +- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
17602 ++ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
17603 smp_mb__after_atomic_inc();
17604 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
17605 cmd->se_ordered_id, cmd->sam_task_attr,
17606 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
17607 -index 0a6a0bc..5501b06 100644
17608 +index b09c8d1f..c4225c0 100644
17609 --- a/drivers/tty/cyclades.c
17610 +++ b/drivers/tty/cyclades.c
17611 @@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
17612 @@ -39479,7 +40375,7 @@ index 0a6a0bc..5501b06 100644
17613 #endif
17614
17615 /*
17616 -@@ -3989,7 +3989,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
17617 +@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
17618 for (j = 0; j < cy_card[i].nports; j++) {
17619 info = &cy_card[i].ports[j];
17620
17621 @@ -39547,7 +40443,7 @@ index 13ee53b..418d164 100644
17622
17623 spin_lock_irqsave(&hp->lock, flags);
17624 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
17625 -index cab5c7a..4cc66ea 100644
17626 +index 8776357..b2d4afd 100644
17627 --- a/drivers/tty/hvc/hvcs.c
17628 +++ b/drivers/tty/hvc/hvcs.c
17629 @@ -83,6 +83,7 @@
17630 @@ -39644,7 +40540,7 @@ index cab5c7a..4cc66ea 100644
17631
17632 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
17633 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
17634 -index 160f0ad..588b853 100644
17635 +index 2cde13d..645d78f 100644
17636 --- a/drivers/tty/ipwireless/tty.c
17637 +++ b/drivers/tty/ipwireless/tty.c
17638 @@ -29,6 +29,7 @@
17639 @@ -39761,10 +40657,10 @@ index 160f0ad..588b853 100644
17640 ipwireless_disassociate_network_ttys(network,
17641 ttyj->channel_idx);
17642 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
17643 -index 56e616b..9d9f10a 100644
17644 +index f9d2850..b006f04 100644
17645 --- a/drivers/tty/moxa.c
17646 +++ b/drivers/tty/moxa.c
17647 -@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
17648 +@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
17649 }
17650
17651 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
17652 @@ -39774,11 +40670,11 @@ index 56e616b..9d9f10a 100644
17653 tty_port_tty_set(&ch->port, tty);
17654 mutex_lock(&ch->port.mutex);
17655 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
17656 -index 1e8e8ce..a9efc93 100644
17657 +index dcc0430..040bef9 100644
17658 --- a/drivers/tty/n_gsm.c
17659 +++ b/drivers/tty/n_gsm.c
17660 -@@ -1638,7 +1638,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
17661 - kref_init(&dlci->ref);
17662 +@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
17663 + spin_lock_init(&dlci->lock);
17664 mutex_init(&dlci->mutex);
17665 dlci->fifo = &dlci->_fifo;
17666 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
17667 @@ -39786,7 +40682,7 @@ index 1e8e8ce..a9efc93 100644
17668 kfree(dlci);
17669 return NULL;
17670 }
17671 -@@ -2925,7 +2925,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
17672 +@@ -2924,7 +2924,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
17673 struct gsm_dlci *dlci = tty->driver_data;
17674 struct tty_port *port = &dlci->port;
17675
17676 @@ -39796,10 +40692,10 @@ index 1e8e8ce..a9efc93 100644
17677 dlci_get(dlci->gsm->dlci[0]);
17678 mux_get(dlci->gsm);
17679 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
17680 -index 8c0b7b4..e88f052 100644
17681 +index 19083ef..6e34e97 100644
17682 --- a/drivers/tty/n_tty.c
17683 +++ b/drivers/tty/n_tty.c
17684 -@@ -2142,6 +2142,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
17685 +@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
17686 {
17687 *ops = tty_ldisc_N_TTY;
17688 ops->owner = NULL;
17689 @@ -39809,10 +40705,10 @@ index 8c0b7b4..e88f052 100644
17690 }
17691 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
17692 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
17693 -index 8cf8d0a..4ef9ed0 100644
17694 +index 79ff3a5..1fe9399 100644
17695 --- a/drivers/tty/pty.c
17696 +++ b/drivers/tty/pty.c
17697 -@@ -730,8 +730,10 @@ static void __init unix98_pty_init(void)
17698 +@@ -791,8 +791,10 @@ static void __init unix98_pty_init(void)
17699 panic("Couldn't register Unix98 pts driver");
17700
17701 /* Now create the /dev/ptmx special device */
17702 @@ -39825,10 +40721,10 @@ index 8cf8d0a..4ef9ed0 100644
17703 cdev_init(&ptmx_cdev, &ptmx_fops);
17704 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
17705 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
17706 -index 9700d34..df7520c 100644
17707 +index e42009a..566a036 100644
17708 --- a/drivers/tty/rocket.c
17709 +++ b/drivers/tty/rocket.c
17710 -@@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
17711 +@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
17712 tty->driver_data = info;
17713 tty_port_tty_set(port, tty);
17714
17715 @@ -39837,7 +40733,7 @@ index 9700d34..df7520c 100644
17716 atomic_inc(&rp_num_ports_open);
17717
17718 #ifdef ROCKET_DEBUG_OPEN
17719 -@@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
17720 +@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
17721 #endif
17722 }
17723 #ifdef ROCKET_DEBUG_OPEN
17724 @@ -39846,7 +40742,7 @@ index 9700d34..df7520c 100644
17725 #endif
17726
17727 /*
17728 -@@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
17729 +@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
17730 spin_unlock_irqrestore(&info->port.lock, flags);
17731 return;
17732 }
17733 @@ -39962,10 +40858,10 @@ index 1002054..dd644a8 100644
17734 /* This is only available if kgdboc is a built in for early debugging */
17735 static int __init kgdboc_early_init(char *opt)
17736 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
17737 -index 7f04717..0f3794f 100644
17738 +index e514b3a..c73d614 100644
17739 --- a/drivers/tty/serial/samsung.c
17740 +++ b/drivers/tty/serial/samsung.c
17741 -@@ -445,11 +445,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
17742 +@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
17743 }
17744 }
17745
17746 @@ -39982,7 +40878,7 @@ index 7f04717..0f3794f 100644
17747 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
17748 port->mapbase, port->membase);
17749
17750 -@@ -1115,10 +1120,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
17751 +@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
17752 /* setup info for port */
17753 port->dev = &platdev->dev;
17754
17755 @@ -39994,10 +40890,10 @@ index 7f04717..0f3794f 100644
17756
17757 if (cfg->uart_flags & UPF_CONS_FLOW) {
17758 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
17759 -index 0fcfd98..8244fce 100644
17760 +index 2c7230a..2104f16 100644
17761 --- a/drivers/tty/serial/serial_core.c
17762 +++ b/drivers/tty/serial/serial_core.c
17763 -@@ -1408,7 +1408,7 @@ static void uart_hangup(struct tty_struct *tty)
17764 +@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
17765 uart_flush_buffer(tty);
17766 uart_shutdown(tty, state);
17767 spin_lock_irqsave(&port->lock, flags);
17768 @@ -40006,7 +40902,7 @@ index 0fcfd98..8244fce 100644
17769 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
17770 spin_unlock_irqrestore(&port->lock, flags);
17771 tty_port_tty_set(port, NULL);
17772 -@@ -1504,7 +1504,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17773 +@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17774 goto end;
17775 }
17776
17777 @@ -40015,7 +40911,7 @@ index 0fcfd98..8244fce 100644
17778 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
17779 retval = -ENXIO;
17780 goto err_dec_count;
17781 -@@ -1531,7 +1531,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17782 +@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17783 /*
17784 * Make sure the device is in D0 state.
17785 */
17786 @@ -40024,7 +40920,7 @@ index 0fcfd98..8244fce 100644
17787 uart_change_pm(state, 0);
17788
17789 /*
17790 -@@ -1549,7 +1549,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17791 +@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
17792 end:
17793 return retval;
17794 err_dec_count:
17795 @@ -40034,7 +40930,7 @@ index 0fcfd98..8244fce 100644
17796 goto end;
17797 }
17798 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
17799 -index 70e3a52..5742052 100644
17800 +index 9e071f6..f30ae69 100644
17801 --- a/drivers/tty/synclink.c
17802 +++ b/drivers/tty/synclink.c
17803 @@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
17804 @@ -40138,7 +41034,7 @@ index 70e3a52..5742052 100644
17805 }
17806
17807 return retval;
17808 -@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
17809 +@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
17810 unsigned short new_crctype;
17811
17812 /* return error if TTY interface open */
17813 @@ -40147,7 +41043,7 @@ index 70e3a52..5742052 100644
17814 return -EBUSY;
17815
17816 switch (encoding)
17817 -@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
17818 +@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
17819
17820 /* arbitrate between network and tty opens */
17821 spin_lock_irqsave(&info->netlock, flags);
17822 @@ -40156,7 +41052,7 @@ index 70e3a52..5742052 100644
17823 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
17824 spin_unlock_irqrestore(&info->netlock, flags);
17825 return -EBUSY;
17826 -@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
17827 +@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
17828 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
17829
17830 /* return error if TTY interface open */
17831 @@ -40166,7 +41062,7 @@ index 70e3a52..5742052 100644
17832
17833 if (cmd != SIOCWANDEV)
17834 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
17835 -index b38e954..ce45b38 100644
17836 +index aba1e59..877ac33 100644
17837 --- a/drivers/tty/synclink_gt.c
17838 +++ b/drivers/tty/synclink_gt.c
17839 @@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
17840 @@ -40284,7 +41180,7 @@ index b38e954..ce45b38 100644
17841
17842 if (!retval)
17843 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
17844 -index f17d9f3..27a041b 100644
17845 +index fd43fb6..34704ad 100644
17846 --- a/drivers/tty/synclinkmp.c
17847 +++ b/drivers/tty/synclinkmp.c
17848 @@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
17849 @@ -40423,10 +41319,10 @@ index f17d9f3..27a041b 100644
17850 if (!retval)
17851 port->flags |= ASYNC_NORMAL_ACTIVE;
17852 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
17853 -index 16ee6ce..bfcac57 100644
17854 +index b3c4a25..723916f 100644
17855 --- a/drivers/tty/sysrq.c
17856 +++ b/drivers/tty/sysrq.c
17857 -@@ -866,7 +866,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
17858 +@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
17859 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
17860 size_t count, loff_t *ppos)
17861 {
17862 @@ -40436,10 +41332,10 @@ index 16ee6ce..bfcac57 100644
17863
17864 if (get_user(c, buf))
17865 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
17866 -index 2ea176b..2877bc8 100644
17867 +index da9fde8..c07975f 100644
17868 --- a/drivers/tty/tty_io.c
17869 +++ b/drivers/tty/tty_io.c
17870 -@@ -3395,7 +3395,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
17871 +@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
17872
17873 void tty_default_fops(struct file_operations *fops)
17874 {
17875 @@ -40449,31 +41345,31 @@ index 2ea176b..2877bc8 100644
17876
17877 /*
17878 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
17879 -index 0f2a2c5..471e228 100644
17880 +index c578229..45aa9ee 100644
17881 --- a/drivers/tty/tty_ldisc.c
17882 +++ b/drivers/tty/tty_ldisc.c
17883 @@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
17884 - if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
17885 + if (atomic_dec_and_test(&ld->users)) {
17886 struct tty_ldisc_ops *ldo = ld->ops;
17887
17888 - ldo->refcount--;
17889 + atomic_dec(&ldo->refcount);
17890 module_put(ldo->owner);
17891 - spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17892 + raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17893
17894 @@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
17895 - spin_lock_irqsave(&tty_ldisc_lock, flags);
17896 + raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
17897 tty_ldiscs[disc] = new_ldisc;
17898 new_ldisc->num = disc;
17899 - new_ldisc->refcount = 0;
17900 + atomic_set(&new_ldisc->refcount, 0);
17901 - spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17902 + raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17903
17904 return ret;
17905 @@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
17906 return -EINVAL;
17907
17908 - spin_lock_irqsave(&tty_ldisc_lock, flags);
17909 + raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
17910 - if (tty_ldiscs[disc]->refcount)
17911 + if (atomic_read(&tty_ldiscs[disc]->refcount))
17912 ret = -EBUSY;
17913 @@ -40491,17 +41387,17 @@ index 0f2a2c5..471e228 100644
17914 @@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
17915 unsigned long flags;
17916
17917 - spin_lock_irqsave(&tty_ldisc_lock, flags);
17918 + raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
17919 - ldops->refcount--;
17920 + atomic_dec(&ldops->refcount);
17921 module_put(ldops->owner);
17922 - spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17923 + raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
17924 }
17925 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
17926 -index d7bdd8d..feaef30 100644
17927 +index b7ff59d..7c6105e 100644
17928 --- a/drivers/tty/tty_port.c
17929 +++ b/drivers/tty/tty_port.c
17930 -@@ -202,7 +202,7 @@ void tty_port_hangup(struct tty_port *port)
17931 +@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
17932 unsigned long flags;
17933
17934 spin_lock_irqsave(&port->lock, flags);
17935 @@ -40510,7 +41406,7 @@ index d7bdd8d..feaef30 100644
17936 port->flags &= ~ASYNC_NORMAL_ACTIVE;
17937 if (port->tty) {
17938 set_bit(TTY_IO_ERROR, &port->tty->flags);
17939 -@@ -328,7 +328,7 @@ int tty_port_block_til_ready(struct tty_port *port,
17940 +@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
17941 /* The port lock protects the port counts */
17942 spin_lock_irqsave(&port->lock, flags);
17943 if (!tty_hung_up_p(filp))
17944 @@ -40519,7 +41415,7 @@ index d7bdd8d..feaef30 100644
17945 port->blocked_open++;
17946 spin_unlock_irqrestore(&port->lock, flags);
17947
17948 -@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
17949 +@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
17950 we must not mess that up further */
17951 spin_lock_irqsave(&port->lock, flags);
17952 if (!tty_hung_up_p(filp))
17953 @@ -40528,7 +41424,7 @@ index d7bdd8d..feaef30 100644
17954 port->blocked_open--;
17955 if (retval == 0)
17956 port->flags |= ASYNC_NORMAL_ACTIVE;
17957 -@@ -390,19 +390,19 @@ int tty_port_close_start(struct tty_port *port,
17958 +@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
17959 return 0;
17960 }
17961
17962 @@ -40555,7 +41451,7 @@ index d7bdd8d..feaef30 100644
17963 spin_unlock_irqrestore(&port->lock, flags);
17964 if (port->ops->drop)
17965 port->ops->drop(port);
17966 -@@ -500,7 +500,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
17967 +@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
17968 {
17969 spin_lock_irq(&port->lock);
17970 if (!tty_hung_up_p(filp))
17971 @@ -40817,7 +41713,7 @@ index 35f10bf..6a38a0b 100644
17972 if (!left--) {
17973 if (instance->disconnected)
17974 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
17975 -index f460de3..95ba1f6 100644
17976 +index cbacea9..246cccd 100644
17977 --- a/drivers/usb/core/devices.c
17978 +++ b/drivers/usb/core/devices.c
17979 @@ -126,7 +126,7 @@ static const char format_endpt[] =
17980 @@ -40838,7 +41734,7 @@ index f460de3..95ba1f6 100644
17981 wake_up(&device_event.wait);
17982 }
17983
17984 -@@ -647,7 +647,7 @@ static unsigned int usb_device_poll(struct file *file,
17985 +@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
17986
17987 poll_wait(file, &device_event.wait, wait);
17988
17989 @@ -40848,10 +41744,10 @@ index f460de3..95ba1f6 100644
17990 file->f_version = event_count;
17991 return POLLIN | POLLRDNORM;
17992 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
17993 -index f034716..aed0368 100644
17994 +index 8e64adf..9a33a3c 100644
17995 --- a/drivers/usb/core/hcd.c
17996 +++ b/drivers/usb/core/hcd.c
17997 -@@ -1478,7 +1478,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
17998 +@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
17999 */
18000 usb_get_urb(urb);
18001 atomic_inc(&urb->use_count);
18002 @@ -40860,7 +41756,7 @@ index f034716..aed0368 100644
18003 usbmon_urb_submit(&hcd->self, urb);
18004
18005 /* NOTE requirements on root-hub callers (usbfs and the hub
18006 -@@ -1505,7 +1505,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
18007 +@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
18008 urb->hcpriv = NULL;
18009 INIT_LIST_HEAD(&urb->urb_list);
18010 atomic_dec(&urb->use_count);
18011 @@ -40883,10 +41779,10 @@ index 818e4a0..0fc9589 100644
18012 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
18013
18014 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
18015 -index cd8fb44..17fbe0c 100644
18016 +index f81b925..78d22ec 100644
18017 --- a/drivers/usb/core/usb.c
18018 +++ b/drivers/usb/core/usb.c
18019 -@@ -397,7 +397,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
18020 +@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
18021 set_dev_node(&dev->dev, dev_to_node(bus->controller));
18022 dev->state = USB_STATE_ATTACHED;
18023 dev->lpm_disable_count = 1;
18024 @@ -40896,7 +41792,7 @@ index cd8fb44..17fbe0c 100644
18025 INIT_LIST_HEAD(&dev->ep0.urb_list);
18026 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
18027 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
18028 -index 4bfa78a..902bfbd 100644
18029 +index 5e29dde..eca992f 100644
18030 --- a/drivers/usb/early/ehci-dbgp.c
18031 +++ b/drivers/usb/early/ehci-dbgp.c
18032 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
18033 @@ -40937,7 +41833,7 @@ index 4bfa78a..902bfbd 100644
18034 return 0;
18035 }
18036 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
18037 -index f173952..83d6ec0 100644
18038 +index 598dcc1..032dd4f 100644
18039 --- a/drivers/usb/gadget/u_serial.c
18040 +++ b/drivers/usb/gadget/u_serial.c
18041 @@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
18042 @@ -40985,7 +41881,7 @@ index f173952..83d6ec0 100644
18043
18044 gser = port->port_usb;
18045 if (gser && gser->disconnect)
18046 -@@ -1157,7 +1157,7 @@ static int gs_closed(struct gs_port *port)
18047 +@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
18048 int cond;
18049
18050 spin_lock_irq(&port->port_lock);
18051 @@ -40994,7 +41890,7 @@ index f173952..83d6ec0 100644
18052 spin_unlock_irq(&port->port_lock);
18053 return cond;
18054 }
18055 -@@ -1270,7 +1270,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
18056 +@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
18057 /* if it's already open, start I/O ... and notify the serial
18058 * protocol about open/close status (connect/disconnect).
18059 */
18060 @@ -41003,7 +41899,7 @@ index f173952..83d6ec0 100644
18061 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
18062 gs_start_io(port);
18063 if (gser->connect)
18064 -@@ -1317,7 +1317,7 @@ void gserial_disconnect(struct gserial *gser)
18065 +@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
18066
18067 port->port_usb = NULL;
18068 gser->ioport = NULL;
18069 @@ -41012,7 +41908,7 @@ index f173952..83d6ec0 100644
18070 wake_up_interruptible(&port->drain_wait);
18071 if (port->port.tty)
18072 tty_hangup(port->port.tty);
18073 -@@ -1333,7 +1333,7 @@ void gserial_disconnect(struct gserial *gser)
18074 +@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
18075
18076 /* finally, free any unused/unusable I/O buffers */
18077 spin_lock_irqsave(&port->port_lock, flags);
18078 @@ -41087,29 +41983,16 @@ index 57c01ab..8a05959 100644
18079 }
18080
18081 /*
18082 -diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
18083 -index dedaf81..b0f11ab 100644
18084 ---- a/drivers/vhost/vhost.c
18085 -+++ b/drivers/vhost/vhost.c
18086 -@@ -634,7 +634,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
18087 - return 0;
18088 - }
18089 -
18090 --static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
18091 -+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
18092 - {
18093 - struct file *eventfp, *filep = NULL;
18094 - bool pollstart = false, pollstop = false;
18095 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
18096 -index 0fefa84..7a9d581 100644
18097 +index 8c55011..eed4ae1a 100644
18098 --- a/drivers/video/aty/aty128fb.c
18099 +++ b/drivers/video/aty/aty128fb.c
18100 @@ -149,7 +149,7 @@ enum {
18101 };
18102
18103 /* Must match above enum */
18104 --static char * const r128_family[] __devinitconst = {
18105 -+static const char * const r128_family[] __devinitconst = {
18106 +-static char * const r128_family[] = {
18107 ++static const char * const r128_family[] = {
18108 "AGP",
18109 "PCI",
18110 "PRO AGP",
18111 @@ -43987,7 +44870,7 @@ index 86d449e..af6a7f7 100644
18112 return count;
18113 }
18114 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
18115 -index 2f8f82d..191de37 100644
18116 +index b75db01..5631c6d 100644
18117 --- a/drivers/video/uvesafb.c
18118 +++ b/drivers/video/uvesafb.c
18119 @@ -19,6 +19,7 @@
18120 @@ -43998,7 +44881,7 @@ index 2f8f82d..191de37 100644
18121 #include <video/edid.h>
18122 #include <video/uvesafb.h>
18123 #ifdef CONFIG_X86
18124 -@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
18125 +@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
18126 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
18127 par->pmi_setpal = par->ypan = 0;
18128 } else {
18129 @@ -44031,7 +44914,7 @@ index 2f8f82d..191de37 100644
18130 printk(KERN_INFO "uvesafb: protected mode interface info at "
18131 "%04x:%04x\n",
18132 (u16)task->t.regs.es, (u16)task->t.regs.edi);
18133 -@@ -818,13 +841,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
18134 +@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
18135 par->ypan = ypan;
18136
18137 if (par->pmi_setpal || par->ypan) {
18138 @@ -44048,7 +44931,7 @@ index 2f8f82d..191de37 100644
18139 }
18140 #else
18141 /* The protected mode interface is not available on non-x86. */
18142 -@@ -1838,6 +1862,11 @@ out:
18143 +@@ -1836,6 +1860,11 @@ out:
18144 if (par->vbe_modes)
18145 kfree(par->vbe_modes);
18146
18147 @@ -44060,7 +44943,7 @@ index 2f8f82d..191de37 100644
18148 framebuffer_release(info);
18149 return err;
18150 }
18151 -@@ -1864,6 +1893,12 @@ static int uvesafb_remove(struct platform_device *dev)
18152 +@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
18153 kfree(par->vbe_state_orig);
18154 if (par->vbe_state_saved)
18155 kfree(par->vbe_state_saved);
18156 @@ -44192,19 +45075,6 @@ index 88714ae..16c2e11 100644
18157
18158
18159 static inline u32 get_pll_internal_frequency(u32 ref_freq,
18160 -diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
18161 -index 6b1b7e1..b2fa4d5 100644
18162 ---- a/drivers/virtio/virtio_mmio.c
18163 -+++ b/drivers/virtio/virtio_mmio.c
18164 -@@ -530,7 +530,7 @@ static int vm_cmdline_set(const char *device,
18165 -
18166 - resources[0].end = memparse(device, &str) - 1;
18167 -
18168 -- processed = sscanf(str, "@%lli:%u%n:%d%n",
18169 -+ processed = sscanf(str, "@%lli:%llu%n:%d%n",
18170 - &base, &resources[1].start, &consumed,
18171 - &vm_cmdline_id, &consumed);
18172 -
18173 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
18174 index fef20db..d28b1ab 100644
18175 --- a/drivers/xen/xenfs/xenstored.c
18176 @@ -44295,10 +45165,10 @@ index 71f613c..9d01f1f 100644
18177 kiocb->ki_cur_seg = 0;
18178 /* ki_nbytes/left now reflect bytes instead of segs */
18179 diff --git a/fs/attr.c b/fs/attr.c
18180 -index cce7df5..eaa2731 100644
18181 +index 1449adb..a2038c2 100644
18182 --- a/fs/attr.c
18183 +++ b/fs/attr.c
18184 -@@ -100,6 +100,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
18185 +@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
18186 unsigned long limit;
18187
18188 limit = rlimit(RLIMIT_FSIZE);
18189 @@ -44307,7 +45177,7 @@ index cce7df5..eaa2731 100644
18190 goto out_sig;
18191 if (offset > inode->i_sb->s_maxbytes)
18192 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
18193 -index dce436e..55e670d 100644
18194 +index 03bc1d3..6205356 100644
18195 --- a/fs/autofs4/waitq.c
18196 +++ b/fs/autofs4/waitq.c
18197 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
18198 @@ -44319,7 +45189,7 @@ index dce436e..55e670d 100644
18199 ssize_t wr = 0;
18200
18201 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
18202 -@@ -347,6 +347,10 @@ static int validate_request(struct autofs_wait_queue **wait,
18203 +@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
18204 return 1;
18205 }
18206
18207 @@ -44330,7 +45200,7 @@ index dce436e..55e670d 100644
18208 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
18209 enum autofs_notify notify)
18210 {
18211 -@@ -380,7 +384,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
18212 +@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
18213
18214 /* If this is a direct mount request create a dummy name */
18215 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
18216 @@ -44357,7 +45227,7 @@ index 2b3bda8..6a2d4be 100644
18217 kfree(link);
18218 }
18219 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
18220 -index 0e7a6f8..332b1ca 100644
18221 +index 6043567..16a9239 100644
18222 --- a/fs/binfmt_aout.c
18223 +++ b/fs/binfmt_aout.c
18224 @@ -16,6 +16,7 @@
18225 @@ -44390,7 +45260,7 @@ index 0e7a6f8..332b1ca 100644
18226 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
18227 dump.u_ssize = 0;
18228
18229 -@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
18230 +@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
18231 rlim = rlimit(RLIMIT_DATA);
18232 if (rlim >= RLIM_INFINITY)
18233 rlim = ~0;
18234 @@ -44399,7 +45269,7 @@ index 0e7a6f8..332b1ca 100644
18235 if (ex.a_data + ex.a_bss > rlim)
18236 return -ENOMEM;
18237
18238 -@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
18239 +@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
18240
18241 install_exec_creds(bprm);
18242
18243 @@ -44427,7 +45297,7 @@ index 0e7a6f8..332b1ca 100644
18244 if (N_MAGIC(ex) == OMAGIC) {
18245 unsigned long text_addr, map_size;
18246 loff_t pos;
18247 -@@ -332,7 +360,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
18248 +@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
18249 }
18250
18251 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
18252 @@ -44437,7 +45307,7 @@ index 0e7a6f8..332b1ca 100644
18253 fd_offset + ex.a_text);
18254 if (error != N_DATADDR(ex)) {
18255 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
18256 -index fbd9f60..0b845dd 100644
18257 +index 0c42cdb..f4be023 100644
18258 --- a/fs/binfmt_elf.c
18259 +++ b/fs/binfmt_elf.c
18260 @@ -33,6 +33,7 @@
18261 @@ -44911,7 +45781,7 @@ index fbd9f60..0b845dd 100644
18262 if ((current->flags & PF_RANDOMIZE) &&
18263 !(current->personality & ADDR_NO_RANDOMIZE)) {
18264 random_variable = get_random_int() & STACK_RND_MASK;
18265 -@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18266 +@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
18267 unsigned long load_addr = 0, load_bias = 0;
18268 int load_addr_set = 0;
18269 char * elf_interpreter = NULL;
18270 @@ -44920,11 +45790,12 @@ index fbd9f60..0b845dd 100644
18271 struct elf_phdr *elf_ppnt, *elf_phdata;
18272 unsigned long elf_bss, elf_brk;
18273 int retval, i;
18274 -@@ -574,11 +909,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18275 +@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
18276 unsigned long start_code, end_code, start_data, end_data;
18277 unsigned long reloc_func_desc __maybe_unused = 0;
18278 int executable_stack = EXSTACK_DEFAULT;
18279 - unsigned long def_flags = 0;
18280 + struct pt_regs *regs = current_pt_regs();
18281 struct {
18282 struct elfhdr elf_ex;
18283 struct elfhdr interp_elf_ex;
18284 @@ -44933,7 +45804,7 @@ index fbd9f60..0b845dd 100644
18285
18286 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
18287 if (!loc) {
18288 -@@ -714,11 +1049,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18289 +@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
18290 goto out_free_dentry;
18291
18292 /* OK, This is the point of no return */
18293 @@ -45016,7 +45887,7 @@ index fbd9f60..0b845dd 100644
18294 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
18295 current->personality |= READ_IMPLIES_EXEC;
18296
18297 -@@ -809,6 +1214,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18298 +@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
18299 #else
18300 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
18301 #endif
18302 @@ -45037,7 +45908,7 @@ index fbd9f60..0b845dd 100644
18303 }
18304
18305 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
18306 -@@ -841,9 +1260,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18307 +@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
18308 * allowed task size. Note that p_filesz must always be
18309 * <= p_memsz so it is only necessary to check p_memsz.
18310 */
18311 @@ -45050,7 +45921,7 @@ index fbd9f60..0b845dd 100644
18312 /* set_brk can never work. Avoid overflows. */
18313 send_sig(SIGKILL, current, 0);
18314 retval = -EINVAL;
18315 -@@ -882,17 +1301,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
18316 +@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
18317 goto out_free_dentry;
18318 }
18319 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
18320 @@ -45101,7 +45972,7 @@ index fbd9f60..0b845dd 100644
18321 load_bias);
18322 if (!IS_ERR((void *)elf_entry)) {
18323 /*
18324 -@@ -1114,7 +1560,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
18325 +@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
18326 * Decide what to dump of a segment, part, all or none.
18327 */
18328 static unsigned long vma_dump_size(struct vm_area_struct *vma,
18329 @@ -45110,7 +45981,7 @@ index fbd9f60..0b845dd 100644
18330 {
18331 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
18332
18333 -@@ -1151,7 +1597,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
18334 +@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
18335 if (vma->vm_file == NULL)
18336 return 0;
18337
18338 @@ -45119,7 +45990,7 @@ index fbd9f60..0b845dd 100644
18339 goto whole;
18340
18341 /*
18342 -@@ -1373,9 +1819,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
18343 +@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
18344 {
18345 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
18346 int i = 0;
18347 @@ -45131,7 +46002,7 @@ index fbd9f60..0b845dd 100644
18348 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
18349 }
18350
18351 -@@ -2003,14 +2449,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
18352 +@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
18353 }
18354
18355 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
18356 @@ -45148,7 +46019,7 @@ index fbd9f60..0b845dd 100644
18357 return size;
18358 }
18359
18360 -@@ -2104,7 +2550,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18361 +@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18362
18363 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
18364
18365 @@ -45157,7 +46028,7 @@ index fbd9f60..0b845dd 100644
18366 offset += elf_core_extra_data_size();
18367 e_shoff = offset;
18368
18369 -@@ -2118,10 +2564,12 @@ static int elf_core_dump(struct coredump_params *cprm)
18370 +@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
18371 offset = dataoff;
18372
18373 size += sizeof(*elf);
18374 @@ -45170,7 +46041,7 @@ index fbd9f60..0b845dd 100644
18375 if (size > cprm->limit
18376 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
18377 goto end_coredump;
18378 -@@ -2135,7 +2583,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18379 +@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18380 phdr.p_offset = offset;
18381 phdr.p_vaddr = vma->vm_start;
18382 phdr.p_paddr = 0;
18383 @@ -45179,7 +46050,7 @@ index fbd9f60..0b845dd 100644
18384 phdr.p_memsz = vma->vm_end - vma->vm_start;
18385 offset += phdr.p_filesz;
18386 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
18387 -@@ -2146,6 +2594,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18388 +@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18389 phdr.p_align = ELF_EXEC_PAGESIZE;
18390
18391 size += sizeof(phdr);
18392 @@ -45187,7 +46058,7 @@ index fbd9f60..0b845dd 100644
18393 if (size > cprm->limit
18394 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
18395 goto end_coredump;
18396 -@@ -2170,7 +2619,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18397 +@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18398 unsigned long addr;
18399 unsigned long end;
18400
18401 @@ -45196,7 +46067,7 @@ index fbd9f60..0b845dd 100644
18402
18403 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
18404 struct page *page;
18405 -@@ -2179,6 +2628,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18406 +@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18407 page = get_dump_page(addr);
18408 if (page) {
18409 void *kaddr = kmap(page);
18410 @@ -45204,7 +46075,7 @@ index fbd9f60..0b845dd 100644
18411 stop = ((size += PAGE_SIZE) > cprm->limit) ||
18412 !dump_write(cprm->file, kaddr,
18413 PAGE_SIZE);
18414 -@@ -2196,6 +2646,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18415 +@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
18416
18417 if (e_phnum == PN_XNUM) {
18418 size += sizeof(*shdr4extnum);
18419 @@ -45212,7 +46083,7 @@ index fbd9f60..0b845dd 100644
18420 if (size > cprm->limit
18421 || !dump_write(cprm->file, shdr4extnum,
18422 sizeof(*shdr4extnum)))
18423 -@@ -2216,6 +2667,97 @@ out:
18424 +@@ -2219,6 +2670,97 @@ out:
18425
18426 #endif /* CONFIG_ELF_CORE */
18427
18428 @@ -45311,7 +46182,7 @@ index fbd9f60..0b845dd 100644
18429 {
18430 register_binfmt(&elf_format);
18431 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
18432 -index e280352..7b2f231 100644
18433 +index b563719..3868998 100644
18434 --- a/fs/binfmt_flat.c
18435 +++ b/fs/binfmt_flat.c
18436 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
18437 @@ -45378,7 +46249,7 @@ index b96fc6c..431d628 100644
18438 __bio_for_each_segment(bvec, bio, i, 0) {
18439 char *addr = page_address(bvec->bv_page);
18440 diff --git a/fs/block_dev.c b/fs/block_dev.c
18441 -index ab3a456..7da538b 100644
18442 +index 172f849..6efbf24 100644
18443 --- a/fs/block_dev.c
18444 +++ b/fs/block_dev.c
18445 @@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
18446 @@ -45391,10 +46262,10 @@ index ab3a456..7da538b 100644
18447 else if (whole->bd_holder != NULL)
18448 return false; /* is a partition of a held device */
18449 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
18450 -index cdfb4c4..da736d4 100644
18451 +index eea5da7..88fead70 100644
18452 --- a/fs/btrfs/ctree.c
18453 +++ b/fs/btrfs/ctree.c
18454 -@@ -1035,9 +1035,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
18455 +@@ -1033,9 +1033,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
18456 free_extent_buffer(buf);
18457 add_root_to_dirty_list(root);
18458 } else {
18459 @@ -45411,10 +46282,10 @@ index cdfb4c4..da736d4 100644
18460
18461 WARN_ON(trans->transid != btrfs_header_generation(parent));
18462 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
18463 -index 95542a1..95a8727 100644
18464 +index cc93b23..f3c42bf 100644
18465 --- a/fs/btrfs/inode.c
18466 +++ b/fs/btrfs/inode.c
18467 -@@ -7243,7 +7243,7 @@ fail:
18468 +@@ -7296,7 +7296,7 @@ fail:
18469 return -ENOMEM;
18470 }
18471
18472 @@ -45423,7 +46294,7 @@ index 95542a1..95a8727 100644
18473 struct dentry *dentry, struct kstat *stat)
18474 {
18475 struct inode *inode = dentry->d_inode;
18476 -@@ -7257,6 +7257,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
18477 +@@ -7310,6 +7310,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
18478 return 0;
18479 }
18480
18481 @@ -45439,10 +46310,10 @@ index 95542a1..95a8727 100644
18482 * If a file is moved, it will inherit the cow and compression flags of the new
18483 * directory.
18484 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
18485 -index 8fcf9a5..a200000 100644
18486 +index 338f259..b657640 100644
18487 --- a/fs/btrfs/ioctl.c
18488 +++ b/fs/btrfs/ioctl.c
18489 -@@ -2965,9 +2965,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
18490 +@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
18491 for (i = 0; i < num_types; i++) {
18492 struct btrfs_space_info *tmp;
18493
18494 @@ -45455,7 +46326,7 @@ index 8fcf9a5..a200000 100644
18495 info = NULL;
18496 rcu_read_lock();
18497 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
18498 -@@ -2989,10 +2992,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
18499 +@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
18500 memcpy(dest, &space, sizeof(space));
18501 dest++;
18502 space_args.total_spaces++;
18503 @@ -45467,7 +46338,7 @@ index 8fcf9a5..a200000 100644
18504 up_read(&info->groups_sem);
18505 }
18506 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
18507 -index 776f0aa..3aad281 100644
18508 +index 300e09a..9fe4539 100644
18509 --- a/fs/btrfs/relocation.c
18510 +++ b/fs/btrfs/relocation.c
18511 @@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
18512 @@ -45479,6 +46350,19 @@ index 776f0aa..3aad281 100644
18513
18514 if (!del) {
18515 spin_lock(&rc->reloc_root_tree.lock);
18516 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
18517 +index d8982e9..29a85fa 100644
18518 +--- a/fs/btrfs/super.c
18519 ++++ b/fs/btrfs/super.c
18520 +@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
18521 + function, line, errstr);
18522 + return;
18523 + }
18524 +- ACCESS_ONCE(trans->transaction->aborted) = errno;
18525 ++ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
18526 + __btrfs_std_error(root->fs_info, function, line, errno, NULL);
18527 + }
18528 + /*
18529 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
18530 index 622f469..e8d2d55 100644
18531 --- a/fs/cachefiles/bind.c
18532 @@ -45540,10 +46424,10 @@ index 0a1467b..6a53245 100644
18533
18534 cache->bstop_percent = bstop;
18535 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
18536 -index bd6bc1b..b627b53 100644
18537 +index 4938251..7e01445 100644
18538 --- a/fs/cachefiles/internal.h
18539 +++ b/fs/cachefiles/internal.h
18540 -@@ -57,7 +57,7 @@ struct cachefiles_cache {
18541 +@@ -59,7 +59,7 @@ struct cachefiles_cache {
18542 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
18543 struct rb_root active_nodes; /* active nodes (can't be culled) */
18544 rwlock_t active_lock; /* lock for active_nodes */
18545 @@ -45552,7 +46436,7 @@ index bd6bc1b..b627b53 100644
18546 unsigned frun_percent; /* when to stop culling (% files) */
18547 unsigned fcull_percent; /* when to start culling (% files) */
18548 unsigned fstop_percent; /* when to stop allocating (% files) */
18549 -@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
18550 +@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
18551 * proc.c
18552 */
18553 #ifdef CONFIG_CACHEFILES_HISTOGRAM
18554 @@ -45578,10 +46462,10 @@ index bd6bc1b..b627b53 100644
18555
18556 #else
18557 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
18558 -index b0b5f7c..039bb26 100644
18559 +index 8c01c5fc..15f982e 100644
18560 --- a/fs/cachefiles/namei.c
18561 +++ b/fs/cachefiles/namei.c
18562 -@@ -318,7 +318,7 @@ try_again:
18563 +@@ -317,7 +317,7 @@ try_again:
18564 /* first step is to make up a grave dentry in the graveyard */
18565 sprintf(nbuffer, "%08x%08x",
18566 (uint32_t) get_seconds(),
18567 @@ -45621,10 +46505,10 @@ index eccd339..4c1d995 100644
18568 return 0;
18569
18570 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
18571 -index c994691..2a1537f 100644
18572 +index 4809922..aab2c39 100644
18573 --- a/fs/cachefiles/rdwr.c
18574 +++ b/fs/cachefiles/rdwr.c
18575 -@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
18576 +@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
18577 old_fs = get_fs();
18578 set_fs(KERNEL_DS);
18579 ret = file->f_op->write(
18580 @@ -45634,7 +46518,7 @@ index c994691..2a1537f 100644
18581 kunmap(page);
18582 if (ret != len)
18583 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
18584 -index e5b7731..b9c59fb 100644
18585 +index 8c1aabe..bbf856a 100644
18586 --- a/fs/ceph/dir.c
18587 +++ b/fs/ceph/dir.c
18588 @@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
18589 @@ -45691,10 +46575,10 @@ index d9ea6ed..1e6c8ac 100644
18590 server->ops->print_stats(m, tcon);
18591 }
18592 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
18593 -index e7931cc..76a1ab9 100644
18594 +index de7f916..6cb22a9 100644
18595 --- a/fs/cifs/cifsfs.c
18596 +++ b/fs/cifs/cifsfs.c
18597 -@@ -999,7 +999,7 @@ cifs_init_request_bufs(void)
18598 +@@ -997,7 +997,7 @@ cifs_init_request_bufs(void)
18599 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
18600 cifs_req_cachep = kmem_cache_create("cifs_request",
18601 CIFSMaxBufSize + max_hdr_size, 0,
18602 @@ -45703,7 +46587,7 @@ index e7931cc..76a1ab9 100644
18603 if (cifs_req_cachep == NULL)
18604 return -ENOMEM;
18605
18606 -@@ -1026,7 +1026,7 @@ cifs_init_request_bufs(void)
18607 +@@ -1024,7 +1024,7 @@ cifs_init_request_bufs(void)
18608 efficient to alloc 1 per page off the slab compared to 17K (5page)
18609 alloc of large cifs buffers even when page debugging is on */
18610 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
18611 @@ -45712,7 +46596,7 @@ index e7931cc..76a1ab9 100644
18612 NULL);
18613 if (cifs_sm_req_cachep == NULL) {
18614 mempool_destroy(cifs_req_poolp);
18615 -@@ -1111,8 +1111,8 @@ init_cifs(void)
18616 +@@ -1109,8 +1109,8 @@ init_cifs(void)
18617 atomic_set(&bufAllocCount, 0);
18618 atomic_set(&smBufAllocCount, 0);
18619 #ifdef CONFIG_CIFS_STATS2
18620 @@ -45724,7 +46608,7 @@ index e7931cc..76a1ab9 100644
18621
18622 atomic_set(&midCount, 0);
18623 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
18624 -index f5af252..489b5f2 100644
18625 +index e6899ce..d6b2920 100644
18626 --- a/fs/cifs/cifsglob.h
18627 +++ b/fs/cifs/cifsglob.h
18628 @@ -751,35 +751,35 @@ struct cifs_tcon {
18629 @@ -45787,7 +46671,7 @@ index f5af252..489b5f2 100644
18630 } smb2_stats;
18631 #endif /* CONFIG_CIFS_SMB2 */
18632 } stats;
18633 -@@ -1094,7 +1094,7 @@ build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
18634 +@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
18635 }
18636
18637 #ifdef CONFIG_CIFS_STATS
18638 @@ -45796,7 +46680,7 @@ index f5af252..489b5f2 100644
18639
18640 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
18641 unsigned int bytes)
18642 -@@ -1459,8 +1459,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
18643 +@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
18644 /* Various Debug counters */
18645 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
18646 #ifdef CONFIG_CIFS_STATS2
18647 @@ -45843,10 +46727,10 @@ index 3a00c0d..42d901c 100644
18648
18649 }
18650 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
18651 -index 591bf19..690d600 100644
18652 +index 47bc5a8..10decbe 100644
18653 --- a/fs/cifs/smb1ops.c
18654 +++ b/fs/cifs/smb1ops.c
18655 -@@ -617,27 +617,27 @@ static void
18656 +@@ -586,27 +586,27 @@ static void
18657 cifs_clear_stats(struct cifs_tcon *tcon)
18658 {
18659 #ifdef CONFIG_CIFS_STATS
18660 @@ -45895,7 +46779,7 @@ index 591bf19..690d600 100644
18661 #endif
18662 }
18663
18664 -@@ -646,36 +646,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
18665 +@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
18666 {
18667 #ifdef CONFIG_CIFS_STATS
18668 seq_printf(m, " Oplocks breaks: %d",
18669 @@ -45952,10 +46836,10 @@ index 591bf19..690d600 100644
18670 }
18671
18672 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
18673 -index 4d9dbe0..0af4601 100644
18674 +index c9c7aa7..065056a 100644
18675 --- a/fs/cifs/smb2ops.c
18676 +++ b/fs/cifs/smb2ops.c
18677 -@@ -291,8 +291,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
18678 +@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
18679 #ifdef CONFIG_CIFS_STATS
18680 int i;
18681 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
18682 @@ -45966,7 +46850,7 @@ index 4d9dbe0..0af4601 100644
18683 }
18684 #endif
18685 }
18686 -@@ -301,66 +301,66 @@ static void
18687 +@@ -284,66 +284,66 @@ static void
18688 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
18689 {
18690 #ifdef CONFIG_CIFS_STATS
18691 @@ -46073,6 +46957,20 @@ index 4d9dbe0..0af4601 100644
18692 #endif
18693 }
18694
18695 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
18696 +index 41d9d07..dbb4772 100644
18697 +--- a/fs/cifs/smb2pdu.c
18698 ++++ b/fs/cifs/smb2pdu.c
18699 +@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
18700 + default:
18701 + cERROR(1, "info level %u isn't supported",
18702 + srch_inf->info_level);
18703 +- rc = -EINVAL;
18704 +- goto qdir_exit;
18705 ++ return -EINVAL;
18706 + }
18707 +
18708 + req->FileIndex = cpu_to_le32(index);
18709 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
18710 index 958ae0e..505c9d0 100644
18711 --- a/fs/coda/cache.c
18712 @@ -46251,7 +47149,7 @@ index a81147e..20bf2b5 100644
18713
18714 /*
18715 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
18716 -index 4c6285f..b7a2411 100644
18717 +index e2f57a0..3c78771 100644
18718 --- a/fs/compat_ioctl.c
18719 +++ b/fs/compat_ioctl.c
18720 @@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
18721 @@ -46272,7 +47170,7 @@ index 4c6285f..b7a2411 100644
18722 return -EFAULT;
18723
18724 return ioctl_preallocate(file, p);
18725 -@@ -1617,8 +1617,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
18726 +@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
18727 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
18728 {
18729 unsigned int a, b;
18730 @@ -46284,7 +47182,7 @@ index 4c6285f..b7a2411 100644
18731 return 1;
18732 if (a < b)
18733 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
18734 -index 7414ae2..d98ad6d 100644
18735 +index 712b10f..6b54d7b 100644
18736 --- a/fs/configfs/dir.c
18737 +++ b/fs/configfs/dir.c
18738 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
18739 @@ -46312,7 +47210,7 @@ index 7414ae2..d98ad6d 100644
18740 /*
18741 * We'll have a dentry and an inode for
18742 diff --git a/fs/coredump.c b/fs/coredump.c
18743 -index ce47379..68c8e43 100644
18744 +index 1774932..5812106 100644
18745 --- a/fs/coredump.c
18746 +++ b/fs/coredump.c
18747 @@ -52,7 +52,7 @@ struct core_name {
18748 @@ -46365,7 +47263,7 @@ index ce47379..68c8e43 100644
18749 pipe_unlock(pipe);
18750
18751 }
18752 -@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
18753 +@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
18754 int ispipe;
18755 struct files_struct *displaced;
18756 bool need_nonrelative = false;
18757 @@ -46374,8 +47272,8 @@ index ce47379..68c8e43 100644
18758 + long signr = siginfo->si_signo;
18759 struct coredump_params cprm = {
18760 .siginfo = siginfo,
18761 - .regs = regs,
18762 -@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
18763 + .regs = signal_pt_regs(),
18764 +@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
18765 .mm_flags = mm->flags,
18766 };
18767
18768 @@ -46387,7 +47285,7 @@ index ce47379..68c8e43 100644
18769
18770 binfmt = mm->binfmt;
18771 if (!binfmt || !binfmt->core_dump)
18772 -@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
18773 +@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
18774 need_nonrelative = true;
18775 }
18776
18777 @@ -46396,7 +47294,7 @@ index ce47379..68c8e43 100644
18778 if (retval < 0)
18779 goto fail_creds;
18780
18781 -@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
18782 +@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
18783 }
18784 cprm.limit = RLIM_INFINITY;
18785
18786 @@ -46405,7 +47303,7 @@ index ce47379..68c8e43 100644
18787 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
18788 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
18789 task_tgid_vnr(current), current->comm);
18790 -@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo, struct pt_regs *regs)
18791 +@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
18792 } else {
18793 struct inode *inode;
18794
18795 @@ -46433,10 +47331,10 @@ index ce47379..68c8e43 100644
18796 EXPORT_SYMBOL(dump_write);
18797
18798 diff --git a/fs/dcache.c b/fs/dcache.c
18799 -index 0d0adb6..f4646e9 100644
18800 +index 19153a0..428c2f5 100644
18801 --- a/fs/dcache.c
18802 +++ b/fs/dcache.c
18803 -@@ -3164,7 +3164,7 @@ void __init vfs_caches_init(unsigned long mempages)
18804 +@@ -3133,7 +3133,7 @@ void __init vfs_caches_init(unsigned long mempages)
18805 mempages -= reserve;
18806
18807 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
18808 @@ -46446,10 +47344,10 @@ index 0d0adb6..f4646e9 100644
18809 dcache_init();
18810 inode_init();
18811 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
18812 -index b607d92..41fda09 100644
18813 +index a5f12b7..4ee8a6f 100644
18814 --- a/fs/debugfs/inode.c
18815 +++ b/fs/debugfs/inode.c
18816 -@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
18817 +@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
18818 */
18819 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
18820 {
18821 @@ -46519,7 +47417,7 @@ index b2a34a1..162fa69 100644
18822 return rc;
18823 }
18824 diff --git a/fs/exec.c b/fs/exec.c
18825 -index c6e6de4..45e71ad 100644
18826 +index 20df02c..1dff97d 100644
18827 --- a/fs/exec.c
18828 +++ b/fs/exec.c
18829 @@ -55,6 +55,17 @@
18830 @@ -46662,7 +47560,7 @@ index c6e6de4..45e71ad 100644
18831
18832 return native;
18833 }
18834 -@@ -431,11 +456,12 @@ static int count(struct user_arg_ptr argv, int max)
18835 +@@ -431,7 +456,7 @@ static int count(struct user_arg_ptr argv, int max)
18836 if (!p)
18837 break;
18838
18839 @@ -46670,14 +47568,8 @@ index c6e6de4..45e71ad 100644
18840 + if (IS_ERR((const char __force_kernel *)p))
18841 return -EFAULT;
18842
18843 -- if (i++ >= max)
18844 -+ if (i >= max)
18845 - return -E2BIG;
18846 -+ ++i;
18847 -
18848 - if (fatal_signal_pending(current))
18849 - return -ERESTARTNOHAND;
18850 -@@ -465,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
18851 + if (i >= max)
18852 +@@ -466,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
18853
18854 ret = -EFAULT;
18855 str = get_user_arg_ptr(argv, argc);
18856 @@ -46686,7 +47578,7 @@ index c6e6de4..45e71ad 100644
18857 goto out;
18858
18859 len = strnlen_user(str, MAX_ARG_STRLEN);
18860 -@@ -547,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
18861 +@@ -548,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
18862 int r;
18863 mm_segment_t oldfs = get_fs();
18864 struct user_arg_ptr argv = {
18865 @@ -46695,7 +47587,7 @@ index c6e6de4..45e71ad 100644
18866 };
18867
18868 set_fs(KERNEL_DS);
18869 -@@ -582,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
18870 +@@ -583,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
18871 unsigned long new_end = old_end - shift;
18872 struct mmu_gather tlb;
18873
18874 @@ -46705,7 +47597,7 @@ index c6e6de4..45e71ad 100644
18875
18876 /*
18877 * ensure there are no vmas between where we want to go
18878 -@@ -591,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
18879 +@@ -592,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
18880 if (vma != find_vma(mm, new_start))
18881 return -EFAULT;
18882
18883 @@ -46716,7 +47608,7 @@ index c6e6de4..45e71ad 100644
18884 /*
18885 * cover the whole range: [new_start, old_end)
18886 */
18887 -@@ -671,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
18888 +@@ -672,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
18889 stack_top = arch_align_stack(stack_top);
18890 stack_top = PAGE_ALIGN(stack_top);
18891
18892 @@ -46727,7 +47619,7 @@ index c6e6de4..45e71ad 100644
18893 stack_shift = vma->vm_end - stack_top;
18894
18895 bprm->p -= stack_shift;
18896 -@@ -686,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
18897 +@@ -687,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
18898 bprm->exec -= stack_shift;
18899
18900 down_write(&mm->mmap_sem);
18901 @@ -46756,7 +47648,7 @@ index c6e6de4..45e71ad 100644
18902 /*
18903 * Adjust stack execute permissions; explicitly enable for
18904 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
18905 -@@ -706,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
18906 +@@ -707,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
18907 goto out_unlock;
18908 BUG_ON(prev != vma);
18909
18910 @@ -46770,7 +47662,7 @@ index c6e6de4..45e71ad 100644
18911 /* mprotect_fixup is overkill to remove the temporary stack flags */
18912 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
18913
18914 -@@ -736,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
18915 +@@ -737,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
18916 #endif
18917 current->mm->start_stack = bprm->p;
18918 ret = expand_stack(vma, stack_base);
18919 @@ -46798,7 +47690,7 @@ index c6e6de4..45e71ad 100644
18920 if (ret)
18921 ret = -EFAULT;
18922
18923 -@@ -771,6 +832,8 @@ struct file *open_exec(const char *name)
18924 +@@ -772,6 +832,8 @@ struct file *open_exec(const char *name)
18925
18926 fsnotify_open(file);
18927
18928 @@ -46807,7 +47699,7 @@ index c6e6de4..45e71ad 100644
18929 err = deny_write_access(file);
18930 if (err)
18931 goto exit;
18932 -@@ -794,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
18933 +@@ -795,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
18934 old_fs = get_fs();
18935 set_fs(get_ds());
18936 /* The cast to a user pointer is valid due to the set_fs() */
18937 @@ -46816,7 +47708,7 @@ index c6e6de4..45e71ad 100644
18938 set_fs(old_fs);
18939 return result;
18940 }
18941 -@@ -1246,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
18942 +@@ -1247,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
18943 }
18944 rcu_read_unlock();
18945
18946 @@ -46825,7 +47717,7 @@ index c6e6de4..45e71ad 100644
18947 bprm->unsafe |= LSM_UNSAFE_SHARE;
18948 } else {
18949 res = -EAGAIN;
18950 -@@ -1449,6 +1512,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
18951 +@@ -1447,6 +1509,28 @@ int search_binary_handler(struct linux_binprm *bprm)
18952
18953 EXPORT_SYMBOL(search_binary_handler);
18954
18955 @@ -46854,9 +47746,9 @@ index c6e6de4..45e71ad 100644
18956 /*
18957 * sys_execve() executes a new program.
18958 */
18959 -@@ -1457,6 +1542,11 @@ static int do_execve_common(const char *filename,
18960 - struct user_arg_ptr envp,
18961 - struct pt_regs *regs)
18962 +@@ -1454,6 +1538,11 @@ static int do_execve_common(const char *filename,
18963 + struct user_arg_ptr argv,
18964 + struct user_arg_ptr envp)
18965 {
18966 +#ifdef CONFIG_GRKERNSEC
18967 + struct file *old_exec_file;
18968 @@ -46866,7 +47758,7 @@ index c6e6de4..45e71ad 100644
18969 struct linux_binprm *bprm;
18970 struct file *file;
18971 struct files_struct *displaced;
18972 -@@ -1464,6 +1554,8 @@ static int do_execve_common(const char *filename,
18973 +@@ -1461,6 +1550,8 @@ static int do_execve_common(const char *filename,
18974 int retval;
18975 const struct cred *cred = current_cred();
18976
18977 @@ -46875,7 +47767,7 @@ index c6e6de4..45e71ad 100644
18978 /*
18979 * We move the actual failure in case of RLIMIT_NPROC excess from
18980 * set*uid() to execve() because too many poorly written programs
18981 -@@ -1504,12 +1596,27 @@ static int do_execve_common(const char *filename,
18982 +@@ -1501,12 +1592,27 @@ static int do_execve_common(const char *filename,
18983 if (IS_ERR(file))
18984 goto out_unmark;
18985
18986 @@ -46903,7 +47795,7 @@ index c6e6de4..45e71ad 100644
18987 retval = bprm_mm_init(bprm);
18988 if (retval)
18989 goto out_file;
18990 -@@ -1526,24 +1633,65 @@ static int do_execve_common(const char *filename,
18991 +@@ -1523,24 +1629,65 @@ static int do_execve_common(const char *filename,
18992 if (retval < 0)
18993 goto out;
18994
18995 @@ -46916,8 +47808,8 @@ index c6e6de4..45e71ad 100644
18996 +#endif
18997 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
18998 + /* limit suid stack to 8MB
18999 -+ we saved the old limits above and will restore them if this exec fails
19000 -+ */
19001 ++ * we saved the old limits above and will restore them if this exec fails
19002 ++ */
19003 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
19004 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
19005 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
19006 @@ -46958,7 +47850,7 @@ index c6e6de4..45e71ad 100644
19007 +
19008 + gr_handle_exec_args(bprm, argv);
19009
19010 - retval = search_binary_handler(bprm,regs);
19011 + retval = search_binary_handler(bprm);
19012 if (retval < 0)
19013 - goto out;
19014 + goto out_fail;
19015 @@ -46973,7 +47865,7 @@ index c6e6de4..45e71ad 100644
19016 current->fs->in_exec = 0;
19017 current->in_execve = 0;
19018 acct_update_integrals(current);
19019 -@@ -1552,6 +1700,14 @@ static int do_execve_common(const char *filename,
19020 +@@ -1549,6 +1696,14 @@ static int do_execve_common(const char *filename,
19021 put_files_struct(displaced);
19022 return retval;
19023
19024 @@ -46988,8 +47880,8 @@ index c6e6de4..45e71ad 100644
19025 out:
19026 if (bprm->mm) {
19027 acct_arg_size(bprm, 0);
19028 -@@ -1727,3 +1883,253 @@ int kernel_execve(const char *filename,
19029 - ret_from_kernel_execve(p);
19030 +@@ -1697,3 +1852,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
19031 + return error;
19032 }
19033 #endif
19034 +
19035 @@ -47001,7 +47893,7 @@ index c6e6de4..45e71ad 100644
19036 + if (*flags & MF_PAX_SEGMEXEC)
19037 + {
19038 + *flags &= ~MF_PAX_SEGMEXEC;
19039 -+ retval = -EINVAL;
19040 ++ retval = -EINVAL;
19041 + }
19042 +#endif
19043 +
19044 @@ -47026,7 +47918,7 @@ index c6e6de4..45e71ad 100644
19045 + )
19046 + {
19047 + *flags &= ~MF_PAX_MPROTECT;
19048 -+ retval = -EINVAL;
19049 ++ retval = -EINVAL;
19050 + }
19051 +
19052 + if ((*flags & MF_PAX_EMUTRAMP)
19053 @@ -47118,7 +48010,7 @@ index c6e6de4..45e71ad 100644
19054 + info.si_code = SI_KERNEL;
19055 + info.si_pid = 0;
19056 + info.si_uid = 0;
19057 -+ do_coredump(&info, regs);
19058 ++ do_coredump(&info);
19059 +}
19060 +#endif
19061 +
19062 @@ -47292,10 +48184,10 @@ index cf18217..8f6b9c3 100644
19063 if (free_clusters >= (nclusters + dirty_clusters))
19064 return 1;
19065 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
19066 -index 3c20de1..6ff2460 100644
19067 +index 8462eb3..4a71af6 100644
19068 --- a/fs/ext4/ext4.h
19069 +++ b/fs/ext4/ext4.h
19070 -@@ -1247,19 +1247,19 @@ struct ext4_sb_info {
19071 +@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
19072 unsigned long s_mb_last_start;
19073
19074 /* stats for buddy allocator */
19075 @@ -47326,7 +48218,7 @@ index 3c20de1..6ff2460 100644
19076
19077 /* locality groups */
19078 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
19079 -index 526e553..3f2de85 100644
19080 +index 1bf6fe7..1a5bdef 100644
19081 --- a/fs/ext4/mballoc.c
19082 +++ b/fs/ext4/mballoc.c
19083 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
19084 @@ -47383,7 +48275,7 @@ index 526e553..3f2de85 100644
19085 }
19086
19087 free_percpu(sbi->s_locality_groups);
19088 -@@ -3052,16 +3052,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
19089 +@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
19090 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
19091
19092 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
19093 @@ -47406,7 +48298,7 @@ index 526e553..3f2de85 100644
19094 }
19095
19096 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
19097 -@@ -3461,7 +3461,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
19098 +@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
19099 trace_ext4_mb_new_inode_pa(ac, pa);
19100
19101 ext4_mb_use_inode_pa(ac, pa);
19102 @@ -47415,7 +48307,7 @@ index 526e553..3f2de85 100644
19103
19104 ei = EXT4_I(ac->ac_inode);
19105 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
19106 -@@ -3521,7 +3521,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
19107 +@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
19108 trace_ext4_mb_new_group_pa(ac, pa);
19109
19110 ext4_mb_use_group_pa(ac, pa);
19111 @@ -47424,7 +48316,7 @@ index 526e553..3f2de85 100644
19112
19113 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
19114 lg = ac->ac_lg;
19115 -@@ -3610,7 +3610,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
19116 +@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
19117 * from the bitmap and continue.
19118 */
19119 }
19120 @@ -47433,7 +48325,7 @@ index 526e553..3f2de85 100644
19121
19122 return err;
19123 }
19124 -@@ -3628,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
19125 +@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
19126 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
19127 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
19128 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
19129 @@ -47442,18 +48334,6 @@ index 526e553..3f2de85 100644
19130 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
19131
19132 return 0;
19133 -diff --git a/fs/ext4/super.c b/fs/ext4/super.c
19134 -index d59b351..775f8c8 100644
19135 ---- a/fs/ext4/super.c
19136 -+++ b/fs/ext4/super.c
19137 -@@ -3212,7 +3212,6 @@ int ext4_calculate_overhead(struct super_block *sb)
19138 - ext4_fsblk_t overhead = 0;
19139 - char *buf = (char *) get_zeroed_page(GFP_KERNEL);
19140 -
19141 -- memset(buf, 0, PAGE_SIZE);
19142 - if (!buf)
19143 - return -ENOMEM;
19144 -
19145 diff --git a/fs/fcntl.c b/fs/fcntl.c
19146 index 71a600a..20d87b1 100644
19147 --- a/fs/fcntl.c
19148 @@ -47471,7 +48351,7 @@ index 71a600a..20d87b1 100644
19149 return 0;
19150 }
19151 diff --git a/fs/fhandle.c b/fs/fhandle.c
19152 -index f775bfd..629bd4c 100644
19153 +index 999ff5c..41f4109 100644
19154 --- a/fs/fhandle.c
19155 +++ b/fs/fhandle.c
19156 @@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
19157 @@ -47559,7 +48439,7 @@ index cf6f434..3d7942c 100644
19158
19159 err_nocleanup:
19160 diff --git a/fs/file.c b/fs/file.c
19161 -index eff2316..8c8930c 100644
19162 +index 2b3570b..c57924b 100644
19163 --- a/fs/file.c
19164 +++ b/fs/file.c
19165 @@ -16,6 +16,7 @@
19166 @@ -47570,7 +48450,7 @@ index eff2316..8c8930c 100644
19167 #include <linux/fdtable.h>
19168 #include <linux/bitops.h>
19169 #include <linux/interrupt.h>
19170 -@@ -898,6 +899,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
19171 +@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
19172 if (!file)
19173 return __close_fd(files, fd);
19174
19175 @@ -47578,7 +48458,7 @@ index eff2316..8c8930c 100644
19176 if (fd >= rlimit(RLIMIT_NOFILE))
19177 return -EBADF;
19178
19179 -@@ -924,6 +926,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
19180 +@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
19181 if (unlikely(oldfd == newfd))
19182 return -EINVAL;
19183
19184 @@ -47586,7 +48466,7 @@ index eff2316..8c8930c 100644
19185 if (newfd >= rlimit(RLIMIT_NOFILE))
19186 return -EBADF;
19187
19188 -@@ -979,6 +982,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
19189 +@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
19190 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
19191 {
19192 int err;
19193 @@ -47612,7 +48492,7 @@ index da165f6..3671bdb 100644
19194
19195 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
19196 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
19197 -index 5df4775..9d9336f 100644
19198 +index fe6ca58..65318cf 100644
19199 --- a/fs/fs_struct.c
19200 +++ b/fs/fs_struct.c
19201 @@ -4,6 +4,7 @@
19202 @@ -47718,24 +48598,8 @@ index 5df4775..9d9336f 100644
19203 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
19204 .seq = SEQCNT_ZERO,
19205 .umask = 0022,
19206 -@@ -175,12 +197,13 @@ void daemonize_fs_struct(void)
19207 - task_lock(current);
19208 -
19209 - spin_lock(&init_fs.lock);
19210 -- init_fs.users++;
19211 -+ atomic_inc(&init_fs.users);
19212 - spin_unlock(&init_fs.lock);
19213 -
19214 - spin_lock(&fs->lock);
19215 - current->fs = &init_fs;
19216 -- kill = !--fs->users;
19217 -+ gr_set_chroot_entries(current, &current->fs->root);
19218 -+ kill = !atomic_dec_return(&fs->users);
19219 - spin_unlock(&fs->lock);
19220 -
19221 - task_unlock(current);
19222 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
19223 -index 9905350..02eaec4 100644
19224 +index 8dcb114..b1072e2 100644
19225 --- a/fs/fscache/cookie.c
19226 +++ b/fs/fscache/cookie.c
19227 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
19228 @@ -47818,7 +48682,16 @@ index 9905350..02eaec4 100644
19229
19230 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
19231
19232 -@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
19233 +@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
19234 +
19235 + _enter("{%s}", cookie->def->name);
19236 +
19237 +- fscache_stat(&fscache_n_invalidates);
19238 ++ fscache_stat_unchecked(&fscache_n_invalidates);
19239 +
19240 + /* Only permit invalidation of data files. Invalidating an index will
19241 + * require the caller to release all its attachments to the tree rooted
19242 +@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
19243 struct fscache_object *object;
19244 struct hlist_node *_p;
19245
19246 @@ -47831,7 +48704,7 @@ index 9905350..02eaec4 100644
19247 _leave(" [no cookie]");
19248 return;
19249 }
19250 -@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
19251 +@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
19252 struct fscache_object *object;
19253 unsigned long event;
19254
19255 @@ -47847,7 +48720,7 @@ index 9905350..02eaec4 100644
19256 _leave(" [no cookie]");
19257 return;
19258 }
19259 -@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
19260 +@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
19261
19262 /* wait for the cookie to finish being instantiated (or to fail) */
19263 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
19264 @@ -47857,12 +48730,17 @@ index 9905350..02eaec4 100644
19265 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
19266 }
19267 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
19268 -index f6aad48..88dcf26 100644
19269 +index ee38fef..0a326d4 100644
19270 --- a/fs/fscache/internal.h
19271 +++ b/fs/fscache/internal.h
19272 -@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
19273 - extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
19274 - extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
19275 +@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
19276 + * stats.c
19277 + */
19278 + #ifdef CONFIG_FSCACHE_STATS
19279 +-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
19280 +-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
19281 ++extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
19282 ++extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
19283
19284 -extern atomic_t fscache_n_op_pend;
19285 -extern atomic_t fscache_n_op_run;
19286 @@ -47955,10 +48833,12 @@ index f6aad48..88dcf26 100644
19287 -extern atomic_t fscache_n_store_vmscan_gone;
19288 -extern atomic_t fscache_n_store_vmscan_busy;
19289 -extern atomic_t fscache_n_store_vmscan_cancelled;
19290 +-extern atomic_t fscache_n_store_vmscan_wait;
19291 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
19292 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
19293 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
19294 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
19295 ++extern atomic_unchecked_t fscache_n_store_vmscan_wait;
19296
19297 -extern atomic_t fscache_n_marks;
19298 -extern atomic_t fscache_n_uncaches;
19299 @@ -47978,6 +48858,11 @@ index f6aad48..88dcf26 100644
19300 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
19301 +extern atomic_unchecked_t fscache_n_acquires_oom;
19302
19303 +-extern atomic_t fscache_n_invalidates;
19304 +-extern atomic_t fscache_n_invalidates_run;
19305 ++extern atomic_unchecked_t fscache_n_invalidates;
19306 ++extern atomic_unchecked_t fscache_n_invalidates_run;
19307 +
19308 -extern atomic_t fscache_n_updates;
19309 -extern atomic_t fscache_n_updates_null;
19310 -extern atomic_t fscache_n_updates_run;
19311 @@ -48031,7 +48916,7 @@ index f6aad48..88dcf26 100644
19312
19313 extern atomic_t fscache_n_cop_alloc_object;
19314 extern atomic_t fscache_n_cop_lookup_object;
19315 -@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
19316 +@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
19317 atomic_inc(stat);
19318 }
19319
19320 @@ -48043,7 +48928,7 @@ index f6aad48..88dcf26 100644
19321 static inline void fscache_stat_d(atomic_t *stat)
19322 {
19323 atomic_dec(stat);
19324 -@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
19325 +@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
19326
19327 #define __fscache_stat(stat) (NULL)
19328 #define fscache_stat(stat) do {} while (0)
19329 @@ -48052,10 +48937,19 @@ index f6aad48..88dcf26 100644
19330 #endif
19331
19332 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
19333 -index b6b897c..0ffff9c 100644
19334 +index 50d41c1..10ee117 100644
19335 --- a/fs/fscache/object.c
19336 +++ b/fs/fscache/object.c
19337 -@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19338 +@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19339 + /* Invalidate an object on disk */
19340 + case FSCACHE_OBJECT_INVALIDATING:
19341 + clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
19342 +- fscache_stat(&fscache_n_invalidates_run);
19343 ++ fscache_stat_unchecked(&fscache_n_invalidates_run);
19344 + fscache_stat(&fscache_n_cop_invalidate_object);
19345 + fscache_invalidate_object(object);
19346 + fscache_stat_d(&fscache_n_cop_invalidate_object);
19347 +@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19348 /* update the object metadata on disk */
19349 case FSCACHE_OBJECT_UPDATING:
19350 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
19351 @@ -48064,7 +48958,7 @@ index b6b897c..0ffff9c 100644
19352 fscache_stat(&fscache_n_cop_update_object);
19353 object->cache->ops->update_object(object);
19354 fscache_stat_d(&fscache_n_cop_update_object);
19355 -@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19356 +@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19357 spin_lock(&object->lock);
19358 object->state = FSCACHE_OBJECT_DEAD;
19359 spin_unlock(&object->lock);
19360 @@ -48073,7 +48967,7 @@ index b6b897c..0ffff9c 100644
19361 goto terminal_transit;
19362
19363 /* handle the parent cache of this object being withdrawn from
19364 -@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19365 +@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
19366 spin_lock(&object->lock);
19367 object->state = FSCACHE_OBJECT_DEAD;
19368 spin_unlock(&object->lock);
19369 @@ -48082,7 +48976,7 @@ index b6b897c..0ffff9c 100644
19370 goto terminal_transit;
19371
19372 /* complain about the object being woken up once it is
19373 -@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
19374 +@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
19375 parent->cookie->def->name, cookie->def->name,
19376 object->cache->tag->name);
19377
19378 @@ -48091,7 +48985,7 @@ index b6b897c..0ffff9c 100644
19379 fscache_stat(&fscache_n_cop_lookup_object);
19380 ret = object->cache->ops->lookup_object(object);
19381 fscache_stat_d(&fscache_n_cop_lookup_object);
19382 -@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
19383 +@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
19384 if (ret == -ETIMEDOUT) {
19385 /* probably stuck behind another object, so move this one to
19386 * the back of the queue */
19387 @@ -48100,7 +48994,7 @@ index b6b897c..0ffff9c 100644
19388 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
19389 }
19390
19391 -@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
19392 +@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
19393
19394 spin_lock(&object->lock);
19395 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
19396 @@ -48109,7 +49003,7 @@ index b6b897c..0ffff9c 100644
19397
19398 /* transit here to allow write requests to begin stacking up
19399 * and read requests to begin returning ENODATA */
19400 -@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
19401 +@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
19402 * result, in which case there may be data available */
19403 spin_lock(&object->lock);
19404 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
19405 @@ -48118,7 +49012,7 @@ index b6b897c..0ffff9c 100644
19406
19407 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
19408
19409 -@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
19410 +@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
19411 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
19412 } else {
19413 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
19414 @@ -48127,7 +49021,7 @@ index b6b897c..0ffff9c 100644
19415
19416 object->state = FSCACHE_OBJECT_AVAILABLE;
19417 spin_unlock(&object->lock);
19418 -@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
19419 +@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
19420 fscache_enqueue_dependents(object);
19421
19422 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
19423 @@ -48136,7 +49030,7 @@ index b6b897c..0ffff9c 100644
19424
19425 _leave("");
19426 }
19427 -@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
19428 +@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
19429 enum fscache_checkaux result;
19430
19431 if (!object->cookie->def->check_aux) {
19432 @@ -48145,7 +49039,7 @@ index b6b897c..0ffff9c 100644
19433 return FSCACHE_CHECKAUX_OKAY;
19434 }
19435
19436 -@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
19437 +@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
19438 switch (result) {
19439 /* entry okay as is */
19440 case FSCACHE_CHECKAUX_OKAY:
19441 @@ -48167,7 +49061,7 @@ index b6b897c..0ffff9c 100644
19442
19443 default:
19444 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
19445 -index 30afdfa..2256596 100644
19446 +index 762a9ec..2023284 100644
19447 --- a/fs/fscache/operation.c
19448 +++ b/fs/fscache/operation.c
19449 @@ -17,7 +17,7 @@
19450 @@ -48179,16 +49073,16 @@ index 30afdfa..2256596 100644
19451 EXPORT_SYMBOL(fscache_op_debug_id);
19452
19453 /**
19454 -@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
19455 - ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
19456 +@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
19457 ASSERTCMP(atomic_read(&op->usage), >, 0);
19458 + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
19459
19460 - fscache_stat(&fscache_n_op_enqueue);
19461 + fscache_stat_unchecked(&fscache_n_op_enqueue);
19462 switch (op->flags & FSCACHE_OP_TYPE) {
19463 case FSCACHE_OP_ASYNC:
19464 _debug("queue async");
19465 -@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
19466 +@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
19467 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
19468 if (op->processor)
19469 fscache_enqueue_operation(op);
19470 @@ -48197,8 +49091,8 @@ index 30afdfa..2256596 100644
19471 }
19472
19473 /*
19474 -@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
19475 - if (object->n_ops > 1) {
19476 +@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
19477 + if (object->n_in_progress > 0) {
19478 atomic_inc(&op->usage);
19479 list_add_tail(&op->pend_link, &object->pending_ops);
19480 - fscache_stat(&fscache_n_op_pend);
19481 @@ -48211,7 +49105,7 @@ index 30afdfa..2256596 100644
19482 fscache_start_operations(object);
19483 } else {
19484 ASSERTCMP(object->n_in_progress, ==, 0);
19485 -@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
19486 +@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
19487 object->n_exclusive++; /* reads and writes must wait */
19488 atomic_inc(&op->usage);
19489 list_add_tail(&op->pend_link, &object->pending_ops);
19490 @@ -48219,8 +49113,8 @@ index 30afdfa..2256596 100644
19491 + fscache_stat_unchecked(&fscache_n_op_pend);
19492 ret = 0;
19493 } else {
19494 - /* not allowed to submit ops in any other state */
19495 -@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
19496 + /* If we're in any other state, there must have been an I/O
19497 +@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
19498 if (object->n_exclusive > 0) {
19499 atomic_inc(&op->usage);
19500 list_add_tail(&op->pend_link, &object->pending_ops);
19501 @@ -48234,7 +49128,7 @@ index 30afdfa..2256596 100644
19502 fscache_start_operations(object);
19503 } else {
19504 ASSERTCMP(object->n_exclusive, ==, 0);
19505 -@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
19506 +@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
19507 object->n_ops++;
19508 atomic_inc(&op->usage);
19509 list_add_tail(&op->pend_link, &object->pending_ops);
19510 @@ -48246,28 +49140,37 @@ index 30afdfa..2256596 100644
19511 object->state == FSCACHE_OBJECT_WITHDRAWING) {
19512 - fscache_stat(&fscache_n_op_rejected);
19513 + fscache_stat_unchecked(&fscache_n_op_rejected);
19514 + op->state = FSCACHE_OP_ST_CANCELLED;
19515 ret = -ENOBUFS;
19516 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
19517 - fscache_report_unexpected_submission(object, op, ostate);
19518 -@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
19519 -
19520 +@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
19521 ret = -EBUSY;
19522 - if (!list_empty(&op->pend_link)) {
19523 + if (op->state == FSCACHE_OP_ST_PENDING) {
19524 + ASSERT(!list_empty(&op->pend_link));
19525 - fscache_stat(&fscache_n_op_cancelled);
19526 + fscache_stat_unchecked(&fscache_n_op_cancelled);
19527 list_del_init(&op->pend_link);
19528 - object->n_ops--;
19529 - if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
19530 -@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
19531 - if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
19532 - BUG();
19533 + if (do_cancel)
19534 + do_cancel(op);
19535 +@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
19536 + while (!list_empty(&object->pending_ops)) {
19537 + op = list_entry(object->pending_ops.next,
19538 + struct fscache_operation, pend_link);
19539 +- fscache_stat(&fscache_n_op_cancelled);
19540 ++ fscache_stat_unchecked(&fscache_n_op_cancelled);
19541 + list_del_init(&op->pend_link);
19542 +
19543 + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
19544 +@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
19545 + op->state, ==, FSCACHE_OP_ST_CANCELLED);
19546 + op->state = FSCACHE_OP_ST_DEAD;
19547
19548 - fscache_stat(&fscache_n_op_release);
19549 + fscache_stat_unchecked(&fscache_n_op_release);
19550
19551 if (op->release) {
19552 op->release(op);
19553 -@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
19554 +@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
19555 * lock, and defer it otherwise */
19556 if (!spin_trylock(&object->lock)) {
19557 _debug("defer put");
19558 @@ -48276,7 +49179,7 @@ index 30afdfa..2256596 100644
19559
19560 cache = object->cache;
19561 spin_lock(&cache->op_gc_list_lock);
19562 -@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
19563 +@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
19564
19565 _debug("GC DEFERRED REL OBJ%x OP%x",
19566 object->debug_id, op->debug_id);
19567 @@ -48284,12 +49187,12 @@ index 30afdfa..2256596 100644
19568 + fscache_stat_unchecked(&fscache_n_op_gc);
19569
19570 ASSERTCMP(atomic_read(&op->usage), ==, 0);
19571 -
19572 + ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
19573 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
19574 -index 3f7a59b..cf196cc 100644
19575 +index ff000e5..c44ec6d 100644
19576 --- a/fs/fscache/page.c
19577 +++ b/fs/fscache/page.c
19578 -@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
19579 +@@ -61,7 +61,7 @@ try_again:
19580 val = radix_tree_lookup(&cookie->stores, page->index);
19581 if (!val) {
19582 rcu_read_unlock();
19583 @@ -48298,7 +49201,7 @@ index 3f7a59b..cf196cc 100644
19584 __fscache_uncache_page(cookie, page);
19585 return true;
19586 }
19587 -@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
19588 +@@ -91,11 +91,11 @@ try_again:
19589 spin_unlock(&cookie->stores_lock);
19590
19591 if (xpage) {
19592 @@ -48313,16 +49216,21 @@ index 3f7a59b..cf196cc 100644
19593 }
19594
19595 wake_up_bit(&cookie->flags, 0);
19596 -@@ -107,7 +107,7 @@ page_busy:
19597 - /* we might want to wait here, but that could deadlock the allocator as
19598 - * the work threads writing to the cache may all end up sleeping
19599 - * on memory allocation */
19600 -- fscache_stat(&fscache_n_store_vmscan_busy);
19601 -+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
19602 - return false;
19603 - }
19604 - EXPORT_SYMBOL(__fscache_maybe_release_page);
19605 -@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
19606 +@@ -110,11 +110,11 @@ page_busy:
19607 + * sleeping on memory allocation, so we may need to impose a timeout
19608 + * too. */
19609 + if (!(gfp & __GFP_WAIT)) {
19610 +- fscache_stat(&fscache_n_store_vmscan_busy);
19611 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
19612 + return false;
19613 + }
19614 +
19615 +- fscache_stat(&fscache_n_store_vmscan_wait);
19616 ++ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
19617 + __fscache_wait_on_page_write(cookie, page);
19618 + gfp &= ~__GFP_WAIT;
19619 + goto try_again;
19620 +@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
19621 FSCACHE_COOKIE_STORING_TAG);
19622 if (!radix_tree_tag_get(&cookie->stores, page->index,
19623 FSCACHE_COOKIE_PENDING_TAG)) {
19624 @@ -48331,7 +49239,7 @@ index 3f7a59b..cf196cc 100644
19625 xpage = radix_tree_delete(&cookie->stores, page->index);
19626 }
19627 spin_unlock(&cookie->stores_lock);
19628 -@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
19629 +@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
19630
19631 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
19632
19633 @@ -48340,7 +49248,7 @@ index 3f7a59b..cf196cc 100644
19634
19635 if (fscache_object_is_active(object)) {
19636 fscache_stat(&fscache_n_cop_attr_changed);
19637 -@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19638 +@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19639
19640 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
19641
19642 @@ -48354,7 +49262,7 @@ index 3f7a59b..cf196cc 100644
19643 _leave(" = -ENOMEM");
19644 return -ENOMEM;
19645 }
19646 -@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19647 +@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19648 if (fscache_submit_exclusive_op(object, op) < 0)
19649 goto nobufs;
19650 spin_unlock(&cookie->lock);
19651 @@ -48363,7 +49271,7 @@ index 3f7a59b..cf196cc 100644
19652 fscache_put_operation(op);
19653 _leave(" = 0");
19654 return 0;
19655 -@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19656 +@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
19657 nobufs:
19658 spin_unlock(&cookie->lock);
19659 kfree(op);
19660 @@ -48372,7 +49280,7 @@ index 3f7a59b..cf196cc 100644
19661 _leave(" = %d", -ENOBUFS);
19662 return -ENOBUFS;
19663 }
19664 -@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
19665 +@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
19666 /* allocate a retrieval operation and attempt to submit it */
19667 op = kzalloc(sizeof(*op), GFP_NOIO);
19668 if (!op) {
19669 @@ -48381,7 +49289,7 @@ index 3f7a59b..cf196cc 100644
19670 return NULL;
19671 }
19672
19673 -@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
19674 +@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
19675 return 0;
19676 }
19677
19678 @@ -48397,7 +49305,7 @@ index 3f7a59b..cf196cc 100644
19679 _leave(" = -ERESTARTSYS");
19680 return -ERESTARTSYS;
19681 }
19682 -@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
19683 +@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
19684 */
19685 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
19686 struct fscache_retrieval *op,
19687 @@ -48408,7 +49316,7 @@ index 3f7a59b..cf196cc 100644
19688 {
19689 int ret;
19690
19691 -@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
19692 +@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
19693 goto check_if_dead;
19694
19695 _debug(">>> WT");
19696 @@ -48416,17 +49324,25 @@ index 3f7a59b..cf196cc 100644
19697 + fscache_stat_unchecked(stat_op_waits);
19698 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
19699 fscache_wait_bit_interruptible,
19700 - TASK_INTERRUPTIBLE) < 0) {
19701 -@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
19702 + TASK_INTERRUPTIBLE) != 0) {
19703 +@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
19704
19705 check_if_dead:
19706 + if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
19707 +- fscache_stat(stat_object_dead);
19708 ++ fscache_stat_unchecked(stat_object_dead);
19709 + _leave(" = -ENOBUFS [cancelled]");
19710 + return -ENOBUFS;
19711 + }
19712 if (unlikely(fscache_object_is_dead(object))) {
19713 + pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
19714 + fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
19715 - fscache_stat(stat_object_dead);
19716 + fscache_stat_unchecked(stat_object_dead);
19717 return -ENOBUFS;
19718 }
19719 return 0;
19720 -@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19721 +@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19722
19723 _enter("%p,%p,,,", cookie, page);
19724
19725 @@ -48435,8 +49351,8 @@ index 3f7a59b..cf196cc 100644
19726
19727 if (hlist_empty(&cookie->backing_objects))
19728 goto nobufs;
19729 -@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19730 - goto nobufs_unlock;
19731 +@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19732 + goto nobufs_unlock_dec;
19733 spin_unlock(&cookie->lock);
19734
19735 - fscache_stat(&fscache_n_retrieval_ops);
19736 @@ -48444,7 +49360,7 @@ index 3f7a59b..cf196cc 100644
19737
19738 /* pin the netfs read context in case we need to do the actual netfs
19739 * read because we've encountered a cache read failure */
19740 -@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19741 +@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
19742
19743 error:
19744 if (ret == -ENOMEM)
19745 @@ -48465,7 +49381,7 @@ index 3f7a59b..cf196cc 100644
19746
19747 fscache_put_retrieval(op);
19748 _leave(" = %d", ret);
19749 -@@ -429,7 +429,7 @@ nobufs_unlock:
19750 +@@ -467,7 +467,7 @@ nobufs_unlock:
19751 spin_unlock(&cookie->lock);
19752 kfree(op);
19753 nobufs:
19754 @@ -48474,7 +49390,7 @@ index 3f7a59b..cf196cc 100644
19755 _leave(" = -ENOBUFS");
19756 return -ENOBUFS;
19757 }
19758 -@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19759 +@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19760
19761 _enter("%p,,%d,,,", cookie, *nr_pages);
19762
19763 @@ -48483,8 +49399,8 @@ index 3f7a59b..cf196cc 100644
19764
19765 if (hlist_empty(&cookie->backing_objects))
19766 goto nobufs;
19767 -@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19768 - goto nobufs_unlock;
19769 +@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19770 + goto nobufs_unlock_dec;
19771 spin_unlock(&cookie->lock);
19772
19773 - fscache_stat(&fscache_n_retrieval_ops);
19774 @@ -48492,7 +49408,7 @@ index 3f7a59b..cf196cc 100644
19775
19776 /* pin the netfs read context in case we need to do the actual netfs
19777 * read because we've encountered a cache read failure */
19778 -@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19779 +@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
19780
19781 error:
19782 if (ret == -ENOMEM)
19783 @@ -48513,7 +49429,7 @@ index 3f7a59b..cf196cc 100644
19784
19785 fscache_put_retrieval(op);
19786 _leave(" = %d", ret);
19787 -@@ -545,7 +545,7 @@ nobufs_unlock:
19788 +@@ -591,7 +591,7 @@ nobufs_unlock:
19789 spin_unlock(&cookie->lock);
19790 kfree(op);
19791 nobufs:
19792 @@ -48522,7 +49438,7 @@ index 3f7a59b..cf196cc 100644
19793 _leave(" = -ENOBUFS");
19794 return -ENOBUFS;
19795 }
19796 -@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19797 +@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19798
19799 _enter("%p,%p,,,", cookie, page);
19800
19801 @@ -48531,7 +49447,7 @@ index 3f7a59b..cf196cc 100644
19802
19803 if (hlist_empty(&cookie->backing_objects))
19804 goto nobufs;
19805 -@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19806 +@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19807 goto nobufs_unlock;
19808 spin_unlock(&cookie->lock);
19809
19810 @@ -48540,7 +49456,7 @@ index 3f7a59b..cf196cc 100644
19811
19812 ret = fscache_wait_for_retrieval_activation(
19813 object, op,
19814 -@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19815 +@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
19816
19817 error:
19818 if (ret == -ERESTARTSYS)
19819 @@ -48555,7 +49471,7 @@ index 3f7a59b..cf196cc 100644
19820
19821 fscache_put_retrieval(op);
19822 _leave(" = %d", ret);
19823 -@@ -625,7 +625,7 @@ nobufs_unlock:
19824 +@@ -677,7 +677,7 @@ nobufs_unlock:
19825 spin_unlock(&cookie->lock);
19826 kfree(op);
19827 nobufs:
19828 @@ -48564,7 +49480,7 @@ index 3f7a59b..cf196cc 100644
19829 _leave(" = -ENOBUFS");
19830 return -ENOBUFS;
19831 }
19832 -@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19833 +@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19834
19835 spin_lock(&cookie->stores_lock);
19836
19837 @@ -48573,7 +49489,7 @@ index 3f7a59b..cf196cc 100644
19838
19839 /* find a page to store */
19840 page = NULL;
19841 -@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19842 +@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19843 page = results[0];
19844 _debug("gang %d [%lx]", n, page->index);
19845 if (page->index > op->store_limit) {
19846 @@ -48582,7 +49498,7 @@ index 3f7a59b..cf196cc 100644
19847 goto superseded;
19848 }
19849
19850 -@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19851 +@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
19852 spin_unlock(&cookie->stores_lock);
19853 spin_unlock(&object->lock);
19854
19855 @@ -48591,16 +49507,16 @@ index 3f7a59b..cf196cc 100644
19856 fscache_stat(&fscache_n_cop_write_page);
19857 ret = object->cache->ops->write_page(op, page);
19858 fscache_stat_d(&fscache_n_cop_write_page);
19859 -@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19860 +@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19861 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
19862 ASSERT(PageFsCache(page));
19863
19864 - fscache_stat(&fscache_n_stores);
19865 + fscache_stat_unchecked(&fscache_n_stores);
19866
19867 - op = kzalloc(sizeof(*op), GFP_NOIO);
19868 - if (!op)
19869 -@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19870 + if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
19871 + _leave(" = -ENOBUFS [invalidating]");
19872 +@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19873 spin_unlock(&cookie->stores_lock);
19874 spin_unlock(&object->lock);
19875
19876 @@ -48609,7 +49525,7 @@ index 3f7a59b..cf196cc 100644
19877 op->store_limit = object->store_limit;
19878
19879 if (fscache_submit_op(object, &op->op) < 0)
19880 -@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19881 +@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19882
19883 spin_unlock(&cookie->lock);
19884 radix_tree_preload_end();
19885 @@ -48620,7 +49536,7 @@ index 3f7a59b..cf196cc 100644
19886
19887 /* the work queue now carries its own ref on the object */
19888 fscache_put_operation(&op->op);
19889 -@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19890 +@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
19891 return 0;
19892
19893 already_queued:
19894 @@ -48637,7 +49553,7 @@ index 3f7a59b..cf196cc 100644
19895 _leave(" = 0");
19896 return 0;
19897
19898 -@@ -851,14 +851,14 @@ nobufs:
19899 +@@ -959,14 +959,14 @@ nobufs:
19900 spin_unlock(&cookie->lock);
19901 radix_tree_preload_end();
19902 kfree(op);
19903 @@ -48654,7 +49570,7 @@ index 3f7a59b..cf196cc 100644
19904 _leave(" = -ENOMEM");
19905 return -ENOMEM;
19906 }
19907 -@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
19908 +@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
19909 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
19910 ASSERTCMP(page, !=, NULL);
19911
19912 @@ -48663,20 +49579,20 @@ index 3f7a59b..cf196cc 100644
19913
19914 /* cache withdrawal may beat us to it */
19915 if (!PageFsCache(page))
19916 -@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
19917 - unsigned long loop;
19918 +@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
19919 + struct fscache_cookie *cookie = op->op.object->cookie;
19920
19921 #ifdef CONFIG_FSCACHE_STATS
19922 -- atomic_add(pagevec->nr, &fscache_n_marks);
19923 -+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
19924 +- atomic_inc(&fscache_n_marks);
19925 ++ atomic_inc_unchecked(&fscache_n_marks);
19926 #endif
19927
19928 - for (loop = 0; loop < pagevec->nr; loop++) {
19929 + _debug("- mark %p{%lx}", page, page->index);
19930 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
19931 -index 4765190..2a067f2 100644
19932 +index 8179e8b..5072cc7 100644
19933 --- a/fs/fscache/stats.c
19934 +++ b/fs/fscache/stats.c
19935 -@@ -18,95 +18,95 @@
19936 +@@ -18,99 +18,99 @@
19937 /*
19938 * operation counters
19939 */
19940 @@ -48773,10 +49689,12 @@ index 4765190..2a067f2 100644
19941 -atomic_t fscache_n_store_vmscan_gone;
19942 -atomic_t fscache_n_store_vmscan_busy;
19943 -atomic_t fscache_n_store_vmscan_cancelled;
19944 +-atomic_t fscache_n_store_vmscan_wait;
19945 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
19946 +atomic_unchecked_t fscache_n_store_vmscan_gone;
19947 +atomic_unchecked_t fscache_n_store_vmscan_busy;
19948 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
19949 ++atomic_unchecked_t fscache_n_store_vmscan_wait;
19950
19951 -atomic_t fscache_n_marks;
19952 -atomic_t fscache_n_uncaches;
19953 @@ -48796,6 +49714,11 @@ index 4765190..2a067f2 100644
19954 +atomic_unchecked_t fscache_n_acquires_nobufs;
19955 +atomic_unchecked_t fscache_n_acquires_oom;
19956
19957 +-atomic_t fscache_n_invalidates;
19958 +-atomic_t fscache_n_invalidates_run;
19959 ++atomic_unchecked_t fscache_n_invalidates;
19960 ++atomic_unchecked_t fscache_n_invalidates_run;
19961 +
19962 -atomic_t fscache_n_updates;
19963 -atomic_t fscache_n_updates_null;
19964 -atomic_t fscache_n_updates_run;
19965 @@ -48849,7 +49772,7 @@ index 4765190..2a067f2 100644
19966
19967 atomic_t fscache_n_cop_alloc_object;
19968 atomic_t fscache_n_cop_lookup_object;
19969 -@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
19970 +@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
19971 seq_puts(m, "FS-Cache statistics\n");
19972
19973 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
19974 @@ -48912,6 +49835,12 @@ index 4765190..2a067f2 100644
19975 + atomic_read_unchecked(&fscache_n_object_created),
19976 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
19977
19978 + seq_printf(m, "Invals : n=%u run=%u\n",
19979 +- atomic_read(&fscache_n_invalidates),
19980 +- atomic_read(&fscache_n_invalidates_run));
19981 ++ atomic_read_unchecked(&fscache_n_invalidates),
19982 ++ atomic_read_unchecked(&fscache_n_invalidates_run));
19983 +
19984 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
19985 - atomic_read(&fscache_n_updates),
19986 - atomic_read(&fscache_n_updates_null),
19987 @@ -49008,15 +49937,17 @@ index 4765190..2a067f2 100644
19988 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
19989 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
19990
19991 - seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
19992 + seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
19993 - atomic_read(&fscache_n_store_vmscan_not_storing),
19994 - atomic_read(&fscache_n_store_vmscan_gone),
19995 - atomic_read(&fscache_n_store_vmscan_busy),
19996 -- atomic_read(&fscache_n_store_vmscan_cancelled));
19997 +- atomic_read(&fscache_n_store_vmscan_cancelled),
19998 +- atomic_read(&fscache_n_store_vmscan_wait));
19999 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
20000 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
20001 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
20002 -+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
20003 ++ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
20004 ++ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
20005
20006 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
20007 - atomic_read(&fscache_n_op_pend),
20008 @@ -49040,10 +49971,10 @@ index 4765190..2a067f2 100644
20009 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
20010 atomic_read(&fscache_n_cop_alloc_object),
20011 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
20012 -index ee8d550..7189d8c 100644
20013 +index e397b67..b0d8709 100644
20014 --- a/fs/fuse/cuse.c
20015 +++ b/fs/fuse/cuse.c
20016 -@@ -585,10 +585,12 @@ static int __init cuse_init(void)
20017 +@@ -593,10 +593,12 @@ static int __init cuse_init(void)
20018 INIT_LIST_HEAD(&cuse_conntbl[i]);
20019
20020 /* inherit and extend fuse_dev_operations */
20021 @@ -49061,10 +49992,10 @@ index ee8d550..7189d8c 100644
20022 cuse_class = class_create(THIS_MODULE, "cuse");
20023 if (IS_ERR(cuse_class))
20024 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
20025 -index 8c23fa7..0e3aac7 100644
20026 +index e83351a..41e3c9c 100644
20027 --- a/fs/fuse/dev.c
20028 +++ b/fs/fuse/dev.c
20029 -@@ -1241,7 +1241,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
20030 +@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
20031 ret = 0;
20032 pipe_lock(pipe);
20033
20034 @@ -49074,7 +50005,7 @@ index 8c23fa7..0e3aac7 100644
20035 if (!ret)
20036 ret = -EPIPE;
20037 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
20038 -index 324bc08..4fdd56e 100644
20039 +index b7c09f9..3eff736 100644
20040 --- a/fs/fuse/dir.c
20041 +++ b/fs/fuse/dir.c
20042 @@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
20043 @@ -49087,10 +50018,10 @@ index 324bc08..4fdd56e 100644
20044 if (!IS_ERR(link))
20045 free_page((unsigned long) link);
20046 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
20047 -index 381893c..3793318 100644
20048 +index 2b6f569..fcb4d1f 100644
20049 --- a/fs/gfs2/inode.c
20050 +++ b/fs/gfs2/inode.c
20051 -@@ -1490,7 +1490,7 @@ out:
20052 +@@ -1499,7 +1499,7 @@ out:
20053
20054 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
20055 {
20056 @@ -49100,18 +50031,18 @@ index 381893c..3793318 100644
20057 kfree(s);
20058 }
20059 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
20060 -index c5bc355..163a13e 100644
20061 +index 78bde32..767e906 100644
20062 --- a/fs/hugetlbfs/inode.c
20063 +++ b/fs/hugetlbfs/inode.c
20064 -@@ -153,6 +153,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
20065 +@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
20066 + struct mm_struct *mm = current->mm;
20067 struct vm_area_struct *vma;
20068 - unsigned long start_addr;
20069 struct hstate *h = hstate_file(file);
20070 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
20071 + struct vm_unmapped_area_info info;
20072
20073 if (len & ~huge_page_mask(h))
20074 - return -EINVAL;
20075 -@@ -165,18 +166,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
20076 +@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
20077 return addr;
20078 }
20079
20080 @@ -49128,44 +50059,29 @@ index c5bc355..163a13e 100644
20081 return addr;
20082 }
20083
20084 - if (len > mm->cached_hole_size)
20085 - start_addr = mm->free_area_cache;
20086 - else {
20087 -- start_addr = TASK_UNMAPPED_BASE;
20088 -+ start_addr = mm->mmap_base;
20089 - mm->cached_hole_size = 0;
20090 - }
20091 -
20092 -@@ -190,15 +194,15 @@ full_search:
20093 - * Start a new search - just in case we missed
20094 - * some holes.
20095 - */
20096 -- if (start_addr != TASK_UNMAPPED_BASE) {
20097 -- start_addr = TASK_UNMAPPED_BASE;
20098 -+ if (start_addr != mm->mmap_base) {
20099 -+ start_addr = mm->mmap_base;
20100 - mm->cached_hole_size = 0;
20101 - goto full_search;
20102 - }
20103 - return -ENOMEM;
20104 - }
20105 -
20106 -- if (!vma || addr + len <= vma->vm_start) {
20107 -+ if (check_heap_stack_gap(vma, addr, len, offset)) {
20108 - mm->free_area_cache = addr + len;
20109 - return addr;
20110 - }
20111 -@@ -923,7 +927,7 @@ static struct file_system_type hugetlbfs_fs_type = {
20112 + info.flags = 0;
20113 + info.length = len;
20114 + info.low_limit = TASK_UNMAPPED_BASE;
20115 ++
20116 ++#ifdef CONFIG_PAX_RANDMMAP
20117 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
20118 ++ info.low_limit += mm->delta_mmap;
20119 ++#endif
20120 ++
20121 + info.high_limit = TASK_SIZE;
20122 + info.align_mask = PAGE_MASK & ~huge_page_mask(h);
20123 + info.align_offset = 0;
20124 +@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
20125 .kill_sb = kill_litter_super,
20126 };
20127
20128 --static struct vfsmount *hugetlbfs_vfsmount;
20129 -+struct vfsmount *hugetlbfs_vfsmount;
20130 +-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
20131 ++struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
20132
20133 static int can_do_hugetlb_shm(void)
20134 {
20135 diff --git a/fs/inode.c b/fs/inode.c
20136 -index 64999f1..8fad608 100644
20137 +index 14084b7..29af1d9 100644
20138 --- a/fs/inode.c
20139 +++ b/fs/inode.c
20140 @@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
20141 @@ -49221,7 +50137,7 @@ index 1a543be..d803c40 100644
20142 if (jfs_inode_cachep == NULL)
20143 return -ENOMEM;
20144 diff --git a/fs/libfs.c b/fs/libfs.c
20145 -index 7cc37ca..b3e3eec 100644
20146 +index 916da8c..1588998 100644
20147 --- a/fs/libfs.c
20148 +++ b/fs/libfs.c
20149 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
20150 @@ -49249,7 +50165,7 @@ index 7cc37ca..b3e3eec 100644
20151 next->d_inode->i_ino,
20152 dt_type(next->d_inode)) < 0)
20153 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
20154 -index 05d2912..760abfa 100644
20155 +index 54f9e6c..9ed908c 100644
20156 --- a/fs/lockd/clntproc.c
20157 +++ b/fs/lockd/clntproc.c
20158 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
20159 @@ -49292,7 +50208,7 @@ index a94e331..060bce3 100644
20160
20161 lock_flocks();
20162 diff --git a/fs/namei.c b/fs/namei.c
20163 -index 5f4cdf3..959a013 100644
20164 +index 43a97ee..ff3f601 100644
20165 --- a/fs/namei.c
20166 +++ b/fs/namei.c
20167 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
20168 @@ -49368,7 +50284,7 @@ index 5f4cdf3..959a013 100644
20169 nd->last_type = LAST_BIND;
20170 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
20171 error = PTR_ERR(*p);
20172 -@@ -1605,6 +1619,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
20173 +@@ -1596,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
20174 break;
20175 res = walk_component(nd, path, &nd->last,
20176 nd->last_type, LOOKUP_FOLLOW);
20177 @@ -49377,7 +50293,7 @@ index 5f4cdf3..959a013 100644
20178 put_link(nd, &link, cookie);
20179 } while (res > 0);
20180
20181 -@@ -1703,7 +1719,7 @@ EXPORT_SYMBOL(full_name_hash);
20182 +@@ -1694,7 +1710,7 @@ EXPORT_SYMBOL(full_name_hash);
20183 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
20184 {
20185 unsigned long a, b, adata, bdata, mask, hash, len;
20186 @@ -49386,7 +50302,7 @@ index 5f4cdf3..959a013 100644
20187
20188 hash = a = 0;
20189 len = -sizeof(unsigned long);
20190 -@@ -1993,6 +2009,8 @@ static int path_lookupat(int dfd, const char *name,
20191 +@@ -1979,6 +1995,8 @@ static int path_lookupat(int dfd, const char *name,
20192 if (err)
20193 break;
20194 err = lookup_last(nd, &path);
20195 @@ -49395,7 +50311,7 @@ index 5f4cdf3..959a013 100644
20196 put_link(nd, &link, cookie);
20197 }
20198 }
20199 -@@ -2000,6 +2018,21 @@ static int path_lookupat(int dfd, const char *name,
20200 +@@ -1986,6 +2004,21 @@ static int path_lookupat(int dfd, const char *name,
20201 if (!err)
20202 err = complete_walk(nd);
20203
20204 @@ -49417,7 +50333,7 @@ index 5f4cdf3..959a013 100644
20205 if (!err && nd->flags & LOOKUP_DIRECTORY) {
20206 if (!nd->inode->i_op->lookup) {
20207 path_put(&nd->path);
20208 -@@ -2027,8 +2060,17 @@ static int filename_lookup(int dfd, struct filename *name,
20209 +@@ -2013,8 +2046,17 @@ static int filename_lookup(int dfd, struct filename *name,
20210 retval = path_lookupat(dfd, name->name,
20211 flags | LOOKUP_REVAL, nd);
20212
20213 @@ -49436,7 +50352,7 @@ index 5f4cdf3..959a013 100644
20214 return retval;
20215 }
20216
20217 -@@ -2402,6 +2444,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
20218 +@@ -2392,6 +2434,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
20219 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
20220 return -EPERM;
20221
20222 @@ -49450,7 +50366,7 @@ index 5f4cdf3..959a013 100644
20223 return 0;
20224 }
20225
20226 -@@ -2623,7 +2672,7 @@ looked_up:
20227 +@@ -2613,7 +2662,7 @@ looked_up:
20228 * cleared otherwise prior to returning.
20229 */
20230 static int lookup_open(struct nameidata *nd, struct path *path,
20231 @@ -49459,7 +50375,7 @@ index 5f4cdf3..959a013 100644
20232 const struct open_flags *op,
20233 bool got_write, int *opened)
20234 {
20235 -@@ -2658,6 +2707,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
20236 +@@ -2648,6 +2697,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
20237 /* Negative dentry, just create the file */
20238 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
20239 umode_t mode = op->mode;
20240 @@ -49477,7 +50393,7 @@ index 5f4cdf3..959a013 100644
20241 if (!IS_POSIXACL(dir->d_inode))
20242 mode &= ~current_umask();
20243 /*
20244 -@@ -2679,6 +2739,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
20245 +@@ -2669,6 +2729,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
20246 nd->flags & LOOKUP_EXCL);
20247 if (error)
20248 goto out_dput;
20249 @@ -49486,7 +50402,7 @@ index 5f4cdf3..959a013 100644
20250 }
20251 out_no_open:
20252 path->dentry = dentry;
20253 -@@ -2693,7 +2755,7 @@ out_dput:
20254 +@@ -2683,7 +2745,7 @@ out_dput:
20255 /*
20256 * Handle the last step of open()
20257 */
20258 @@ -49495,7 +50411,7 @@ index 5f4cdf3..959a013 100644
20259 struct file *file, const struct open_flags *op,
20260 int *opened, struct filename *name)
20261 {
20262 -@@ -2722,16 +2784,44 @@ static int do_last(struct nameidata *nd, struct path *path,
20263 +@@ -2712,16 +2774,44 @@ static int do_last(struct nameidata *nd, struct path *path,
20264 error = complete_walk(nd);
20265 if (error)
20266 return error;
20267 @@ -49540,7 +50456,7 @@ index 5f4cdf3..959a013 100644
20268 audit_inode(name, dir, 0);
20269 goto finish_open;
20270 }
20271 -@@ -2780,7 +2870,7 @@ retry_lookup:
20272 +@@ -2770,7 +2860,7 @@ retry_lookup:
20273 */
20274 }
20275 mutex_lock(&dir->d_inode->i_mutex);
20276 @@ -49549,7 +50465,7 @@ index 5f4cdf3..959a013 100644
20277 mutex_unlock(&dir->d_inode->i_mutex);
20278
20279 if (error <= 0) {
20280 -@@ -2804,11 +2894,28 @@ retry_lookup:
20281 +@@ -2794,11 +2884,28 @@ retry_lookup:
20282 goto finish_open_created;
20283 }
20284
20285 @@ -49579,7 +50495,7 @@ index 5f4cdf3..959a013 100644
20286
20287 /*
20288 * If atomic_open() acquired write access it is dropped now due to
20289 -@@ -2849,6 +2956,11 @@ finish_lookup:
20290 +@@ -2839,6 +2946,11 @@ finish_lookup:
20291 }
20292 }
20293 BUG_ON(inode != path->dentry->d_inode);
20294 @@ -49591,7 +50507,7 @@ index 5f4cdf3..959a013 100644
20295 return 1;
20296 }
20297
20298 -@@ -2858,7 +2970,6 @@ finish_lookup:
20299 +@@ -2848,7 +2960,6 @@ finish_lookup:
20300 save_parent.dentry = nd->path.dentry;
20301 save_parent.mnt = mntget(path->mnt);
20302 nd->path.dentry = path->dentry;
20303 @@ -49599,7 +50515,7 @@ index 5f4cdf3..959a013 100644
20304 }
20305 nd->inode = inode;
20306 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
20307 -@@ -2867,6 +2978,22 @@ finish_lookup:
20308 +@@ -2857,6 +2968,22 @@ finish_lookup:
20309 path_put(&save_parent);
20310 return error;
20311 }
20312 @@ -49622,7 +50538,7 @@ index 5f4cdf3..959a013 100644
20313 error = -EISDIR;
20314 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
20315 goto out;
20316 -@@ -2965,7 +3092,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
20317 +@@ -2955,7 +3082,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
20318 if (unlikely(error))
20319 goto out;
20320
20321 @@ -49631,7 +50547,7 @@ index 5f4cdf3..959a013 100644
20322 while (unlikely(error > 0)) { /* trailing symlink */
20323 struct path link = path;
20324 void *cookie;
20325 -@@ -2983,7 +3110,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
20326 +@@ -2973,7 +3100,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
20327 error = follow_link(&link, nd, &cookie);
20328 if (unlikely(error))
20329 break;
20330 @@ -49640,7 +50556,7 @@ index 5f4cdf3..959a013 100644
20331 put_link(nd, &link, cookie);
20332 }
20333 out:
20334 -@@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
20335 +@@ -3073,8 +3200,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
20336 goto unlock;
20337
20338 error = -EEXIST;
20339 @@ -49654,7 +50570,7 @@ index 5f4cdf3..959a013 100644
20340 /*
20341 * Special case - lookup gave negative, but... we had foo/bar/
20342 * From the vfs_mknod() POV we just have a negative dentry -
20343 -@@ -3125,6 +3256,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
20344 +@@ -3126,6 +3257,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
20345 }
20346 EXPORT_SYMBOL(user_path_create);
20347
20348 @@ -49675,7 +50591,7 @@ index 5f4cdf3..959a013 100644
20349 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
20350 {
20351 int error = may_create(dir, dentry);
20352 -@@ -3186,6 +3331,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
20353 +@@ -3188,6 +3333,17 @@ retry:
20354
20355 if (!IS_POSIXACL(path.dentry->d_inode))
20356 mode &= ~current_umask();
20357 @@ -49693,25 +50609,23 @@ index 5f4cdf3..959a013 100644
20358 error = security_path_mknod(&path, dentry, mode, dev);
20359 if (error)
20360 goto out;
20361 -@@ -3202,6 +3358,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
20362 +@@ -3204,6 +3360,8 @@ retry:
20363 break;
20364 }
20365 out:
20366 + if (!error)
20367 + gr_handle_create(dentry, path.mnt);
20368 done_path_create(&path, dentry);
20369 - return error;
20370 - }
20371 -@@ -3248,9 +3406,18 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
20372 + if (retry_estale(error, lookup_flags)) {
20373 + lookup_flags |= LOOKUP_REVAL;
20374 +@@ -3256,9 +3414,16 @@ retry:
20375
20376 if (!IS_POSIXACL(path.dentry->d_inode))
20377 mode &= ~current_umask();
20378 -+
20379 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
20380 + error = -EACCES;
20381 + goto out;
20382 + }
20383 -+
20384 error = security_path_mkdir(&path, dentry, mode);
20385 if (!error)
20386 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
20387 @@ -49719,18 +50633,18 @@ index 5f4cdf3..959a013 100644
20388 + gr_handle_create(dentry, path.mnt);
20389 +out:
20390 done_path_create(&path, dentry);
20391 - return error;
20392 - }
20393 -@@ -3327,6 +3494,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
20394 + if (retry_estale(error, lookup_flags)) {
20395 + lookup_flags |= LOOKUP_REVAL;
20396 +@@ -3339,6 +3504,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
20397 struct filename *name;
20398 struct dentry *dentry;
20399 struct nameidata nd;
20400 + ino_t saved_ino = 0;
20401 + dev_t saved_dev = 0;
20402 -
20403 - name = user_path_parent(dfd, pathname, &nd);
20404 - if (IS_ERR(name))
20405 -@@ -3358,10 +3527,21 @@ static long do_rmdir(int dfd, const char __user *pathname)
20406 + unsigned int lookup_flags = 0;
20407 + retry:
20408 + name = user_path_parent(dfd, pathname, &nd, lookup_flags);
20409 +@@ -3371,10 +3538,21 @@ retry:
20410 error = -ENOENT;
20411 goto exit3;
20412 }
20413 @@ -49752,16 +50666,16 @@ index 5f4cdf3..959a013 100644
20414 exit3:
20415 dput(dentry);
20416 exit2:
20417 -@@ -3423,6 +3603,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
20418 +@@ -3440,6 +3618,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
20419 struct dentry *dentry;
20420 struct nameidata nd;
20421 struct inode *inode = NULL;
20422 + ino_t saved_ino = 0;
20423 + dev_t saved_dev = 0;
20424 -
20425 - name = user_path_parent(dfd, pathname, &nd);
20426 - if (IS_ERR(name))
20427 -@@ -3448,10 +3630,22 @@ static long do_unlinkat(int dfd, const char __user *pathname)
20428 + unsigned int lookup_flags = 0;
20429 + retry:
20430 + name = user_path_parent(dfd, pathname, &nd, lookup_flags);
20431 +@@ -3466,10 +3646,22 @@ retry:
20432 if (!inode)
20433 goto slashes;
20434 ihold(inode);
20435 @@ -49784,7 +50698,7 @@ index 5f4cdf3..959a013 100644
20436 exit2:
20437 dput(dentry);
20438 }
20439 -@@ -3523,9 +3717,17 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
20440 +@@ -3547,9 +3739,17 @@ retry:
20441 if (IS_ERR(dentry))
20442 goto out_putname;
20443
20444 @@ -49800,9 +50714,9 @@ index 5f4cdf3..959a013 100644
20445 + gr_handle_create(dentry, path.mnt);
20446 +out:
20447 done_path_create(&path, dentry);
20448 - out_putname:
20449 - putname(from);
20450 -@@ -3595,6 +3797,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
20451 + if (retry_estale(error, lookup_flags)) {
20452 + lookup_flags |= LOOKUP_REVAL;
20453 +@@ -3623,6 +3823,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
20454 {
20455 struct dentry *new_dentry;
20456 struct path old_path, new_path;
20457 @@ -49810,16 +50724,16 @@ index 5f4cdf3..959a013 100644
20458 int how = 0;
20459 int error;
20460
20461 -@@ -3618,7 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
20462 +@@ -3646,7 +3847,7 @@ retry:
20463 if (error)
20464 return error;
20465
20466 -- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
20467 -+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
20468 +- new_dentry = user_path_create(newdfd, newname, &new_path,
20469 ++ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
20470 + (how & LOOKUP_REVAL));
20471 error = PTR_ERR(new_dentry);
20472 if (IS_ERR(new_dentry))
20473 - goto out;
20474 -@@ -3629,11 +3832,28 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
20475 +@@ -3658,11 +3859,28 @@ retry:
20476 error = may_linkat(&old_path);
20477 if (unlikely(error))
20478 goto out_dput;
20479 @@ -49846,9 +50760,9 @@ index 5f4cdf3..959a013 100644
20480 out_dput:
20481 + putname(to);
20482 done_path_create(&new_path, new_dentry);
20483 - out:
20484 - path_put(&old_path);
20485 -@@ -3873,12 +4093,21 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
20486 + if (retry_estale(error, how)) {
20487 + how |= LOOKUP_REVAL;
20488 +@@ -3908,12 +4126,21 @@ retry:
20489 if (new_dentry == trap)
20490 goto exit5;
20491
20492 @@ -49870,7 +50784,7 @@ index 5f4cdf3..959a013 100644
20493 exit5:
20494 dput(new_dentry);
20495 exit4:
20496 -@@ -3903,6 +4132,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
20497 +@@ -3945,6 +4172,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
20498
20499 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
20500 {
20501 @@ -49879,7 +50793,7 @@ index 5f4cdf3..959a013 100644
20502 int len;
20503
20504 len = PTR_ERR(link);
20505 -@@ -3912,7 +4143,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
20506 +@@ -3954,7 +4183,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
20507 len = strlen(link);
20508 if (len > (unsigned) buflen)
20509 len = buflen;
20510 @@ -49896,10 +50810,10 @@ index 5f4cdf3..959a013 100644
20511 out:
20512 return len;
20513 diff --git a/fs/namespace.c b/fs/namespace.c
20514 -index 2496062..e26f6d6 100644
20515 +index 55605c5..22e9a03 100644
20516 --- a/fs/namespace.c
20517 +++ b/fs/namespace.c
20518 -@@ -1212,6 +1212,9 @@ static int do_umount(struct mount *mnt, int flags)
20519 +@@ -1215,6 +1215,9 @@ static int do_umount(struct mount *mnt, int flags)
20520 if (!(sb->s_flags & MS_RDONLY))
20521 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
20522 up_write(&sb->s_umount);
20523 @@ -49909,7 +50823,7 @@ index 2496062..e26f6d6 100644
20524 return retval;
20525 }
20526
20527 -@@ -1231,6 +1234,9 @@ static int do_umount(struct mount *mnt, int flags)
20528 +@@ -1234,6 +1237,9 @@ static int do_umount(struct mount *mnt, int flags)
20529 br_write_unlock(&vfsmount_lock);
20530 up_write(&namespace_sem);
20531 release_mounts(&umount_list);
20532 @@ -49919,7 +50833,7 @@ index 2496062..e26f6d6 100644
20533 return retval;
20534 }
20535
20536 -@@ -2244,6 +2250,16 @@ long do_mount(const char *dev_name, const char *dir_name,
20537 +@@ -2282,6 +2288,16 @@ long do_mount(const char *dev_name, const char *dir_name,
20538 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
20539 MS_STRICTATIME);
20540
20541 @@ -49936,7 +50850,7 @@ index 2496062..e26f6d6 100644
20542 if (flags & MS_REMOUNT)
20543 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
20544 data_page);
20545 -@@ -2258,6 +2274,9 @@ long do_mount(const char *dev_name, const char *dir_name,
20546 +@@ -2296,6 +2312,9 @@ long do_mount(const char *dev_name, const char *dir_name,
20547 dev_name, data_page);
20548 dput_out:
20549 path_put(&path);
20550 @@ -49946,7 +50860,7 @@ index 2496062..e26f6d6 100644
20551 return retval;
20552 }
20553
20554 -@@ -2516,6 +2535,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
20555 +@@ -2582,6 +2601,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
20556 if (error)
20557 goto out2;
20558
20559 @@ -49958,11 +50872,20 @@ index 2496062..e26f6d6 100644
20560 get_fs_root(current->fs, &root);
20561 error = lock_mount(&old);
20562 if (error)
20563 +@@ -2785,7 +2809,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
20564 + !nsown_capable(CAP_SYS_ADMIN))
20565 + return -EPERM;
20566 +
20567 +- if (fs->users != 1)
20568 ++ if (atomic_read(&fs->users) != 1)
20569 + return -EINVAL;
20570 +
20571 + get_mnt_ns(mnt_ns);
20572 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
20573 -index 6fa01ae..2790820 100644
20574 +index ebeb94c..ff35337 100644
20575 --- a/fs/nfs/inode.c
20576 +++ b/fs/nfs/inode.c
20577 -@@ -1029,16 +1029,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
20578 +@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
20579 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
20580 }
20581
20582 @@ -49983,10 +50906,10 @@ index 6fa01ae..2790820 100644
20583
20584 void nfs_fattr_init(struct nfs_fattr *fattr)
20585 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
20586 -index f59169e..fd7d359 100644
20587 +index d586117..143d568 100644
20588 --- a/fs/nfsd/vfs.c
20589 +++ b/fs/nfsd/vfs.c
20590 -@@ -941,7 +941,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
20591 +@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
20592 } else {
20593 oldfs = get_fs();
20594 set_fs(KERNEL_DS);
20595 @@ -49995,7 +50918,7 @@ index f59169e..fd7d359 100644
20596 set_fs(oldfs);
20597 }
20598
20599 -@@ -1045,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
20600 +@@ -1025,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
20601
20602 /* Write the data. */
20603 oldfs = get_fs(); set_fs(KERNEL_DS);
20604 @@ -50004,7 +50927,7 @@ index f59169e..fd7d359 100644
20605 set_fs(oldfs);
20606 if (host_err < 0)
20607 goto out_nfserr;
20608 -@@ -1587,7 +1587,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
20609 +@@ -1571,7 +1571,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
20610 */
20611
20612 oldfs = get_fs(); set_fs(KERNEL_DS);
20613 @@ -50014,10 +50937,10 @@ index f59169e..fd7d359 100644
20614
20615 if (host_err < 0)
20616 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
20617 -index 6fcaeb8..9d16d04 100644
20618 +index 9ff4a5e..deb1f0f 100644
20619 --- a/fs/notify/fanotify/fanotify_user.c
20620 +++ b/fs/notify/fanotify/fanotify_user.c
20621 -@@ -250,8 +250,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
20622 +@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
20623
20624 fd = fanotify_event_metadata.fd;
20625 ret = -EFAULT;
20626 @@ -50029,7 +50952,7 @@ index 6fcaeb8..9d16d04 100644
20627
20628 ret = prepare_for_access_response(group, event, fd);
20629 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
20630 -index c887b13..0fdf472 100644
20631 +index 7b51b05..5ea5ef6 100644
20632 --- a/fs/notify/notification.c
20633 +++ b/fs/notify/notification.c
20634 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
20635 @@ -50064,10 +50987,10 @@ index 99e3610..02c1068 100644
20636 "inode 0x%lx or driver bug.", vdir->i_ino);
20637 goto err_out;
20638 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
20639 -index 1ecf464..e1ff8bf 100644
20640 +index 5b2d4f0..c6de396 100644
20641 --- a/fs/ntfs/file.c
20642 +++ b/fs/ntfs/file.c
20643 -@@ -2232,6 +2232,6 @@ const struct inode_operations ntfs_file_inode_ops = {
20644 +@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
20645 #endif /* NTFS_RW */
20646 };
20647
20648 @@ -50207,7 +51130,7 @@ index 0e91ec2..f4b3fc6 100644
20649 /* Copy the blockcheck stats from the superblock probe */
20650 osb->osb_ecc_stats = *stats;
20651 diff --git a/fs/open.c b/fs/open.c
20652 -index 59071f5..c6229a0 100644
20653 +index 9b33c0c..2ffcca2 100644
20654 --- a/fs/open.c
20655 +++ b/fs/open.c
20656 @@ -31,6 +31,8 @@
20657 @@ -50219,18 +51142,25 @@ index 59071f5..c6229a0 100644
20658 #include "internal.h"
20659
20660 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
20661 -@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
20662 +@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
20663 error = locks_verify_truncate(inode, NULL, length);
20664 if (!error)
20665 - error = security_path_truncate(&path);
20666 -+
20667 -+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
20668 + error = security_path_truncate(path);
20669 ++ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
20670 + error = -EACCES;
20671 -+
20672 if (!error)
20673 - error = do_truncate(path.dentry, length, 0, NULL);
20674 + error = do_truncate(path->dentry, length, 0, NULL);
20675
20676 -@@ -362,6 +368,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
20677 +@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
20678 + error = locks_verify_truncate(inode, f.file, length);
20679 + if (!error)
20680 + error = security_path_truncate(&f.file->f_path);
20681 ++ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
20682 ++ error = -EACCES;
20683 + if (!error)
20684 + error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
20685 + sb_end_write(inode->i_sb);
20686 +@@ -373,6 +379,9 @@ retry:
20687 if (__mnt_is_readonly(path.mnt))
20688 res = -EROFS;
20689
20690 @@ -50239,8 +51169,8 @@ index 59071f5..c6229a0 100644
20691 +
20692 out_path_release:
20693 path_put(&path);
20694 - out:
20695 -@@ -388,6 +397,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
20696 + if (retry_estale(res, lookup_flags)) {
20697 +@@ -404,6 +413,8 @@ retry:
20698 if (error)
20699 goto dput_and_out;
20700
20701 @@ -50249,7 +51179,7 @@ index 59071f5..c6229a0 100644
20702 set_fs_pwd(current->fs, &path);
20703
20704 dput_and_out:
20705 -@@ -413,6 +424,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
20706 +@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
20707 goto out_putf;
20708
20709 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
20710 @@ -50263,7 +51193,7 @@ index 59071f5..c6229a0 100644
20711 if (!error)
20712 set_fs_pwd(current->fs, &f.file->f_path);
20713 out_putf:
20714 -@@ -441,7 +459,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
20715 +@@ -462,7 +480,13 @@ retry:
20716 if (error)
20717 goto dput_and_out;
20718
20719 @@ -50277,7 +51207,7 @@ index 59071f5..c6229a0 100644
20720 error = 0;
20721 dput_and_out:
20722 path_put(&path);
20723 -@@ -459,6 +483,16 @@ static int chmod_common(struct path *path, umode_t mode)
20724 +@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
20725 if (error)
20726 return error;
20727 mutex_lock(&inode->i_mutex);
20728 @@ -50294,7 +51224,7 @@ index 59071f5..c6229a0 100644
20729 error = security_path_chmod(path, mode);
20730 if (error)
20731 goto out_unlock;
20732 -@@ -514,6 +548,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
20733 +@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
20734 uid = make_kuid(current_user_ns(), user);
20735 gid = make_kgid(current_user_ns(), group);
20736
20737 @@ -50304,7 +51234,7 @@ index 59071f5..c6229a0 100644
20738 newattrs.ia_valid = ATTR_CTIME;
20739 if (user != (uid_t) -1) {
20740 if (!uid_valid(uid))
20741 -@@ -925,6 +962,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
20742 +@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
20743 } else {
20744 fsnotify_open(f);
20745 fd_install(fd, f);
20746 @@ -50471,7 +51401,7 @@ index 15af622..0e9f4467 100644
20747 help
20748 Various /proc files exist to monitor process memory utilization:
20749 diff --git a/fs/proc/array.c b/fs/proc/array.c
20750 -index bd31e02..15cae71 100644
20751 +index 6a91e6f..e54dbc14 100644
20752 --- a/fs/proc/array.c
20753 +++ b/fs/proc/array.c
20754 @@ -60,6 +60,7 @@
20755 @@ -50482,7 +51412,7 @@ index bd31e02..15cae71 100644
20756 #include <linux/proc_fs.h>
20757 #include <linux/ioport.h>
20758 #include <linux/uaccess.h>
20759 -@@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
20760 +@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
20761 seq_putc(m, '\n');
20762 }
20763
20764 @@ -50504,7 +51434,7 @@ index bd31e02..15cae71 100644
20765 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
20766 struct pid *pid, struct task_struct *task)
20767 {
20768 -@@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
20769 +@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
20770 task_cpus_allowed(m, task);
20771 cpuset_task_status_allowed(m, task);
20772 task_context_switch_counts(m, task);
20773 @@ -50529,7 +51459,7 @@ index bd31e02..15cae71 100644
20774 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20775 struct pid *pid, struct task_struct *task, int whole)
20776 {
20777 -@@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20778 +@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20779 char tcomm[sizeof(task->comm)];
20780 unsigned long flags;
20781
20782 @@ -50543,7 +51473,7 @@ index bd31e02..15cae71 100644
20783 state = *get_task_state(task);
20784 vsize = eip = esp = 0;
20785 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
20786 -@@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20787 +@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20788 gtime = task->gtime;
20789 }
20790
20791 @@ -50563,7 +51493,7 @@ index bd31e02..15cae71 100644
20792 /* scale priority and nice values from timeslices to -20..20 */
20793 /* to make it look like a "normal" Unix priority/nice value */
20794 priority = task_prio(task);
20795 -@@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20796 +@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20797 seq_put_decimal_ull(m, ' ', vsize);
20798 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
20799 seq_put_decimal_ull(m, ' ', rsslim);
20800 @@ -50579,7 +51509,7 @@ index bd31e02..15cae71 100644
20801 seq_put_decimal_ull(m, ' ', esp);
20802 seq_put_decimal_ull(m, ' ', eip);
20803 /* The signal information here is obsolete.
20804 -@@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20805 +@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
20806 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
20807 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
20808
20809 @@ -50592,7 +51522,7 @@ index bd31e02..15cae71 100644
20810 seq_put_decimal_ull(m, ' ', mm->start_data);
20811 seq_put_decimal_ull(m, ' ', mm->end_data);
20812 seq_put_decimal_ull(m, ' ', mm->start_brk);
20813 -@@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
20814 +@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
20815 struct pid *pid, struct task_struct *task)
20816 {
20817 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
20818 @@ -50609,7 +51539,7 @@ index bd31e02..15cae71 100644
20819 if (mm) {
20820 size = task_statm(mm, &shared, &text, &data, &resident);
20821 mmput(mm);
20822 -@@ -580,6 +648,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
20823 +@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
20824 return 0;
20825 }
20826
20827 @@ -50624,7 +51554,7 @@ index bd31e02..15cae71 100644
20828 static struct pid *
20829 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
20830 diff --git a/fs/proc/base.c b/fs/proc/base.c
20831 -index 9e28356..c485b3c 100644
20832 +index 9b43ff77..3d6a99f 100644
20833 --- a/fs/proc/base.c
20834 +++ b/fs/proc/base.c
20835 @@ -111,6 +111,14 @@ struct pid_entry {
20836 @@ -50729,7 +51659,7 @@ index 9e28356..c485b3c 100644
20837 put_task_struct(task);
20838 }
20839 return allowed;
20840 -@@ -562,10 +592,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
20841 +@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
20842 struct task_struct *task,
20843 int hide_pid_min)
20844 {
20845 @@ -50765,7 +51695,7 @@ index 9e28356..c485b3c 100644
20846 return ptrace_may_access(task, PTRACE_MODE_READ);
20847 }
20848
20849 -@@ -583,7 +638,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
20850 +@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
20851 put_task_struct(task);
20852
20853 if (!has_perms) {
20854 @@ -50777,7 +51707,7 @@ index 9e28356..c485b3c 100644
20855 /*
20856 * Let's make getdents(), stat(), and open()
20857 * consistent with each other. If a process
20858 -@@ -681,6 +740,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
20859 +@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
20860 if (!task)
20861 return -ESRCH;
20862
20863 @@ -50789,7 +51719,7 @@ index 9e28356..c485b3c 100644
20864 mm = mm_access(task, mode);
20865 put_task_struct(task);
20866
20867 -@@ -696,6 +760,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
20868 +@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
20869
20870 file->private_data = mm;
20871
20872 @@ -50800,7 +51730,7 @@ index 9e28356..c485b3c 100644
20873 return 0;
20874 }
20875
20876 -@@ -717,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
20877 +@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
20878 ssize_t copied;
20879 char *page;
20880
20881 @@ -50818,7 +51748,7 @@ index 9e28356..c485b3c 100644
20882 if (!mm)
20883 return 0;
20884
20885 -@@ -821,6 +900,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
20886 +@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
20887 if (!mm)
20888 return 0;
20889
20890 @@ -50832,7 +51762,7 @@ index 9e28356..c485b3c 100644
20891 page = (char *)__get_free_page(GFP_TEMPORARY);
20892 if (!page)
20893 return -ENOMEM;
20894 -@@ -1436,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
20895 +@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
20896 int error = -EACCES;
20897
20898 /* Are we allowed to snoop on the tasks file descriptors? */
20899 @@ -50841,7 +51771,7 @@ index 9e28356..c485b3c 100644
20900 goto out;
20901
20902 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
20903 -@@ -1480,8 +1566,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
20904 +@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
20905 struct path path;
20906
20907 /* Are we allowed to snoop on the tasks file descriptors? */
20908 @@ -50862,7 +51792,7 @@ index 9e28356..c485b3c 100644
20909
20910 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
20911 if (error)
20912 -@@ -1531,7 +1627,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
20913 +@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
20914 rcu_read_lock();
20915 cred = __task_cred(task);
20916 inode->i_uid = cred->euid;
20917 @@ -50874,7 +51804,7 @@ index 9e28356..c485b3c 100644
20918 rcu_read_unlock();
20919 }
20920 security_task_to_inode(task, inode);
20921 -@@ -1567,10 +1667,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
20922 +@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
20923 return -ENOENT;
20924 }
20925 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
20926 @@ -50894,7 +51824,7 @@ index 9e28356..c485b3c 100644
20927 }
20928 }
20929 rcu_read_unlock();
20930 -@@ -1608,11 +1717,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
20931 +@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
20932
20933 if (task) {
20934 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
20935 @@ -50915,7 +51845,7 @@ index 9e28356..c485b3c 100644
20936 rcu_read_unlock();
20937 } else {
20938 inode->i_uid = GLOBAL_ROOT_UID;
20939 -@@ -2065,6 +2183,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
20940 +@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
20941 if (!task)
20942 goto out_no_task;
20943
20944 @@ -50925,7 +51855,7 @@ index 9e28356..c485b3c 100644
20945 /*
20946 * Yes, it does not scale. And it should not. Don't add
20947 * new entries into /proc/<tgid>/ without very good reasons.
20948 -@@ -2109,6 +2230,9 @@ static int proc_pident_readdir(struct file *filp,
20949 +@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
20950 if (!task)
20951 goto out_no_task;
20952
20953 @@ -50935,16 +51865,7 @@ index 9e28356..c485b3c 100644
20954 ret = 0;
20955 i = filp->f_pos;
20956 switch (i) {
20957 -@@ -2380,7 +2504,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
20958 - static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
20959 - void *cookie)
20960 - {
20961 -- char *s = nd_get_link(nd);
20962 -+ const char *s = nd_get_link(nd);
20963 - if (!IS_ERR(s))
20964 - kfree(s);
20965 - }
20966 -@@ -2662,7 +2786,7 @@ static const struct pid_entry tgid_base_stuff[] = {
20967 +@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
20968 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
20969 #endif
20970 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
20971 @@ -50953,7 +51874,7 @@ index 9e28356..c485b3c 100644
20972 INF("syscall", S_IRUGO, proc_pid_syscall),
20973 #endif
20974 INF("cmdline", S_IRUGO, proc_pid_cmdline),
20975 -@@ -2687,10 +2811,10 @@ static const struct pid_entry tgid_base_stuff[] = {
20976 +@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
20977 #ifdef CONFIG_SECURITY
20978 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
20979 #endif
20980 @@ -50966,7 +51887,7 @@ index 9e28356..c485b3c 100644
20981 ONE("stack", S_IRUGO, proc_pid_stack),
20982 #endif
20983 #ifdef CONFIG_SCHEDSTATS
20984 -@@ -2724,6 +2848,9 @@ static const struct pid_entry tgid_base_stuff[] = {
20985 +@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
20986 #ifdef CONFIG_HARDWALL
20987 INF("hardwall", S_IRUGO, proc_pid_hardwall),
20988 #endif
20989 @@ -50976,7 +51897,7 @@ index 9e28356..c485b3c 100644
20990 #ifdef CONFIG_USER_NS
20991 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
20992 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
20993 -@@ -2856,7 +2983,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
20994 +@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
20995 if (!inode)
20996 goto out;
20997
20998 @@ -50991,7 +51912,7 @@ index 9e28356..c485b3c 100644
20999 inode->i_op = &proc_tgid_base_inode_operations;
21000 inode->i_fop = &proc_tgid_base_operations;
21001 inode->i_flags|=S_IMMUTABLE;
21002 -@@ -2898,7 +3032,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
21003 +@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
21004 if (!task)
21005 goto out;
21006
21007 @@ -51003,7 +51924,7 @@ index 9e28356..c485b3c 100644
21008 put_task_struct(task);
21009 out:
21010 return result;
21011 -@@ -2961,6 +3099,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
21012 +@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
21013 static int fake_filldir(void *buf, const char *name, int namelen,
21014 loff_t offset, u64 ino, unsigned d_type)
21015 {
21016 @@ -51012,7 +51933,7 @@ index 9e28356..c485b3c 100644
21017 return 0;
21018 }
21019
21020 -@@ -3027,7 +3167,7 @@ static const struct pid_entry tid_base_stuff[] = {
21021 +@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
21022 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
21023 #endif
21024 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
21025 @@ -51021,7 +51942,7 @@ index 9e28356..c485b3c 100644
21026 INF("syscall", S_IRUGO, proc_pid_syscall),
21027 #endif
21028 INF("cmdline", S_IRUGO, proc_pid_cmdline),
21029 -@@ -3054,10 +3194,10 @@ static const struct pid_entry tid_base_stuff[] = {
21030 +@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
21031 #ifdef CONFIG_SECURITY
21032 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
21033 #endif
21034 @@ -51067,7 +51988,7 @@ index b143471..bb105e5 100644
21035 }
21036 module_init(proc_devices_init);
21037 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
21038 -index f28a875..c467953 100644
21039 +index d7a4a28..0201742 100644
21040 --- a/fs/proc/fd.c
21041 +++ b/fs/proc/fd.c
21042 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
21043 @@ -51080,7 +52001,7 @@ index f28a875..c467953 100644
21044 put_task_struct(task);
21045
21046 if (files) {
21047 -@@ -300,11 +301,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
21048 +@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
21049 */
21050 int proc_fd_permission(struct inode *inode, int mask)
21051 {
21052 @@ -51105,7 +52026,7 @@ index f28a875..c467953 100644
21053 }
21054
21055 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
21056 -index 3b22bbd..895b58c 100644
21057 +index 439ae688..c21ac36 100644
21058 --- a/fs/proc/inode.c
21059 +++ b/fs/proc/inode.c
21060 @@ -21,11 +21,17 @@
21061 @@ -51126,10 +52047,10 @@ index 3b22bbd..895b58c 100644
21062 static void proc_evict_inode(struct inode *inode)
21063 {
21064 struct proc_dir_entry *de;
21065 -@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
21066 - ns_ops = PROC_I(inode)->ns_ops;
21067 - if (ns_ops && ns_ops->put)
21068 - ns_ops->put(PROC_I(inode)->ns);
21069 +@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
21070 + ns = PROC_I(inode)->ns;
21071 + if (ns_ops && ns)
21072 + ns_ops->put(ns);
21073 +
21074 +#ifdef CONFIG_PROC_SYSCTL
21075 + if (inode->i_op == &proc_sys_inode_operations ||
21076 @@ -51140,7 +52061,7 @@ index 3b22bbd..895b58c 100644
21077 }
21078
21079 static struct kmem_cache * proc_inode_cachep;
21080 -@@ -455,7 +468,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
21081 +@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
21082 if (de->mode) {
21083 inode->i_mode = de->mode;
21084 inode->i_uid = de->uid;
21085 @@ -51153,10 +52074,10 @@ index 3b22bbd..895b58c 100644
21086 if (de->size)
21087 inode->i_size = de->size;
21088 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
21089 -index 43973b0..a20e704 100644
21090 +index 252544c..04395b9 100644
21091 --- a/fs/proc/internal.h
21092 +++ b/fs/proc/internal.h
21093 -@@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
21094 +@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
21095 struct pid *pid, struct task_struct *task);
21096 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
21097 struct pid *pid, struct task_struct *task);
21098 @@ -51167,7 +52088,7 @@ index 43973b0..a20e704 100644
21099
21100 extern const struct file_operations proc_tid_children_operations;
21101 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
21102 -index 86c67ee..cdca321 100644
21103 +index e96d4f1..8b116ed 100644
21104 --- a/fs/proc/kcore.c
21105 +++ b/fs/proc/kcore.c
21106 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
21107 @@ -51285,7 +52206,7 @@ index fe72cd0..cb9b67d 100644
21108 rcu_read_lock();
21109 task = pid_task(proc_pid(dir), PIDTYPE_PID);
21110 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
21111 -index a781bdf..6665284 100644
21112 +index 1827d88..9a60b01 100644
21113 --- a/fs/proc/proc_sysctl.c
21114 +++ b/fs/proc/proc_sysctl.c
21115 @@ -12,11 +12,15 @@
21116 @@ -51306,7 +52227,7 @@ index a781bdf..6665284 100644
21117
21118 void proc_sys_poll_notify(struct ctl_table_poll *poll)
21119 {
21120 -@@ -465,6 +469,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
21121 +@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
21122
21123 err = NULL;
21124 d_set_d_op(dentry, &proc_sys_dentry_operations);
21125 @@ -51316,7 +52237,7 @@ index a781bdf..6665284 100644
21126 d_add(dentry, inode);
21127
21128 out:
21129 -@@ -480,18 +487,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
21130 +@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
21131 struct inode *inode = filp->f_path.dentry->d_inode;
21132 struct ctl_table_header *head = grab_header(inode);
21133 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
21134 @@ -51324,21 +52245,16 @@ index a781bdf..6665284 100644
21135 ssize_t error;
21136 size_t res;
21137
21138 - if (IS_ERR(head))
21139 - return PTR_ERR(head);
21140 -
21141 -+
21142 - /*
21143 - * At this point we know that the sysctl was not unregistered
21144 +@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
21145 * and won't be until we finish.
21146 */
21147 error = -EPERM;
21148 -- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
21149 -+ if (sysctl_perm(head->root, table, op))
21150 +- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
21151 ++ if (sysctl_perm(head, table, op))
21152 goto out;
21153
21154 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
21155 -@@ -499,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
21156 +@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
21157 if (!table->proc_handler)
21158 goto out;
21159
21160 @@ -51361,7 +52277,7 @@ index a781bdf..6665284 100644
21161 /* careful: calling conventions are nasty here */
21162 res = count;
21163 error = table->proc_handler(table, write, buf, &res, ppos);
21164 -@@ -596,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
21165 +@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
21166 return -ENOMEM;
21167 } else {
21168 d_set_d_op(child, &proc_sys_dentry_operations);
21169 @@ -51371,7 +52287,7 @@ index a781bdf..6665284 100644
21170 d_add(child, inode);
21171 }
21172 } else {
21173 -@@ -639,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
21174 +@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
21175 if ((*pos)++ < file->f_pos)
21176 return 0;
21177
21178 @@ -51381,7 +52297,7 @@ index a781bdf..6665284 100644
21179 if (unlikely(S_ISLNK(table->mode)))
21180 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
21181 else
21182 -@@ -756,6 +787,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
21183 +@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
21184 if (IS_ERR(head))
21185 return PTR_ERR(head);
21186
21187 @@ -51391,7 +52307,7 @@ index a781bdf..6665284 100644
21188 generic_fillattr(inode, stat);
21189 if (table)
21190 stat->mode = (stat->mode & S_IFMT) | table->mode;
21191 -@@ -778,13 +812,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
21192 +@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
21193 .llseek = generic_file_llseek,
21194 };
21195
21196 @@ -51408,10 +52324,10 @@ index a781bdf..6665284 100644
21197 .permission = proc_sys_permission,
21198 .setattr = proc_sys_setattr,
21199 diff --git a/fs/proc/root.c b/fs/proc/root.c
21200 -index 9889a92..2613b48 100644
21201 +index c6e9fac..a740964 100644
21202 --- a/fs/proc/root.c
21203 +++ b/fs/proc/root.c
21204 -@@ -187,7 +187,15 @@ void __init proc_root_init(void)
21205 +@@ -176,7 +176,15 @@ void __init proc_root_init(void)
21206 #ifdef CONFIG_PROC_DEVICETREE
21207 proc_device_tree_init();
21208 #endif
21209 @@ -51427,8 +52343,21 @@ index 9889a92..2613b48 100644
21210 proc_sys_init();
21211 }
21212
21213 +diff --git a/fs/proc/self.c b/fs/proc/self.c
21214 +index aa5cc3b..c91a5d0 100644
21215 +--- a/fs/proc/self.c
21216 ++++ b/fs/proc/self.c
21217 +@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
21218 + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
21219 + void *cookie)
21220 + {
21221 +- char *s = nd_get_link(nd);
21222 ++ const char *s = nd_get_link(nd);
21223 + if (!IS_ERR(s))
21224 + kfree(s);
21225 + }
21226 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
21227 -index 90c63f9..e662cfc 100644
21228 +index ca5ce7f..02c1cf0 100644
21229 --- a/fs/proc/task_mmu.c
21230 +++ b/fs/proc/task_mmu.c
21231 @@ -11,12 +11,19 @@
21232 @@ -51554,7 +52483,7 @@ index 90c63f9..e662cfc 100644
21233 show_map_vma(m, vma, is_pid);
21234
21235 if (m->count < m->size) /* vma is copied successfully */
21236 -@@ -538,12 +574,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
21237 +@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
21238 .private = &mss,
21239 };
21240
21241 @@ -51583,7 +52512,7 @@ index 90c63f9..e662cfc 100644
21242 show_map_vma(m, vma, is_pid);
21243
21244 seq_printf(m,
21245 -@@ -561,7 +608,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
21246 +@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
21247 "KernelPageSize: %8lu kB\n"
21248 "MMUPageSize: %8lu kB\n"
21249 "Locked: %8lu kB\n",
21250 @@ -51595,7 +52524,7 @@ index 90c63f9..e662cfc 100644
21251 mss.resident >> 10,
21252 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
21253 mss.shared_clean >> 10,
21254 -@@ -1211,6 +1262,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
21255 +@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
21256 int n;
21257 char buffer[50];
21258
21259 @@ -51609,8 +52538,8 @@ index 90c63f9..e662cfc 100644
21260 if (!mm)
21261 return 0;
21262
21263 -@@ -1228,11 +1286,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
21264 - mpol_to_str(buffer, sizeof(buffer), pol, 0);
21265 +@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
21266 + mpol_to_str(buffer, sizeof(buffer), pol);
21267 mpol_cond_put(pol);
21268
21269 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
21270 @@ -51648,21 +52577,6 @@ index 1ccfa53..0848f95 100644
21271 } else if (mm) {
21272 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
21273
21274 -diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
21275 -index 2d57e1a..43b1280 100644
21276 ---- a/fs/pstore/ftrace.c
21277 -+++ b/fs/pstore/ftrace.c
21278 -@@ -28,7 +28,9 @@
21279 - #include "internal.h"
21280 -
21281 - static void notrace pstore_ftrace_call(unsigned long ip,
21282 -- unsigned long parent_ip)
21283 -+ unsigned long parent_ip,
21284 -+ struct ftrace_ops *op,
21285 -+ struct pt_regs *regs)
21286 - {
21287 - unsigned long flags;
21288 - struct pstore_ftrace_record rec = {};
21289 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
21290 index 16e8abb..2dcf914 100644
21291 --- a/fs/quota/netlink.c
21292 @@ -51685,19 +52599,6 @@ index 16e8abb..2dcf914 100644
21293 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
21294 if (!msg_head) {
21295 printk(KERN_ERR
21296 -diff --git a/fs/read_write.c b/fs/read_write.c
21297 -index d065348..8e2b43d 100644
21298 ---- a/fs/read_write.c
21299 -+++ b/fs/read_write.c
21300 -@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
21301 - if (retval > 0) {
21302 - add_rchar(current, retval);
21303 - add_wchar(current, retval);
21304 -+ fsnotify_access(in.file);
21305 -+ fsnotify_modify(out.file);
21306 - }
21307 -
21308 - inc_syscr(current);
21309 diff --git a/fs/readdir.c b/fs/readdir.c
21310 index 5e69ef5..e5d9099 100644
21311 --- a/fs/readdir.c
21312 @@ -51827,7 +52728,7 @@ index e60e870..f40ac16 100644
21313 SF(s_do_balance), SF(s_unneeded_left_neighbor),
21314 SF(s_good_search_by_key_reada), SF(s_bmaps),
21315 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
21316 -index 33215f5..c5d427a 100644
21317 +index 157e474..65a6114 100644
21318 --- a/fs/reiserfs/reiserfs.h
21319 +++ b/fs/reiserfs/reiserfs.h
21320 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
21321 @@ -51869,7 +52770,7 @@ index 2ef72d9..f213b17 100644
21322 return -EINVAL;
21323
21324 diff --git a/fs/seq_file.c b/fs/seq_file.c
21325 -index 99dffab..e4fcb71 100644
21326 +index f2bc3df..239d4f6 100644
21327 --- a/fs/seq_file.c
21328 +++ b/fs/seq_file.c
21329 @@ -10,6 +10,7 @@
21330 @@ -51936,7 +52837,7 @@ index 99dffab..e4fcb71 100644
21331
21332 if (op) {
21333 diff --git a/fs/splice.c b/fs/splice.c
21334 -index 48c7bd1..d0740e4 100644
21335 +index 6909d89..5b2e8f9 100644
21336 --- a/fs/splice.c
21337 +++ b/fs/splice.c
21338 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
21339 @@ -52000,7 +52901,7 @@ index 48c7bd1..d0740e4 100644
21340 return 0;
21341
21342 if (sd->flags & SPLICE_F_NONBLOCK)
21343 -@@ -1192,7 +1192,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
21344 +@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
21345 * out of the pipe right after the splice_to_pipe(). So set
21346 * PIPE_READERS appropriately.
21347 */
21348 @@ -52009,7 +52910,7 @@ index 48c7bd1..d0740e4 100644
21349
21350 current->splice_pipe = pipe;
21351 }
21352 -@@ -1741,9 +1741,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21353 +@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21354 ret = -ERESTARTSYS;
21355 break;
21356 }
21357 @@ -52021,7 +52922,7 @@ index 48c7bd1..d0740e4 100644
21358 if (flags & SPLICE_F_NONBLOCK) {
21359 ret = -EAGAIN;
21360 break;
21361 -@@ -1775,7 +1775,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21362 +@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21363 pipe_lock(pipe);
21364
21365 while (pipe->nrbufs >= pipe->buffers) {
21366 @@ -52030,7 +52931,7 @@ index 48c7bd1..d0740e4 100644
21367 send_sig(SIGPIPE, current, 0);
21368 ret = -EPIPE;
21369 break;
21370 -@@ -1788,9 +1788,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21371 +@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
21372 ret = -ERESTARTSYS;
21373 break;
21374 }
21375 @@ -52042,7 +52943,7 @@ index 48c7bd1..d0740e4 100644
21376 }
21377
21378 pipe_unlock(pipe);
21379 -@@ -1826,14 +1826,14 @@ retry:
21380 +@@ -1823,14 +1823,14 @@ retry:
21381 pipe_double_lock(ipipe, opipe);
21382
21383 do {
21384 @@ -52059,7 +52960,7 @@ index 48c7bd1..d0740e4 100644
21385 break;
21386
21387 /*
21388 -@@ -1930,7 +1930,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
21389 +@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
21390 pipe_double_lock(ipipe, opipe);
21391
21392 do {
21393 @@ -52068,7 +52969,7 @@ index 48c7bd1..d0740e4 100644
21394 send_sig(SIGPIPE, current, 0);
21395 if (!ret)
21396 ret = -EPIPE;
21397 -@@ -1975,7 +1975,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
21398 +@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
21399 * return EAGAIN if we have the potential of some data in the
21400 * future, otherwise just return 0
21401 */
21402 @@ -52078,7 +52979,7 @@ index 48c7bd1..d0740e4 100644
21403
21404 pipe_unlock(ipipe);
21405 diff --git a/fs/stat.c b/fs/stat.c
21406 -index eae4946..6198f55 100644
21407 +index 14f4545..9b7f55b 100644
21408 --- a/fs/stat.c
21409 +++ b/fs/stat.c
21410 @@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
21411 @@ -52138,7 +53039,7 @@ index 2fbdff6..5530a61 100644
21412 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
21413 if (!sd)
21414 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
21415 -index 00012e3..8392349 100644
21416 +index 602f56d..6853db8 100644
21417 --- a/fs/sysfs/file.c
21418 +++ b/fs/sysfs/file.c
21419 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
21420 @@ -52213,7 +53114,7 @@ index c175b4d..8f36a16 100644
21421 int i;
21422 for (i = 0; i < sizeof(struct tag); ++i)
21423 diff --git a/fs/utimes.c b/fs/utimes.c
21424 -index bb0696a..552054b 100644
21425 +index f4fb7ec..3fe03c0 100644
21426 --- a/fs/utimes.c
21427 +++ b/fs/utimes.c
21428 @@ -1,6 +1,7 @@
21429 @@ -52238,7 +53139,7 @@ index bb0696a..552054b 100644
21430 error = notify_change(path->dentry, &newattrs);
21431 mutex_unlock(&inode->i_mutex);
21432 diff --git a/fs/xattr.c b/fs/xattr.c
21433 -index e21c119..21dfc7c 100644
21434 +index 3377dff..4feded6 100644
21435 --- a/fs/xattr.c
21436 +++ b/fs/xattr.c
21437 @@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
21438 @@ -52264,7 +53165,7 @@ index e21c119..21dfc7c 100644
21439 out:
21440 if (vvalue)
21441 vfree(vvalue);
21442 -@@ -376,7 +381,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
21443 +@@ -377,7 +382,7 @@ retry:
21444 return error;
21445 error = mnt_want_write(path.mnt);
21446 if (!error) {
21447 @@ -52273,7 +53174,7 @@ index e21c119..21dfc7c 100644
21448 mnt_drop_write(path.mnt);
21449 }
21450 path_put(&path);
21451 -@@ -395,7 +400,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
21452 +@@ -401,7 +406,7 @@ retry:
21453 return error;
21454 error = mnt_want_write(path.mnt);
21455 if (!error) {
21456 @@ -52282,7 +53183,7 @@ index e21c119..21dfc7c 100644
21457 mnt_drop_write(path.mnt);
21458 }
21459 path_put(&path);
21460 -@@ -406,16 +411,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
21461 +@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
21462 const void __user *,value, size_t, size, int, flags)
21463 {
21464 struct fd f = fdget(fd);
21465 @@ -52317,7 +53218,7 @@ index 9fbea87..6b19972 100644
21466 struct posix_acl *acl;
21467 struct posix_acl_entry *acl_e;
21468 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
21469 -index 83d0cf3..2ef526b 100644
21470 +index cdb2d33..704ce7f 100644
21471 --- a/fs/xfs/xfs_bmap.c
21472 +++ b/fs/xfs/xfs_bmap.c
21473 @@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
21474 @@ -52351,10 +53252,10 @@ index 1b9fc3e..e1bdde0 100644
21475 *offset = off & 0x7fffffff;
21476 return 0;
21477 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
21478 -index c1df3c6..f987db6 100644
21479 +index c1c3ef8..0952438 100644
21480 --- a/fs/xfs/xfs_ioctl.c
21481 +++ b/fs/xfs/xfs_ioctl.c
21482 -@@ -126,7 +126,7 @@ xfs_find_handle(
21483 +@@ -127,7 +127,7 @@ xfs_find_handle(
21484 }
21485
21486 error = -EFAULT;
21487 @@ -52364,10 +53265,10 @@ index c1df3c6..f987db6 100644
21488 goto out_put;
21489
21490 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
21491 -index 4e00cf0..3374374 100644
21492 +index d82efaa..0904a8e 100644
21493 --- a/fs/xfs/xfs_iops.c
21494 +++ b/fs/xfs/xfs_iops.c
21495 -@@ -394,7 +394,7 @@ xfs_vn_put_link(
21496 +@@ -395,7 +395,7 @@ xfs_vn_put_link(
21497 struct nameidata *nd,
21498 void *p)
21499 {
21500 @@ -53449,10 +54350,10 @@ index 0000000..1b9afa9
21501 +endif
21502 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
21503 new file mode 100644
21504 -index 0000000..960766a
21505 +index 0000000..69e1320
21506 --- /dev/null
21507 +++ b/grsecurity/gracl.c
21508 -@@ -0,0 +1,4003 @@
21509 +@@ -0,0 +1,4019 @@
21510 +#include <linux/kernel.h>
21511 +#include <linux/module.h>
21512 +#include <linux/sched.h>
21513 @@ -53479,6 +54380,7 @@ index 0000000..960766a
21514 +#include <linux/fdtable.h>
21515 +#include <linux/percpu.h>
21516 +#include <linux/lglock.h>
21517 ++#include <linux/hugetlb.h>
21518 +#include "../fs/mount.h"
21519 +
21520 +#include <asm/uaccess.h>
21521 @@ -53533,8 +54435,9 @@ index 0000000..960766a
21522 +
21523 +extern struct vfsmount *pipe_mnt;
21524 +extern struct vfsmount *shm_mnt;
21525 ++
21526 +#ifdef CONFIG_HUGETLBFS
21527 -+extern struct vfsmount *hugetlbfs_vfsmount;
21528 ++extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
21529 +#endif
21530 +
21531 +static struct acl_object_label *fakefs_obj_rw;
21532 @@ -55309,6 +56212,20 @@ index 0000000..960766a
21533 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
21534 +}
21535 +
21536 ++#ifdef CONFIG_HUGETLBFS
21537 ++static inline bool
21538 ++is_hugetlbfs_mnt(const struct vfsmount *mnt)
21539 ++{
21540 ++ int i;
21541 ++ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
21542 ++ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
21543 ++ return true;
21544 ++ }
21545 ++
21546 ++ return false;
21547 ++}
21548 ++#endif
21549 ++
21550 +static struct acl_object_label *
21551 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
21552 + const struct acl_subject_label *subj, char *path, const int checkglob)
21553 @@ -55327,7 +56244,7 @@ index 0000000..960766a
21554 + mnt == sock_mnt ||
21555 +#endif
21556 +#ifdef CONFIG_HUGETLBFS
21557 -+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
21558 ++ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
21559 +#endif
21560 + /* ignore Eric Biederman */
21561 + IS_PRIVATE(l_dentry->d_inode))) {
21562 @@ -62785,12 +63702,12 @@ index 810431d..0ec4804f 100644
21563 * (puds are folded into pgds so this doesn't get actually called,
21564 * but the define is needed for a generic inline function.)
21565 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
21566 -index b36ce40..019426d 100644
21567 +index 5cf680a..4b74d62 100644
21568 --- a/include/asm-generic/pgtable.h
21569 +++ b/include/asm-generic/pgtable.h
21570 -@@ -554,6 +554,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
21571 - #endif
21572 +@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
21573 }
21574 + #endif /* CONFIG_NUMA_BALANCING */
21575
21576 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
21577 +static inline unsigned long pax_open_kernel(void) { return 0; }
21578 @@ -62852,7 +63769,7 @@ index 418d270..bfd2794 100644
21579 struct crypto_instance {
21580 struct crypto_alg alg;
21581 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
21582 -index 3fd8280..2b3c415 100644
21583 +index fad21c9..3fff955 100644
21584 --- a/include/drm/drmP.h
21585 +++ b/include/drm/drmP.h
21586 @@ -72,6 +72,7 @@
21587 @@ -62882,7 +63799,7 @@ index 3fd8280..2b3c415 100644
21588
21589 struct list_head filelist;
21590 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
21591 -index e01cc80..6fb6f25 100644
21592 +index f43d556..94d9343 100644
21593 --- a/include/drm/drm_crtc_helper.h
21594 +++ b/include/drm/drm_crtc_helper.h
21595 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
21596 @@ -62895,7 +63812,7 @@ index e01cc80..6fb6f25 100644
21597 /**
21598 * drm_connector_helper_funcs - helper operations for connectors
21599 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
21600 -index d6d1da4..fdd1ac5 100644
21601 +index 72dcbe8..8db58d7 100644
21602 --- a/include/drm/ttm/ttm_memory.h
21603 +++ b/include/drm/ttm/ttm_memory.h
21604 @@ -48,7 +48,7 @@
21605 @@ -62908,7 +63825,7 @@ index d6d1da4..fdd1ac5 100644
21606 /**
21607 * struct ttm_mem_global - Global memory accounting structure.
21608 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
21609 -index 22ef21c..75904ba 100644
21610 +index c1da539..4db35ec 100644
21611 --- a/include/linux/atmdev.h
21612 +++ b/include/linux/atmdev.h
21613 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
21614 @@ -62921,11 +63838,11 @@ index 22ef21c..75904ba 100644
21615 #undef __HANDLE_ITEM
21616 };
21617 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
21618 -index de0628e..38f42eb 100644
21619 +index 0530b98..b127a9e 100644
21620 --- a/include/linux/binfmts.h
21621 +++ b/include/linux/binfmts.h
21622 -@@ -75,6 +75,7 @@ struct linux_binfmt {
21623 - int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
21624 +@@ -73,6 +73,7 @@ struct linux_binfmt {
21625 + int (*load_binary)(struct linux_binprm *);
21626 int (*load_shlib)(struct file *);
21627 int (*core_dump)(struct coredump_params *cprm);
21628 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
21629 @@ -62933,10 +63850,10 @@ index de0628e..38f42eb 100644
21630 };
21631
21632 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
21633 -index 1756001..ab117ec 100644
21634 +index f94bc83..62b9cfe 100644
21635 --- a/include/linux/blkdev.h
21636 +++ b/include/linux/blkdev.h
21637 -@@ -1478,7 +1478,7 @@ struct block_device_operations {
21638 +@@ -1498,7 +1498,7 @@ struct block_device_operations {
21639 /* this callback is with swap_lock and sometimes page table lock held */
21640 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
21641 struct module *owner;
21642 @@ -63017,11 +63934,11 @@ index 42e55de..1cd0e66 100644
21643 extern struct cleancache_ops
21644 cleancache_register_ops(struct cleancache_ops *ops);
21645 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
21646 -index 412bc6c..c31666e 100644
21647 +index 662fd1b..e801992 100644
21648 --- a/include/linux/compiler-gcc4.h
21649 +++ b/include/linux/compiler-gcc4.h
21650 -@@ -32,6 +32,21 @@
21651 - #define __linktime_error(message) __attribute__((__error__(message)))
21652 +@@ -34,6 +34,21 @@
21653 + #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
21654
21655 #if __GNUC_MINOR__ >= 5
21656 +
21657 @@ -63042,7 +63959,7 @@ index 412bc6c..c31666e 100644
21658 /*
21659 * Mark a position in code as unreachable. This can be used to
21660 * suppress control flow warnings after asm blocks that transfer
21661 -@@ -47,6 +62,11 @@
21662 +@@ -49,6 +64,11 @@
21663 #define __noclone __attribute__((__noclone__))
21664
21665 #endif
21666 @@ -63055,10 +63972,10 @@ index 412bc6c..c31666e 100644
21667
21668 #if __GNUC_MINOR__ >= 6
21669 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
21670 -index f430e41..38be90f 100644
21671 +index dd852b7..72924c0 100644
21672 --- a/include/linux/compiler.h
21673 +++ b/include/linux/compiler.h
21674 -@@ -5,31 +5,62 @@
21675 +@@ -5,11 +5,14 @@
21676
21677 #ifdef __CHECKER__
21678 # define __user __attribute__((noderef, address_space(1)))
21679 @@ -63070,9 +63987,10 @@ index f430e41..38be90f 100644
21680 # define __nocast __attribute__((nocast))
21681 # define __iomem __attribute__((noderef, address_space(2)))
21682 +# define __force_iomem __force __iomem
21683 + # define __must_hold(x) __attribute__((context(x,1,1)))
21684 # define __acquires(x) __attribute__((context(x,0,1)))
21685 # define __releases(x) __attribute__((context(x,1,0)))
21686 - # define __acquire(x) __context__(x,1)
21687 +@@ -17,20 +20,48 @@
21688 # define __release(x) __context__(x,-1)
21689 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
21690 # define __percpu __attribute__((noderef, address_space(3)))
21691 @@ -63121,7 +64039,7 @@ index f430e41..38be90f 100644
21692 # define __chk_user_ptr(x) (void)0
21693 # define __chk_io_ptr(x) (void)0
21694 # define __builtin_warning(x, y...) (1)
21695 -@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
21696 +@@ -41,7 +72,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
21697 # define __release(x) (void)0
21698 # define __cond_lock(x,c) (c)
21699 # define __percpu
21700 @@ -63130,8 +64048,8 @@ index f430e41..38be90f 100644
21701 +# define __force_rcu
21702 #endif
21703
21704 - #ifdef __KERNEL__
21705 -@@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21706 + /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
21707 +@@ -275,6 +308,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21708 # define __attribute_const__ /* unimplemented */
21709 #endif
21710
21711 @@ -63158,7 +64076,7 @@ index f430e41..38be90f 100644
21712 /*
21713 * Tell gcc if a function is cold. The compiler will assume any path
21714 * directly leading to the call is unlikely.
21715 -@@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21716 +@@ -284,6 +337,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21717 #define __cold
21718 #endif
21719
21720 @@ -63181,7 +64099,7 @@ index f430e41..38be90f 100644
21721 /* Simple shorthand for a section definition */
21722 #ifndef __section
21723 # define __section(S) __attribute__ ((__section__(#S)))
21724 -@@ -312,6 +381,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21725 +@@ -323,6 +392,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
21726 * use is to mediate communication between process-level code and irq/NMI
21727 * handlers, all running on the same CPU.
21728 */
21729 @@ -63190,11 +64108,24 @@ index f430e41..38be90f 100644
21730 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
21731
21732 #endif /* __LINUX_COMPILER_H */
21733 +diff --git a/include/linux/cpu.h b/include/linux/cpu.h
21734 +index ce7a074..01ab8ac 100644
21735 +--- a/include/linux/cpu.h
21736 ++++ b/include/linux/cpu.h
21737 +@@ -115,7 +115,7 @@ enum {
21738 + /* Need to know about CPUs going up/down? */
21739 + #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
21740 + #define cpu_notifier(fn, pri) { \
21741 +- static struct notifier_block fn##_nb __cpuinitdata = \
21742 ++ static struct notifier_block fn##_nb = \
21743 + { .notifier_call = fn, .priority = pri }; \
21744 + register_cpu_notifier(&fn##_nb); \
21745 + }
21746 diff --git a/include/linux/cred.h b/include/linux/cred.h
21747 -index ebbed2c..908cc2c 100644
21748 +index 04421e8..6bce4ef 100644
21749 --- a/include/linux/cred.h
21750 +++ b/include/linux/cred.h
21751 -@@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
21752 +@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
21753 static inline void validate_process_creds(void)
21754 {
21755 }
21756 @@ -63276,12 +64207,12 @@ index d3201e4..8281e63 100644
21757 unsigned int offset, size_t len);
21758
21759 diff --git a/include/linux/efi.h b/include/linux/efi.h
21760 -index b424f64..fd36c1b 100644
21761 +index 7a9498a..155713d 100644
21762 --- a/include/linux/efi.h
21763 +++ b/include/linux/efi.h
21764 -@@ -656,6 +656,7 @@ struct efivar_operations {
21765 - efi_get_next_variable_t *get_next_variable;
21766 +@@ -733,6 +733,7 @@ struct efivar_operations {
21767 efi_set_variable_t *set_variable;
21768 + efi_query_variable_info_t *query_variable_info;
21769 };
21770 +typedef struct efivar_operations __no_const efivar_operations_no_const;
21771
21772 @@ -63308,7 +64239,7 @@ index 8c9048e..16a4665 100644
21773 #endif
21774
21775 diff --git a/include/linux/filter.h b/include/linux/filter.h
21776 -index 24d251f..7afb83d 100644
21777 +index c45eabc..baa0be5 100644
21778 --- a/include/linux/filter.h
21779 +++ b/include/linux/filter.h
21780 @@ -20,6 +20,7 @@ struct compat_sock_fprog {
21781 @@ -63343,20 +64274,20 @@ index 3044254..9767f41 100644
21782 extern bool frontswap_enabled;
21783 extern struct frontswap_ops
21784 diff --git a/include/linux/fs.h b/include/linux/fs.h
21785 -index 75fe9a1..8417cac 100644
21786 +index 7617ee0..b575199 100644
21787 --- a/include/linux/fs.h
21788 +++ b/include/linux/fs.h
21789 -@@ -1543,7 +1543,8 @@ struct file_operations {
21790 - int (*setlease)(struct file *, long, struct file_lock **);
21791 +@@ -1541,7 +1541,8 @@ struct file_operations {
21792 long (*fallocate)(struct file *file, int mode, loff_t offset,
21793 loff_t len);
21794 + int (*show_fdinfo)(struct seq_file *m, struct file *f);
21795 -};
21796 +} __do_const;
21797 +typedef struct file_operations __no_const file_operations_no_const;
21798
21799 struct inode_operations {
21800 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
21801 -@@ -2667,4 +2668,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
21802 +@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
21803 inode->i_flags |= S_NOSEC;
21804 }
21805
21806 @@ -63372,7 +64303,7 @@ index 75fe9a1..8417cac 100644
21807 +
21808 #endif /* _LINUX_FS_H */
21809 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
21810 -index 003dc0f..3c4ea97 100644
21811 +index d0ae3a8..0244b34 100644
21812 --- a/include/linux/fs_struct.h
21813 +++ b/include/linux/fs_struct.h
21814 @@ -6,7 +6,7 @@
21815 @@ -63385,10 +64316,10 @@ index 003dc0f..3c4ea97 100644
21816 seqcount_t seq;
21817 int umask;
21818 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
21819 -index ce31408..b1ad003 100644
21820 +index 5dfa0aa..6acf322 100644
21821 --- a/include/linux/fscache-cache.h
21822 +++ b/include/linux/fscache-cache.h
21823 -@@ -102,7 +102,7 @@ struct fscache_operation {
21824 +@@ -112,7 +112,7 @@ struct fscache_operation {
21825 fscache_operation_release_t release;
21826 };
21827
21828 @@ -63397,10 +64328,10 @@ index ce31408..b1ad003 100644
21829 extern void fscache_op_work_func(struct work_struct *work);
21830
21831 extern void fscache_enqueue_operation(struct fscache_operation *);
21832 -@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
21833 - {
21834 +@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
21835 INIT_WORK(&op->work, fscache_op_work_func);
21836 atomic_set(&op->usage, 1);
21837 + op->state = FSCACHE_OP_ST_INITIALISED;
21838 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
21839 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
21840 op->processor = processor;
21841 @@ -63440,10 +64371,10 @@ index 0fbfb46..508eb0d 100644
21842
21843 /*
21844 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
21845 -index 642928c..93afe6a 100644
21846 +index a3d4895..ddd2a50 100644
21847 --- a/include/linux/ftrace_event.h
21848 +++ b/include/linux/ftrace_event.h
21849 -@@ -266,7 +266,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
21850 +@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
21851 extern int trace_add_event_call(struct ftrace_event_call *call);
21852 extern void trace_remove_event_call(struct ftrace_event_call *call);
21853
21854 @@ -63453,10 +64384,10 @@ index 642928c..93afe6a 100644
21855 int trace_set_clr_event(const char *system, const char *event, int set);
21856
21857 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
21858 -index 4f440b3..342233a 100644
21859 +index 79b8bba..86b539e 100644
21860 --- a/include/linux/genhd.h
21861 +++ b/include/linux/genhd.h
21862 -@@ -190,7 +190,7 @@ struct gendisk {
21863 +@@ -194,7 +194,7 @@ struct gendisk {
21864 struct kobject *slave_dir;
21865
21866 struct timer_rand_state *random;
21867 @@ -63466,31 +64397,32 @@ index 4f440b3..342233a 100644
21868 #ifdef CONFIG_BLK_DEV_INTEGRITY
21869 struct blk_integrity *integrity;
21870 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
21871 -index d0a7967..63c4c47 100644
21872 +index 0f615eb..5c3832f 100644
21873 --- a/include/linux/gfp.h
21874 +++ b/include/linux/gfp.h
21875 -@@ -35,6 +35,12 @@ struct vm_area_struct;
21876 +@@ -35,6 +35,13 @@ struct vm_area_struct;
21877 + #define ___GFP_NO_KSWAPD 0x400000u
21878 #define ___GFP_OTHER_NODE 0x800000u
21879 #define ___GFP_WRITE 0x1000000u
21880 -
21881 ++
21882 +#ifdef CONFIG_PAX_USERCOPY_SLABS
21883 +#define ___GFP_USERCOPY 0x2000000u
21884 +#else
21885 +#define ___GFP_USERCOPY 0
21886 +#endif
21887 +
21888 + /* If the above are modified, __GFP_BITS_SHIFT may need updating */
21889 +
21890 /*
21891 - * GFP bitmasks..
21892 - *
21893 -@@ -89,6 +95,7 @@ struct vm_area_struct;
21894 - #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
21895 +@@ -92,6 +99,7 @@ struct vm_area_struct;
21896 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
21897 + #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
21898 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
21899 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
21900
21901 /*
21902 * This may seem redundant, but it's a way of annotating false positives vs.
21903 -@@ -96,7 +103,7 @@ struct vm_area_struct;
21904 +@@ -99,7 +107,7 @@ struct vm_area_struct;
21905 */
21906 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
21907
21908 @@ -63499,7 +64431,7 @@ index d0a7967..63c4c47 100644
21909 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
21910
21911 /* This equals 0, but use constants in case they ever change */
21912 -@@ -150,6 +157,8 @@ struct vm_area_struct;
21913 +@@ -153,6 +161,8 @@ struct vm_area_struct;
21914 /* 4GB DMA on some platforms */
21915 #define GFP_DMA32 __GFP_DMA32
21916
21917 @@ -64644,10 +65576,10 @@ index ef788b5..ac41b7b 100644
21918 unsigned start1, unsigned end1,
21919 unsigned start2, unsigned end2)
21920 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
21921 -index 800de22..7a2fa46 100644
21922 +index d0c4db7..61b3577 100644
21923 --- a/include/linux/i2c.h
21924 +++ b/include/linux/i2c.h
21925 -@@ -367,6 +367,7 @@ struct i2c_algorithm {
21926 +@@ -369,6 +369,7 @@ struct i2c_algorithm {
21927 /* To determine what the adapter supports */
21928 u32 (*functionality) (struct i2c_adapter *);
21929 };
21930 @@ -64682,7 +65614,7 @@ index aff7ad8..3942bbd 100644
21931 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
21932 extern void unregister_pppox_proto(int proto_num);
21933 diff --git a/include/linux/init.h b/include/linux/init.h
21934 -index e59041e..df0a975 100644
21935 +index 10ed4f4..8e8490d 100644
21936 --- a/include/linux/init.h
21937 +++ b/include/linux/init.h
21938 @@ -39,9 +39,36 @@
21939 @@ -64726,22 +65658,13 @@ index e59041e..df0a975 100644
21940 @@ -94,7 +121,7 @@
21941 #define __exit __section(.exit.text) __exitused __cold notrace
21942
21943 - /* Used for HOTPLUG */
21944 --#define __devinit __section(.devinit.text) __cold notrace
21945 -+#define __devinit __section(.devinit.text) __cold notrace add_devinit_latent_entropy
21946 - #define __devinitdata __section(.devinit.data)
21947 - #define __devinitconst __constsection(.devinit.rodata)
21948 - #define __devexit __section(.devexit.text) __exitused __cold notrace
21949 -@@ -102,7 +129,7 @@
21950 - #define __devexitconst __constsection(.devexit.rodata)
21951 -
21952 /* Used for HOTPLUG_CPU */
21953 -#define __cpuinit __section(.cpuinit.text) __cold notrace
21954 +#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
21955 #define __cpuinitdata __section(.cpuinit.data)
21956 #define __cpuinitconst __constsection(.cpuinit.rodata)
21957 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
21958 -@@ -110,7 +137,7 @@
21959 +@@ -102,7 +129,7 @@
21960 #define __cpuexitconst __constsection(.cpuexit.rodata)
21961
21962 /* Used for MEMORY_HOTPLUG */
21963 @@ -64776,10 +65699,10 @@ index 6d087c5..401cab8 100644
21964 .files = &init_files, \
21965 .signal = &init_signals, \
21966 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
21967 -index 5e4e617..073b866 100644
21968 +index 5fa5afe..ac55b25 100644
21969 --- a/include/linux/interrupt.h
21970 +++ b/include/linux/interrupt.h
21971 -@@ -435,7 +435,7 @@ enum
21972 +@@ -430,7 +430,7 @@ enum
21973 /* map softirq index to softirq name. update 'softirq_to_name' in
21974 * kernel/softirq.c when adding a new softirq.
21975 */
21976 @@ -64788,7 +65711,7 @@ index 5e4e617..073b866 100644
21977
21978 /* softirq mask and active fields moved to irq_cpustat_t in
21979 * asm/hardirq.h to get better cache usage. KAO
21980 -@@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
21981 +@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
21982
21983 struct softirq_action
21984 {
21985 @@ -64881,7 +65804,7 @@ index 5398d58..5883a34 100644
21986 #define request_module_nowait(mod...) __request_module(false, mod)
21987 #define try_then_request_module(x, mod...) \
21988 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
21989 -index 1e57449..4fede7b 100644
21990 +index 939b112..90b7f44 100644
21991 --- a/include/linux/kobject.h
21992 +++ b/include/linux/kobject.h
21993 @@ -111,7 +111,7 @@ struct kobj_type {
21994 @@ -64894,7 +65817,7 @@ index 1e57449..4fede7b 100644
21995 struct kobj_uevent_env {
21996 char *envp[UEVENT_NUM_ENVP];
21997 diff --git a/include/linux/kref.h b/include/linux/kref.h
21998 -index 65af688..0592677 100644
21999 +index 4972e6e..de4d19b 100644
22000 --- a/include/linux/kref.h
22001 +++ b/include/linux/kref.h
22002 @@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
22003 @@ -64907,10 +65830,10 @@ index 65af688..0592677 100644
22004 if (atomic_sub_and_test((int) count, &kref->refcount)) {
22005 release(kref);
22006 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
22007 -index ecc5543..0e96bcc 100644
22008 +index 2c497ab..afe32f5 100644
22009 --- a/include/linux/kvm_host.h
22010 +++ b/include/linux/kvm_host.h
22011 -@@ -403,7 +403,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
22012 +@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
22013 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
22014 void vcpu_put(struct kvm_vcpu *vcpu);
22015
22016 @@ -64919,7 +65842,7 @@ index ecc5543..0e96bcc 100644
22017 struct module *module);
22018 void kvm_exit(void);
22019
22020 -@@ -558,7 +558,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
22021 +@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
22022 struct kvm_guest_debug *dbg);
22023 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
22024
22025 @@ -64929,7 +65852,7 @@ index ecc5543..0e96bcc 100644
22026
22027 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
22028 diff --git a/include/linux/libata.h b/include/linux/libata.h
22029 -index 1e36c63..0c5046e 100644
22030 +index 649e5f8..ead5194 100644
22031 --- a/include/linux/libata.h
22032 +++ b/include/linux/libata.h
22033 @@ -915,7 +915,7 @@ struct ata_port_operations {
22034 @@ -64956,7 +65879,7 @@ index cc6d2aa..71febca 100644
22035 * list_replace - replace old entry by new one
22036 * @old : the element to be replaced
22037 diff --git a/include/linux/mm.h b/include/linux/mm.h
22038 -index 280dae5..baea6c8 100644
22039 +index 66e2f7c..ea88001 100644
22040 --- a/include/linux/mm.h
22041 +++ b/include/linux/mm.h
22042 @@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
22043 @@ -64979,7 +65902,7 @@ index 280dae5..baea6c8 100644
22044
22045 struct mmu_gather;
22046 struct inode;
22047 -@@ -1039,34 +1045,6 @@ int set_page_dirty(struct page *page);
22048 +@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
22049 int set_page_dirty_lock(struct page *page);
22050 int clear_page_dirty_for_io(struct page *page);
22051
22052 @@ -65014,7 +65937,7 @@ index 280dae5..baea6c8 100644
22053 extern pid_t
22054 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
22055
22056 -@@ -1166,6 +1144,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
22057 +@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
22058 }
22059 #endif
22060
22061 @@ -65030,7 +65953,7 @@ index 280dae5..baea6c8 100644
22062 int vma_wants_writenotify(struct vm_area_struct *vma);
22063
22064 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
22065 -@@ -1184,8 +1171,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
22066 +@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
22067 {
22068 return 0;
22069 }
22070 @@ -65046,7 +65969,7 @@ index 280dae5..baea6c8 100644
22071 #endif
22072
22073 #ifdef __PAGETABLE_PMD_FOLDED
22074 -@@ -1194,8 +1188,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
22075 +@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
22076 {
22077 return 0;
22078 }
22079 @@ -65062,7 +65985,7 @@ index 280dae5..baea6c8 100644
22080 #endif
22081
22082 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
22083 -@@ -1213,11 +1214,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
22084 +@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
22085 NULL: pud_offset(pgd, address);
22086 }
22087
22088 @@ -65086,7 +66009,7 @@ index 280dae5..baea6c8 100644
22089 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
22090
22091 #if USE_SPLIT_PTLOCKS
22092 -@@ -1447,6 +1460,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
22093 +@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
22094 unsigned long, unsigned long,
22095 unsigned long, unsigned long);
22096 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
22097 @@ -65094,7 +66017,7 @@ index 280dae5..baea6c8 100644
22098
22099 /* These take the mm semaphore themselves */
22100 extern unsigned long vm_brk(unsigned long, unsigned long);
22101 -@@ -1510,6 +1524,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
22102 +@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
22103 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
22104 struct vm_area_struct **pprev);
22105
22106 @@ -65105,7 +66028,7 @@ index 280dae5..baea6c8 100644
22107 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
22108 NULL if none. Assume start_addr < end_addr. */
22109 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
22110 -@@ -1538,15 +1556,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
22111 +@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
22112 return vma;
22113 }
22114
22115 @@ -65118,10 +66041,10 @@ index 280dae5..baea6c8 100644
22116 -}
22117 -#endif
22118 -
22119 - struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
22120 - int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
22121 - unsigned long pfn, unsigned long size, pgprot_t);
22122 -@@ -1652,7 +1661,7 @@ extern int unpoison_memory(unsigned long pfn);
22123 + #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
22124 + unsigned long change_prot_numa(struct vm_area_struct *vma,
22125 + unsigned long start, unsigned long end);
22126 +@@ -1721,7 +1730,7 @@ extern int unpoison_memory(unsigned long pfn);
22127 extern int sysctl_memory_failure_early_kill;
22128 extern int sysctl_memory_failure_recovery;
22129 extern void shake_page(struct page *p, int access);
22130 @@ -65130,7 +66053,7 @@ index 280dae5..baea6c8 100644
22131 extern int soft_offline_page(struct page *page, int flags);
22132
22133 extern void dump_page(struct page *page);
22134 -@@ -1683,5 +1692,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
22135 +@@ -1752,5 +1761,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
22136 static inline bool page_is_guard(struct page *page) { return false; }
22137 #endif /* CONFIG_DEBUG_PAGEALLOC */
22138
22139 @@ -65143,10 +66066,10 @@ index 280dae5..baea6c8 100644
22140 #endif /* __KERNEL__ */
22141 #endif /* _LINUX_MM_H */
22142 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
22143 -index 31f8a3a..499f1db 100644
22144 +index f8f5162..6276a36 100644
22145 --- a/include/linux/mm_types.h
22146 +++ b/include/linux/mm_types.h
22147 -@@ -275,6 +275,8 @@ struct vm_area_struct {
22148 +@@ -288,6 +288,8 @@ struct vm_area_struct {
22149 #ifdef CONFIG_NUMA
22150 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
22151 #endif
22152 @@ -65155,7 +66078,7 @@ index 31f8a3a..499f1db 100644
22153 };
22154
22155 struct core_thread {
22156 -@@ -348,7 +350,7 @@ struct mm_struct {
22157 +@@ -362,7 +364,7 @@ struct mm_struct {
22158 unsigned long def_flags;
22159 unsigned long nr_ptes; /* Page table pages */
22160 unsigned long start_code, end_code, start_data, end_data;
22161 @@ -65164,8 +66087,8 @@ index 31f8a3a..499f1db 100644
22162 unsigned long arg_start, arg_end, env_start, env_end;
22163
22164 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
22165 -@@ -399,6 +401,24 @@ struct mm_struct {
22166 - struct cpumask cpumask_allocation;
22167 +@@ -436,6 +438,24 @@ struct mm_struct {
22168 + int first_nid;
22169 #endif
22170 struct uprobes_state uprobes_state;
22171 +
22172 @@ -65188,7 +66111,7 @@ index 31f8a3a..499f1db 100644
22173 +
22174 };
22175
22176 - static inline void mm_init_cpumask(struct mm_struct *mm)
22177 + /* first nid will either be a valid NID or one of these values */
22178 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
22179 index c5d5278..f0b68c8 100644
22180 --- a/include/linux/mmiotrace.h
22181 @@ -65212,10 +66135,10 @@ index c5d5278..f0b68c8 100644
22182 }
22183
22184 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
22185 -index a23923b..073fee4 100644
22186 +index 73b64a3..6562925 100644
22187 --- a/include/linux/mmzone.h
22188 +++ b/include/linux/mmzone.h
22189 -@@ -421,7 +421,7 @@ struct zone {
22190 +@@ -412,7 +412,7 @@ struct zone {
22191 unsigned long flags; /* zone flags, see below */
22192
22193 /* Zone statistics */
22194 @@ -65398,10 +66321,10 @@ index 560ca53..5ee8d73 100644
22195 }
22196 #endif
22197 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
22198 -index d6a5806..7c13347 100644
22199 +index 137b419..fe663ec 100644
22200 --- a/include/linux/moduleparam.h
22201 +++ b/include/linux/moduleparam.h
22202 -@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
22203 +@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
22204 * @len is usually just sizeof(string).
22205 */
22206 #define module_param_string(name, string, len, perm) \
22207 @@ -65410,7 +66333,7 @@ index d6a5806..7c13347 100644
22208 = { len, string }; \
22209 __module_param_call(MODULE_PARAM_PREFIX, name, \
22210 &param_ops_string, \
22211 -@@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
22212 +@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
22213 */
22214 #define module_param_array_named(name, array, type, nump, perm) \
22215 param_check_##type(name, &(array)[0]); \
22216 @@ -65420,10 +66343,10 @@ index d6a5806..7c13347 100644
22217 .ops = &param_ops_##type, \
22218 .elemsize = sizeof(array[0]), .elem = array }; \
22219 diff --git a/include/linux/namei.h b/include/linux/namei.h
22220 -index 4bf19d8..5268cea 100644
22221 +index 5a5ff57..5ae5070 100644
22222 --- a/include/linux/namei.h
22223 +++ b/include/linux/namei.h
22224 -@@ -18,7 +18,7 @@ struct nameidata {
22225 +@@ -19,7 +19,7 @@ struct nameidata {
22226 unsigned seq;
22227 int last_type;
22228 unsigned depth;
22229 @@ -65432,7 +66355,7 @@ index 4bf19d8..5268cea 100644
22230 };
22231
22232 /*
22233 -@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
22234 +@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
22235
22236 extern void nd_jump_link(struct nameidata *nd, struct path *path);
22237
22238 @@ -65448,18 +66371,18 @@ index 4bf19d8..5268cea 100644
22239 return nd->saved_names[nd->depth];
22240 }
22241 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
22242 -index 825fb7e..24cdd41 100644
22243 +index 9ef07d0..130a5d9 100644
22244 --- a/include/linux/netdevice.h
22245 +++ b/include/linux/netdevice.h
22246 -@@ -1002,6 +1002,7 @@ struct net_device_ops {
22247 - struct net_device *dev,
22248 - int idx);
22249 +@@ -1012,6 +1012,7 @@ struct net_device_ops {
22250 + u32 pid, u32 seq,
22251 + struct net_device *dev);
22252 };
22253 +typedef struct net_device_ops __no_const net_device_ops_no_const;
22254
22255 /*
22256 * The DEVICE structure.
22257 -@@ -1062,7 +1063,7 @@ struct net_device {
22258 +@@ -1078,7 +1079,7 @@ struct net_device {
22259 int iflink;
22260
22261 struct net_device_stats stats;
22262 @@ -65540,7 +66463,7 @@ index a4c5624..79d6d88 100644
22263 /** create a directory */
22264 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
22265 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
22266 -index 6bfb2faa..1204767 100644
22267 +index 6bfb2faa..e5bc5e5 100644
22268 --- a/include/linux/perf_event.h
22269 +++ b/include/linux/perf_event.h
22270 @@ -328,8 +328,8 @@ struct perf_event {
22271 @@ -65565,6 +66488,15 @@ index 6bfb2faa..1204767 100644
22272
22273 /*
22274 * Protect attach/detach and child_list:
22275 +@@ -801,7 +801,7 @@ static inline void perf_event_task_tick(void) { }
22276 + */
22277 + #define perf_cpu_notifier(fn) \
22278 + do { \
22279 +- static struct notifier_block fn##_nb __cpuinitdata = \
22280 ++ static struct notifier_block fn##_nb = \
22281 + { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
22282 + unsigned long cpu = smp_processor_id(); \
22283 + unsigned long flags; \
22284 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
22285 index ad1a427..6419649 100644
22286 --- a/include/linux/pipe_fs_i.h
22287 @@ -65624,7 +66556,7 @@ index 2110a81..13a11bb 100644
22288 /********** include/linux/timer.h **********/
22289 /*
22290 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
22291 -index 4a496eb..d9c5659 100644
22292 +index c0f44c2..1572583 100644
22293 --- a/include/linux/power/smartreflex.h
22294 +++ b/include/linux/power/smartreflex.h
22295 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
22296 @@ -65658,10 +66590,10 @@ index 9afc01e..92c32e8 100644
22297 void log_buf_kexec_setup(void);
22298 void __init setup_log_buf(int early);
22299 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
22300 -index 3fd2e87..75db910 100644
22301 +index 32676b3..8f7a182 100644
22302 --- a/include/linux/proc_fs.h
22303 +++ b/include/linux/proc_fs.h
22304 -@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
22305 +@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
22306 return proc_create_data(name, mode, parent, proc_fops, NULL);
22307 }
22308
22309 @@ -65681,28 +66613,21 @@ index 3fd2e87..75db910 100644
22310 umode_t mode, struct proc_dir_entry *base,
22311 read_proc_t *read_proc, void * data)
22312 diff --git a/include/linux/random.h b/include/linux/random.h
22313 -index 6330ed4..419c6c3 100644
22314 +index d984608..d6f0042 100644
22315 --- a/include/linux/random.h
22316 +++ b/include/linux/random.h
22317 -@@ -30,12 +30,17 @@ void srandom32(u32 seed);
22318 -
22319 - u32 prandom32(struct rnd_state *);
22320 +@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
22321 + u32 prandom_u32_state(struct rnd_state *);
22322 + void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
22323
22324 +static inline unsigned long pax_get_random_long(void)
22325 +{
22326 -+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
22327 ++ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
22328 +}
22329 +
22330 /*
22331 * Handle minimum values for seeds
22332 */
22333 - static inline u32 __seed(u32 x, u32 m)
22334 - {
22335 -- return (x < m) ? x + m : x;
22336 -+ return (x <= m) ? x + m + 1 : x;
22337 - }
22338 -
22339 - /**
22340 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
22341 index 23b3630..e1bc12b 100644
22342 --- a/include/linux/reboot.h
22343 @@ -65783,10 +66708,10 @@ index a3e7842..d973ca6 100644
22344 #define RIO_RESOURCE_MEM 0x00000100
22345 #define RIO_RESOURCE_DOORBELL 0x00000200
22346 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
22347 -index bfe1f47..6a33ee3 100644
22348 +index c20635c..2f5def4 100644
22349 --- a/include/linux/rmap.h
22350 +++ b/include/linux/rmap.h
22351 -@@ -134,8 +134,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
22352 +@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
22353 void anon_vma_init(void); /* create anon_vma_cachep */
22354 int anon_vma_prepare(struct vm_area_struct *);
22355 void unlink_anon_vmas(struct vm_area_struct *);
22356 @@ -65798,7 +66723,7 @@ index bfe1f47..6a33ee3 100644
22357 static inline void anon_vma_merge(struct vm_area_struct *vma,
22358 struct vm_area_struct *next)
22359 diff --git a/include/linux/sched.h b/include/linux/sched.h
22360 -index 3e63925..6c93b17 100644
22361 +index d211247..d64a165 100644
22362 --- a/include/linux/sched.h
22363 +++ b/include/linux/sched.h
22364 @@ -61,6 +61,7 @@ struct bio_list;
22365 @@ -65809,7 +66734,7 @@ index 3e63925..6c93b17 100644
22366
22367 /*
22368 * List of flags we want to share for kernel threads,
22369 -@@ -344,10 +345,23 @@ struct user_namespace;
22370 +@@ -354,10 +355,23 @@ struct user_namespace;
22371 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
22372
22373 extern int sysctl_max_map_count;
22374 @@ -65833,7 +66758,7 @@ index 3e63925..6c93b17 100644
22375 extern void arch_pick_mmap_layout(struct mm_struct *mm);
22376 extern unsigned long
22377 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
22378 -@@ -614,6 +628,17 @@ struct signal_struct {
22379 +@@ -639,6 +653,17 @@ struct signal_struct {
22380 #ifdef CONFIG_TASKSTATS
22381 struct taskstats *stats;
22382 #endif
22383 @@ -65851,7 +66776,7 @@ index 3e63925..6c93b17 100644
22384 #ifdef CONFIG_AUDIT
22385 unsigned audit_tty;
22386 struct tty_audit_buf *tty_audit_buf;
22387 -@@ -691,6 +716,11 @@ struct user_struct {
22388 +@@ -717,6 +742,11 @@ struct user_struct {
22389 struct key *session_keyring; /* UID's default session keyring */
22390 #endif
22391
22392 @@ -65863,7 +66788,7 @@ index 3e63925..6c93b17 100644
22393 /* Hash table maintenance information */
22394 struct hlist_node uidhash_node;
22395 kuid_t uid;
22396 -@@ -1312,8 +1342,8 @@ struct task_struct {
22397 +@@ -1360,8 +1390,8 @@ struct task_struct {
22398 struct list_head thread_group;
22399
22400 struct completion *vfork_done; /* for vfork() */
22401 @@ -65874,7 +66799,7 @@ index 3e63925..6c93b17 100644
22402
22403 cputime_t utime, stime, utimescaled, stimescaled;
22404 cputime_t gtime;
22405 -@@ -1329,11 +1359,6 @@ struct task_struct {
22406 +@@ -1377,11 +1407,6 @@ struct task_struct {
22407 struct task_cputime cputime_expires;
22408 struct list_head cpu_timers[3];
22409
22410 @@ -65886,7 +66811,7 @@ index 3e63925..6c93b17 100644
22411 char comm[TASK_COMM_LEN]; /* executable name excluding path
22412 - access with [gs]et_task_comm (which lock
22413 it with task_lock())
22414 -@@ -1350,6 +1375,10 @@ struct task_struct {
22415 +@@ -1398,6 +1423,10 @@ struct task_struct {
22416 #endif
22417 /* CPU-specific state of this task */
22418 struct thread_struct thread;
22419 @@ -65897,7 +66822,7 @@ index 3e63925..6c93b17 100644
22420 /* filesystem information */
22421 struct fs_struct *fs;
22422 /* open file information */
22423 -@@ -1423,6 +1452,10 @@ struct task_struct {
22424 +@@ -1471,6 +1500,10 @@ struct task_struct {
22425 gfp_t lockdep_reclaim_gfp;
22426 #endif
22427
22428 @@ -65908,7 +66833,7 @@ index 3e63925..6c93b17 100644
22429 /* journalling filesystem info */
22430 void *journal_info;
22431
22432 -@@ -1461,6 +1494,10 @@ struct task_struct {
22433 +@@ -1509,6 +1542,10 @@ struct task_struct {
22434 /* cg_list protected by css_set_lock and tsk->alloc_lock */
22435 struct list_head cg_list;
22436 #endif
22437 @@ -65919,7 +66844,7 @@ index 3e63925..6c93b17 100644
22438 #ifdef CONFIG_FUTEX
22439 struct robust_list_head __user *robust_list;
22440 #ifdef CONFIG_COMPAT
22441 -@@ -1548,8 +1585,74 @@ struct task_struct {
22442 +@@ -1605,8 +1642,74 @@ struct task_struct {
22443 #ifdef CONFIG_UPROBES
22444 struct uprobe_task *utask;
22445 #endif
22446 @@ -65994,7 +66919,7 @@ index 3e63925..6c93b17 100644
22447 /* Future-safe accessor for struct task_struct's cpus_allowed. */
22448 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
22449
22450 -@@ -2092,7 +2195,9 @@ void yield(void);
22451 +@@ -2155,7 +2258,9 @@ void yield(void);
22452 extern struct exec_domain default_exec_domain;
22453
22454 union thread_union {
22455 @@ -66004,7 +66929,7 @@ index 3e63925..6c93b17 100644
22456 unsigned long stack[THREAD_SIZE/sizeof(long)];
22457 };
22458
22459 -@@ -2125,6 +2230,7 @@ extern struct pid_namespace init_pid_ns;
22460 +@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
22461 */
22462
22463 extern struct task_struct *find_task_by_vpid(pid_t nr);
22464 @@ -66012,16 +66937,16 @@ index 3e63925..6c93b17 100644
22465 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
22466 struct pid_namespace *ns);
22467
22468 -@@ -2281,7 +2387,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
22469 +@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
22470 extern void exit_itimers(struct signal_struct *);
22471 extern void flush_itimer_signals(void);
22472
22473 -extern void do_group_exit(int);
22474 +extern __noreturn void do_group_exit(int);
22475
22476 - extern void daemonize(const char *, ...);
22477 extern int allow_signal(int);
22478 -@@ -2485,9 +2591,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
22479 + extern int disallow_signal(int);
22480 +@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
22481
22482 #endif
22483
22484 @@ -66034,7 +66959,7 @@ index 3e63925..6c93b17 100644
22485 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
22486 }
22487 diff --git a/include/linux/security.h b/include/linux/security.h
22488 -index 05e88bd..5cda002 100644
22489 +index eee7478..290f7ba 100644
22490 --- a/include/linux/security.h
22491 +++ b/include/linux/security.h
22492 @@ -26,6 +26,7 @@
22493 @@ -66068,7 +66993,7 @@ index 68a04a3..866e6a1 100644
22494 #define SEQ_SKIP 1
22495
22496 diff --git a/include/linux/shm.h b/include/linux/shm.h
22497 -index bcf8a6a..4d0af77 100644
22498 +index 429c199..4d42e38 100644
22499 --- a/include/linux/shm.h
22500 +++ b/include/linux/shm.h
22501 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
22502 @@ -66083,10 +67008,10 @@ index bcf8a6a..4d0af77 100644
22503
22504 /* shm_mode upper byte flags */
22505 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
22506 -index 6a2c34e..a1f320f 100644
22507 +index 320e976..fd52553 100644
22508 --- a/include/linux/skbuff.h
22509 +++ b/include/linux/skbuff.h
22510 -@@ -577,7 +577,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
22511 +@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
22512 extern struct sk_buff *__alloc_skb(unsigned int size,
22513 gfp_t priority, int flags, int node);
22514 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
22515 @@ -66095,7 +67020,7 @@ index 6a2c34e..a1f320f 100644
22516 gfp_t priority)
22517 {
22518 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
22519 -@@ -687,7 +687,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
22520 +@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
22521 */
22522 static inline int skb_queue_empty(const struct sk_buff_head *list)
22523 {
22524 @@ -66104,7 +67029,7 @@ index 6a2c34e..a1f320f 100644
22525 }
22526
22527 /**
22528 -@@ -700,7 +700,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
22529 +@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
22530 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
22531 const struct sk_buff *skb)
22532 {
22533 @@ -66113,7 +67038,7 @@ index 6a2c34e..a1f320f 100644
22534 }
22535
22536 /**
22537 -@@ -713,7 +713,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
22538 +@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
22539 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
22540 const struct sk_buff *skb)
22541 {
22542 @@ -66122,7 +67047,7 @@ index 6a2c34e..a1f320f 100644
22543 }
22544
22545 /**
22546 -@@ -1626,7 +1626,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
22547 +@@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
22548 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
22549 */
22550 #ifndef NET_SKB_PAD
22551 @@ -66131,7 +67056,7 @@ index 6a2c34e..a1f320f 100644
22552 #endif
22553
22554 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
22555 -@@ -2204,7 +2204,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
22556 +@@ -2300,7 +2300,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
22557 int noblock, int *err);
22558 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
22559 struct poll_table_struct *wait);
22560 @@ -66141,13 +67066,14 @@ index 6a2c34e..a1f320f 100644
22561 int size);
22562 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
22563 diff --git a/include/linux/slab.h b/include/linux/slab.h
22564 -index 83d1a14..e23d723 100644
22565 +index 5d168d7..720bff3 100644
22566 --- a/include/linux/slab.h
22567 +++ b/include/linux/slab.h
22568 -@@ -11,12 +11,20 @@
22569 -
22570 +@@ -12,13 +12,20 @@
22571 #include <linux/gfp.h>
22572 #include <linux/types.h>
22573 + #include <linux/workqueue.h>
22574 +-
22575 +#include <linux/err.h>
22576
22577 /*
22578 @@ -66165,7 +67091,7 @@ index 83d1a14..e23d723 100644
22579 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
22580 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
22581 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
22582 -@@ -87,10 +95,13 @@
22583 +@@ -89,10 +96,13 @@
22584 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
22585 * Both make kfree a no-op.
22586 */
22587 @@ -66182,7 +67108,7 @@ index 83d1a14..e23d723 100644
22588
22589 /*
22590 * Common fields provided in kmem_cache by all slab allocators
22591 -@@ -110,7 +121,7 @@ struct kmem_cache {
22592 +@@ -112,7 +122,7 @@ struct kmem_cache {
22593 unsigned int align; /* Alignment as calculated */
22594 unsigned long flags; /* Active flags on the slab */
22595 const char *name; /* Slab name for sysfs */
22596 @@ -66191,7 +67117,7 @@ index 83d1a14..e23d723 100644
22597 void (*ctor)(void *); /* Called on object slot creation */
22598 struct list_head list; /* List of all slab caches on the system */
22599 };
22600 -@@ -185,6 +196,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
22601 +@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
22602 void kfree(const void *);
22603 void kzfree(const void *);
22604 size_t ksize(const void *);
22605 @@ -66200,26 +67126,15 @@ index 83d1a14..e23d723 100644
22606
22607 /*
22608 * Allocator specific definitions. These are mainly used to establish optimized
22609 -@@ -264,8 +277,18 @@ size_t ksize(const void *);
22610 +@@ -311,6 +323,7 @@ size_t ksize(const void *);
22611 * for general use, and so are not documented here. For a full list of
22612 * potential flags, always refer to linux/gfp.h.
22613 */
22614 +
22615 -+extern void kmalloc_array_error(void)
22616 -+#if defined(CONFIG_GCOV_KERNEL) && defined(CONFIG_PAX_SIZE_OVERFLOW)
22617 -+__compiletime_warning("kmalloc_array called with swapped arguments?");
22618 -+#else
22619 -+__compiletime_error("kmalloc_array called with swapped arguments?");
22620 -+#endif
22621 -+
22622 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
22623 {
22624 -+ if (__builtin_constant_p(n) && !__builtin_constant_p(size))
22625 -+ kmalloc_array_error();
22626 if (size != 0 && n > SIZE_MAX / size)
22627 - return NULL;
22628 - return __kmalloc(n * size, flags);
22629 -@@ -323,7 +346,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
22630 +@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
22631 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
22632 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
22633 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
22634 @@ -66228,7 +67143,7 @@ index 83d1a14..e23d723 100644
22635 #define kmalloc_track_caller(size, flags) \
22636 __kmalloc_track_caller(size, flags, _RET_IP_)
22637 #else
22638 -@@ -343,7 +366,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
22639 +@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
22640 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
22641 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
22642 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
22643 @@ -66238,7 +67153,7 @@ index 83d1a14..e23d723 100644
22644 __kmalloc_node_track_caller(size, flags, node, \
22645 _RET_IP_)
22646 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
22647 -index cc290f0..0ba60931 100644
22648 +index 8bb6e0e..8eb0dbe 100644
22649 --- a/include/linux/slab_def.h
22650 +++ b/include/linux/slab_def.h
22651 @@ -52,7 +52,7 @@ struct kmem_cache {
22652 @@ -66265,7 +67180,7 @@ index cc290f0..0ba60931 100644
22653
22654 /*
22655 * If debugging is enabled, then the allocator can add additional
22656 -@@ -104,11 +104,16 @@ struct cache_sizes {
22657 +@@ -111,11 +111,16 @@ struct cache_sizes {
22658 #ifdef CONFIG_ZONE_DMA
22659 struct kmem_cache *cs_dmacachep;
22660 #endif
22661 @@ -66283,7 +67198,7 @@ index cc290f0..0ba60931 100644
22662
22663 #ifdef CONFIG_TRACING
22664 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
22665 -@@ -145,6 +150,13 @@ found:
22666 +@@ -152,6 +157,13 @@ found:
22667 cachep = malloc_sizes[i].cs_dmacachep;
22668 else
22669 #endif
22670 @@ -66297,7 +67212,7 @@ index cc290f0..0ba60931 100644
22671 cachep = malloc_sizes[i].cs_cachep;
22672
22673 ret = kmem_cache_alloc_trace(cachep, flags, size);
22674 -@@ -155,7 +167,7 @@ found:
22675 +@@ -162,7 +174,7 @@ found:
22676 }
22677
22678 #ifdef CONFIG_NUMA
22679 @@ -66306,7 +67221,7 @@ index cc290f0..0ba60931 100644
22680 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
22681
22682 #ifdef CONFIG_TRACING
22683 -@@ -198,6 +210,13 @@ found:
22684 +@@ -205,6 +217,13 @@ found:
22685 cachep = malloc_sizes[i].cs_dmacachep;
22686 else
22687 #endif
22688 @@ -66343,7 +67258,7 @@ index f28e14a..7831211 100644
22689 return kmalloc(size, flags);
22690 }
22691 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
22692 -index df448ad..b99e7f6 100644
22693 +index 9db4825..ed42fb5 100644
22694 --- a/include/linux/slub_def.h
22695 +++ b/include/linux/slub_def.h
22696 @@ -91,7 +91,7 @@ struct kmem_cache {
22697 @@ -66355,7 +67270,7 @@ index df448ad..b99e7f6 100644
22698 void (*ctor)(void *);
22699 int inuse; /* Offset to metadata */
22700 int align; /* Alignment */
22701 -@@ -152,7 +152,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
22702 +@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
22703 * Sorry that the following has to be that ugly but some versions of GCC
22704 * have trouble with constant propagation and loops.
22705 */
22706 @@ -66364,7 +67279,7 @@ index df448ad..b99e7f6 100644
22707 {
22708 if (!size)
22709 return 0;
22710 -@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
22711 +@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
22712 }
22713
22714 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
22715 @@ -66373,7 +67288,7 @@ index df448ad..b99e7f6 100644
22716
22717 static __always_inline void *
22718 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
22719 -@@ -258,7 +258,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
22720 +@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
22721 }
22722 #endif
22723
22724 @@ -66382,7 +67297,7 @@ index df448ad..b99e7f6 100644
22725 {
22726 unsigned int order = get_order(size);
22727 return kmalloc_order_trace(size, flags, order);
22728 -@@ -283,7 +283,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
22729 +@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
22730 }
22731
22732 #ifdef CONFIG_NUMA
22733 @@ -66468,7 +67383,7 @@ index 0b8e3e6..33e0a01 100644
22734 #define RPCRDMA_VERSION 1
22735
22736 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
22737 -index cd844a6..3ca3592 100644
22738 +index 14a8ff2..21fe4c7 100644
22739 --- a/include/linux/sysctl.h
22740 +++ b/include/linux/sysctl.h
22741 @@ -41,6 +41,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
22742 @@ -66494,10 +67409,10 @@ index 7faf933..eb6f5e3 100644
22743 #ifdef CONFIG_MAGIC_SYSRQ
22744
22745 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
22746 -index ccc1899..b1aaceb 100644
22747 +index e7e0473..39b7b52 100644
22748 --- a/include/linux/thread_info.h
22749 +++ b/include/linux/thread_info.h
22750 -@@ -146,6 +146,15 @@ static inline bool test_and_clear_restore_sigmask(void)
22751 +@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
22752 #error "no set_restore_sigmask() provided and default one won't work"
22753 #endif
22754
22755 @@ -66514,10 +67429,10 @@ index ccc1899..b1aaceb 100644
22756
22757 #endif /* _LINUX_THREAD_INFO_H */
22758 diff --git a/include/linux/tty.h b/include/linux/tty.h
22759 -index f0b4eb4..1c4854e 100644
22760 +index 8db1b56..c16a040 100644
22761 --- a/include/linux/tty.h
22762 +++ b/include/linux/tty.h
22763 -@@ -192,7 +192,7 @@ struct tty_port {
22764 +@@ -194,7 +194,7 @@ struct tty_port {
22765 const struct tty_port_operations *ops; /* Port operations */
22766 spinlock_t lock; /* Lock protecting tty field */
22767 int blocked_open; /* Waiting to open */
22768 @@ -66526,7 +67441,7 @@ index f0b4eb4..1c4854e 100644
22769 wait_queue_head_t open_wait; /* Open waiters */
22770 wait_queue_head_t close_wait; /* Close waiters */
22771 wait_queue_head_t delta_msr_wait; /* Modem status change */
22772 -@@ -513,7 +513,7 @@ extern int tty_port_open(struct tty_port *port,
22773 +@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
22774 struct tty_struct *tty, struct file *filp);
22775 static inline int tty_port_users(struct tty_port *port)
22776 {
22777 @@ -66562,10 +67477,10 @@ index fb79dd8d..07d4773 100644
22778
22779 struct tty_ldisc {
22780 diff --git a/include/linux/types.h b/include/linux/types.h
22781 -index 1cc0e4b..0d50edf 100644
22782 +index 4d118ba..c3ee9bf 100644
22783 --- a/include/linux/types.h
22784 +++ b/include/linux/types.h
22785 -@@ -175,10 +175,26 @@ typedef struct {
22786 +@@ -176,10 +176,26 @@ typedef struct {
22787 int counter;
22788 } atomic_t;
22789
22790 @@ -66655,10 +67570,10 @@ index 99c1b4d..bb94261 100644
22791
22792 static inline void put_unaligned_le16(u16 val, void *p)
22793 diff --git a/include/linux/usb.h b/include/linux/usb.h
22794 -index 10278d1..e21ec3c 100644
22795 +index 4d22d0f..ac43c2f 100644
22796 --- a/include/linux/usb.h
22797 +++ b/include/linux/usb.h
22798 -@@ -551,7 +551,7 @@ struct usb_device {
22799 +@@ -554,7 +554,7 @@ struct usb_device {
22800 int maxchild;
22801
22802 u32 quirks;
22803 @@ -66680,27 +67595,6 @@ index c5d36c6..108f4f9 100644
22804
22805 /*
22806 * callback functions for platform
22807 -diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
22808 -index ddbbb7d..9134611 100644
22809 ---- a/include/linux/usb/usbnet.h
22810 -+++ b/include/linux/usb/usbnet.h
22811 -@@ -33,6 +33,7 @@ struct usbnet {
22812 - wait_queue_head_t *wait;
22813 - struct mutex phy_mutex;
22814 - unsigned char suspend_count;
22815 -+ unsigned char pkt_cnt, pkt_err;
22816 -
22817 - /* i/o info: pipes etc */
22818 - unsigned in, out;
22819 -@@ -69,6 +70,8 @@ struct usbnet {
22820 - # define EVENT_DEV_ASLEEP 6
22821 - # define EVENT_DEV_OPEN 7
22822 - # define EVENT_DEVICE_REPORT_IDLE 8
22823 -+# define EVENT_NO_RUNTIME_PM 9
22824 -+# define EVENT_RX_KILL 10
22825 - };
22826 -
22827 - static inline struct usb_driver *driver_of(struct usb_interface *intf)
22828 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
22829 index 6f8fbcf..8259001 100644
22830 --- a/include/linux/vermagic.h
22831 @@ -66779,10 +67673,10 @@ index 6071e91..ca6a489 100644
22832 /*
22833 * Internals. Dont't use..
22834 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
22835 -index 92a86b2..1d9eb3c 100644
22836 +index a13291f..af51fa3 100644
22837 --- a/include/linux/vmstat.h
22838 +++ b/include/linux/vmstat.h
22839 -@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
22840 +@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
22841 /*
22842 * Zone based page accounting with per cpu differentials.
22843 */
22844 @@ -66805,7 +67699,7 @@ index 92a86b2..1d9eb3c 100644
22845 #ifdef CONFIG_SMP
22846 if (x < 0)
22847 x = 0;
22848 -@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
22849 +@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
22850 static inline unsigned long zone_page_state(struct zone *zone,
22851 enum zone_stat_item item)
22852 {
22853 @@ -66814,7 +67708,7 @@ index 92a86b2..1d9eb3c 100644
22854 #ifdef CONFIG_SMP
22855 if (x < 0)
22856 x = 0;
22857 -@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
22858 +@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
22859 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
22860 enum zone_stat_item item)
22861 {
22862 @@ -66823,7 +67717,7 @@ index 92a86b2..1d9eb3c 100644
22863
22864 #ifdef CONFIG_SMP
22865 int cpu;
22866 -@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
22867 +@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
22868
22869 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
22870 {
22871 @@ -66834,7 +67728,7 @@ index 92a86b2..1d9eb3c 100644
22872 }
22873
22874 static inline void __inc_zone_page_state(struct page *page,
22875 -@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
22876 +@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
22877
22878 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
22879 {
22880 @@ -66859,10 +67753,10 @@ index 95d1c91..6798cca 100644
22881 /*
22882 * Newer version of video_device, handled by videodev2.c
22883 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
22884 -index e48b571..7e40de4 100644
22885 +index 4118ad1..cb7e25f 100644
22886 --- a/include/media/v4l2-ioctl.h
22887 +++ b/include/media/v4l2-ioctl.h
22888 -@@ -282,7 +282,6 @@ struct v4l2_ioctl_ops {
22889 +@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
22890 bool valid_prio, int cmd, void *arg);
22891 };
22892
22893 @@ -66986,10 +67880,10 @@ index 9497be1..5a4fafe 100644
22894 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
22895 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
22896 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
22897 -index ee75ccd..2cc2b95 100644
22898 +index 68c69d5..2ee192b 100644
22899 --- a/include/net/ip_vs.h
22900 +++ b/include/net/ip_vs.h
22901 -@@ -510,7 +510,7 @@ struct ip_vs_conn {
22902 +@@ -599,7 +599,7 @@ struct ip_vs_conn {
22903 struct ip_vs_conn *control; /* Master control connection */
22904 atomic_t n_control; /* Number of controlled ones */
22905 struct ip_vs_dest *dest; /* real server */
22906 @@ -66998,7 +67892,7 @@ index ee75ccd..2cc2b95 100644
22907
22908 /* packet transmitter for different forwarding methods. If it
22909 mangles the packet, it must return NF_DROP or better NF_STOLEN,
22910 -@@ -648,7 +648,7 @@ struct ip_vs_dest {
22911 +@@ -737,7 +737,7 @@ struct ip_vs_dest {
22912 __be16 port; /* port number of the server */
22913 union nf_inet_addr addr; /* IP address of the server */
22914 volatile unsigned int flags; /* dest status flags */
22915 @@ -67046,10 +67940,10 @@ index 0dab173..1b76af0 100644
22916 struct pneigh_entry {
22917 struct pneigh_entry *next;
22918 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
22919 -index 95e6466..251016d 100644
22920 +index de644bc..666aed3 100644
22921 --- a/include/net/net_namespace.h
22922 +++ b/include/net/net_namespace.h
22923 -@@ -110,7 +110,7 @@ struct net {
22924 +@@ -115,7 +115,7 @@ struct net {
22925 #endif
22926 struct netns_ipvs *ipvs;
22927 struct sock *diag_nlsk;
22928 @@ -67058,7 +67952,7 @@ index 95e6466..251016d 100644
22929 };
22930
22931 /*
22932 -@@ -320,12 +320,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
22933 +@@ -330,12 +330,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
22934
22935 static inline int rt_genid(struct net *net)
22936 {
22937 @@ -67086,19 +67980,6 @@ index 8ba8ce2..99b7fff 100644
22938 struct sk_buff *skb, int offset, struct iovec *to,
22939 size_t len, struct dma_pinned_list *pinned_list);
22940
22941 -diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
22942 -index 252fd10..aa1421f 100644
22943 ---- a/include/net/netfilter/nf_queue.h
22944 -+++ b/include/net/netfilter/nf_queue.h
22945 -@@ -22,7 +22,7 @@ struct nf_queue_handler {
22946 - int (*outfn)(struct nf_queue_entry *entry,
22947 - unsigned int queuenum);
22948 - char *name;
22949 --};
22950 -+} __do_const;
22951 -
22952 - extern int nf_register_queue_handler(u_int8_t pf,
22953 - const struct nf_queue_handler *qh);
22954 diff --git a/include/net/netlink.h b/include/net/netlink.h
22955 index 9690b0f..87aded7 100644
22956 --- a/include/net/netlink.h
22957 @@ -67126,11 +68007,11 @@ index 2ae2b83..dbdc85e 100644
22958 #ifdef CONFIG_IP_MROUTE
22959 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
22960 diff --git a/include/net/protocol.h b/include/net/protocol.h
22961 -index 929528c..c84d4f6 100644
22962 +index 047c047..b9dad15 100644
22963 --- a/include/net/protocol.h
22964 +++ b/include/net/protocol.h
22965 -@@ -48,7 +48,7 @@ struct net_protocol {
22966 - int (*gro_complete)(struct sk_buff *skb);
22967 +@@ -44,7 +44,7 @@ struct net_protocol {
22968 + void (*err_handler)(struct sk_buff *skb, u32 info);
22969 unsigned int no_policy:1,
22970 netns_ok:1;
22971 -};
22972 @@ -67138,9 +68019,9 @@ index 929528c..c84d4f6 100644
22973
22974 #if IS_ENABLED(CONFIG_IPV6)
22975 struct inet6_protocol {
22976 -@@ -69,7 +69,7 @@ struct inet6_protocol {
22977 - int (*gro_complete)(struct sk_buff *skb);
22978 -
22979 +@@ -57,7 +57,7 @@ struct inet6_protocol {
22980 + u8 type, u8 code, int offset,
22981 + __be32 info);
22982 unsigned int flags; /* INET6_PROTO_xxx */
22983 -};
22984 +} __do_const;
22985 @@ -67148,10 +68029,10 @@ index 929528c..c84d4f6 100644
22986 #define INET6_PROTO_NOPOLICY 0x1
22987 #define INET6_PROTO_FINAL 0x2
22988 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
22989 -index 9c6414f..fbd0524 100644
22990 +index 7fdf298..197e9f7 100644
22991 --- a/include/net/sctp/sctp.h
22992 +++ b/include/net/sctp/sctp.h
22993 -@@ -318,9 +318,9 @@ do { \
22994 +@@ -330,9 +330,9 @@ do { \
22995
22996 #else /* SCTP_DEBUG */
22997
22998 @@ -67165,10 +68046,10 @@ index 9c6414f..fbd0524 100644
22999 #define SCTP_DISABLE_DEBUG
23000 #define SCTP_ASSERT(expr, str, func)
23001 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
23002 -index 64158aa..b65533c 100644
23003 +index fdeb85a..0c554d5 100644
23004 --- a/include/net/sctp/structs.h
23005 +++ b/include/net/sctp/structs.h
23006 -@@ -496,7 +496,7 @@ struct sctp_af {
23007 +@@ -497,7 +497,7 @@ struct sctp_af {
23008 int sockaddr_len;
23009 sa_family_t sa_family;
23010 struct list_head list;
23011 @@ -67177,7 +68058,7 @@ index 64158aa..b65533c 100644
23012
23013 struct sctp_af *sctp_get_af_specific(sa_family_t);
23014 int sctp_register_af(struct sctp_af *);
23015 -@@ -516,7 +516,7 @@ struct sctp_pf {
23016 +@@ -517,7 +517,7 @@ struct sctp_pf {
23017 struct sctp_association *asoc);
23018 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
23019 struct sctp_af *af;
23020 @@ -67187,10 +68068,10 @@ index 64158aa..b65533c 100644
23021
23022 /* Structure to track chunk fragments that have been acked, but peer
23023 diff --git a/include/net/sock.h b/include/net/sock.h
23024 -index c945fba..e162e56 100644
23025 +index 182ca99..b7dc290 100644
23026 --- a/include/net/sock.h
23027 +++ b/include/net/sock.h
23028 -@@ -304,7 +304,7 @@ struct sock {
23029 +@@ -322,7 +322,7 @@ struct sock {
23030 #ifdef CONFIG_RPS
23031 __u32 sk_rxhash;
23032 #endif
23033 @@ -67199,7 +68080,7 @@ index c945fba..e162e56 100644
23034 int sk_rcvbuf;
23035
23036 struct sk_filter __rcu *sk_filter;
23037 -@@ -1763,7 +1763,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
23038 +@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
23039 }
23040
23041 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
23042 @@ -67208,7 +68089,7 @@ index c945fba..e162e56 100644
23043 int copy, int offset)
23044 {
23045 if (skb->ip_summed == CHECKSUM_NONE) {
23046 -@@ -2022,7 +2022,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
23047 +@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
23048 }
23049 }
23050
23051 @@ -67218,10 +68099,10 @@ index c945fba..e162e56 100644
23052 /**
23053 * sk_page_frag - return an appropriate page_frag
23054 diff --git a/include/net/tcp.h b/include/net/tcp.h
23055 -index 4af45e3..af97861 100644
23056 +index aed42c7..43890c6 100644
23057 --- a/include/net/tcp.h
23058 +++ b/include/net/tcp.h
23059 -@@ -531,7 +531,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
23060 +@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
23061 extern void tcp_xmit_retransmit_queue(struct sock *);
23062 extern void tcp_simple_retransmit(struct sock *);
23063 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
23064 @@ -67230,7 +68111,7 @@ index 4af45e3..af97861 100644
23065
23066 extern void tcp_send_probe0(struct sock *);
23067 extern void tcp_send_partial(struct sock *);
23068 -@@ -702,8 +702,8 @@ struct tcp_skb_cb {
23069 +@@ -701,8 +701,8 @@ struct tcp_skb_cb {
23070 struct inet6_skb_parm h6;
23071 #endif
23072 } header; /* For incoming frames */
23073 @@ -67241,7 +68122,7 @@ index 4af45e3..af97861 100644
23074 __u32 when; /* used to compute rtt's */
23075 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
23076
23077 -@@ -717,7 +717,7 @@ struct tcp_skb_cb {
23078 +@@ -716,7 +716,7 @@ struct tcp_skb_cb {
23079
23080 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
23081 /* 1 byte hole */
23082 @@ -67307,10 +68188,10 @@ index 399162b..b337f1a 100644
23083 u8 qfull;
23084 enum fc_lport_state state;
23085 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
23086 -index 55367b0..d97bd2a 100644
23087 +index e65c62e..aa2e5a2 100644
23088 --- a/include/scsi/scsi_device.h
23089 +++ b/include/scsi/scsi_device.h
23090 -@@ -169,9 +169,9 @@ struct scsi_device {
23091 +@@ -170,9 +170,9 @@ struct scsi_device {
23092 unsigned int max_device_blocked; /* what device_blocked counts down from */
23093 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
23094
23095 @@ -67338,10 +68219,10 @@ index b797e8f..8e2c3aa 100644
23096
23097 /**
23098 diff --git a/include/sound/soc.h b/include/sound/soc.h
23099 -index 91244a0..89ca1a7 100644
23100 +index bc56738..a4be132 100644
23101 --- a/include/sound/soc.h
23102 +++ b/include/sound/soc.h
23103 -@@ -769,7 +769,7 @@ struct snd_soc_codec_driver {
23104 +@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
23105 /* probe ordering - for components with runtime dependencies */
23106 int probe_order;
23107 int remove_order;
23108 @@ -67350,7 +68231,7 @@ index 91244a0..89ca1a7 100644
23109
23110 /* SoC platform interface */
23111 struct snd_soc_platform_driver {
23112 -@@ -815,7 +815,7 @@ struct snd_soc_platform_driver {
23113 +@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
23114 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
23115 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
23116 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
23117 @@ -67360,10 +68241,10 @@ index 91244a0..89ca1a7 100644
23118 struct snd_soc_platform {
23119 const char *name;
23120 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
23121 -index fca8bbe..c0242ea 100644
23122 +index 663e34a..91b306a 100644
23123 --- a/include/target/target_core_base.h
23124 +++ b/include/target/target_core_base.h
23125 -@@ -760,7 +760,7 @@ struct se_device {
23126 +@@ -654,7 +654,7 @@ struct se_device {
23127 spinlock_t stats_lock;
23128 /* Active commands on this virtual SE device */
23129 atomic_t simple_cmds;
23130 @@ -67371,7 +68252,7 @@ index fca8bbe..c0242ea 100644
23131 + atomic_unchecked_t dev_ordered_id;
23132 atomic_t dev_ordered_sync;
23133 atomic_t dev_qf_count;
23134 - struct se_obj dev_obj;
23135 + int export_count;
23136 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
23137 new file mode 100644
23138 index 0000000..fb634b7
23139 @@ -67689,10 +68570,10 @@ index 0993a22..32ba2fe 100644
23140 void *pmi_pal;
23141 u8 *vbe_state_orig; /*
23142 diff --git a/init/Kconfig b/init/Kconfig
23143 -index 6fdd6e3..5b01610 100644
23144 +index be8b7f5..b13cb62 100644
23145 --- a/init/Kconfig
23146 +++ b/init/Kconfig
23147 -@@ -925,6 +925,7 @@ endif # CGROUPS
23148 +@@ -990,6 +990,7 @@ endif # CGROUPS
23149
23150 config CHECKPOINT_RESTORE
23151 bool "Checkpoint/restore support" if EXPERT
23152 @@ -67700,7 +68581,7 @@ index 6fdd6e3..5b01610 100644
23153 default n
23154 help
23155 Enables additional kernel features in a sake of checkpoint/restore.
23156 -@@ -1016,6 +1017,8 @@ config UIDGID_CONVERTED
23157 +@@ -1079,6 +1080,8 @@ config UIDGID_CONVERTED
23158 depends on OCFS2_FS = n
23159 depends on XFS_FS = n
23160
23161 @@ -67709,7 +68590,7 @@ index 6fdd6e3..5b01610 100644
23162 config UIDGID_STRICT_TYPE_CHECKS
23163 bool "Require conversions between uid/gids and their internal representation"
23164 depends on UIDGID_CONVERTED
23165 -@@ -1405,7 +1408,7 @@ config SLUB_DEBUG
23166 +@@ -1468,7 +1471,7 @@ config SLUB_DEBUG
23167
23168 config COMPAT_BRK
23169 bool "Disable heap randomization"
23170 @@ -67718,7 +68599,7 @@ index 6fdd6e3..5b01610 100644
23171 help
23172 Randomizing heap placement makes heap exploits harder, but it
23173 also breaks ancient binaries (including anything libc5 based).
23174 -@@ -1648,7 +1651,7 @@ config INIT_ALL_POSSIBLE
23175 +@@ -1711,7 +1714,7 @@ config INIT_ALL_POSSIBLE
23176 config STOP_MACHINE
23177 bool
23178 default y
23179 @@ -67742,10 +68623,10 @@ index 7bc47ee..6da2dc7 100644
23180 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
23181 obj-y += noinitramfs.o
23182 diff --git a/init/do_mounts.c b/init/do_mounts.c
23183 -index f8a6642..4e5ee1b 100644
23184 +index 1d1b634..a1c810f 100644
23185 --- a/init/do_mounts.c
23186 +++ b/init/do_mounts.c
23187 -@@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
23188 +@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
23189 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
23190 {
23191 struct super_block *s;
23192 @@ -67759,7 +68640,7 @@ index f8a6642..4e5ee1b 100644
23193 s = current->fs->pwd.dentry->d_sb;
23194 ROOT_DEV = s->s_dev;
23195 printk(KERN_INFO
23196 -@@ -461,18 +461,18 @@ void __init change_floppy(char *fmt, ...)
23197 +@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
23198 va_start(args, fmt);
23199 vsprintf(buf, fmt, args);
23200 va_end(args);
23201 @@ -67781,7 +68662,7 @@ index f8a6642..4e5ee1b 100644
23202 termios.c_lflag |= ICANON;
23203 sys_ioctl(fd, TCSETSF, (long)&termios);
23204 sys_close(fd);
23205 -@@ -566,6 +566,6 @@ void __init prepare_namespace(void)
23206 +@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
23207 mount_root();
23208 out:
23209 devtmpfs_mount("dev");
23210 @@ -68048,7 +68929,7 @@ index 84c6bf1..8899338 100644
23211 next_state = Reset;
23212 return 0;
23213 diff --git a/init/main.c b/init/main.c
23214 -index 857166f..9df1d8e 100644
23215 +index cee4b5c..47f445e 100644
23216 --- a/init/main.c
23217 +++ b/init/main.c
23218 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
23219 @@ -68193,7 +69074,7 @@ index 857166f..9df1d8e 100644
23220 }
23221
23222 static int run_init_process(const char *init_filename)
23223 -@@ -876,7 +950,7 @@ static noinline void __init kernel_init_freeable(void)
23224 +@@ -877,7 +951,7 @@ static noinline void __init kernel_init_freeable(void)
23225 do_basic_setup();
23226
23227 /* Open the /dev/console on the rootfs, this should never fail */
23228 @@ -68202,7 +69083,7 @@ index 857166f..9df1d8e 100644
23229 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
23230
23231 (void) sys_dup(0);
23232 -@@ -889,11 +963,13 @@ static noinline void __init kernel_init_freeable(void)
23233 +@@ -890,11 +964,13 @@ static noinline void __init kernel_init_freeable(void)
23234 if (!ramdisk_execute_command)
23235 ramdisk_execute_command = "/init";
23236
23237 @@ -68230,7 +69111,7 @@ index 71a3ca1..cc330ee 100644
23238 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
23239 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
23240 diff --git a/ipc/msg.c b/ipc/msg.c
23241 -index a71af5a..a90a110 100644
23242 +index 950572f..266c15f 100644
23243 --- a/ipc/msg.c
23244 +++ b/ipc/msg.c
23245 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
23246 @@ -68291,7 +69172,7 @@ index 58d31f1..cce7a55 100644
23247 sem_params.flg = semflg;
23248 sem_params.u.nsems = nsems;
23249 diff --git a/ipc/shm.c b/ipc/shm.c
23250 -index dff40c9..9450e27 100644
23251 +index 4fa6d8f..38dfd0c 100644
23252 --- a/ipc/shm.c
23253 +++ b/ipc/shm.c
23254 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
23255 @@ -68309,7 +69190,7 @@ index dff40c9..9450e27 100644
23256 void shm_init_ns(struct ipc_namespace *ns)
23257 {
23258 ns->shm_ctlmax = SHMMAX;
23259 -@@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
23260 +@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
23261 shp->shm_lprid = 0;
23262 shp->shm_atim = shp->shm_dtim = 0;
23263 shp->shm_ctim = get_seconds();
23264 @@ -68324,7 +69205,7 @@ index dff40c9..9450e27 100644
23265 shp->shm_segsz = size;
23266 shp->shm_nattch = 0;
23267 shp->shm_file = file;
23268 -@@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
23269 +@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
23270 return 0;
23271 }
23272
23273 @@ -68349,7 +69230,7 @@ index dff40c9..9450e27 100644
23274 shm_params.key = key;
23275 shm_params.flg = shmflg;
23276 shm_params.u.size = size;
23277 -@@ -1003,6 +1020,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
23278 +@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
23279 f_mode = FMODE_READ | FMODE_WRITE;
23280 }
23281 if (shmflg & SHM_EXEC) {
23282 @@ -68362,7 +69243,7 @@ index dff40c9..9450e27 100644
23283 prot |= PROT_EXEC;
23284 acc_mode |= S_IXUGO;
23285 }
23286 -@@ -1026,9 +1049,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
23287 +@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
23288 if (err)
23289 goto out_unlock;
23290
23291 @@ -68398,7 +69279,7 @@ index 051e071..15e0920 100644
23292 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
23293 set_fs(fs);
23294 diff --git a/kernel/audit.c b/kernel/audit.c
23295 -index 40414e9..c920b72 100644
23296 +index d596e53..dbef3c3 100644
23297 --- a/kernel/audit.c
23298 +++ b/kernel/audit.c
23299 @@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
23300 @@ -68428,7 +69309,7 @@ index 40414e9..c920b72 100644
23301 audit_rate_limit,
23302 audit_backlog_limit);
23303 audit_panic(message);
23304 -@@ -677,7 +677,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
23305 +@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
23306 status_set.pid = audit_pid;
23307 status_set.rate_limit = audit_rate_limit;
23308 status_set.backlog_limit = audit_backlog_limit;
23309 @@ -68438,10 +69319,10 @@ index 40414e9..c920b72 100644
23310 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
23311 &status_set, sizeof(status_set));
23312 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
23313 -index 157e989..b28b365 100644
23314 +index a371f85..da826c1 100644
23315 --- a/kernel/auditsc.c
23316 +++ b/kernel/auditsc.c
23317 -@@ -2352,7 +2352,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
23318 +@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
23319 }
23320
23321 /* global counter which is incremented every time something logs in */
23322 @@ -68450,7 +69331,7 @@ index 157e989..b28b365 100644
23323
23324 /**
23325 * audit_set_loginuid - set current task's audit_context loginuid
23326 -@@ -2376,7 +2376,7 @@ int audit_set_loginuid(kuid_t loginuid)
23327 +@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
23328 return -EPERM;
23329 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
23330
23331 @@ -68556,10 +69437,10 @@ index 493d972..f87dfbd 100644
23332 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
23333 +}
23334 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
23335 -index ad99830..992d8a7 100644
23336 +index 4855892..30d23b4 100644
23337 --- a/kernel/cgroup.c
23338 +++ b/kernel/cgroup.c
23339 -@@ -5514,7 +5514,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
23340 +@@ -5535,7 +5535,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
23341 struct css_set *cg = link->cg;
23342 struct task_struct *task;
23343 int count = 0;
23344 @@ -68569,7 +69450,7 @@ index ad99830..992d8a7 100644
23345 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
23346 seq_puts(seq, " ...\n");
23347 diff --git a/kernel/compat.c b/kernel/compat.c
23348 -index c28a306..b4d0cf3 100644
23349 +index 36700e9..73d770c 100644
23350 --- a/kernel/compat.c
23351 +++ b/kernel/compat.c
23352 @@ -13,6 +13,7 @@
23353 @@ -68625,7 +69506,7 @@ index c28a306..b4d0cf3 100644
23354 set_fs(old_fs);
23355
23356 if (ret)
23357 -@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
23358 +@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
23359 set_fs (KERNEL_DS);
23360 ret = sys_wait4(pid,
23361 (stat_addr ?
23362 @@ -68636,7 +69517,7 @@ index c28a306..b4d0cf3 100644
23363 set_fs (old_fs);
23364
23365 if (ret > 0) {
23366 -@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
23367 +@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
23368 memset(&info, 0, sizeof(info));
23369
23370 set_fs(KERNEL_DS);
23371 @@ -68647,7 +69528,7 @@ index c28a306..b4d0cf3 100644
23372 set_fs(old_fs);
23373
23374 if ((ret < 0) || (info.si_signo == 0))
23375 -@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
23376 +@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
23377 oldfs = get_fs();
23378 set_fs(KERNEL_DS);
23379 err = sys_timer_settime(timer_id, flags,
23380 @@ -68658,7 +69539,7 @@ index c28a306..b4d0cf3 100644
23381 set_fs(oldfs);
23382 if (!err && old && put_compat_itimerspec(old, &oldts))
23383 return -EFAULT;
23384 -@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
23385 +@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
23386 oldfs = get_fs();
23387 set_fs(KERNEL_DS);
23388 err = sys_timer_gettime(timer_id,
23389 @@ -68667,7 +69548,7 @@ index c28a306..b4d0cf3 100644
23390 set_fs(oldfs);
23391 if (!err && put_compat_itimerspec(setting, &ts))
23392 return -EFAULT;
23393 -@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
23394 +@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
23395 oldfs = get_fs();
23396 set_fs(KERNEL_DS);
23397 err = sys_clock_settime(which_clock,
23398 @@ -68676,7 +69557,7 @@ index c28a306..b4d0cf3 100644
23399 set_fs(oldfs);
23400 return err;
23401 }
23402 -@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
23403 +@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
23404 oldfs = get_fs();
23405 set_fs(KERNEL_DS);
23406 err = sys_clock_gettime(which_clock,
23407 @@ -68685,7 +69566,7 @@ index c28a306..b4d0cf3 100644
23408 set_fs(oldfs);
23409 if (!err && put_compat_timespec(&ts, tp))
23410 return -EFAULT;
23411 -@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
23412 +@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
23413
23414 oldfs = get_fs();
23415 set_fs(KERNEL_DS);
23416 @@ -68694,7 +69575,7 @@ index c28a306..b4d0cf3 100644
23417 set_fs(oldfs);
23418
23419 err = compat_put_timex(utp, &txc);
23420 -@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
23421 +@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
23422 oldfs = get_fs();
23423 set_fs(KERNEL_DS);
23424 err = sys_clock_getres(which_clock,
23425 @@ -68703,7 +69584,7 @@ index c28a306..b4d0cf3 100644
23426 set_fs(oldfs);
23427 if (!err && tp && put_compat_timespec(&ts, tp))
23428 return -EFAULT;
23429 -@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
23430 +@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
23431 long err;
23432 mm_segment_t oldfs;
23433 struct timespec tu;
23434 @@ -68715,7 +69596,7 @@ index c28a306..b4d0cf3 100644
23435 oldfs = get_fs();
23436 set_fs(KERNEL_DS);
23437 err = clock_nanosleep_restart(restart);
23438 -@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
23439 +@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
23440 oldfs = get_fs();
23441 set_fs(KERNEL_DS);
23442 err = sys_clock_nanosleep(which_clock, flags,
23443 @@ -68751,10 +69632,10 @@ index 42e8fa0..9e7406b 100644
23444 return -ENOMEM;
23445
23446 diff --git a/kernel/cred.c b/kernel/cred.c
23447 -index 48cea3d..3476734 100644
23448 +index e0573a4..eefe488 100644
23449 --- a/kernel/cred.c
23450 +++ b/kernel/cred.c
23451 -@@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
23452 +@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
23453 validate_creds(cred);
23454 alter_cred_subscribers(cred, -1);
23455 put_cred(cred);
23456 @@ -68771,7 +69652,7 @@ index 48cea3d..3476734 100644
23457 }
23458
23459 /**
23460 -@@ -469,7 +479,7 @@ error_put:
23461 +@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
23462 * Always returns 0 thus allowing this function to be tail-called at the end
23463 * of, say, sys_setgid().
23464 */
23465 @@ -68780,7 +69661,7 @@ index 48cea3d..3476734 100644
23466 {
23467 struct task_struct *task = current;
23468 const struct cred *old = task->real_cred;
23469 -@@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
23470 +@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
23471
23472 get_cred(new); /* we will require a ref for the subj creds too */
23473
23474 @@ -68789,7 +69670,7 @@ index 48cea3d..3476734 100644
23475 /* dumpability changes */
23476 if (!uid_eq(old->euid, new->euid) ||
23477 !gid_eq(old->egid, new->egid) ||
23478 -@@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
23479 +@@ -479,6 +491,101 @@ int commit_creds(struct cred *new)
23480 put_cred(old);
23481 return 0;
23482 }
23483 @@ -68978,7 +69859,7 @@ index 8875254..7cf4928 100644
23484 #ifdef CONFIG_MODULE_UNLOAD
23485 {
23486 diff --git a/kernel/events/core.c b/kernel/events/core.c
23487 -index dbccf83..8c66482 100644
23488 +index 7b6646a..3cb1135 100644
23489 --- a/kernel/events/core.c
23490 +++ b/kernel/events/core.c
23491 @@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
23492 @@ -68990,7 +69871,7 @@ index dbccf83..8c66482 100644
23493
23494 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
23495 enum event_type_t event_type);
23496 -@@ -2668,7 +2668,7 @@ static void __perf_event_read(void *info)
23497 +@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
23498
23499 static inline u64 perf_event_count(struct perf_event *event)
23500 {
23501 @@ -68999,7 +69880,7 @@ index dbccf83..8c66482 100644
23502 }
23503
23504 static u64 perf_event_read(struct perf_event *event)
23505 -@@ -2998,9 +2998,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
23506 +@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
23507 mutex_lock(&event->child_mutex);
23508 total += perf_event_read(event);
23509 *enabled += event->total_time_enabled +
23510 @@ -69011,7 +69892,7 @@ index dbccf83..8c66482 100644
23511
23512 list_for_each_entry(child, &event->child_list, child_list) {
23513 total += perf_event_read(child);
23514 -@@ -3403,10 +3403,10 @@ void perf_event_update_userpage(struct perf_event *event)
23515 +@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
23516 userpg->offset -= local64_read(&event->hw.prev_count);
23517
23518 userpg->time_enabled = enabled +
23519 @@ -69024,7 +69905,7 @@ index dbccf83..8c66482 100644
23520
23521 arch_perf_update_userpage(userpg, now);
23522
23523 -@@ -3965,11 +3965,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
23524 +@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
23525 values[n++] = perf_event_count(event);
23526 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
23527 values[n++] = enabled +
23528 @@ -69038,7 +69919,7 @@ index dbccf83..8c66482 100644
23529 }
23530 if (read_format & PERF_FORMAT_ID)
23531 values[n++] = primary_event_id(event);
23532 -@@ -4712,12 +4712,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
23533 +@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
23534 * need to add enough zero bytes after the string to handle
23535 * the 64bit alignment we do later.
23536 */
23537 @@ -69053,16 +69934,16 @@ index dbccf83..8c66482 100644
23538 if (IS_ERR(name)) {
23539 name = strncpy(tmp, "//toolong", sizeof(tmp));
23540 goto got_name;
23541 -@@ -6156,7 +6156,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
23542 +@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
23543 event->parent = parent_event;
23544
23545 - event->ns = get_pid_ns(current->nsproxy->pid_ns);
23546 + event->ns = get_pid_ns(task_active_pid_ns(current));
23547 - event->id = atomic64_inc_return(&perf_event_id);
23548 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
23549
23550 event->state = PERF_EVENT_STATE_INACTIVE;
23551
23552 -@@ -6774,10 +6774,10 @@ static void sync_child_event(struct perf_event *child_event,
23553 +@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
23554 /*
23555 * Add back the child's count to the parent's count:
23556 */
23557 @@ -69077,10 +69958,10 @@ index dbccf83..8c66482 100644
23558
23559 /*
23560 diff --git a/kernel/exit.c b/kernel/exit.c
23561 -index 346616c..f103b28 100644
23562 +index b4df219..f13c02d 100644
23563 --- a/kernel/exit.c
23564 +++ b/kernel/exit.c
23565 -@@ -182,6 +182,10 @@ void release_task(struct task_struct * p)
23566 +@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
23567 struct task_struct *leader;
23568 int zap_leader;
23569 repeat:
23570 @@ -69091,7 +69972,7 @@ index 346616c..f103b28 100644
23571 /* don't need to get the RCU readlock here - the process is dead and
23572 * can't be modifying its own credentials. But shut RCU-lockdep up */
23573 rcu_read_lock();
23574 -@@ -394,7 +398,7 @@ int allow_signal(int sig)
23575 +@@ -338,7 +342,7 @@ int allow_signal(int sig)
23576 * know it'll be handled, so that they don't get converted to
23577 * SIGKILL or just silently dropped.
23578 */
23579 @@ -69100,17 +69981,7 @@ index 346616c..f103b28 100644
23580 recalc_sigpending();
23581 spin_unlock_irq(&current->sighand->siglock);
23582 return 0;
23583 -@@ -430,6 +434,9 @@ void daemonize(const char *name, ...)
23584 - vsnprintf(current->comm, sizeof(current->comm), name, args);
23585 - va_end(args);
23586 -
23587 -+ gr_put_exec_file(current);
23588 -+ gr_set_kernel_label(current);
23589 -+
23590 - /*
23591 - * If we were started as result of loading a module, close all of the
23592 - * user space pages. We don't need them, and if we didn't close them
23593 -@@ -812,6 +819,8 @@ void do_exit(long code)
23594 +@@ -708,6 +712,8 @@ void do_exit(long code)
23595 struct task_struct *tsk = current;
23596 int group_dead;
23597
23598 @@ -69119,7 +69990,7 @@ index 346616c..f103b28 100644
23599 profile_task_exit(tsk);
23600
23601 WARN_ON(blk_needs_flush_plug(tsk));
23602 -@@ -828,7 +837,6 @@ void do_exit(long code)
23603 +@@ -724,7 +730,6 @@ void do_exit(long code)
23604 * mm_release()->clear_child_tid() from writing to a user-controlled
23605 * kernel address.
23606 */
23607 @@ -69127,7 +69998,7 @@ index 346616c..f103b28 100644
23608
23609 ptrace_event(PTRACE_EVENT_EXIT, code);
23610
23611 -@@ -887,6 +895,9 @@ void do_exit(long code)
23612 +@@ -783,6 +788,9 @@ void do_exit(long code)
23613 tsk->exit_code = code;
23614 taskstats_exit(tsk, group_dead);
23615
23616 @@ -69137,7 +70008,7 @@ index 346616c..f103b28 100644
23617 exit_mm(tsk);
23618
23619 if (group_dead)
23620 -@@ -1007,7 +1018,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
23621 +@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
23622 * Take down every thread in the group. This is called by fatal signals
23623 * as well as by sys_exit_group (below).
23624 */
23625 @@ -69147,7 +70018,7 @@ index 346616c..f103b28 100644
23626 {
23627 struct signal_struct *sig = current->signal;
23628 diff --git a/kernel/fork.c b/kernel/fork.c
23629 -index acc4cb6..b524cb5 100644
23630 +index c535f33..1d768f9 100644
23631 --- a/kernel/fork.c
23632 +++ b/kernel/fork.c
23633 @@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
23634 @@ -69241,9 +70112,9 @@ index acc4cb6..b524cb5 100644
23635 - unsigned long charge;
23636 - struct mempolicy *pol;
23637
23638 + uprobe_start_dup_mmap();
23639 down_write(&oldmm->mmap_sem);
23640 - flush_cache_dup_mm(oldmm);
23641 -@@ -363,8 +431,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23642 +@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23643 mm->locked_vm = 0;
23644 mm->mmap = NULL;
23645 mm->mmap_cache = NULL;
23646 @@ -69254,7 +70125,7 @@ index acc4cb6..b524cb5 100644
23647 mm->map_count = 0;
23648 cpumask_clear(mm_cpumask(mm));
23649 mm->mm_rb = RB_ROOT;
23650 -@@ -380,57 +448,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23651 +@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23652
23653 prev = NULL;
23654 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
23655 @@ -69316,7 +70187,7 @@ index acc4cb6..b524cb5 100644
23656 }
23657
23658 /*
23659 -@@ -462,6 +488,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23660 +@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
23661 if (retval)
23662 goto out;
23663 }
23664 @@ -69348,9 +70219,9 @@ index acc4cb6..b524cb5 100644
23665 /* a new mm has just been created */
23666 arch_dup_mmap(oldmm, mm);
23667 retval = 0;
23668 -@@ -470,14 +521,6 @@ out:
23669 - flush_tlb_mm(oldmm);
23670 +@@ -472,14 +523,6 @@ out:
23671 up_write(&oldmm->mmap_sem);
23672 + uprobe_end_dup_mmap();
23673 return retval;
23674 -fail_nomem_anon_vma_fork:
23675 - mpol_put(pol);
23676 @@ -69363,7 +70234,7 @@ index acc4cb6..b524cb5 100644
23677 }
23678
23679 static inline int mm_alloc_pgd(struct mm_struct *mm)
23680 -@@ -692,8 +735,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
23681 +@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
23682 return ERR_PTR(err);
23683
23684 mm = get_task_mm(task);
23685 @@ -69374,7 +70245,7 @@ index acc4cb6..b524cb5 100644
23686 mmput(mm);
23687 mm = ERR_PTR(-EACCES);
23688 }
23689 -@@ -912,13 +955,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
23690 +@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
23691 spin_unlock(&fs->lock);
23692 return -EAGAIN;
23693 }
23694 @@ -69396,7 +70267,7 @@ index acc4cb6..b524cb5 100644
23695 return 0;
23696 }
23697
23698 -@@ -1183,6 +1233,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
23699 +@@ -1193,6 +1243,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
23700 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
23701 #endif
23702 retval = -EAGAIN;
23703 @@ -69406,7 +70277,7 @@ index acc4cb6..b524cb5 100644
23704 if (atomic_read(&p->real_cred->user->processes) >=
23705 task_rlimit(p, RLIMIT_NPROC)) {
23706 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
23707 -@@ -1422,6 +1475,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
23708 +@@ -1432,6 +1485,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
23709 goto bad_fork_free_pid;
23710 }
23711
23712 @@ -69418,7 +70289,7 @@ index acc4cb6..b524cb5 100644
23713 if (clone_flags & CLONE_THREAD) {
23714 current->signal->nr_threads++;
23715 atomic_inc(&current->signal->live);
23716 -@@ -1505,6 +1563,8 @@ bad_fork_cleanup_count:
23717 +@@ -1515,6 +1573,8 @@ bad_fork_cleanup_count:
23718 bad_fork_free:
23719 free_task(p);
23720 fork_out:
23721 @@ -69427,7 +70298,31 @@ index acc4cb6..b524cb5 100644
23722 return ERR_PTR(retval);
23723 }
23724
23725 -@@ -1605,6 +1665,8 @@ long do_fork(unsigned long clone_flags,
23726 +@@ -1565,6 +1625,23 @@ long do_fork(unsigned long clone_flags,
23727 + return -EINVAL;
23728 + }
23729 +
23730 ++#ifdef CONFIG_GRKERNSEC
23731 ++ if (clone_flags & CLONE_NEWUSER) {
23732 ++ /*
23733 ++ * This doesn't really inspire confidence:
23734 ++ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
23735 ++ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
23736 ++ * Increases kernel attack surface in areas developers
23737 ++ * previously cared little about ("low importance due
23738 ++ * to requiring "root" capability")
23739 ++ * To be removed when this code receives *proper* review
23740 ++ */
23741 ++ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
23742 ++ !capable(CAP_SETGID))
23743 ++ return -EPERM;
23744 ++ }
23745 ++#endif
23746 ++
23747 + /*
23748 + * Determine whether and which event to report to ptracer. When
23749 + * called from kernel_thread or CLONE_UNTRACED is explicitly
23750 +@@ -1599,6 +1676,8 @@ long do_fork(unsigned long clone_flags,
23751 if (clone_flags & CLONE_PARENT_SETTID)
23752 put_user(nr, parent_tidptr);
23753
23754 @@ -69436,7 +70331,7 @@ index acc4cb6..b524cb5 100644
23755 if (clone_flags & CLONE_VFORK) {
23756 p->vfork_done = &vfork;
23757 init_completion(&vfork);
23758 -@@ -1714,7 +1776,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
23759 +@@ -1752,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
23760 return 0;
23761
23762 /* don't need lock here; in the worst case we'll do useless copy */
23763 @@ -69445,7 +70340,7 @@ index acc4cb6..b524cb5 100644
23764 return 0;
23765
23766 *new_fsp = copy_fs_struct(fs);
23767 -@@ -1803,7 +1865,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
23768 +@@ -1866,7 +1945,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
23769 fs = current->fs;
23770 spin_lock(&fs->lock);
23771 current->fs = new_fs;
23772 @@ -69525,7 +70420,7 @@ index 9b22d03..6295b62 100644
23773 prev->next = info->next;
23774 else
23775 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
23776 -index 6db7a5e..25b6648 100644
23777 +index 6db7a5e..0d600bd 100644
23778 --- a/kernel/hrtimer.c
23779 +++ b/kernel/hrtimer.c
23780 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
23781 @@ -69537,6 +70432,15 @@ index 6db7a5e..25b6648 100644
23782 {
23783 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
23784
23785 +@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
23786 + return NOTIFY_OK;
23787 + }
23788 +
23789 +-static struct notifier_block __cpuinitdata hrtimers_nb = {
23790 ++static struct notifier_block hrtimers_nb = {
23791 + .notifier_call = hrtimer_cpu_notify,
23792 + };
23793 +
23794 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
23795 index 60f48fa..7f3a770 100644
23796 --- a/kernel/jump_label.c
23797 @@ -69687,10 +70591,10 @@ index 2169fee..45c017a 100644
23798 return -ENOMEM;
23799 reset_iter(iter, 0);
23800 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
23801 -index 30b7b22..c726387 100644
23802 +index e30ac0f..3528cac 100644
23803 --- a/kernel/kcmp.c
23804 +++ b/kernel/kcmp.c
23805 -@@ -98,6 +98,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
23806 +@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
23807 struct task_struct *task1, *task2;
23808 int ret;
23809
23810 @@ -69716,7 +70620,7 @@ index 5e4bd78..00c5b91 100644
23811
23812 /* Don't allow clients that don't understand the native
23813 diff --git a/kernel/kmod.c b/kernel/kmod.c
23814 -index 1c317e3..4a92a55 100644
23815 +index 0023a87..3fe3781 100644
23816 --- a/kernel/kmod.c
23817 +++ b/kernel/kmod.c
23818 @@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
23819 @@ -69908,10 +70812,10 @@ index 098f396..fe85ff1 100644
23820 head = &kprobe_table[i];
23821 preempt_disable();
23822 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
23823 -index 4e316e1..5501eef 100644
23824 +index 6ada93c..55baf4d 100644
23825 --- a/kernel/ksysfs.c
23826 +++ b/kernel/ksysfs.c
23827 -@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
23828 +@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
23829 {
23830 if (count+1 > UEVENT_HELPER_PATH_LEN)
23831 return -ENOENT;
23832 @@ -69953,18 +70857,9 @@ index 7981e5b..7f2105c 100644
23833 printk("\nacquire class [%p] %s", class->key, class->name);
23834 if (class->name_version > 1)
23835 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
23836 -index 91c32a0..7b88d63 100644
23837 +index b2c71c5..7b88d63 100644
23838 --- a/kernel/lockdep_proc.c
23839 +++ b/kernel/lockdep_proc.c
23840 -@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
23841 -
23842 - static void print_name(struct seq_file *m, struct lock_class *class)
23843 - {
23844 -- char str[128];
23845 -+ char str[KSYM_NAME_LEN];
23846 - const char *name = class->name;
23847 -
23848 - if (!name) {
23849 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
23850 return 0;
23851 }
23852 @@ -70011,18 +70906,18 @@ index 91c32a0..7b88d63 100644
23853 seq_printf(m, "%40s %14lu %29s %pS\n",
23854 name, stats->contending_point[i],
23855 diff --git a/kernel/module.c b/kernel/module.c
23856 -index 3e544f4..34c3008 100644
23857 +index eab0827..75ede66 100644
23858 --- a/kernel/module.c
23859 +++ b/kernel/module.c
23860 -@@ -59,6 +59,7 @@
23861 +@@ -61,6 +61,7 @@
23862 #include <linux/pfn.h>
23863 #include <linux/bsearch.h>
23864 #include <linux/fips.h>
23865 +#include <linux/grsecurity.h>
23866 + #include <uapi/linux/module.h>
23867 #include "module-internal.h"
23868
23869 - #define CREATE_TRACE_POINTS
23870 -@@ -153,7 +154,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
23871 +@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
23872
23873 /* Bounds of module allocation, for speeding __module_address.
23874 * Protected by module_mutex. */
23875 @@ -70032,7 +70927,7 @@ index 3e544f4..34c3008 100644
23876
23877 int register_module_notifier(struct notifier_block * nb)
23878 {
23879 -@@ -319,7 +321,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
23880 +@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
23881 return true;
23882
23883 list_for_each_entry_rcu(mod, &modules, list) {
23884 @@ -70041,7 +70936,7 @@ index 3e544f4..34c3008 100644
23885 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
23886 NOT_GPL_ONLY, false },
23887 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
23888 -@@ -344,7 +346,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
23889 +@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
23890 if (mod->state == MODULE_STATE_UNFORMED)
23891 continue;
23892
23893 @@ -70309,7 +71204,7 @@ index 3e544f4..34c3008 100644
23894 info->index.sym) | INIT_OFFSET_MASK;
23895 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
23896
23897 -@@ -2326,13 +2344,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
23898 +@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
23899 }
23900
23901 /* Append room for core symbols at end of core part. */
23902 @@ -70327,7 +71222,7 @@ index 3e544f4..34c3008 100644
23903 info->index.str) | INIT_OFFSET_MASK;
23904 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
23905 }
23906 -@@ -2350,12 +2368,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
23907 +@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
23908 /* Make sure we get permanent strtab: don't use info->strtab. */
23909 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
23910
23911 @@ -70342,9 +71237,9 @@ index 3e544f4..34c3008 100644
23912 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
23913 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
23914 src = mod->symtab;
23915 - *s++ = 0;
23916 for (ndst = i = 0; i < mod->num_symtab; i++) {
23917 -@@ -2368,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
23918 + if (i == 0 ||
23919 +@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
23920 }
23921 }
23922 mod->core_num_syms = ndst;
23923 @@ -70353,8 +71248,8 @@ index 3e544f4..34c3008 100644
23924 }
23925 #else
23926 static inline void layout_symtab(struct module *mod, struct load_info *info)
23927 -@@ -2401,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
23928 - return size == 0 ? NULL : vmalloc_exec(size);
23929 +@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
23930 + return vmalloc_exec(size);
23931 }
23932
23933 -static void *module_alloc_update_bounds(unsigned long size)
23934 @@ -70392,8 +71287,8 @@ index 3e544f4..34c3008 100644
23935 mutex_unlock(&module_mutex);
23936 }
23937 return ret;
23938 -@@ -2630,8 +2668,14 @@ static struct module *setup_load_info(struct load_info *info)
23939 - static int check_modinfo(struct module *mod, struct load_info *info)
23940 +@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
23941 + static int check_modinfo(struct module *mod, struct load_info *info, int flags)
23942 {
23943 const char *modmagic = get_modinfo(info, "vermagic");
23944 + const char *license = get_modinfo(info, "license");
23945 @@ -70404,10 +71299,10 @@ index 3e544f4..34c3008 100644
23946 + return -ENOEXEC;
23947 +#endif
23948 +
23949 - /* This is allowed: modprobe --force will invalidate it. */
23950 - if (!modmagic) {
23951 - err = try_to_force_load(mod, "bad vermagic");
23952 -@@ -2654,7 +2698,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
23953 + if (flags & MODULE_INIT_IGNORE_VERMAGIC)
23954 + modmagic = NULL;
23955 +
23956 +@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
23957 }
23958
23959 /* Set up license info based on the info section */
23960 @@ -70416,7 +71311,7 @@ index 3e544f4..34c3008 100644
23961
23962 return 0;
23963 }
23964 -@@ -2748,7 +2792,7 @@ static int move_module(struct module *mod, struct load_info *info)
23965 +@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
23966 void *ptr;
23967
23968 /* Do the allocs. */
23969 @@ -70425,7 +71320,7 @@ index 3e544f4..34c3008 100644
23970 /*
23971 * The pointer to this block is stored in the module structure
23972 * which is inside the block. Just mark it as not being a
23973 -@@ -2758,10 +2802,10 @@ static int move_module(struct module *mod, struct load_info *info)
23974 +@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
23975 if (!ptr)
23976 return -ENOMEM;
23977
23978 @@ -70434,30 +71329,34 @@ index 3e544f4..34c3008 100644
23979 + memset(ptr, 0, mod->core_size_rw);
23980 + mod->module_core_rw = ptr;
23981
23982 -- ptr = module_alloc_update_bounds(mod->init_size);
23983 -+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
23984 - /*
23985 - * The pointer to this block is stored in the module structure
23986 - * which is inside the block. This block doesn't need to be
23987 -@@ -2769,12 +2813,39 @@ static int move_module(struct module *mod, struct load_info *info)
23988 - * after the module is initialized.
23989 - */
23990 - kmemleak_ignore(ptr);
23991 -- if (!ptr && mod->init_size) {
23992 -- module_free(mod, mod->module_core);
23993 -+ if (!ptr && mod->init_size_rw) {
23994 -+ module_free(mod, mod->module_core_rw);
23995 - return -ENOMEM;
23996 - }
23997 -- memset(ptr, 0, mod->init_size);
23998 -- mod->module_init = ptr;
23999 -+ memset(ptr, 0, mod->init_size_rw);
24000 -+ mod->module_init_rw = ptr;
24001 +- if (mod->init_size) {
24002 +- ptr = module_alloc_update_bounds(mod->init_size);
24003 ++ if (mod->init_size_rw) {
24004 ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
24005 + /*
24006 + * The pointer to this block is stored in the module structure
24007 + * which is inside the block. This block doesn't need to be
24008 +@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
24009 + */
24010 + kmemleak_ignore(ptr);
24011 + if (!ptr) {
24012 +- module_free(mod, mod->module_core);
24013 ++ module_free(mod, mod->module_core_rw);
24014 + return -ENOMEM;
24015 + }
24016 +- memset(ptr, 0, mod->init_size);
24017 +- mod->module_init = ptr;
24018 ++ memset(ptr, 0, mod->init_size_rw);
24019 ++ mod->module_init_rw = ptr;
24020 + } else
24021 +- mod->module_init = NULL;
24022 ++ mod->module_init_rw = NULL;
24023 +
24024 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
24025 + kmemleak_not_leak(ptr);
24026 + if (!ptr) {
24027 -+ module_free(mod, mod->module_init_rw);
24028 ++ if (mod->module_init_rw)
24029 ++ module_free(mod, mod->module_init_rw);
24030 + module_free(mod, mod->module_core_rw);
24031 + return -ENOMEM;
24032 + }
24033 @@ -70467,23 +71366,27 @@ index 3e544f4..34c3008 100644
24034 + pax_close_kernel();
24035 + mod->module_core_rx = ptr;
24036 +
24037 -+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
24038 -+ kmemleak_ignore(ptr);
24039 -+ if (!ptr && mod->init_size_rx) {
24040 -+ module_free_exec(mod, mod->module_core_rx);
24041 -+ module_free(mod, mod->module_init_rw);
24042 -+ module_free(mod, mod->module_core_rw);
24043 -+ return -ENOMEM;
24044 -+ }
24045 ++ if (mod->init_size_rx) {
24046 ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
24047 ++ kmemleak_ignore(ptr);
24048 ++ if (!ptr && mod->init_size_rx) {
24049 ++ module_free_exec(mod, mod->module_core_rx);
24050 ++ if (mod->module_init_rw)
24051 ++ module_free(mod, mod->module_init_rw);
24052 ++ module_free(mod, mod->module_core_rw);
24053 ++ return -ENOMEM;
24054 ++ }
24055 +
24056 -+ pax_open_kernel();
24057 -+ memset(ptr, 0, mod->init_size_rx);
24058 -+ pax_close_kernel();
24059 -+ mod->module_init_rx = ptr;
24060 ++ pax_open_kernel();
24061 ++ memset(ptr, 0, mod->init_size_rx);
24062 ++ pax_close_kernel();
24063 ++ mod->module_init_rx = ptr;
24064 ++ } else
24065 ++ mod->module_init_rx = NULL;
24066
24067 /* Transfer each section which specifies SHF_ALLOC */
24068 pr_debug("final section addresses:\n");
24069 -@@ -2785,16 +2856,45 @@ static int move_module(struct module *mod, struct load_info *info)
24070 +@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
24071 if (!(shdr->sh_flags & SHF_ALLOC))
24072 continue;
24073
24074 @@ -70536,7 +71439,7 @@ index 3e544f4..34c3008 100644
24075 pr_debug("\t0x%lx %s\n",
24076 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
24077 }
24078 -@@ -2849,12 +2949,12 @@ static void flush_module_icache(const struct module *mod)
24079 +@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
24080 * Do it before processing of module parameters, so the module
24081 * can provide parameter accessor functions of its own.
24082 */
24083 @@ -70555,7 +71458,7 @@ index 3e544f4..34c3008 100644
24084
24085 set_fs(old_fs);
24086 }
24087 -@@ -2924,8 +3024,10 @@ out:
24088 +@@ -2983,8 +3088,10 @@ out:
24089 static void module_deallocate(struct module *mod, struct load_info *info)
24090 {
24091 percpu_modfree(mod);
24092 @@ -70568,7 +71471,7 @@ index 3e544f4..34c3008 100644
24093 }
24094
24095 int __weak module_finalize(const Elf_Ehdr *hdr,
24096 -@@ -2938,7 +3040,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
24097 +@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
24098 static int post_relocation(struct module *mod, const struct load_info *info)
24099 {
24100 /* Sort exception table now relocations are done. */
24101 @@ -70578,7 +71481,50 @@ index 3e544f4..34c3008 100644
24102
24103 /* Copy relocated percpu area over. */
24104 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
24105 -@@ -3036,9 +3140,38 @@ again:
24106 +@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
24107 + MODULE_STATE_COMING, mod);
24108 +
24109 + /* Set RO and NX regions for core */
24110 +- set_section_ro_nx(mod->module_core,
24111 +- mod->core_text_size,
24112 +- mod->core_ro_size,
24113 +- mod->core_size);
24114 ++ set_section_ro_nx(mod->module_core_rx,
24115 ++ mod->core_size_rx,
24116 ++ mod->core_size_rx,
24117 ++ mod->core_size_rx);
24118 +
24119 + /* Set RO and NX regions for init */
24120 +- set_section_ro_nx(mod->module_init,
24121 +- mod->init_text_size,
24122 +- mod->init_ro_size,
24123 +- mod->init_size);
24124 ++ set_section_ro_nx(mod->module_init_rx,
24125 ++ mod->init_size_rx,
24126 ++ mod->init_size_rx,
24127 ++ mod->init_size_rx);
24128 +
24129 + do_mod_ctors(mod);
24130 + /* Start the module */
24131 +@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
24132 + mod->strtab = mod->core_strtab;
24133 + #endif
24134 + unset_module_init_ro_nx(mod);
24135 +- module_free(mod, mod->module_init);
24136 +- mod->module_init = NULL;
24137 +- mod->init_size = 0;
24138 +- mod->init_ro_size = 0;
24139 +- mod->init_text_size = 0;
24140 ++ module_free(mod, mod->module_init_rw);
24141 ++ module_free_exec(mod, mod->module_init_rx);
24142 ++ mod->module_init_rw = NULL;
24143 ++ mod->module_init_rx = NULL;
24144 ++ mod->init_size_rw = 0;
24145 ++ mod->init_size_rx = 0;
24146 + mutex_unlock(&module_mutex);
24147 + wake_up_all(&module_wq);
24148 +
24149 +@@ -3209,9 +3319,38 @@ again:
24150 if (err)
24151 goto free_unload;
24152
24153 @@ -70590,7 +71536,7 @@ index 3e544f4..34c3008 100644
24154 + }
24155 +
24156 /* Set up MODINFO_ATTR fields */
24157 - setup_modinfo(mod, &info);
24158 + setup_modinfo(mod, info);
24159
24160 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
24161 + {
24162 @@ -70615,14 +71561,28 @@ index 3e544f4..34c3008 100644
24163 +#endif
24164 +
24165 /* Fix up syms, so that st_value is a pointer to location. */
24166 - err = simplify_symbols(mod, &info);
24167 + err = simplify_symbols(mod, info);
24168 if (err < 0)
24169 -@@ -3104,11 +3237,11 @@ again:
24170 +@@ -3227,13 +3366,6 @@ again:
24171 +
24172 + flush_module_icache(mod);
24173 +
24174 +- /* Now copy in args */
24175 +- mod->args = strndup_user(uargs, ~0UL >> 1);
24176 +- if (IS_ERR(mod->args)) {
24177 +- err = PTR_ERR(mod->args);
24178 +- goto free_arch_cleanup;
24179 +- }
24180 +-
24181 + dynamic_debug_setup(info->debug, info->num_debug);
24182 +
24183 + mutex_lock(&module_mutex);
24184 +@@ -3278,11 +3410,10 @@ again:
24185 mutex_unlock(&module_mutex);
24186 - dynamic_debug_remove(info.debug);
24187 + dynamic_debug_remove(info->debug);
24188 synchronize_sched();
24189 - kfree(mod->args);
24190 - free_arch_cleanup:
24191 +- free_arch_cleanup:
24192 module_arch_cleanup(mod);
24193 free_modinfo:
24194 free_modinfo(mod);
24195 @@ -70630,50 +71590,7 @@ index 3e544f4..34c3008 100644
24196 free_unload:
24197 module_unload_free(mod);
24198 unlink_mod:
24199 -@@ -3155,16 +3288,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
24200 - MODULE_STATE_COMING, mod);
24201 -
24202 - /* Set RO and NX regions for core */
24203 -- set_section_ro_nx(mod->module_core,
24204 -- mod->core_text_size,
24205 -- mod->core_ro_size,
24206 -- mod->core_size);
24207 -+ set_section_ro_nx(mod->module_core_rx,
24208 -+ mod->core_size_rx,
24209 -+ mod->core_size_rx,
24210 -+ mod->core_size_rx);
24211 -
24212 - /* Set RO and NX regions for init */
24213 -- set_section_ro_nx(mod->module_init,
24214 -- mod->init_text_size,
24215 -- mod->init_ro_size,
24216 -- mod->init_size);
24217 -+ set_section_ro_nx(mod->module_init_rx,
24218 -+ mod->init_size_rx,
24219 -+ mod->init_size_rx,
24220 -+ mod->init_size_rx);
24221 -
24222 - do_mod_ctors(mod);
24223 - /* Start the module */
24224 -@@ -3209,11 +3342,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
24225 - mod->strtab = mod->core_strtab;
24226 - #endif
24227 - unset_module_init_ro_nx(mod);
24228 -- module_free(mod, mod->module_init);
24229 -- mod->module_init = NULL;
24230 -- mod->init_size = 0;
24231 -- mod->init_ro_size = 0;
24232 -- mod->init_text_size = 0;
24233 -+ module_free(mod, mod->module_init_rw);
24234 -+ module_free_exec(mod, mod->module_init_rx);
24235 -+ mod->module_init_rw = NULL;
24236 -+ mod->module_init_rx = NULL;
24237 -+ mod->init_size_rw = 0;
24238 -+ mod->init_size_rx = 0;
24239 - mutex_unlock(&module_mutex);
24240 - wake_up_all(&module_wq);
24241 -
24242 -@@ -3245,10 +3379,16 @@ static const char *get_ksymbol(struct module *mod,
24243 +@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
24244 unsigned long nextval;
24245
24246 /* At worse, next value is at end of module */
24247 @@ -70693,7 +71610,7 @@ index 3e544f4..34c3008 100644
24248
24249 /* Scan for closest preceding symbol, and next symbol. (ELF
24250 starts real symbols at 1). */
24251 -@@ -3501,7 +3641,7 @@ static int m_show(struct seq_file *m, void *p)
24252 +@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
24253 return 0;
24254
24255 seq_printf(m, "%s %u",
24256 @@ -70702,7 +71619,7 @@ index 3e544f4..34c3008 100644
24257 print_unload_info(m, mod);
24258
24259 /* Informative for users. */
24260 -@@ -3510,7 +3650,7 @@ static int m_show(struct seq_file *m, void *p)
24261 +@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
24262 mod->state == MODULE_STATE_COMING ? "Loading":
24263 "Live");
24264 /* Used by oprofile and other similar tools. */
24265 @@ -70711,7 +71628,7 @@ index 3e544f4..34c3008 100644
24266
24267 /* Taints info */
24268 if (mod->taints)
24269 -@@ -3546,7 +3686,17 @@ static const struct file_operations proc_modules_operations = {
24270 +@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
24271
24272 static int __init proc_modules_init(void)
24273 {
24274 @@ -70729,7 +71646,7 @@ index 3e544f4..34c3008 100644
24275 return 0;
24276 }
24277 module_init(proc_modules_init);
24278 -@@ -3607,14 +3757,14 @@ struct module *__module_address(unsigned long addr)
24279 +@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
24280 {
24281 struct module *mod;
24282
24283 @@ -70747,7 +71664,7 @@ index 3e544f4..34c3008 100644
24284 return mod;
24285 }
24286 return NULL;
24287 -@@ -3649,11 +3799,20 @@ bool is_module_text_address(unsigned long addr)
24288 +@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
24289 */
24290 struct module *__module_text_address(unsigned long addr)
24291 {
24292 @@ -70931,7 +71848,7 @@ index e1b2822..5edc1d9 100644
24293 }
24294 EXPORT_SYMBOL(__stack_chk_fail);
24295 diff --git a/kernel/pid.c b/kernel/pid.c
24296 -index aebd4f5..1693c13 100644
24297 +index f2c6a68..4922d97 100644
24298 --- a/kernel/pid.c
24299 +++ b/kernel/pid.c
24300 @@ -33,6 +33,7 @@
24301 @@ -70942,7 +71859,7 @@ index aebd4f5..1693c13 100644
24302 #include <linux/pid_namespace.h>
24303 #include <linux/init_task.h>
24304 #include <linux/syscalls.h>
24305 -@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
24306 +@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
24307
24308 int pid_max = PID_MAX_DEFAULT;
24309
24310 @@ -70951,7 +71868,7 @@ index aebd4f5..1693c13 100644
24311
24312 int pid_max_min = RESERVED_PIDS + 1;
24313 int pid_max_max = PID_MAX_LIMIT;
24314 -@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
24315 +@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
24316 */
24317 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
24318 {
24319 @@ -70971,8 +71888,8 @@ index aebd4f5..1693c13 100644
24320 }
24321
24322 struct task_struct *find_task_by_vpid(pid_t vnr)
24323 -@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
24324 - return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
24325 +@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
24326 + return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
24327 }
24328
24329 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
24330 @@ -70980,38 +71897,17 @@ index aebd4f5..1693c13 100644
24331 + rcu_lockdep_assert(rcu_read_lock_held(),
24332 + "find_task_by_pid_ns() needs rcu_read_lock()"
24333 + " protection");
24334 -+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
24335 ++ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
24336 +}
24337 +
24338 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
24339 {
24340 struct pid *pid;
24341 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
24342 -index 125cb67..2e5c8ad 100644
24343 +index a278cad..bff5bd3 100644
24344 --- a/kernel/posix-cpu-timers.c
24345 +++ b/kernel/posix-cpu-timers.c
24346 -@@ -6,9 +6,11 @@
24347 - #include <linux/posix-timers.h>
24348 - #include <linux/errno.h>
24349 - #include <linux/math64.h>
24350 -+#include <linux/security.h>
24351 - #include <asm/uaccess.h>
24352 - #include <linux/kernel_stat.h>
24353 - #include <trace/events/timer.h>
24354 -+#include <linux/random.h>
24355 -
24356 - /*
24357 - * Called after updating RLIMIT_CPU to run cpu timer and update
24358 -@@ -494,6 +496,8 @@ static void cleanup_timers(struct list_head *head,
24359 - */
24360 - void posix_cpu_timers_exit(struct task_struct *tsk)
24361 - {
24362 -+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
24363 -+ sizeof(unsigned long long));
24364 - cleanup_timers(tsk->cpu_timers,
24365 - tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
24366 -
24367 -@@ -1578,14 +1582,14 @@ struct k_clock clock_posix_cpu = {
24368 +@@ -1557,14 +1557,14 @@ struct k_clock clock_posix_cpu = {
24369
24370 static __init int init_posix_cpu_timers(void)
24371 {
24372 @@ -71127,7 +72023,7 @@ index 69185ae..cc2847a 100644
24373 }
24374
24375 diff --git a/kernel/power/process.c b/kernel/power/process.c
24376 -index 87da817..30ddd13 100644
24377 +index d5a258b..4271191 100644
24378 --- a/kernel/power/process.c
24379 +++ b/kernel/power/process.c
24380 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
24381 @@ -71138,7 +72034,7 @@ index 87da817..30ddd13 100644
24382
24383 do_gettimeofday(&start);
24384
24385 -@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
24386 +@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
24387
24388 while (true) {
24389 todo = 0;
24390 @@ -71147,13 +72043,10 @@ index 87da817..30ddd13 100644
24391 read_lock(&tasklist_lock);
24392 do_each_thread(g, p) {
24393 if (p == current || !freeze_task(p))
24394 -@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
24395 - * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
24396 - * transition can't race with task state testing here.
24397 - */
24398 -- if (!task_is_stopped_or_traced(p) &&
24399 -- !freezer_should_skip(p))
24400 -+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
24401 + continue;
24402 +
24403 +- if (!freezer_should_skip(p))
24404 ++ if (!freezer_should_skip(p)) {
24405 todo++;
24406 + if (timedout) {
24407 + printk(KERN_ERR "Task refusing to freeze:\n");
24408 @@ -71163,7 +72056,7 @@ index 87da817..30ddd13 100644
24409 } while_each_thread(g, p);
24410 read_unlock(&tasklist_lock);
24411
24412 -@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
24413 +@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
24414 todo += wq_busy;
24415 }
24416
24417 @@ -71173,10 +72066,10 @@ index 87da817..30ddd13 100644
24418
24419 if (pm_wakeup_pending()) {
24420 diff --git a/kernel/printk.c b/kernel/printk.c
24421 -index f8e0b5a..dda2a5c 100644
24422 +index 267ce78..952f8a8 100644
24423 --- a/kernel/printk.c
24424 +++ b/kernel/printk.c
24425 -@@ -817,6 +817,11 @@ static int check_syslog_permissions(int type, bool from_file)
24426 +@@ -834,6 +834,11 @@ static int check_syslog_permissions(int type, bool from_file)
24427 if (from_file && type != SYSLOG_ACTION_OPEN)
24428 return 0;
24429
24430 @@ -71189,10 +72082,10 @@ index f8e0b5a..dda2a5c 100644
24431 if (capable(CAP_SYSLOG))
24432 return 0;
24433 diff --git a/kernel/profile.c b/kernel/profile.c
24434 -index 76b8e77..a2930e8 100644
24435 +index 1f39181..86093471 100644
24436 --- a/kernel/profile.c
24437 +++ b/kernel/profile.c
24438 -@@ -39,7 +39,7 @@ struct profile_hit {
24439 +@@ -40,7 +40,7 @@ struct profile_hit {
24440 /* Oprofile timer tick hook */
24441 static int (*timer_hook)(struct pt_regs *) __read_mostly;
24442
24443 @@ -71201,7 +72094,7 @@ index 76b8e77..a2930e8 100644
24444 static unsigned long prof_len, prof_shift;
24445
24446 int prof_on __read_mostly;
24447 -@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
24448 +@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
24449 hits[i].pc = 0;
24450 continue;
24451 }
24452 @@ -71210,7 +72103,7 @@ index 76b8e77..a2930e8 100644
24453 hits[i].hits = hits[i].pc = 0;
24454 }
24455 }
24456 -@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
24457 +@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
24458 * Add the current hit(s) and flush the write-queue out
24459 * to the global buffer:
24460 */
24461 @@ -71222,7 +72115,7 @@ index 76b8e77..a2930e8 100644
24462 hits[i].pc = hits[i].hits = 0;
24463 }
24464 out:
24465 -@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
24466 +@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
24467 {
24468 unsigned long pc;
24469 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
24470 @@ -71231,7 +72124,7 @@ index 76b8e77..a2930e8 100644
24471 }
24472 #endif /* !CONFIG_SMP */
24473
24474 -@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
24475 +@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
24476 return -EFAULT;
24477 buf++; p++; count--; read++;
24478 }
24479 @@ -71240,7 +72133,7 @@ index 76b8e77..a2930e8 100644
24480 if (copy_to_user(buf, (void *)pnt, count))
24481 return -EFAULT;
24482 read += count;
24483 -@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
24484 +@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
24485 }
24486 #endif
24487 profile_discard_flip_buffers();
24488 @@ -71250,19 +72143,19 @@ index 76b8e77..a2930e8 100644
24489 }
24490
24491 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
24492 -index fbea91d..9bf15e8 100644
24493 +index 6cbeaae..363c48a 100644
24494 --- a/kernel/ptrace.c
24495 +++ b/kernel/ptrace.c
24496 -@@ -319,7 +319,7 @@ static int ptrace_attach(struct task_struct *task, long request,
24497 -
24498 +@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
24499 if (seize)
24500 flags |= PT_SEIZED;
24501 -- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
24502 -+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
24503 + rcu_read_lock();
24504 +- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
24505 ++ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
24506 flags |= PT_PTRACE_CAP;
24507 + rcu_read_unlock();
24508 task->ptrace = flags;
24509 -
24510 -@@ -526,7 +526,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
24511 +@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
24512 break;
24513 return -EIO;
24514 }
24515 @@ -71271,7 +72164,7 @@ index fbea91d..9bf15e8 100644
24516 return -EFAULT;
24517 copied += retval;
24518 src += retval;
24519 -@@ -711,7 +711,7 @@ int ptrace_request(struct task_struct *child, long request,
24520 +@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
24521 bool seized = child->ptrace & PT_SEIZED;
24522 int ret = -EIO;
24523 siginfo_t siginfo, *si;
24524 @@ -71280,7 +72173,7 @@ index fbea91d..9bf15e8 100644
24525 unsigned long __user *datalp = datavp;
24526 unsigned long flags;
24527
24528 -@@ -913,14 +913,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
24529 +@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
24530 goto out;
24531 }
24532
24533 @@ -71303,7 +72196,7 @@ index fbea91d..9bf15e8 100644
24534 goto out_put_task_struct;
24535 }
24536
24537 -@@ -948,7 +955,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
24538 +@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
24539 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
24540 if (copied != sizeof(tmp))
24541 return -EIO;
24542 @@ -71312,7 +72205,7 @@ index fbea91d..9bf15e8 100644
24543 }
24544
24545 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
24546 -@@ -1058,14 +1065,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
24547 +@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
24548 goto out;
24549 }
24550
24551 @@ -71336,7 +72229,7 @@ index fbea91d..9bf15e8 100644
24552 }
24553
24554 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
24555 -index e4c6a59..c86621a 100644
24556 +index e7dce58..ad0d7b7 100644
24557 --- a/kernel/rcutiny.c
24558 +++ b/kernel/rcutiny.c
24559 @@ -46,7 +46,7 @@
24560 @@ -71358,10 +72251,10 @@ index e4c6a59..c86621a 100644
24561 __rcu_process_callbacks(&rcu_sched_ctrlblk);
24562 __rcu_process_callbacks(&rcu_bh_ctrlblk);
24563 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
24564 -index 3d01902..afbf46e 100644
24565 +index f85016a..91cb03b 100644
24566 --- a/kernel/rcutiny_plugin.h
24567 +++ b/kernel/rcutiny_plugin.h
24568 -@@ -893,7 +893,7 @@ static int rcu_kthread(void *arg)
24569 +@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
24570 have_rcu_kthread_work = morework;
24571 local_irq_restore(flags);
24572 if (work)
24573 @@ -71371,7 +72264,7 @@ index 3d01902..afbf46e 100644
24574 }
24575
24576 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
24577 -index aaa7b9f..055ff1e 100644
24578 +index 31dea01..ad91ffb 100644
24579 --- a/kernel/rcutorture.c
24580 +++ b/kernel/rcutorture.c
24581 @@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
24582 @@ -71416,7 +72309,7 @@ index aaa7b9f..055ff1e 100644
24583 spin_lock_bh(&rcu_torture_lock);
24584 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
24585 spin_unlock_bh(&rcu_torture_lock);
24586 -@@ -410,7 +410,7 @@ rcu_torture_cb(struct rcu_head *p)
24587 +@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
24588 i = rp->rtort_pipe_count;
24589 if (i > RCU_TORTURE_PIPE_LEN)
24590 i = RCU_TORTURE_PIPE_LEN;
24591 @@ -71425,7 +72318,7 @@ index aaa7b9f..055ff1e 100644
24592 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
24593 rp->rtort_mbtest = 0;
24594 rcu_torture_free(rp);
24595 -@@ -459,7 +459,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
24596 +@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
24597 i = rp->rtort_pipe_count;
24598 if (i > RCU_TORTURE_PIPE_LEN)
24599 i = RCU_TORTURE_PIPE_LEN;
24600 @@ -71434,7 +72327,7 @@ index aaa7b9f..055ff1e 100644
24601 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
24602 rp->rtort_mbtest = 0;
24603 list_del(&rp->rtort_free);
24604 -@@ -1002,7 +1002,7 @@ rcu_torture_writer(void *arg)
24605 +@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
24606 i = old_rp->rtort_pipe_count;
24607 if (i > RCU_TORTURE_PIPE_LEN)
24608 i = RCU_TORTURE_PIPE_LEN;
24609 @@ -71443,7 +72336,7 @@ index aaa7b9f..055ff1e 100644
24610 old_rp->rtort_pipe_count++;
24611 cur_ops->deferred_free(old_rp);
24612 }
24613 -@@ -1087,7 +1087,7 @@ static void rcu_torture_timer(unsigned long unused)
24614 +@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
24615 }
24616 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
24617 if (p->rtort_mbtest == 0)
24618 @@ -71452,7 +72345,7 @@ index aaa7b9f..055ff1e 100644
24619 spin_lock(&rand_lock);
24620 cur_ops->read_delay(&rand);
24621 n_rcu_torture_timers++;
24622 -@@ -1151,7 +1151,7 @@ rcu_torture_reader(void *arg)
24623 +@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
24624 }
24625 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
24626 if (p->rtort_mbtest == 0)
24627 @@ -71461,7 +72354,7 @@ index aaa7b9f..055ff1e 100644
24628 cur_ops->read_delay(&rand);
24629 preempt_disable();
24630 pipe_count = p->rtort_pipe_count;
24631 -@@ -1210,11 +1210,11 @@ rcu_torture_printk(char *page)
24632 +@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
24633 rcu_torture_current,
24634 rcu_torture_current_version,
24635 list_empty(&rcu_torture_freelist),
24636 @@ -71477,7 +72370,7 @@ index aaa7b9f..055ff1e 100644
24637 n_rcu_torture_boost_ktrerror,
24638 n_rcu_torture_boost_rterror);
24639 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
24640 -@@ -1233,14 +1233,14 @@ rcu_torture_printk(char *page)
24641 +@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
24642 n_barrier_attempts,
24643 n_rcu_torture_barrier_error);
24644 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
24645 @@ -71494,7 +72387,7 @@ index aaa7b9f..055ff1e 100644
24646 WARN_ON_ONCE(1);
24647 }
24648 cnt += sprintf(&page[cnt], "Reader Pipe: ");
24649 -@@ -1254,7 +1254,7 @@ rcu_torture_printk(char *page)
24650 +@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
24651 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
24652 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
24653 cnt += sprintf(&page[cnt], " %d",
24654 @@ -71503,16 +72396,16 @@ index aaa7b9f..055ff1e 100644
24655 }
24656 cnt += sprintf(&page[cnt], "\n");
24657 if (cur_ops->stats)
24658 -@@ -1938,7 +1938,7 @@ rcu_torture_cleanup(void)
24659 +@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
24660 +
24661 + rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
24662
24663 - if (cur_ops->cleanup)
24664 - cur_ops->cleanup();
24665 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
24666 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
24667 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
24668 else if (n_online_successes != n_online_attempts ||
24669 n_offline_successes != n_offline_attempts)
24670 -@@ -2007,18 +2007,18 @@ rcu_torture_init(void)
24671 +@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
24672
24673 rcu_torture_current = NULL;
24674 rcu_torture_current_version = 0;
24675 @@ -71538,10 +72431,10 @@ index aaa7b9f..055ff1e 100644
24676 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
24677 per_cpu(rcu_torture_count, cpu)[i] = 0;
24678 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
24679 -index 2682295..0f2297e 100644
24680 +index e441b77..dd54f17 100644
24681 --- a/kernel/rcutree.c
24682 +++ b/kernel/rcutree.c
24683 -@@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
24684 +@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
24685 rcu_prepare_for_idle(smp_processor_id());
24686 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
24687 smp_mb__before_atomic_inc(); /* See above. */
24688 @@ -71553,7 +72446,7 @@ index 2682295..0f2297e 100644
24689
24690 /*
24691 * It is illegal to enter an extended quiescent state while
24692 -@@ -508,10 +508,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
24693 +@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
24694 int user)
24695 {
24696 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
24697 @@ -71566,7 +72459,7 @@ index 2682295..0f2297e 100644
24698 rcu_cleanup_after_idle(smp_processor_id());
24699 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
24700 if (!user && !is_idle_task(current)) {
24701 -@@ -670,14 +670,14 @@ void rcu_nmi_enter(void)
24702 +@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
24703 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
24704
24705 if (rdtp->dynticks_nmi_nesting == 0 &&
24706 @@ -71584,7 +72477,7 @@ index 2682295..0f2297e 100644
24707 }
24708
24709 /**
24710 -@@ -696,9 +696,9 @@ void rcu_nmi_exit(void)
24711 +@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
24712 return;
24713 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
24714 smp_mb__before_atomic_inc(); /* See above. */
24715 @@ -71596,7 +72489,7 @@ index 2682295..0f2297e 100644
24716 }
24717
24718 /**
24719 -@@ -712,7 +712,7 @@ int rcu_is_cpu_idle(void)
24720 +@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
24721 int ret;
24722
24723 preempt_disable();
24724 @@ -71605,7 +72498,7 @@ index 2682295..0f2297e 100644
24725 preempt_enable();
24726 return ret;
24727 }
24728 -@@ -795,7 +795,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
24729 +@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
24730 */
24731 static int dyntick_save_progress_counter(struct rcu_data *rdp)
24732 {
24733 @@ -71614,7 +72507,7 @@ index 2682295..0f2297e 100644
24734 return (rdp->dynticks_snap & 0x1) == 0;
24735 }
24736
24737 -@@ -810,7 +810,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
24738 +@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
24739 unsigned int curr;
24740 unsigned int snap;
24741
24742 @@ -71623,7 +72516,7 @@ index 2682295..0f2297e 100644
24743 snap = (unsigned int)rdp->dynticks_snap;
24744
24745 /*
24746 -@@ -858,10 +858,10 @@ static int jiffies_till_stall_check(void)
24747 +@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
24748 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
24749 */
24750 if (till_stall_check < 3) {
24751 @@ -71636,7 +72529,7 @@ index 2682295..0f2297e 100644
24752 till_stall_check = 300;
24753 }
24754 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
24755 -@@ -1589,7 +1589,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
24756 +@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
24757 rsp->qlen += rdp->qlen;
24758 rdp->n_cbs_orphaned += rdp->qlen;
24759 rdp->qlen_lazy = 0;
24760 @@ -71645,7 +72538,7 @@ index 2682295..0f2297e 100644
24761 }
24762
24763 /*
24764 -@@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
24765 +@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
24766 }
24767 smp_mb(); /* List handling before counting for rcu_barrier(). */
24768 rdp->qlen_lazy -= count_lazy;
24769 @@ -71654,7 +72547,7 @@ index 2682295..0f2297e 100644
24770 rdp->n_cbs_invoked += count;
24771
24772 /* Reinstate batch limit if we have worked down the excess. */
24773 -@@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
24774 +@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
24775 /*
24776 * Do RCU core processing for the current CPU.
24777 */
24778 @@ -71663,7 +72556,7 @@ index 2682295..0f2297e 100644
24779 {
24780 struct rcu_state *rsp;
24781
24782 -@@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
24783 +@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
24784 local_irq_restore(flags);
24785 return;
24786 }
24787 @@ -71672,60 +72565,98 @@ index 2682295..0f2297e 100644
24788 if (lazy)
24789 rdp->qlen_lazy++;
24790 else
24791 -@@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
24792 - }
24793 - EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
24794 -
24795 --static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
24796 --static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
24797 -+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
24798 -+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
24799 -
24800 - static int synchronize_sched_expedited_cpu_stop(void *data)
24801 - {
24802 -@@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
24803 - int firstsnap, s, snap, trycount = 0;
24804 +@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
24805 + * counter wrap on a 32-bit system. Quite a few more CPUs would of
24806 + * course be required on a 64-bit system.
24807 + */
24808 +- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
24809 ++ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
24810 + (ulong)atomic_long_read(&rsp->expedited_done) +
24811 + ULONG_MAX / 8)) {
24812 + synchronize_sched();
24813 +- atomic_long_inc(&rsp->expedited_wrap);
24814 ++ atomic_long_inc_unchecked(&rsp->expedited_wrap);
24815 + return;
24816 + }
24817
24818 - /* Note that atomic_inc_return() implies full memory barrier. */
24819 -- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
24820 -+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
24821 +@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
24822 + * Take a ticket. Note that atomic_inc_return() implies a
24823 + * full memory barrier.
24824 + */
24825 +- snap = atomic_long_inc_return(&rsp->expedited_start);
24826 ++ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
24827 + firstsnap = snap;
24828 get_online_cpus();
24829 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
24830 +@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
24831 + synchronize_sched_expedited_cpu_stop,
24832 + NULL) == -EAGAIN) {
24833 + put_online_cpus();
24834 +- atomic_long_inc(&rsp->expedited_tryfail);
24835 ++ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
24836
24837 -@@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
24838 + /* Check to see if someone else did our work for us. */
24839 + s = atomic_long_read(&rsp->expedited_done);
24840 + if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
24841 + /* ensure test happens before caller kfree */
24842 + smp_mb__before_atomic_inc(); /* ^^^ */
24843 +- atomic_long_inc(&rsp->expedited_workdone1);
24844 ++ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
24845 + return;
24846 }
24847
24848 - /* Check to see if someone else did our work for us. */
24849 -- s = atomic_read(&sync_sched_expedited_done);
24850 -+ s = atomic_read_unchecked(&sync_sched_expedited_done);
24851 - if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
24852 - smp_mb(); /* ensure test happens before caller kfree */
24853 +@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
24854 + udelay(trycount * num_online_cpus());
24855 + } else {
24856 + wait_rcu_gp(call_rcu_sched);
24857 +- atomic_long_inc(&rsp->expedited_normal);
24858 ++ atomic_long_inc_unchecked(&rsp->expedited_normal);
24859 return;
24860 -@@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
24861 - * grace period works for us.
24862 + }
24863 +
24864 +@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
24865 + if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
24866 + /* ensure test happens before caller kfree */
24867 + smp_mb__before_atomic_inc(); /* ^^^ */
24868 +- atomic_long_inc(&rsp->expedited_workdone2);
24869 ++ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
24870 + return;
24871 + }
24872 +
24873 +@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
24874 + * period works for us.
24875 */
24876 get_online_cpus();
24877 -- snap = atomic_read(&sync_sched_expedited_started);
24878 -+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
24879 +- snap = atomic_long_read(&rsp->expedited_start);
24880 ++ snap = atomic_long_read_unchecked(&rsp->expedited_start);
24881 smp_mb(); /* ensure read is before try_stop_cpus(). */
24882 }
24883 +- atomic_long_inc(&rsp->expedited_stoppedcpus);
24884 ++ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
24885
24886 -@@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
24887 - * than we did beat us to the punch.
24888 + /*
24889 + * Everyone up to our most recent fetch is covered by our grace
24890 +@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
24891 + * than we did already did their update.
24892 */
24893 do {
24894 -- s = atomic_read(&sync_sched_expedited_done);
24895 -+ s = atomic_read_unchecked(&sync_sched_expedited_done);
24896 - if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
24897 - smp_mb(); /* ensure test happens before caller kfree */
24898 +- atomic_long_inc(&rsp->expedited_done_tries);
24899 ++ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
24900 + s = atomic_long_read(&rsp->expedited_done);
24901 + if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
24902 + /* ensure test happens before caller kfree */
24903 + smp_mb__before_atomic_inc(); /* ^^^ */
24904 +- atomic_long_inc(&rsp->expedited_done_lost);
24905 ++ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
24906 break;
24907 }
24908 -- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
24909 -+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
24910 + } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
24911 +- atomic_long_inc(&rsp->expedited_done_exit);
24912 ++ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
24913
24914 put_online_cpus();
24915 }
24916 -@@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
24917 +@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
24918 * ACCESS_ONCE() to prevent the compiler from speculating
24919 * the increment to precede the early-exit check.
24920 */
24921 @@ -71734,7 +72665,7 @@ index 2682295..0f2297e 100644
24922 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
24923 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
24924 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
24925 -@@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
24926 +@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
24927
24928 /* Increment ->n_barrier_done to prevent duplicate work. */
24929 smp_mb(); /* Keep increment after above mechanism. */
24930 @@ -71743,7 +72674,7 @@ index 2682295..0f2297e 100644
24931 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
24932 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
24933 smp_mb(); /* Keep increment before caller's subsequent code. */
24934 -@@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
24935 +@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
24936 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
24937 init_callback_list(rdp);
24938 rdp->qlen_lazy = 0;
24939 @@ -71756,7 +72687,7 @@ index 2682295..0f2297e 100644
24940 #ifdef CONFIG_RCU_USER_QS
24941 WARN_ON_ONCE(rdp->dynticks->in_user);
24942 #endif
24943 -@@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
24944 +@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
24945 rdp->blimit = blimit;
24946 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
24947 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
24948 @@ -71768,7 +72699,7 @@ index 2682295..0f2297e 100644
24949 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
24950
24951 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
24952 -index a240f03..d469618 100644
24953 +index 4b69291..704c92e 100644
24954 --- a/kernel/rcutree.h
24955 +++ b/kernel/rcutree.h
24956 @@ -86,7 +86,7 @@ struct rcu_dynticks {
24957 @@ -71780,11 +72711,40 @@ index a240f03..d469618 100644
24958 #ifdef CONFIG_RCU_FAST_NO_HZ
24959 int dyntick_drain; /* Prepare-for-idle state variable. */
24960 unsigned long dyntick_holdoff;
24961 +@@ -423,17 +423,17 @@ struct rcu_state {
24962 + /* _rcu_barrier(). */
24963 + /* End of fields guarded by barrier_mutex. */
24964 +
24965 +- atomic_long_t expedited_start; /* Starting ticket. */
24966 +- atomic_long_t expedited_done; /* Done ticket. */
24967 +- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
24968 +- atomic_long_t expedited_tryfail; /* # acquisition failures. */
24969 +- atomic_long_t expedited_workdone1; /* # done by others #1. */
24970 +- atomic_long_t expedited_workdone2; /* # done by others #2. */
24971 +- atomic_long_t expedited_normal; /* # fallbacks to normal. */
24972 +- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
24973 +- atomic_long_t expedited_done_tries; /* # tries to update _done. */
24974 +- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
24975 +- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
24976 ++ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
24977 ++ atomic_long_t expedited_done; /* Done ticket. */
24978 ++ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
24979 ++ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
24980 ++ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
24981 ++ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
24982 ++ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
24983 ++ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
24984 ++ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
24985 ++ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
24986 ++ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
24987 +
24988 + unsigned long jiffies_force_qs; /* Time at which to invoke */
24989 + /* force_quiescent_state(). */
24990 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
24991 -index f921154..34c4873 100644
24992 +index c1cc7e1..5043e0e 100644
24993 --- a/kernel/rcutree_plugin.h
24994 +++ b/kernel/rcutree_plugin.h
24995 -@@ -865,7 +865,7 @@ void synchronize_rcu_expedited(void)
24996 +@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
24997
24998 /* Clean up and exit. */
24999 smp_mb(); /* ensure expedited GP seen before counter increment. */
25000 @@ -71793,7 +72753,7 @@ index f921154..34c4873 100644
25001 unlock_mb_ret:
25002 mutex_unlock(&sync_rcu_preempt_exp_mutex);
25003 mb_ret:
25004 -@@ -2040,7 +2040,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
25005 +@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
25006 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
25007 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
25008 cpu, ticks_value, ticks_title,
25009 @@ -71802,12 +72762,57 @@ index f921154..34c4873 100644
25010 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
25011 fast_no_hz);
25012 }
25013 +@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
25014 +
25015 + /* Enqueue the callback on the nocb list and update counts. */
25016 + old_rhpp = xchg(&rdp->nocb_tail, rhtp);
25017 +- ACCESS_ONCE(*old_rhpp) = rhp;
25018 ++ ACCESS_ONCE_RW(*old_rhpp) = rhp;
25019 + atomic_long_add(rhcount, &rdp->nocb_q_count);
25020 + atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
25021 +
25022 +@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
25023 + * Extract queued callbacks, update counts, and wait
25024 + * for a grace period to elapse.
25025 + */
25026 +- ACCESS_ONCE(rdp->nocb_head) = NULL;
25027 ++ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
25028 + tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
25029 + c = atomic_long_xchg(&rdp->nocb_q_count, 0);
25030 + cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
25031 +- ACCESS_ONCE(rdp->nocb_p_count) += c;
25032 +- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
25033 ++ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
25034 ++ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
25035 + wait_rcu_gp(rdp->rsp->call_remote);
25036 +
25037 + /* Each pass through the following loop invokes a callback. */
25038 +@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
25039 + list = next;
25040 + }
25041 + trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
25042 +- ACCESS_ONCE(rdp->nocb_p_count) -= c;
25043 +- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
25044 ++ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
25045 ++ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
25046 + rdp->n_nocbs_invoked += c;
25047 + }
25048 + return 0;
25049 +@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
25050 + rdp = per_cpu_ptr(rsp->rda, cpu);
25051 + t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
25052 + BUG_ON(IS_ERR(t));
25053 +- ACCESS_ONCE(rdp->nocb_kthread) = t;
25054 ++ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
25055 + }
25056 + }
25057 +
25058 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
25059 -index 693513b..b9f1d63 100644
25060 +index 0d095dc..1985b19 100644
25061 --- a/kernel/rcutree_trace.c
25062 +++ b/kernel/rcutree_trace.c
25063 -@@ -92,7 +92,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
25064 - rdp->completed, rdp->gpnum,
25065 +@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
25066 + ulong2long(rdp->completed), ulong2long(rdp->gpnum),
25067 rdp->passed_quiesce, rdp->qs_pending);
25068 seq_printf(m, " dt=%d/%llx/%d df=%lu",
25069 - atomic_read(&rdp->dynticks->dynticks),
25070 @@ -71815,15 +72820,34 @@ index 693513b..b9f1d63 100644
25071 rdp->dynticks->dynticks_nesting,
25072 rdp->dynticks->dynticks_nmi_nesting,
25073 rdp->dynticks_fqs);
25074 -@@ -154,7 +154,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
25075 - rdp->completed, rdp->gpnum,
25076 - rdp->passed_quiesce, rdp->qs_pending);
25077 - seq_printf(m, ",%d,%llx,%d,%lu",
25078 -- atomic_read(&rdp->dynticks->dynticks),
25079 -+ atomic_read_unchecked(&rdp->dynticks->dynticks),
25080 - rdp->dynticks->dynticks_nesting,
25081 - rdp->dynticks->dynticks_nmi_nesting,
25082 - rdp->dynticks_fqs);
25083 +@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
25084 + struct rcu_state *rsp = (struct rcu_state *)m->private;
25085 +
25086 + seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
25087 +- atomic_long_read(&rsp->expedited_start),
25088 ++ atomic_long_read_unchecked(&rsp->expedited_start),
25089 + atomic_long_read(&rsp->expedited_done),
25090 +- atomic_long_read(&rsp->expedited_wrap),
25091 +- atomic_long_read(&rsp->expedited_tryfail),
25092 +- atomic_long_read(&rsp->expedited_workdone1),
25093 +- atomic_long_read(&rsp->expedited_workdone2),
25094 +- atomic_long_read(&rsp->expedited_normal),
25095 +- atomic_long_read(&rsp->expedited_stoppedcpus),
25096 +- atomic_long_read(&rsp->expedited_done_tries),
25097 +- atomic_long_read(&rsp->expedited_done_lost),
25098 +- atomic_long_read(&rsp->expedited_done_exit));
25099 ++ atomic_long_read_unchecked(&rsp->expedited_wrap),
25100 ++ atomic_long_read_unchecked(&rsp->expedited_tryfail),
25101 ++ atomic_long_read_unchecked(&rsp->expedited_workdone1),
25102 ++ atomic_long_read_unchecked(&rsp->expedited_workdone2),
25103 ++ atomic_long_read_unchecked(&rsp->expedited_normal),
25104 ++ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
25105 ++ atomic_long_read_unchecked(&rsp->expedited_done_tries),
25106 ++ atomic_long_read_unchecked(&rsp->expedited_done_lost),
25107 ++ atomic_long_read_unchecked(&rsp->expedited_done_exit));
25108 + return 0;
25109 + }
25110 +
25111 diff --git a/kernel/resource.c b/kernel/resource.c
25112 index 73f35d4..4684fc4 100644
25113 --- a/kernel/resource.c
25114 @@ -71942,7 +72966,7 @@ index 98ec494..4241d6d 100644
25115
25116 default:
25117 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
25118 -index 15f60d0..7e50319 100644
25119 +index 0984a21..939f183 100644
25120 --- a/kernel/sched/auto_group.c
25121 +++ b/kernel/sched/auto_group.c
25122 @@ -11,7 +11,7 @@
25123 @@ -71964,10 +72988,10 @@ index 15f60d0..7e50319 100644
25124 #ifdef CONFIG_RT_GROUP_SCHED
25125 /*
25126 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
25127 -index c529d00..d00b4f3 100644
25128 +index 26058d0..06f15dd 100644
25129 --- a/kernel/sched/core.c
25130 +++ b/kernel/sched/core.c
25131 -@@ -3563,6 +3563,8 @@ int can_nice(const struct task_struct *p, const int nice)
25132 +@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
25133 /* convert nice value [19,-20] to rlimit style value [1,40] */
25134 int nice_rlim = 20 - nice;
25135
25136 @@ -71976,7 +73000,7 @@ index c529d00..d00b4f3 100644
25137 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
25138 capable(CAP_SYS_NICE));
25139 }
25140 -@@ -3596,7 +3598,8 @@ SYSCALL_DEFINE1(nice, int, increment)
25141 +@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
25142 if (nice > 19)
25143 nice = 19;
25144
25145 @@ -71986,7 +73010,7 @@ index c529d00..d00b4f3 100644
25146 return -EPERM;
25147
25148 retval = security_task_setnice(current, nice);
25149 -@@ -3750,6 +3753,7 @@ recheck:
25150 +@@ -3818,6 +3821,7 @@ recheck:
25151 unsigned long rlim_rtprio =
25152 task_rlimit(p, RLIMIT_RTPRIO);
25153
25154 @@ -71994,11 +73018,29 @@ index c529d00..d00b4f3 100644
25155 /* can't set/change the rt policy */
25156 if (policy != p->policy && !rlim_rtprio)
25157 return -EPERM;
25158 +@@ -5162,7 +5166,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
25159 + * happens before everything else. This has to be lower priority than
25160 + * the notifier in the perf_event subsystem, though.
25161 + */
25162 +-static struct notifier_block __cpuinitdata migration_notifier = {
25163 ++static struct notifier_block migration_notifier = {
25164 + .notifier_call = migration_call,
25165 + .priority = CPU_PRI_MIGRATION,
25166 + };
25167 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
25168 -index 6b800a1..0c36227 100644
25169 +index 81fa536..80fa821 100644
25170 --- a/kernel/sched/fair.c
25171 +++ b/kernel/sched/fair.c
25172 -@@ -4890,7 +4890,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
25173 +@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
25174 +
25175 + static void reset_ptenuma_scan(struct task_struct *p)
25176 + {
25177 +- ACCESS_ONCE(p->mm->numa_scan_seq)++;
25178 ++ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
25179 + p->mm->numa_scan_offset = 0;
25180 + }
25181 +
25182 +@@ -5663,7 +5663,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
25183 * run_rebalance_domains is triggered when needed from the scheduler tick.
25184 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
25185 */
25186 @@ -72008,10 +73050,10 @@ index 6b800a1..0c36227 100644
25187 int this_cpu = smp_processor_id();
25188 struct rq *this_rq = cpu_rq(this_cpu);
25189 diff --git a/kernel/signal.c b/kernel/signal.c
25190 -index 57dde52..2c561f0 100644
25191 +index 3d09cf6..a67d2c6 100644
25192 --- a/kernel/signal.c
25193 +++ b/kernel/signal.c
25194 -@@ -49,12 +49,12 @@ static struct kmem_cache *sigqueue_cachep;
25195 +@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
25196
25197 int print_fatal_signals __read_mostly;
25198
25199 @@ -72026,7 +73068,7 @@ index 57dde52..2c561f0 100644
25200 {
25201 /* Is it explicitly or implicitly ignored? */
25202 return handler == SIG_IGN ||
25203 -@@ -63,7 +63,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
25204 +@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
25205
25206 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
25207 {
25208 @@ -72035,7 +73077,7 @@ index 57dde52..2c561f0 100644
25209
25210 handler = sig_handler(t, sig);
25211
25212 -@@ -367,6 +367,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
25213 +@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
25214 atomic_inc(&user->sigpending);
25215 rcu_read_unlock();
25216
25217 @@ -72045,7 +73087,7 @@ index 57dde52..2c561f0 100644
25218 if (override_rlimit ||
25219 atomic_read(&user->sigpending) <=
25220 task_rlimit(t, RLIMIT_SIGPENDING)) {
25221 -@@ -491,7 +494,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
25222 +@@ -492,7 +495,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
25223
25224 int unhandled_signal(struct task_struct *tsk, int sig)
25225 {
25226 @@ -72054,7 +73096,7 @@ index 57dde52..2c561f0 100644
25227 if (is_global_init(tsk))
25228 return 1;
25229 if (handler != SIG_IGN && handler != SIG_DFL)
25230 -@@ -811,6 +814,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
25231 +@@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
25232 }
25233 }
25234
25235 @@ -72068,7 +73110,7 @@ index 57dde52..2c561f0 100644
25236 return security_task_kill(t, info, sig, 0);
25237 }
25238
25239 -@@ -1192,7 +1202,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
25240 +@@ -1194,7 +1204,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
25241 return send_signal(sig, info, p, 1);
25242 }
25243
25244 @@ -72077,7 +73119,7 @@ index 57dde52..2c561f0 100644
25245 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
25246 {
25247 return send_signal(sig, info, t, 0);
25248 -@@ -1229,6 +1239,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
25249 +@@ -1231,6 +1241,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
25250 unsigned long int flags;
25251 int ret, blocked, ignored;
25252 struct k_sigaction *action;
25253 @@ -72085,7 +73127,7 @@ index 57dde52..2c561f0 100644
25254
25255 spin_lock_irqsave(&t->sighand->siglock, flags);
25256 action = &t->sighand->action[sig-1];
25257 -@@ -1243,9 +1254,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
25258 +@@ -1245,9 +1256,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
25259 }
25260 if (action->sa.sa_handler == SIG_DFL)
25261 t->signal->flags &= ~SIGNAL_UNKILLABLE;
25262 @@ -72104,7 +73146,7 @@ index 57dde52..2c561f0 100644
25263 return ret;
25264 }
25265
25266 -@@ -1312,8 +1332,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
25267 +@@ -1314,8 +1334,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
25268 ret = check_kill_permission(sig, info, p);
25269 rcu_read_unlock();
25270
25271 @@ -72117,7 +73159,7 @@ index 57dde52..2c561f0 100644
25272
25273 return ret;
25274 }
25275 -@@ -2863,7 +2886,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
25276 +@@ -2852,7 +2875,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
25277 int error = -ESRCH;
25278
25279 rcu_read_lock();
25280 @@ -72134,8 +73176,32 @@ index 57dde52..2c561f0 100644
25281 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
25282 error = check_kill_permission(sig, info, p);
25283 /*
25284 +@@ -3135,8 +3166,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
25285 + }
25286 + seg = get_fs();
25287 + set_fs(KERNEL_DS);
25288 +- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
25289 +- (stack_t __force __user *) &uoss,
25290 ++ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
25291 ++ (stack_t __force_user *) &uoss,
25292 + compat_user_stack_pointer());
25293 + set_fs(seg);
25294 + if (ret >= 0 && uoss_ptr) {
25295 +diff --git a/kernel/smp.c b/kernel/smp.c
25296 +index 69f38bd..77bbf12 100644
25297 +--- a/kernel/smp.c
25298 ++++ b/kernel/smp.c
25299 +@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
25300 + return NOTIFY_OK;
25301 + }
25302 +
25303 +-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
25304 ++static struct notifier_block hotplug_cfd_notifier = {
25305 + .notifier_call = hotplug_cfd,
25306 + };
25307 +
25308 diff --git a/kernel/softirq.c b/kernel/softirq.c
25309 -index cc96bdc..6a96894 100644
25310 +index ed567ba..dc61b61 100644
25311 --- a/kernel/softirq.c
25312 +++ b/kernel/softirq.c
25313 @@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
25314 @@ -72188,11 +73254,29 @@ index cc96bdc..6a96894 100644
25315 {
25316 struct tasklet_struct *list;
25317
25318 +@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
25319 + return NOTIFY_OK;
25320 + }
25321 +
25322 +-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
25323 ++static struct notifier_block remote_softirq_cpu_notifier = {
25324 + .notifier_call = remote_softirq_cpu_notify,
25325 + };
25326 +
25327 +@@ -835,7 +835,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
25328 + return NOTIFY_OK;
25329 + }
25330 +
25331 +-static struct notifier_block __cpuinitdata cpu_nfb = {
25332 ++static struct notifier_block cpu_nfb = {
25333 + .notifier_call = cpu_callback
25334 + };
25335 +
25336 diff --git a/kernel/srcu.c b/kernel/srcu.c
25337 -index 97c465e..d83f3bb 100644
25338 +index 2b85982..d52ab26 100644
25339 --- a/kernel/srcu.c
25340 +++ b/kernel/srcu.c
25341 -@@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
25342 +@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
25343 preempt_disable();
25344 idx = rcu_dereference_index_check(sp->completed,
25345 rcu_read_lock_sched_held()) & 0x1;
25346 @@ -72204,7 +73288,7 @@ index 97c465e..d83f3bb 100644
25347 preempt_enable();
25348 return idx;
25349 }
25350 -@@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
25351 +@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
25352 {
25353 preempt_disable();
25354 smp_mb(); /* C */ /* Avoid leaking the critical section. */
25355 @@ -72213,8 +73297,21 @@ index 97c465e..d83f3bb 100644
25356 preempt_enable();
25357 }
25358 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
25359 +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
25360 +index 2f194e9..2c05ea9 100644
25361 +--- a/kernel/stop_machine.c
25362 ++++ b/kernel/stop_machine.c
25363 +@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
25364 + * cpu notifiers. It currently shares the same priority as sched
25365 + * migration_notifier.
25366 + */
25367 +-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
25368 ++static struct notifier_block cpu_stop_cpu_notifier = {
25369 + .notifier_call = cpu_stop_cpu_callback,
25370 + .priority = 10,
25371 + };
25372 diff --git a/kernel/sys.c b/kernel/sys.c
25373 -index e6e0ece..1f2e413 100644
25374 +index 265b376..b0cd50d 100644
25375 --- a/kernel/sys.c
25376 +++ b/kernel/sys.c
25377 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
25378 @@ -72373,7 +73470,7 @@ index e6e0ece..1f2e413 100644
25379 break;
25380 }
25381 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
25382 -index 26f65ea..df8e5ad 100644
25383 +index c88878d..99d321b 100644
25384 --- a/kernel/sysctl.c
25385 +++ b/kernel/sysctl.c
25386 @@ -92,7 +92,6 @@
25387 @@ -72425,7 +73522,7 @@ index 26f65ea..df8e5ad 100644
25388 /* The default sysctl tables: */
25389
25390 static struct ctl_table sysctl_base_table[] = {
25391 -@@ -266,6 +279,22 @@ static int max_extfrag_threshold = 1000;
25392 +@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
25393 #endif
25394
25395 static struct ctl_table kern_table[] = {
25396 @@ -72448,7 +73545,7 @@ index 26f65ea..df8e5ad 100644
25397 {
25398 .procname = "sched_child_runs_first",
25399 .data = &sysctl_sched_child_runs_first,
25400 -@@ -552,7 +581,7 @@ static struct ctl_table kern_table[] = {
25401 +@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
25402 .data = &modprobe_path,
25403 .maxlen = KMOD_PATH_LEN,
25404 .mode = 0644,
25405 @@ -72457,7 +73554,7 @@ index 26f65ea..df8e5ad 100644
25406 },
25407 {
25408 .procname = "modules_disabled",
25409 -@@ -719,16 +748,20 @@ static struct ctl_table kern_table[] = {
25410 +@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
25411 .extra1 = &zero,
25412 .extra2 = &one,
25413 },
25414 @@ -72479,7 +73576,7 @@ index 26f65ea..df8e5ad 100644
25415 {
25416 .procname = "ngroups_max",
25417 .data = &ngroups_max,
25418 -@@ -1225,6 +1258,13 @@ static struct ctl_table vm_table[] = {
25419 +@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
25420 .proc_handler = proc_dointvec_minmax,
25421 .extra1 = &zero,
25422 },
25423 @@ -72493,7 +73590,7 @@ index 26f65ea..df8e5ad 100644
25424 #else
25425 {
25426 .procname = "nr_trim_pages",
25427 -@@ -1675,6 +1715,16 @@ int proc_dostring(struct ctl_table *table, int write,
25428 +@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
25429 buffer, lenp, ppos);
25430 }
25431
25432 @@ -72510,7 +73607,7 @@ index 26f65ea..df8e5ad 100644
25433 static size_t proc_skip_spaces(char **buf)
25434 {
25435 size_t ret;
25436 -@@ -1780,6 +1830,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
25437 +@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
25438 len = strlen(tmp);
25439 if (len > *size)
25440 len = *size;
25441 @@ -72519,7 +73616,7 @@ index 26f65ea..df8e5ad 100644
25442 if (copy_to_user(*buf, tmp, len))
25443 return -EFAULT;
25444 *size -= len;
25445 -@@ -1972,7 +2024,6 @@ static int proc_taint(struct ctl_table *table, int write,
25446 +@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
25447 return err;
25448 }
25449
25450 @@ -72527,7 +73624,7 @@ index 26f65ea..df8e5ad 100644
25451 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
25452 void __user *buffer, size_t *lenp, loff_t *ppos)
25453 {
25454 -@@ -1981,7 +2032,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
25455 +@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
25456
25457 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
25458 }
25459 @@ -72535,7 +73632,7 @@ index 26f65ea..df8e5ad 100644
25460
25461 struct do_proc_dointvec_minmax_conv_param {
25462 int *min;
25463 -@@ -2128,8 +2178,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
25464 +@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
25465 *i = val;
25466 } else {
25467 val = convdiv * (*i) / convmul;
25468 @@ -72548,7 +73645,7 @@ index 26f65ea..df8e5ad 100644
25469 err = proc_put_long(&buffer, &left, val, false);
25470 if (err)
25471 break;
25472 -@@ -2521,6 +2574,12 @@ int proc_dostring(struct ctl_table *table, int write,
25473 +@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
25474 return -ENOSYS;
25475 }
25476
25477 @@ -72561,7 +73658,7 @@ index 26f65ea..df8e5ad 100644
25478 int proc_dointvec(struct ctl_table *table, int write,
25479 void __user *buffer, size_t *lenp, loff_t *ppos)
25480 {
25481 -@@ -2577,5 +2636,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
25482 +@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
25483 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
25484 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
25485 EXPORT_SYMBOL(proc_dostring);
25486 @@ -72569,7 +73666,7 @@ index 26f65ea..df8e5ad 100644
25487 EXPORT_SYMBOL(proc_doulongvec_minmax);
25488 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
25489 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
25490 -index 65bdcf1..21eb831 100644
25491 +index 5a63844..25dfc5c 100644
25492 --- a/kernel/sysctl_binary.c
25493 +++ b/kernel/sysctl_binary.c
25494 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
25495 @@ -72705,7 +73802,7 @@ index f113755..ec24223 100644
25496 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
25497 tick_broadcast_clear_oneshot(cpu);
25498 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
25499 -index e424970..4c7962b 100644
25500 +index cbc6acb..3a77191 100644
25501 --- a/kernel/time/timekeeping.c
25502 +++ b/kernel/time/timekeeping.c
25503 @@ -15,6 +15,7 @@
25504 @@ -72716,7 +73813,7 @@ index e424970..4c7962b 100644
25505 #include <linux/syscore_ops.h>
25506 #include <linux/clocksource.h>
25507 #include <linux/jiffies.h>
25508 -@@ -368,6 +369,8 @@ int do_settimeofday(const struct timespec *tv)
25509 +@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
25510 if (!timespec_valid_strict(tv))
25511 return -EINVAL;
25512
25513 @@ -72844,7 +73941,7 @@ index 0b537f2..40d6c20 100644
25514 return -ENOMEM;
25515 return 0;
25516 diff --git a/kernel/timer.c b/kernel/timer.c
25517 -index 367d008..46857a0 100644
25518 +index 367d008..1ee9ed9 100644
25519 --- a/kernel/timer.c
25520 +++ b/kernel/timer.c
25521 @@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
25522 @@ -72861,7 +73958,7 @@ index 367d008..46857a0 100644
25523 }
25524
25525 -static struct notifier_block __cpuinitdata timers_nb = {
25526 -+static struct notifier_block __cpuinitconst timers_nb = {
25527 ++static struct notifier_block timers_nb = {
25528 .notifier_call = timer_cpu_notify,
25529 };
25530
25531 @@ -72897,7 +73994,7 @@ index c0bd030..62a1927 100644
25532 ret = -EIO;
25533 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
25534 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
25535 -index 356bc2f..7c94fc0 100644
25536 +index 41473b4..325fcfc 100644
25537 --- a/kernel/trace/ftrace.c
25538 +++ b/kernel/trace/ftrace.c
25539 @@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
25540 @@ -72969,7 +74066,7 @@ index 356bc2f..7c94fc0 100644
25541
25542 ftrace_graph_active++;
25543 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
25544 -index 4cb5e51..e7e05d9 100644
25545 +index ce8514f..8233573 100644
25546 --- a/kernel/trace/ring_buffer.c
25547 +++ b/kernel/trace/ring_buffer.c
25548 @@ -346,9 +346,9 @@ struct buffer_data_page {
25549 @@ -72984,18 +74081,18 @@ index 4cb5e51..e7e05d9 100644
25550 unsigned long real_end; /* real end of data */
25551 struct buffer_data_page *page; /* Actual data page */
25552 };
25553 -@@ -460,8 +460,8 @@ struct ring_buffer_per_cpu {
25554 - unsigned long lost_events;
25555 +@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
25556 unsigned long last_overrun;
25557 local_t entries_bytes;
25558 -- local_t commit_overrun;
25559 + local_t entries;
25560 - local_t overrun;
25561 -+ local_unchecked_t commit_overrun;
25562 +- local_t commit_overrun;
25563 + local_unchecked_t overrun;
25564 - local_t entries;
25565 ++ local_unchecked_t commit_overrun;
25566 + local_t dropped_events;
25567 local_t committing;
25568 local_t commits;
25569 -@@ -860,8 +860,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
25570 +@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
25571 *
25572 * We add a counter to the write field to denote this.
25573 */
25574 @@ -73006,7 +74103,7 @@ index 4cb5e51..e7e05d9 100644
25575
25576 /*
25577 * Just make sure we have seen our old_write and synchronize
25578 -@@ -889,8 +889,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
25579 +@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
25580 * cmpxchg to only update if an interrupt did not already
25581 * do it for us. If the cmpxchg fails, we don't care.
25582 */
25583 @@ -73017,7 +74114,7 @@ index 4cb5e51..e7e05d9 100644
25584
25585 /*
25586 * No need to worry about races with clearing out the commit.
25587 -@@ -1249,12 +1249,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
25588 +@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
25589
25590 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
25591 {
25592 @@ -73032,7 +74129,7 @@ index 4cb5e51..e7e05d9 100644
25593 }
25594
25595 static int
25596 -@@ -1349,7 +1349,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
25597 +@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
25598 * bytes consumed in ring buffer from here.
25599 * Increment overrun to account for the lost events.
25600 */
25601 @@ -73041,7 +74138,7 @@ index 4cb5e51..e7e05d9 100644
25602 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
25603 }
25604
25605 -@@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
25606 +@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
25607 * it is our responsibility to update
25608 * the counters.
25609 */
25610 @@ -73050,7 +74147,7 @@ index 4cb5e51..e7e05d9 100644
25611 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
25612
25613 /*
25614 -@@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25615 +@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25616 if (tail == BUF_PAGE_SIZE)
25617 tail_page->real_end = 0;
25618
25619 @@ -73059,7 +74156,7 @@ index 4cb5e51..e7e05d9 100644
25620 return;
25621 }
25622
25623 -@@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25624 +@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25625 rb_event_set_padding(event);
25626
25627 /* Set the write back to the previous setting */
25628 @@ -73068,7 +74165,7 @@ index 4cb5e51..e7e05d9 100644
25629 return;
25630 }
25631
25632 -@@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25633 +@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
25634
25635 /* Set write to end of buffer */
25636 length = (tail + length) - BUF_PAGE_SIZE;
25637 @@ -73077,7 +74174,7 @@ index 4cb5e51..e7e05d9 100644
25638 }
25639
25640 /*
25641 -@@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25642 +@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25643 * about it.
25644 */
25645 if (unlikely(next_page == commit_page)) {
25646 @@ -73086,7 +74183,7 @@ index 4cb5e51..e7e05d9 100644
25647 goto out_reset;
25648 }
25649
25650 -@@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25651 +@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25652 cpu_buffer->tail_page) &&
25653 (cpu_buffer->commit_page ==
25654 cpu_buffer->reader_page))) {
25655 @@ -73095,7 +74192,7 @@ index 4cb5e51..e7e05d9 100644
25656 goto out_reset;
25657 }
25658 }
25659 -@@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
25660 +@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
25661 length += RB_LEN_TIME_EXTEND;
25662
25663 tail_page = cpu_buffer->tail_page;
25664 @@ -73104,7 +74201,7 @@ index 4cb5e51..e7e05d9 100644
25665
25666 /* set write to only the index of the write */
25667 write &= RB_WRITE_MASK;
25668 -@@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
25669 +@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
25670 kmemcheck_annotate_bitfield(event, bitfield);
25671 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
25672
25673 @@ -73113,7 +74210,7 @@ index 4cb5e51..e7e05d9 100644
25674
25675 /*
25676 * If this is the first commit on the page, then update
25677 -@@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
25678 +@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
25679
25680 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
25681 unsigned long write_mask =
25682 @@ -73122,7 +74219,7 @@ index 4cb5e51..e7e05d9 100644
25683 unsigned long event_length = rb_event_length(event);
25684 /*
25685 * This is on the tail page. It is possible that
25686 -@@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
25687 +@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
25688 */
25689 old_index += write_mask;
25690 new_index += write_mask;
25691 @@ -73131,7 +74228,7 @@ index 4cb5e51..e7e05d9 100644
25692 if (index == old_index) {
25693 /* update counters */
25694 local_sub(event_length, &cpu_buffer->entries_bytes);
25695 -@@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
25696 +@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
25697
25698 /* Do the likely case first */
25699 if (likely(bpage->page == (void *)addr)) {
25700 @@ -73140,7 +74237,7 @@ index 4cb5e51..e7e05d9 100644
25701 return;
25702 }
25703
25704 -@@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
25705 +@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
25706 start = bpage;
25707 do {
25708 if (bpage->page == (void *)addr) {
25709 @@ -73149,7 +74246,7 @@ index 4cb5e51..e7e05d9 100644
25710 return;
25711 }
25712 rb_inc_page(cpu_buffer, &bpage);
25713 -@@ -2923,7 +2923,7 @@ static inline unsigned long
25714 +@@ -2926,7 +2926,7 @@ static inline unsigned long
25715 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
25716 {
25717 return local_read(&cpu_buffer->entries) -
25718 @@ -73158,7 +74255,7 @@ index 4cb5e51..e7e05d9 100644
25719 }
25720
25721 /**
25722 -@@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
25723 +@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
25724 return 0;
25725
25726 cpu_buffer = buffer->buffers[cpu];
25727 @@ -73167,7 +74264,7 @@ index 4cb5e51..e7e05d9 100644
25728
25729 return ret;
25730 }
25731 -@@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
25732 +@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
25733 return 0;
25734
25735 cpu_buffer = buffer->buffers[cpu];
25736 @@ -73176,7 +74273,7 @@ index 4cb5e51..e7e05d9 100644
25737
25738 return ret;
25739 }
25740 -@@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
25741 +@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
25742 /* if you care about this being correct, lock the buffer */
25743 for_each_buffer_cpu(buffer, cpu) {
25744 cpu_buffer = buffer->buffers[cpu];
25745 @@ -73185,7 +74282,7 @@ index 4cb5e51..e7e05d9 100644
25746 }
25747
25748 return overruns;
25749 -@@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
25750 +@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
25751 /*
25752 * Reset the reader page to size zero.
25753 */
25754 @@ -73196,7 +74293,7 @@ index 4cb5e51..e7e05d9 100644
25755 local_set(&cpu_buffer->reader_page->page->commit, 0);
25756 cpu_buffer->reader_page->real_end = 0;
25757
25758 -@@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
25759 +@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
25760 * want to compare with the last_overrun.
25761 */
25762 smp_mb();
25763 @@ -73205,7 +74302,7 @@ index 4cb5e51..e7e05d9 100644
25764
25765 /*
25766 * Here's the tricky part.
25767 -@@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
25768 +@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
25769
25770 cpu_buffer->head_page
25771 = list_entry(cpu_buffer->pages, struct buffer_page, list);
25772 @@ -73216,7 +74313,7 @@ index 4cb5e51..e7e05d9 100644
25773 local_set(&cpu_buffer->head_page->page->commit, 0);
25774
25775 cpu_buffer->head_page->read = 0;
25776 -@@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
25777 +@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
25778
25779 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
25780 INIT_LIST_HEAD(&cpu_buffer->new_pages);
25781 @@ -73227,15 +74324,15 @@ index 4cb5e51..e7e05d9 100644
25782 local_set(&cpu_buffer->reader_page->page->commit, 0);
25783 cpu_buffer->reader_page->read = 0;
25784
25785 -- local_set(&cpu_buffer->commit_overrun, 0);
25786 -+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
25787 local_set(&cpu_buffer->entries_bytes, 0);
25788 - local_set(&cpu_buffer->overrun, 0);
25789 +- local_set(&cpu_buffer->commit_overrun, 0);
25790 + local_set_unchecked(&cpu_buffer->overrun, 0);
25791 ++ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
25792 + local_set(&cpu_buffer->dropped_events, 0);
25793 local_set(&cpu_buffer->entries, 0);
25794 local_set(&cpu_buffer->committing, 0);
25795 - local_set(&cpu_buffer->commits, 0);
25796 -@@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
25797 +@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
25798 rb_init_page(bpage);
25799 bpage = reader->page;
25800 reader->page = *data_page;
25801 @@ -73247,10 +74344,10 @@ index 4cb5e51..e7e05d9 100644
25802 *data_page = bpage;
25803
25804 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
25805 -index 31e4f55..62da00f 100644
25806 +index 3c13e46..883d039 100644
25807 --- a/kernel/trace/trace.c
25808 +++ b/kernel/trace/trace.c
25809 -@@ -4436,10 +4436,9 @@ static const struct file_operations tracing_dyn_info_fops = {
25810 +@@ -4465,10 +4465,9 @@ static const struct file_operations tracing_dyn_info_fops = {
25811 };
25812 #endif
25813
25814 @@ -73262,7 +74359,7 @@ index 31e4f55..62da00f 100644
25815 static int once;
25816
25817 if (d_tracer)
25818 -@@ -4459,10 +4458,9 @@ struct dentry *tracing_init_dentry(void)
25819 +@@ -4488,10 +4487,9 @@ struct dentry *tracing_init_dentry(void)
25820 return d_tracer;
25821 }
25822
25823 @@ -73275,10 +74372,10 @@ index 31e4f55..62da00f 100644
25824 struct dentry *d_tracer;
25825
25826 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
25827 -index d608d09..bd3801f 100644
25828 +index 880073d..42db7c3 100644
25829 --- a/kernel/trace/trace_events.c
25830 +++ b/kernel/trace/trace_events.c
25831 -@@ -1320,10 +1320,6 @@ static LIST_HEAD(ftrace_module_file_list);
25832 +@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
25833 struct ftrace_module_file_ops {
25834 struct list_head list;
25835 struct module *mod;
25836 @@ -73289,7 +74386,7 @@ index d608d09..bd3801f 100644
25837 };
25838
25839 static struct ftrace_module_file_ops *
25840 -@@ -1344,17 +1340,12 @@ trace_create_file_ops(struct module *mod)
25841 +@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
25842
25843 file_ops->mod = mod;
25844
25845 @@ -73313,7 +74410,7 @@ index d608d09..bd3801f 100644
25846
25847 list_add(&file_ops->list, &ftrace_module_file_list);
25848
25849 -@@ -1378,8 +1369,8 @@ static void trace_module_add_events(struct module *mod)
25850 +@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
25851
25852 for_each_event(call, start, end) {
25853 __trace_add_event_call(*call, mod,
25854 @@ -73365,7 +74462,7 @@ index fd3c8aa..5f324a6 100644
25855 }
25856 entry = ring_buffer_event_data(event);
25857 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
25858 -index 123b189..1e9e2a6 100644
25859 +index 194d796..76edb8f 100644
25860 --- a/kernel/trace/trace_output.c
25861 +++ b/kernel/trace/trace_output.c
25862 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
25863 @@ -73377,7 +74474,7 @@ index 123b189..1e9e2a6 100644
25864 if (p) {
25865 s->len = p - s->buffer;
25866 return 1;
25867 -@@ -824,14 +824,16 @@ int register_ftrace_event(struct trace_event *event)
25868 +@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
25869 goto out;
25870 }
25871
25872 @@ -73399,10 +74496,10 @@ index 123b189..1e9e2a6 100644
25873 key = event->type & (EVENT_HASHSIZE - 1);
25874
25875 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
25876 -index 0c1b1657..95337e9 100644
25877 +index 42ca822..cdcacc6 100644
25878 --- a/kernel/trace/trace_stack.c
25879 +++ b/kernel/trace/trace_stack.c
25880 -@@ -53,7 +53,7 @@ static inline void check_stack(void)
25881 +@@ -52,7 +52,7 @@ static inline void check_stack(void)
25882 return;
25883
25884 /* we do not handle interrupt stacks yet */
25885 @@ -73412,7 +74509,7 @@ index 0c1b1657..95337e9 100644
25886
25887 local_irq_save(flags);
25888 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
25889 -index 28e9d6c9..50381bd 100644
25890 +index 67604e5..3ebb003 100644
25891 --- a/lib/Kconfig.debug
25892 +++ b/lib/Kconfig.debug
25893 @@ -1278,6 +1278,7 @@ config LATENCYTOP
25894 @@ -73442,10 +74539,10 @@ index 28e9d6c9..50381bd 100644
25895 This option lets you use the FireWire bus for remote debugging
25896 with help of the firewire-ohci driver. It enables unfiltered
25897 diff --git a/lib/Makefile b/lib/Makefile
25898 -index a08b791..a3ff1eb 100644
25899 +index 02ed6c0..bd243da 100644
25900 --- a/lib/Makefile
25901 +++ b/lib/Makefile
25902 -@@ -46,7 +46,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
25903 +@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
25904
25905 obj-$(CONFIG_BTREE) += btree.o
25906 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
25907 @@ -73455,7 +74552,7 @@ index a08b791..a3ff1eb 100644
25908
25909 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
25910 diff --git a/lib/bitmap.c b/lib/bitmap.c
25911 -index 06fdfa1..97c5c7d 100644
25912 +index 06f7e4f..f3cf2b0 100644
25913 --- a/lib/bitmap.c
25914 +++ b/lib/bitmap.c
25915 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
25916 @@ -73543,10 +74640,10 @@ index 80b9c76..9e32279 100644
25917 EXPORT_SYMBOL(devm_ioport_unmap);
25918
25919 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
25920 -index d84beb9..da44791 100644
25921 +index 5e396ac..58d5de1 100644
25922 --- a/lib/dma-debug.c
25923 +++ b/lib/dma-debug.c
25924 -@@ -754,7 +754,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
25925 +@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
25926
25927 void dma_debug_add_bus(struct bus_type *bus)
25928 {
25929 @@ -73555,7 +74652,7 @@ index d84beb9..da44791 100644
25930
25931 if (global_disable)
25932 return;
25933 -@@ -919,7 +919,7 @@ out:
25934 +@@ -942,7 +942,7 @@ out:
25935
25936 static void check_for_stack(struct device *dev, void *addr)
25937 {
25938 @@ -73795,7 +74892,7 @@ index a28df52..3d55877 100644
25939 unsigned long c;
25940
25941 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
25942 -index 39c99fe..18f060b 100644
25943 +index fab33a9..3b5fe68 100644
25944 --- a/lib/vsprintf.c
25945 +++ b/lib/vsprintf.c
25946 @@ -16,6 +16,9 @@
25947 @@ -73808,7 +74905,7 @@ index 39c99fe..18f060b 100644
25948 #include <stdarg.h>
25949 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
25950 #include <linux/types.h>
25951 -@@ -533,7 +536,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
25952 +@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
25953 char sym[KSYM_SYMBOL_LEN];
25954 if (ext == 'B')
25955 sprint_backtrace(sym, value);
25956 @@ -73817,7 +74914,7 @@ index 39c99fe..18f060b 100644
25957 sprint_symbol(sym, value);
25958 else
25959 sprint_symbol_no_offset(sym, value);
25960 -@@ -966,7 +969,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
25961 +@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
25962 return number(buf, end, *(const netdev_features_t *)addr, spec);
25963 }
25964
25965 @@ -73829,7 +74926,7 @@ index 39c99fe..18f060b 100644
25966
25967 /*
25968 * Show a '%p' thing. A kernel extension is that the '%p' is followed
25969 -@@ -980,6 +987,8 @@ int kptr_restrict __read_mostly;
25970 +@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
25971 * - 'S' For symbolic direct pointers with offset
25972 * - 's' For symbolic direct pointers without offset
25973 * - 'B' For backtraced symbolic direct pointers with offset
25974 @@ -73838,7 +74935,7 @@ index 39c99fe..18f060b 100644
25975 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
25976 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
25977 * - 'M' For a 6-byte MAC address, it prints the address in the
25978 -@@ -1035,12 +1044,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25979 +@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25980
25981 if (!ptr && *fmt != 'K') {
25982 /*
25983 @@ -73853,7 +74950,7 @@ index 39c99fe..18f060b 100644
25984 }
25985
25986 switch (*fmt) {
25987 -@@ -1050,6 +1059,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25988 +@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25989 /* Fallthrough */
25990 case 'S':
25991 case 's':
25992 @@ -73867,7 +74964,7 @@ index 39c99fe..18f060b 100644
25993 case 'B':
25994 return symbol_string(buf, end, ptr, spec, *fmt);
25995 case 'R':
25996 -@@ -1090,6 +1106,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25997 +@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
25998 va_end(va);
25999 return buf;
26000 }
26001 @@ -73876,7 +74973,7 @@ index 39c99fe..18f060b 100644
26002 case 'K':
26003 /*
26004 * %pK cannot be used in IRQ context because its test
26005 -@@ -1113,6 +1131,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
26006 +@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
26007 }
26008 break;
26009 }
26010 @@ -73898,7 +74995,7 @@ index 39c99fe..18f060b 100644
26011 spec.flags |= SMALL;
26012 if (spec.field_width == -1) {
26013 spec.field_width = default_width;
26014 -@@ -1831,11 +1864,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
26015 +@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
26016 typeof(type) value; \
26017 if (sizeof(type) == 8) { \
26018 args = PTR_ALIGN(args, sizeof(u32)); \
26019 @@ -73913,7 +75010,7 @@ index 39c99fe..18f060b 100644
26020 } \
26021 args += sizeof(type); \
26022 value; \
26023 -@@ -1898,7 +1931,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
26024 +@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
26025 case FORMAT_TYPE_STR: {
26026 const char *str_arg = args;
26027 args += strlen(str_arg) + 1;
26028 @@ -73930,10 +75027,10 @@ index 0000000..7cd6065
26029 @@ -0,0 +1 @@
26030 +-grsec
26031 diff --git a/mm/Kconfig b/mm/Kconfig
26032 -index a3f8ddd..f31e92e 100644
26033 +index 278e3ab..87c384d 100644
26034 --- a/mm/Kconfig
26035 +++ b/mm/Kconfig
26036 -@@ -252,10 +252,10 @@ config KSM
26037 +@@ -286,10 +286,10 @@ config KSM
26038 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
26039
26040 config DEFAULT_MMAP_MIN_ADDR
26041 @@ -73947,7 +75044,7 @@ index a3f8ddd..f31e92e 100644
26042 This is the portion of low virtual memory which should be protected
26043 from userspace allocation. Keeping a user from writing to low pages
26044 can help reduce the impact of kernel NULL pointer bugs.
26045 -@@ -286,7 +286,7 @@ config MEMORY_FAILURE
26046 +@@ -320,7 +320,7 @@ config MEMORY_FAILURE
26047
26048 config HWPOISON_INJECT
26049 tristate "HWPoison pages injector"
26050 @@ -73994,22 +75091,21 @@ index a0aaf0e..20325c3 100644
26051 * Make sure the vma is shared, that it supports prefaulting,
26052 * and that the remapped range is valid and fully within
26053 diff --git a/mm/highmem.c b/mm/highmem.c
26054 -index 09fc744..3936897 100644
26055 +index b32b70c..e512eb0 100644
26056 --- a/mm/highmem.c
26057 +++ b/mm/highmem.c
26058 -@@ -138,9 +138,10 @@ static void flush_all_zero_pkmaps(void)
26059 +@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
26060 * So no dangers, even with speculative execution.
26061 */
26062 page = pte_page(pkmap_page_table[i]);
26063 + pax_open_kernel();
26064 - pte_clear(&init_mm, (unsigned long)page_address(page),
26065 - &pkmap_page_table[i]);
26066 + pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
26067 -
26068 + pax_close_kernel();
26069 set_page_address(page, NULL);
26070 need_flush = 1;
26071 }
26072 -@@ -199,9 +200,11 @@ start:
26073 +@@ -198,9 +199,11 @@ start:
26074 }
26075 }
26076 vaddr = PKMAP_ADDR(last_pkmap_nr);
26077 @@ -74022,24 +75118,11 @@ index 09fc744..3936897 100644
26078 pkmap_count[last_pkmap_nr] = 1;
26079 set_page_address(page, (void *)vaddr);
26080
26081 -diff --git a/mm/huge_memory.c b/mm/huge_memory.c
26082 -index 40f17c3..c1cc011 100644
26083 ---- a/mm/huge_memory.c
26084 -+++ b/mm/huge_memory.c
26085 -@@ -710,7 +710,7 @@ out:
26086 - * run pte_offset_map on the pmd, if an huge pmd could
26087 - * materialize from under us from a different thread.
26088 - */
26089 -- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
26090 -+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
26091 - return VM_FAULT_OOM;
26092 - /* if an huge pmd materialized from under us just retry later */
26093 - if (unlikely(pmd_trans_huge(*pmd)))
26094 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
26095 -index b969ed4..10e3e37 100644
26096 +index 546db81..01d5c53 100644
26097 --- a/mm/hugetlb.c
26098 +++ b/mm/hugetlb.c
26099 -@@ -2509,6 +2509,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
26100 +@@ -2511,6 +2511,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
26101 return 1;
26102 }
26103
26104 @@ -74067,7 +75150,7 @@ index b969ed4..10e3e37 100644
26105 /*
26106 * Hugetlb_cow() should be called with page lock of the original hugepage held.
26107 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
26108 -@@ -2627,6 +2648,11 @@ retry_avoidcopy:
26109 +@@ -2629,6 +2650,11 @@ retry_avoidcopy:
26110 make_huge_pte(vma, new_page, 1));
26111 page_remove_rmap(old_page);
26112 hugepage_add_new_anon_rmap(new_page, vma, address);
26113 @@ -74079,7 +75162,7 @@ index b969ed4..10e3e37 100644
26114 /* Make the old page be freed below */
26115 new_page = old_page;
26116 }
26117 -@@ -2786,6 +2812,10 @@ retry:
26118 +@@ -2788,6 +2814,10 @@ retry:
26119 && (vma->vm_flags & VM_SHARED)));
26120 set_huge_pte_at(mm, address, ptep, new_pte);
26121
26122 @@ -74090,7 +75173,7 @@ index b969ed4..10e3e37 100644
26123 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
26124 /* Optimization, do the COW without a second fault */
26125 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
26126 -@@ -2815,6 +2845,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26127 +@@ -2817,6 +2847,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26128 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
26129 struct hstate *h = hstate_vma(vma);
26130
26131 @@ -74101,7 +75184,7 @@ index b969ed4..10e3e37 100644
26132 address &= huge_page_mask(h);
26133
26134 ptep = huge_pte_offset(mm, address);
26135 -@@ -2828,6 +2862,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26136 +@@ -2830,6 +2864,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26137 VM_FAULT_SET_HINDEX(hstate_index(h));
26138 }
26139
26140 @@ -74129,10 +75212,10 @@ index b969ed4..10e3e37 100644
26141 if (!ptep)
26142 return VM_FAULT_OOM;
26143 diff --git a/mm/internal.h b/mm/internal.h
26144 -index 3c5197d..08d0065 100644
26145 +index 9ba2110..eaf0674 100644
26146 --- a/mm/internal.h
26147 +++ b/mm/internal.h
26148 -@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
26149 +@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
26150 * in mm/page_alloc.c
26151 */
26152 extern void __free_pages_bootmem(struct page *page, unsigned int order);
26153 @@ -74141,7 +75224,7 @@ index 3c5197d..08d0065 100644
26154 #ifdef CONFIG_MEMORY_FAILURE
26155 extern bool is_free_buddy_page(struct page *page);
26156 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
26157 -index a217cc5..44b2b35 100644
26158 +index 752a705..6c3102e 100644
26159 --- a/mm/kmemleak.c
26160 +++ b/mm/kmemleak.c
26161 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
26162 @@ -74153,7 +75236,7 @@ index a217cc5..44b2b35 100644
26163 }
26164 }
26165
26166 -@@ -1852,7 +1852,7 @@ static int __init kmemleak_late_init(void)
26167 +@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
26168 return -ENOMEM;
26169 }
26170
26171 @@ -74265,7 +75348,7 @@ index 03dfa5c..b032917 100644
26172 if (end == start)
26173 goto out;
26174 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
26175 -index 8b20278..05dac18 100644
26176 +index c6e4dd3..fdb2ca6 100644
26177 --- a/mm/memory-failure.c
26178 +++ b/mm/memory-failure.c
26179 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
26180 @@ -74340,7 +75423,7 @@ index 8b20278..05dac18 100644
26181 &mce_bad_pages);
26182 set_page_hwpoison_huge_page(hpage);
26183 dequeue_hwpoisoned_huge_page(hpage);
26184 -@@ -1582,7 +1582,7 @@ int soft_offline_page(struct page *page, int flags)
26185 +@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
26186 return ret;
26187
26188 done:
26189 @@ -74350,10 +75433,10 @@ index 8b20278..05dac18 100644
26190 /* keep elevated page count for bad page */
26191 return ret;
26192 diff --git a/mm/memory.c b/mm/memory.c
26193 -index f2973b2..fd020a7 100644
26194 +index bb1369f..efb96b5 100644
26195 --- a/mm/memory.c
26196 +++ b/mm/memory.c
26197 -@@ -431,6 +431,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
26198 +@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
26199 free_pte_range(tlb, pmd, addr);
26200 } while (pmd++, addr = next, addr != end);
26201
26202 @@ -74361,7 +75444,7 @@ index f2973b2..fd020a7 100644
26203 start &= PUD_MASK;
26204 if (start < floor)
26205 return;
26206 -@@ -445,6 +446,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
26207 +@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
26208 pmd = pmd_offset(pud, start);
26209 pud_clear(pud);
26210 pmd_free_tlb(tlb, pmd, start);
26211 @@ -74370,7 +75453,7 @@ index f2973b2..fd020a7 100644
26212 }
26213
26214 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
26215 -@@ -464,6 +467,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
26216 +@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
26217 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
26218 } while (pud++, addr = next, addr != end);
26219
26220 @@ -74378,7 +75461,7 @@ index f2973b2..fd020a7 100644
26221 start &= PGDIR_MASK;
26222 if (start < floor)
26223 return;
26224 -@@ -478,6 +482,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
26225 +@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
26226 pud = pud_offset(pgd, start);
26227 pgd_clear(pgd);
26228 pud_free_tlb(tlb, pud, start);
26229 @@ -74387,7 +75470,7 @@ index f2973b2..fd020a7 100644
26230 }
26231
26232 /*
26233 -@@ -1626,12 +1632,6 @@ no_page_table:
26234 +@@ -1618,12 +1624,6 @@ no_page_table:
26235 return page;
26236 }
26237
26238 @@ -74400,8 +75483,8 @@ index f2973b2..fd020a7 100644
26239 /**
26240 * __get_user_pages() - pin user pages in memory
26241 * @tsk: task_struct of target task
26242 -@@ -1704,10 +1704,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26243 - (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
26244 +@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26245 +
26246 i = 0;
26247
26248 - do {
26249 @@ -74413,7 +75496,7 @@ index f2973b2..fd020a7 100644
26250 if (!vma && in_gate_area(mm, start)) {
26251 unsigned long pg = start & PAGE_MASK;
26252 pgd_t *pgd;
26253 -@@ -1755,7 +1755,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26254 +@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26255 goto next_page;
26256 }
26257
26258 @@ -74422,7 +75505,7 @@ index f2973b2..fd020a7 100644
26259 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
26260 !(vm_flags & vma->vm_flags))
26261 return i ? : -EFAULT;
26262 -@@ -1782,11 +1782,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26263 +@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
26264 int ret;
26265 unsigned int fault_flags = 0;
26266
26267 @@ -74434,7 +75517,7 @@ index f2973b2..fd020a7 100644
26268 if (foll_flags & FOLL_WRITE)
26269 fault_flags |= FAULT_FLAG_WRITE;
26270 if (nonblocking)
26271 -@@ -1860,7 +1855,7 @@ next_page:
26272 +@@ -1865,7 +1860,7 @@ next_page:
26273 start += PAGE_SIZE;
26274 nr_pages--;
26275 } while (nr_pages && start < vma->vm_end);
26276 @@ -74443,7 +75526,7 @@ index f2973b2..fd020a7 100644
26277 return i;
26278 }
26279 EXPORT_SYMBOL(__get_user_pages);
26280 -@@ -2067,6 +2062,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
26281 +@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
26282 page_add_file_rmap(page);
26283 set_pte_at(mm, addr, pte, mk_pte(page, prot));
26284
26285 @@ -74454,7 +75537,7 @@ index f2973b2..fd020a7 100644
26286 retval = 0;
26287 pte_unmap_unlock(pte, ptl);
26288 return retval;
26289 -@@ -2111,9 +2110,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
26290 +@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
26291 if (!page_count(page))
26292 return -EINVAL;
26293 if (!(vma->vm_flags & VM_MIXEDMAP)) {
26294 @@ -74476,7 +75559,7 @@ index f2973b2..fd020a7 100644
26295 }
26296 return insert_page(vma, addr, page, vma->vm_page_prot);
26297 }
26298 -@@ -2196,6 +2207,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
26299 +@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
26300 unsigned long pfn)
26301 {
26302 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
26303 @@ -74484,7 +75567,7 @@ index f2973b2..fd020a7 100644
26304
26305 if (addr < vma->vm_start || addr >= vma->vm_end)
26306 return -EFAULT;
26307 -@@ -2396,7 +2408,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
26308 +@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
26309
26310 BUG_ON(pud_huge(*pud));
26311
26312 @@ -74495,7 +75578,7 @@ index f2973b2..fd020a7 100644
26313 if (!pmd)
26314 return -ENOMEM;
26315 do {
26316 -@@ -2416,7 +2430,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
26317 +@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
26318 unsigned long next;
26319 int err;
26320
26321 @@ -74506,7 +75589,7 @@ index f2973b2..fd020a7 100644
26322 if (!pud)
26323 return -ENOMEM;
26324 do {
26325 -@@ -2504,6 +2520,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
26326 +@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
26327 copy_user_highpage(dst, src, va, vma);
26328 }
26329
26330 @@ -74693,7 +75776,7 @@ index f2973b2..fd020a7 100644
26331 /*
26332 * This routine handles present pages, when users try to write
26333 * to a shared page. It is done by copying the page to a new address
26334 -@@ -2720,6 +2916,12 @@ gotten:
26335 +@@ -2725,6 +2921,12 @@ gotten:
26336 */
26337 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
26338 if (likely(pte_same(*page_table, orig_pte))) {
26339 @@ -74706,7 +75789,7 @@ index f2973b2..fd020a7 100644
26340 if (old_page) {
26341 if (!PageAnon(old_page)) {
26342 dec_mm_counter_fast(mm, MM_FILEPAGES);
26343 -@@ -2771,6 +2973,10 @@ gotten:
26344 +@@ -2776,6 +2978,10 @@ gotten:
26345 page_remove_rmap(old_page);
26346 }
26347
26348 @@ -74868,7 +75951,7 @@ index f2973b2..fd020a7 100644
26349 } else {
26350 if (cow_page)
26351 mem_cgroup_uncharge_page(cow_page);
26352 -@@ -3497,6 +3700,12 @@ int handle_pte_fault(struct mm_struct *mm,
26353 +@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
26354 if (flags & FAULT_FLAG_WRITE)
26355 flush_tlb_fix_spurious_fault(vma, address);
26356 }
26357 @@ -74881,7 +75964,7 @@ index f2973b2..fd020a7 100644
26358 unlock:
26359 pte_unmap_unlock(pte, ptl);
26360 return 0;
26361 -@@ -3513,6 +3722,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26362 +@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26363 pmd_t *pmd;
26364 pte_t *pte;
26365
26366 @@ -74892,7 +75975,7 @@ index f2973b2..fd020a7 100644
26367 __set_current_state(TASK_RUNNING);
26368
26369 count_vm_event(PGFAULT);
26370 -@@ -3524,6 +3737,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26371 +@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
26372 if (unlikely(is_vm_hugetlb_page(vma)))
26373 return hugetlb_fault(mm, vma, address, flags);
26374
26375 @@ -74927,16 +76010,7 @@ index f2973b2..fd020a7 100644
26376 retry:
26377 pgd = pgd_offset(mm, address);
26378 pud = pud_alloc(mm, pgd, address);
26379 -@@ -3565,7 +3806,7 @@ retry:
26380 - * run pte_offset_map on the pmd, if an huge pmd could
26381 - * materialize from under us from a different thread.
26382 - */
26383 -- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
26384 -+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
26385 - return VM_FAULT_OOM;
26386 - /* if an huge pmd materialized from under us just retry later */
26387 - if (unlikely(pmd_trans_huge(*pmd)))
26388 -@@ -3602,6 +3843,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
26389 +@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
26390 spin_unlock(&mm->page_table_lock);
26391 return 0;
26392 }
26393 @@ -74960,7 +76034,7 @@ index f2973b2..fd020a7 100644
26394 #endif /* __PAGETABLE_PUD_FOLDED */
26395
26396 #ifndef __PAGETABLE_PMD_FOLDED
26397 -@@ -3632,6 +3890,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
26398 +@@ -3819,6 +4077,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
26399 spin_unlock(&mm->page_table_lock);
26400 return 0;
26401 }
26402 @@ -74991,7 +76065,7 @@ index f2973b2..fd020a7 100644
26403 #endif /* __PAGETABLE_PMD_FOLDED */
26404
26405 int make_pages_present(unsigned long addr, unsigned long end)
26406 -@@ -3669,7 +3951,7 @@ static int __init gate_vma_init(void)
26407 +@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
26408 gate_vma.vm_start = FIXADDR_USER_START;
26409 gate_vma.vm_end = FIXADDR_USER_END;
26410 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26411 @@ -75001,10 +76075,10 @@ index f2973b2..fd020a7 100644
26412 return 0;
26413 }
26414 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
26415 -index 002c281..9429765 100644
26416 +index e2df1c1..1e31d57 100644
26417 --- a/mm/mempolicy.c
26418 +++ b/mm/mempolicy.c
26419 -@@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
26420 +@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
26421 unsigned long vmstart;
26422 unsigned long vmend;
26423
26424 @@ -75015,7 +76089,7 @@ index 002c281..9429765 100644
26425 vma = find_vma(mm, start);
26426 if (!vma || vma->vm_start > start)
26427 return -EFAULT;
26428 -@@ -691,9 +695,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
26429 +@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
26430 if (err)
26431 goto out;
26432 }
26433 @@ -75036,7 +76110,7 @@ index 002c281..9429765 100644
26434 }
26435
26436 out:
26437 -@@ -1150,6 +1165,17 @@ static long do_mbind(unsigned long start, unsigned long len,
26438 +@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
26439
26440 if (end < start)
26441 return -EINVAL;
26442 @@ -75054,7 +76128,7 @@ index 002c281..9429765 100644
26443 if (end == start)
26444 return 0;
26445
26446 -@@ -1373,8 +1399,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
26447 +@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
26448 */
26449 tcred = __task_cred(task);
26450 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
26451 @@ -75064,7 +76138,7 @@ index 002c281..9429765 100644
26452 rcu_read_unlock();
26453 err = -EPERM;
26454 goto out_put;
26455 -@@ -1405,6 +1430,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
26456 +@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
26457 goto out;
26458 }
26459
26460 @@ -75081,10 +76155,10 @@ index 002c281..9429765 100644
26461 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
26462
26463 diff --git a/mm/migrate.c b/mm/migrate.c
26464 -index 346d32d..d7adff2 100644
26465 +index 2fd8b4a..d70358f 100644
26466 --- a/mm/migrate.c
26467 +++ b/mm/migrate.c
26468 -@@ -1352,8 +1352,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
26469 +@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
26470 */
26471 tcred = __task_cred(task);
26472 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
26473 @@ -75167,18 +76241,18 @@ index c9bd528..da8d069 100644
26474 capable(CAP_IPC_LOCK))
26475 ret = do_mlockall(flags);
26476 diff --git a/mm/mmap.c b/mm/mmap.c
26477 -index 9a796c4..e2c9724 100644
26478 +index d1e4124..32a6988 100644
26479 --- a/mm/mmap.c
26480 +++ b/mm/mmap.c
26481 -@@ -31,6 +31,7 @@
26482 - #include <linux/audit.h>
26483 +@@ -32,6 +32,7 @@
26484 #include <linux/khugepaged.h>
26485 #include <linux/uprobes.h>
26486 + #include <linux/rbtree_augmented.h>
26487 +#include <linux/random.h>
26488
26489 #include <asm/uaccess.h>
26490 #include <asm/cacheflush.h>
26491 -@@ -47,6 +48,16 @@
26492 +@@ -48,6 +49,16 @@
26493 #define arch_rebalance_pgtables(addr, len) (addr)
26494 #endif
26495
26496 @@ -75195,7 +76269,7 @@ index 9a796c4..e2c9724 100644
26497 static void unmap_region(struct mm_struct *mm,
26498 struct vm_area_struct *vma, struct vm_area_struct *prev,
26499 unsigned long start, unsigned long end);
26500 -@@ -66,22 +77,32 @@ static void unmap_region(struct mm_struct *mm,
26501 +@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
26502 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
26503 *
26504 */
26505 @@ -75231,7 +76305,7 @@ index 9a796c4..e2c9724 100644
26506 /*
26507 * Make sure vm_committed_as in one cacheline and not cacheline shared with
26508 * other variables. It can be updated by several CPUs frequently.
26509 -@@ -223,6 +244,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
26510 +@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
26511 struct vm_area_struct *next = vma->vm_next;
26512
26513 might_sleep();
26514 @@ -75239,7 +76313,7 @@ index 9a796c4..e2c9724 100644
26515 if (vma->vm_ops && vma->vm_ops->close)
26516 vma->vm_ops->close(vma);
26517 if (vma->vm_file)
26518 -@@ -266,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
26519 +@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
26520 * not page aligned -Ram Gupta
26521 */
26522 rlim = rlimit(RLIMIT_DATA);
26523 @@ -75247,7 +76321,7 @@ index 9a796c4..e2c9724 100644
26524 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
26525 (mm->end_data - mm->start_data) > rlim)
26526 goto out;
26527 -@@ -736,6 +759,12 @@ static int
26528 +@@ -888,6 +911,12 @@ static int
26529 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
26530 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
26531 {
26532 @@ -75260,7 +76334,7 @@ index 9a796c4..e2c9724 100644
26533 if (is_mergeable_vma(vma, file, vm_flags) &&
26534 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
26535 if (vma->vm_pgoff == vm_pgoff)
26536 -@@ -755,6 +784,12 @@ static int
26537 +@@ -907,6 +936,12 @@ static int
26538 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
26539 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
26540 {
26541 @@ -75273,7 +76347,7 @@ index 9a796c4..e2c9724 100644
26542 if (is_mergeable_vma(vma, file, vm_flags) &&
26543 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
26544 pgoff_t vm_pglen;
26545 -@@ -797,13 +832,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
26546 +@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
26547 struct vm_area_struct *vma_merge(struct mm_struct *mm,
26548 struct vm_area_struct *prev, unsigned long addr,
26549 unsigned long end, unsigned long vm_flags,
26550 @@ -75295,7 +76369,7 @@ index 9a796c4..e2c9724 100644
26551 /*
26552 * We later require that vma->vm_flags == vm_flags,
26553 * so this tests vma->vm_flags & VM_SPECIAL, too.
26554 -@@ -819,6 +861,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26555 +@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26556 if (next && next->vm_end == end) /* cases 6, 7, 8 */
26557 next = next->vm_next;
26558
26559 @@ -75311,7 +76385,7 @@ index 9a796c4..e2c9724 100644
26560 /*
26561 * Can it merge with the predecessor?
26562 */
26563 -@@ -838,9 +889,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26564 +@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26565 /* cases 1, 6 */
26566 err = vma_adjust(prev, prev->vm_start,
26567 next->vm_end, prev->vm_pgoff, NULL);
26568 @@ -75337,7 +76411,7 @@ index 9a796c4..e2c9724 100644
26569 if (err)
26570 return NULL;
26571 khugepaged_enter_vma_merge(prev);
26572 -@@ -854,12 +920,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26573 +@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
26574 mpol_equal(policy, vma_policy(next)) &&
26575 can_vma_merge_before(next, vm_flags,
26576 anon_vma, file, pgoff+pglen)) {
26577 @@ -75367,7 +76441,7 @@ index 9a796c4..e2c9724 100644
26578 if (err)
26579 return NULL;
26580 khugepaged_enter_vma_merge(area);
26581 -@@ -968,16 +1049,13 @@ none:
26582 +@@ -1120,16 +1201,13 @@ none:
26583 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
26584 struct file *file, long pages)
26585 {
26586 @@ -75385,7 +76459,7 @@ index 9a796c4..e2c9724 100644
26587 mm->stack_vm += pages;
26588 }
26589 #endif /* CONFIG_PROC_FS */
26590 -@@ -1013,7 +1091,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26591 +@@ -1165,7 +1243,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26592 * (the exception is when the underlying filesystem is noexec
26593 * mounted, in which case we dont add PROT_EXEC.)
26594 */
26595 @@ -75394,7 +76468,7 @@ index 9a796c4..e2c9724 100644
26596 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
26597 prot |= PROT_EXEC;
26598
26599 -@@ -1039,7 +1117,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26600 +@@ -1191,7 +1269,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26601 /* Obtain the address to map to. we verify (or select) it and ensure
26602 * that it represents a valid section of the address space.
26603 */
26604 @@ -75403,7 +76477,7 @@ index 9a796c4..e2c9724 100644
26605 if (addr & ~PAGE_MASK)
26606 return addr;
26607
26608 -@@ -1050,6 +1128,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26609 +@@ -1202,6 +1280,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26610 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
26611 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
26612
26613 @@ -75440,7 +76514,7 @@ index 9a796c4..e2c9724 100644
26614 if (flags & MAP_LOCKED)
26615 if (!can_do_mlock())
26616 return -EPERM;
26617 -@@ -1061,6 +1169,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26618 +@@ -1213,6 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26619 locked += mm->locked_vm;
26620 lock_limit = rlimit(RLIMIT_MEMLOCK);
26621 lock_limit >>= PAGE_SHIFT;
26622 @@ -75448,7 +76522,7 @@ index 9a796c4..e2c9724 100644
26623 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
26624 return -EAGAIN;
26625 }
26626 -@@ -1127,6 +1236,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26627 +@@ -1279,6 +1388,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
26628 }
26629 }
26630
26631 @@ -75458,7 +76532,7 @@ index 9a796c4..e2c9724 100644
26632 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
26633 }
26634
26635 -@@ -1203,7 +1315,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
26636 +@@ -1356,7 +1468,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
26637 vm_flags_t vm_flags = vma->vm_flags;
26638
26639 /* If it was private or non-writable, the write bit is already clear */
26640 @@ -75467,7 +76541,7 @@ index 9a796c4..e2c9724 100644
26641 return 0;
26642
26643 /* The backer wishes to know when pages are first written to? */
26644 -@@ -1252,13 +1364,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
26645 +@@ -1405,13 +1517,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
26646 unsigned long charged = 0;
26647 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
26648
26649 @@ -75492,7 +76566,7 @@ index 9a796c4..e2c9724 100644
26650 }
26651
26652 /* Check against address space limit. */
26653 -@@ -1307,6 +1428,16 @@ munmap_back:
26654 +@@ -1460,6 +1581,16 @@ munmap_back:
26655 goto unacct_error;
26656 }
26657
26658 @@ -75509,7 +76583,7 @@ index 9a796c4..e2c9724 100644
26659 vma->vm_mm = mm;
26660 vma->vm_start = addr;
26661 vma->vm_end = addr + len;
26662 -@@ -1331,6 +1462,13 @@ munmap_back:
26663 +@@ -1484,6 +1615,13 @@ munmap_back:
26664 if (error)
26665 goto unmap_and_free_vma;
26666
26667 @@ -75523,7 +76597,7 @@ index 9a796c4..e2c9724 100644
26668 /* Can addr have changed??
26669 *
26670 * Answer: Yes, several device drivers can do it in their
26671 -@@ -1365,6 +1503,11 @@ munmap_back:
26672 +@@ -1522,6 +1660,11 @@ munmap_back:
26673 vma_link(mm, vma, prev, rb_link, rb_parent);
26674 file = vma->vm_file;
26675
26676 @@ -75535,7 +76609,7 @@ index 9a796c4..e2c9724 100644
26677 /* Once vma denies write, undo our temporary denial count */
26678 if (correct_wcount)
26679 atomic_inc(&inode->i_writecount);
26680 -@@ -1372,6 +1515,7 @@ out:
26681 +@@ -1529,6 +1672,7 @@ out:
26682 perf_event_mmap(vma);
26683
26684 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
26685 @@ -75543,7 +76617,7 @@ index 9a796c4..e2c9724 100644
26686 if (vm_flags & VM_LOCKED) {
26687 if (!mlock_vma_pages_range(vma, addr, addr + len))
26688 mm->locked_vm += (len >> PAGE_SHIFT);
26689 -@@ -1393,6 +1537,12 @@ unmap_and_free_vma:
26690 +@@ -1550,6 +1694,12 @@ unmap_and_free_vma:
26691 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
26692 charged = 0;
26693 free_vma:
26694 @@ -75556,7 +76630,7 @@ index 9a796c4..e2c9724 100644
26695 kmem_cache_free(vm_area_cachep, vma);
26696 unacct_error:
26697 if (charged)
26698 -@@ -1400,6 +1550,62 @@ unacct_error:
26699 +@@ -1557,6 +1707,62 @@ unacct_error:
26700 return error;
26701 }
26702
26703 @@ -75616,18 +76690,18 @@ index 9a796c4..e2c9724 100644
26704 + return -ENOMEM;
26705 +}
26706 +
26707 - /* Get an address range which is currently unmapped.
26708 - * For shmat() with addr=0.
26709 - *
26710 -@@ -1419,6 +1625,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26711 + unsigned long unmapped_area(struct vm_unmapped_area_info *info)
26712 + {
26713 + /*
26714 +@@ -1776,6 +1982,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26715 struct mm_struct *mm = current->mm;
26716 struct vm_area_struct *vma;
26717 - unsigned long start_addr;
26718 + struct vm_unmapped_area_info info;
26719 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26720
26721 if (len > TASK_SIZE)
26722 return -ENOMEM;
26723 -@@ -1426,18 +1633,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26724 +@@ -1783,17 +1990,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26725 if (flags & MAP_FIXED)
26726 return addr;
26727
26728 @@ -75637,62 +76711,26 @@ index 9a796c4..e2c9724 100644
26729 +
26730 if (addr) {
26731 addr = PAGE_ALIGN(addr);
26732 -- vma = find_vma(mm, addr);
26733 + vma = find_vma(mm, addr);
26734 - if (TASK_SIZE - len >= addr &&
26735 - (!vma || addr + len <= vma->vm_start))
26736 -- return addr;
26737 -+ if (TASK_SIZE - len >= addr) {
26738 -+ vma = find_vma(mm, addr);
26739 -+ if (check_heap_stack_gap(vma, addr, len, offset))
26740 -+ return addr;
26741 -+ }
26742 - }
26743 - if (len > mm->cached_hole_size) {
26744 -- start_addr = addr = mm->free_area_cache;
26745 -+ start_addr = addr = mm->free_area_cache;
26746 - } else {
26747 -- start_addr = addr = TASK_UNMAPPED_BASE;
26748 -- mm->cached_hole_size = 0;
26749 -+ start_addr = addr = mm->mmap_base;
26750 -+ mm->cached_hole_size = 0;
26751 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26752 + return addr;
26753 }
26754
26755 - full_search:
26756 -@@ -1448,34 +1660,40 @@ full_search:
26757 - * Start a new search - just in case we missed
26758 - * some holes.
26759 - */
26760 -- if (start_addr != TASK_UNMAPPED_BASE) {
26761 -- addr = TASK_UNMAPPED_BASE;
26762 -- start_addr = addr;
26763 -+ if (start_addr != mm->mmap_base) {
26764 -+ start_addr = addr = mm->mmap_base;
26765 - mm->cached_hole_size = 0;
26766 - goto full_search;
26767 - }
26768 - return -ENOMEM;
26769 - }
26770 -- if (!vma || addr + len <= vma->vm_start) {
26771 -- /*
26772 -- * Remember the place where we stopped the search:
26773 -- */
26774 -- mm->free_area_cache = addr + len;
26775 -- return addr;
26776 -- }
26777 -+ if (check_heap_stack_gap(vma, addr, len, offset))
26778 -+ break;
26779 - if (addr + mm->cached_hole_size < vma->vm_start)
26780 - mm->cached_hole_size = vma->vm_start - addr;
26781 - addr = vma->vm_end;
26782 - }
26783 + info.flags = 0;
26784 + info.length = len;
26785 + info.low_limit = TASK_UNMAPPED_BASE;
26786 +
26787 -+ /*
26788 -+ * Remember the place where we stopped the search:
26789 -+ */
26790 -+ mm->free_area_cache = addr + len;
26791 -+ return addr;
26792 - }
26793 - #endif
26794 ++#ifdef CONFIG_PAX_RANDMMAP
26795 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
26796 ++ info.low_limit += mm->delta_mmap;
26797 ++#endif
26798 ++
26799 + info.high_limit = TASK_SIZE;
26800 + info.align_mask = 0;
26801 + return vm_unmapped_area(&info);
26802 +@@ -1802,10 +2018,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26803
26804 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
26805 {
26806 @@ -75710,17 +76748,15 @@ index 9a796c4..e2c9724 100644
26807 mm->free_area_cache = addr;
26808 }
26809
26810 -@@ -1491,7 +1709,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26811 - {
26812 - struct vm_area_struct *vma;
26813 +@@ -1823,6 +2045,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26814 struct mm_struct *mm = current->mm;
26815 -- unsigned long addr = addr0, start_addr;
26816 -+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
26817 + unsigned long addr = addr0;
26818 + struct vm_unmapped_area_info info;
26819 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26820
26821 /* requested length too big for entire address space */
26822 if (len > TASK_SIZE)
26823 -@@ -1500,13 +1719,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26824 +@@ -1831,12 +2054,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26825 if (flags & MAP_FIXED)
26826 return addr;
26827
26828 @@ -75731,63 +76767,27 @@ index 9a796c4..e2c9724 100644
26829 /* requesting a specific address */
26830 if (addr) {
26831 addr = PAGE_ALIGN(addr);
26832 -- vma = find_vma(mm, addr);
26833 + vma = find_vma(mm, addr);
26834 - if (TASK_SIZE - len >= addr &&
26835 - (!vma || addr + len <= vma->vm_start))
26836 -- return addr;
26837 -+ if (TASK_SIZE - len >= addr) {
26838 -+ vma = find_vma(mm, addr);
26839 -+ if (check_heap_stack_gap(vma, addr, len, offset))
26840 -+ return addr;
26841 -+ }
26842 ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26843 + return addr;
26844 }
26845
26846 - /* check if free_area_cache is useful for us */
26847 -@@ -1530,7 +1754,7 @@ try_again:
26848 - * return with success:
26849 - */
26850 - vma = find_vma(mm, addr);
26851 -- if (!vma || addr+len <= vma->vm_start)
26852 -+ if (check_heap_stack_gap(vma, addr, len, offset))
26853 - /* remember the address as a hint for next time */
26854 - return (mm->free_area_cache = addr);
26855 -
26856 -@@ -1539,8 +1763,8 @@ try_again:
26857 - mm->cached_hole_size = vma->vm_start - addr;
26858 -
26859 - /* try just below the current vma->vm_start */
26860 -- addr = vma->vm_start-len;
26861 -- } while (len < vma->vm_start);
26862 -+ addr = skip_heap_stack_gap(vma, len, offset);
26863 -+ } while (!IS_ERR_VALUE(addr));
26864 -
26865 - fail:
26866 - /*
26867 -@@ -1563,13 +1787,21 @@ fail:
26868 - * can happen with large stack limits and large mmap()
26869 - * allocations.
26870 - */
26871 -+ mm->mmap_base = TASK_UNMAPPED_BASE;
26872 +@@ -1857,6 +2083,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26873 + VM_BUG_ON(addr != -ENOMEM);
26874 + info.flags = 0;
26875 + info.low_limit = TASK_UNMAPPED_BASE;
26876 +
26877 +#ifdef CONFIG_PAX_RANDMMAP
26878 -+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26879 -+ mm->mmap_base += mm->delta_mmap;
26880 ++ if (mm->pax_flags & MF_PAX_RANDMMAP)
26881 ++ info.low_limit += mm->delta_mmap;
26882 +#endif
26883 +
26884 -+ mm->free_area_cache = mm->mmap_base;
26885 - mm->cached_hole_size = ~0UL;
26886 -- mm->free_area_cache = TASK_UNMAPPED_BASE;
26887 - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
26888 - /*
26889 - * Restore the topdown base:
26890 - */
26891 -- mm->free_area_cache = mm->mmap_base;
26892 -+ mm->mmap_base = base;
26893 -+ mm->free_area_cache = base;
26894 - mm->cached_hole_size = ~0UL;
26895 -
26896 - return addr;
26897 -@@ -1578,6 +1810,12 @@ fail:
26898 + info.high_limit = TASK_SIZE;
26899 + addr = vm_unmapped_area(&info);
26900 + }
26901 +@@ -1867,6 +2099,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26902
26903 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
26904 {
26905 @@ -75800,7 +76800,7 @@ index 9a796c4..e2c9724 100644
26906 /*
26907 * Is this a new hole at the highest possible address?
26908 */
26909 -@@ -1585,8 +1823,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
26910 +@@ -1874,8 +2112,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
26911 mm->free_area_cache = addr;
26912
26913 /* dont allow allocations above current base */
26914 @@ -75812,7 +76812,7 @@ index 9a796c4..e2c9724 100644
26915 }
26916
26917 unsigned long
26918 -@@ -1685,6 +1925,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
26919 +@@ -1974,6 +2214,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
26920 return vma;
26921 }
26922
26923 @@ -75841,7 +76841,7 @@ index 9a796c4..e2c9724 100644
26924 /*
26925 * Verify that the stack growth is acceptable and
26926 * update accounting. This is shared with both the
26927 -@@ -1701,6 +1963,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26928 +@@ -1990,6 +2252,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26929 return -ENOMEM;
26930
26931 /* Stack limit test */
26932 @@ -75849,7 +76849,7 @@ index 9a796c4..e2c9724 100644
26933 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
26934 return -ENOMEM;
26935
26936 -@@ -1711,6 +1974,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26937 +@@ -2000,6 +2263,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26938 locked = mm->locked_vm + grow;
26939 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
26940 limit >>= PAGE_SHIFT;
26941 @@ -75857,7 +76857,7 @@ index 9a796c4..e2c9724 100644
26942 if (locked > limit && !capable(CAP_IPC_LOCK))
26943 return -ENOMEM;
26944 }
26945 -@@ -1740,37 +2004,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26946 +@@ -2029,37 +2293,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
26947 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
26948 * vma is the last one with address > vma->vm_end. Have to extend vma.
26949 */
26950 @@ -75915,7 +76915,7 @@ index 9a796c4..e2c9724 100644
26951 unsigned long size, grow;
26952
26953 size = address - vma->vm_start;
26954 -@@ -1787,6 +2062,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
26955 +@@ -2094,6 +2369,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
26956 }
26957 }
26958 }
26959 @@ -75924,7 +76924,7 @@ index 9a796c4..e2c9724 100644
26960 vma_unlock_anon_vma(vma);
26961 khugepaged_enter_vma_merge(vma);
26962 validate_mm(vma->vm_mm);
26963 -@@ -1801,6 +2078,8 @@ int expand_downwards(struct vm_area_struct *vma,
26964 +@@ -2108,6 +2385,8 @@ int expand_downwards(struct vm_area_struct *vma,
26965 unsigned long address)
26966 {
26967 int error;
26968 @@ -75933,7 +76933,7 @@ index 9a796c4..e2c9724 100644
26969
26970 /*
26971 * We must make sure the anon_vma is allocated
26972 -@@ -1814,6 +2093,15 @@ int expand_downwards(struct vm_area_struct *vma,
26973 +@@ -2121,6 +2400,15 @@ int expand_downwards(struct vm_area_struct *vma,
26974 if (error)
26975 return error;
26976
26977 @@ -75949,7 +76949,7 @@ index 9a796c4..e2c9724 100644
26978 vma_lock_anon_vma(vma);
26979
26980 /*
26981 -@@ -1823,9 +2111,17 @@ int expand_downwards(struct vm_area_struct *vma,
26982 +@@ -2130,9 +2418,17 @@ int expand_downwards(struct vm_area_struct *vma,
26983 */
26984
26985 /* Somebody else might have raced and expanded it already */
26986 @@ -75968,10 +76968,10 @@ index 9a796c4..e2c9724 100644
26987 size = vma->vm_end - address;
26988 grow = (vma->vm_start - address) >> PAGE_SHIFT;
26989
26990 -@@ -1837,6 +2133,17 @@ int expand_downwards(struct vm_area_struct *vma,
26991 - vma->vm_start = address;
26992 +@@ -2157,6 +2453,18 @@ int expand_downwards(struct vm_area_struct *vma,
26993 vma->vm_pgoff -= grow;
26994 anon_vma_interval_tree_post_update_vma(vma);
26995 + vma_gap_update(vma);
26996 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
26997 +
26998 +#ifdef CONFIG_PAX_SEGMEXEC
26999 @@ -75980,13 +76980,14 @@ index 9a796c4..e2c9724 100644
27000 + vma_m->vm_start -= grow << PAGE_SHIFT;
27001 + vma_m->vm_pgoff -= grow;
27002 + anon_vma_interval_tree_post_update_vma(vma_m);
27003 ++ vma_gap_update(vma_m);
27004 + }
27005 +#endif
27006 +
27007 + spin_unlock(&vma->vm_mm->page_table_lock);
27008 +
27009 perf_event_mmap(vma);
27010 - }
27011 - }
27012 -@@ -1914,6 +2221,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
27013 +@@ -2236,6 +2544,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
27014 do {
27015 long nrpages = vma_pages(vma);
27016
27017 @@ -76000,7 +77001,7 @@ index 9a796c4..e2c9724 100644
27018 if (vma->vm_flags & VM_ACCOUNT)
27019 nr_accounted += nrpages;
27020 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
27021 -@@ -1959,6 +2273,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
27022 +@@ -2281,6 +2596,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
27023 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
27024 vma->vm_prev = NULL;
27025 do {
27026 @@ -76014,10 +77015,10 @@ index 9a796c4..e2c9724 100644
27027 + }
27028 +#endif
27029 +
27030 - rb_erase(&vma->vm_rb, &mm->mm_rb);
27031 + vma_rb_erase(vma, &mm->mm_rb);
27032 mm->map_count--;
27033 tail_vma = vma;
27034 -@@ -1987,14 +2311,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27035 +@@ -2312,14 +2637,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27036 struct vm_area_struct *new;
27037 int err = -ENOMEM;
27038
27039 @@ -76051,7 +77052,7 @@ index 9a796c4..e2c9724 100644
27040 /* most fields are the same, copy all, and then fixup */
27041 *new = *vma;
27042
27043 -@@ -2007,6 +2350,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27044 +@@ -2332,6 +2676,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27045 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
27046 }
27047
27048 @@ -76074,7 +77075,7 @@ index 9a796c4..e2c9724 100644
27049 pol = mpol_dup(vma_policy(vma));
27050 if (IS_ERR(pol)) {
27051 err = PTR_ERR(pol);
27052 -@@ -2029,6 +2388,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27053 +@@ -2354,6 +2714,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27054 else
27055 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
27056
27057 @@ -76111,7 +77112,7 @@ index 9a796c4..e2c9724 100644
27058 /* Success. */
27059 if (!err)
27060 return 0;
27061 -@@ -2038,10 +2427,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27062 +@@ -2363,10 +2753,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27063 new->vm_ops->close(new);
27064 if (new->vm_file)
27065 fput(new->vm_file);
27066 @@ -76131,7 +77132,7 @@ index 9a796c4..e2c9724 100644
27067 kmem_cache_free(vm_area_cachep, new);
27068 out_err:
27069 return err;
27070 -@@ -2054,6 +2451,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27071 +@@ -2379,6 +2777,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
27072 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
27073 unsigned long addr, int new_below)
27074 {
27075 @@ -76147,7 +77148,7 @@ index 9a796c4..e2c9724 100644
27076 if (mm->map_count >= sysctl_max_map_count)
27077 return -ENOMEM;
27078
27079 -@@ -2065,11 +2471,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
27080 +@@ -2390,11 +2797,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
27081 * work. This now handles partial unmappings.
27082 * Jeremy Fitzhardinge <jeremy@××××.org>
27083 */
27084 @@ -76178,7 +77179,7 @@ index 9a796c4..e2c9724 100644
27085 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
27086 return -EINVAL;
27087
27088 -@@ -2144,6 +2569,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
27089 +@@ -2469,6 +2895,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
27090 /* Fix up all other VM information */
27091 remove_vma_list(mm, vma);
27092
27093 @@ -76187,7 +77188,7 @@ index 9a796c4..e2c9724 100644
27094 return 0;
27095 }
27096
27097 -@@ -2152,6 +2579,13 @@ int vm_munmap(unsigned long start, size_t len)
27098 +@@ -2477,6 +2905,13 @@ int vm_munmap(unsigned long start, size_t len)
27099 int ret;
27100 struct mm_struct *mm = current->mm;
27101
27102 @@ -76201,7 +77202,7 @@ index 9a796c4..e2c9724 100644
27103 down_write(&mm->mmap_sem);
27104 ret = do_munmap(mm, start, len);
27105 up_write(&mm->mmap_sem);
27106 -@@ -2165,16 +2599,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
27107 +@@ -2490,16 +2925,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
27108 return vm_munmap(addr, len);
27109 }
27110
27111 @@ -76218,7 +77219,7 @@ index 9a796c4..e2c9724 100644
27112 /*
27113 * this is really a simplified "do_mmap". it only handles
27114 * anonymous maps. eventually we may be able to do some
27115 -@@ -2188,6 +2612,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27116 +@@ -2513,6 +2938,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27117 struct rb_node ** rb_link, * rb_parent;
27118 pgoff_t pgoff = addr >> PAGE_SHIFT;
27119 int error;
27120 @@ -76226,7 +77227,7 @@ index 9a796c4..e2c9724 100644
27121
27122 len = PAGE_ALIGN(len);
27123 if (!len)
27124 -@@ -2195,16 +2620,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27125 +@@ -2520,16 +2946,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27126
27127 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
27128
27129 @@ -76258,7 +77259,7 @@ index 9a796c4..e2c9724 100644
27130 locked += mm->locked_vm;
27131 lock_limit = rlimit(RLIMIT_MEMLOCK);
27132 lock_limit >>= PAGE_SHIFT;
27133 -@@ -2221,21 +2660,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27134 +@@ -2546,21 +2986,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27135 /*
27136 * Clear old maps. this also does some error checking for us
27137 */
27138 @@ -76283,7 +77284,7 @@ index 9a796c4..e2c9724 100644
27139 return -ENOMEM;
27140
27141 /* Can we just expand an old private anonymous mapping? */
27142 -@@ -2249,7 +2687,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27143 +@@ -2574,7 +3013,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27144 */
27145 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
27146 if (!vma) {
27147 @@ -76292,7 +77293,7 @@ index 9a796c4..e2c9724 100644
27148 return -ENOMEM;
27149 }
27150
27151 -@@ -2263,11 +2701,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27152 +@@ -2588,11 +3027,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
27153 vma_link(mm, vma, prev, rb_link, rb_parent);
27154 out:
27155 perf_event_mmap(vma);
27156 @@ -76307,7 +77308,7 @@ index 9a796c4..e2c9724 100644
27157 return addr;
27158 }
27159
27160 -@@ -2325,6 +2764,7 @@ void exit_mmap(struct mm_struct *mm)
27161 +@@ -2650,6 +3090,7 @@ void exit_mmap(struct mm_struct *mm)
27162 while (vma) {
27163 if (vma->vm_flags & VM_ACCOUNT)
27164 nr_accounted += vma_pages(vma);
27165 @@ -76315,7 +77316,7 @@ index 9a796c4..e2c9724 100644
27166 vma = remove_vma(vma);
27167 }
27168 vm_unacct_memory(nr_accounted);
27169 -@@ -2341,6 +2781,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
27170 +@@ -2666,6 +3107,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
27171 struct vm_area_struct *prev;
27172 struct rb_node **rb_link, *rb_parent;
27173
27174 @@ -76329,7 +77330,7 @@ index 9a796c4..e2c9724 100644
27175 /*
27176 * The vm_pgoff of a purely anonymous vma should be irrelevant
27177 * until its first write fault, when page's anon_vma and index
27178 -@@ -2364,7 +2811,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
27179 +@@ -2689,7 +3137,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
27180 security_vm_enough_memory_mm(mm, vma_pages(vma)))
27181 return -ENOMEM;
27182
27183 @@ -76351,7 +77352,7 @@ index 9a796c4..e2c9724 100644
27184 return 0;
27185 }
27186
27187 -@@ -2384,6 +2845,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
27188 +@@ -2709,6 +3171,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
27189 struct mempolicy *pol;
27190 bool faulted_in_anon_vma = true;
27191
27192 @@ -76360,7 +77361,7 @@ index 9a796c4..e2c9724 100644
27193 /*
27194 * If anonymous vma has not yet been faulted, update new pgoff
27195 * to match new location, to increase its chance of merging.
27196 -@@ -2450,6 +2913,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
27197 +@@ -2775,6 +3239,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
27198 return NULL;
27199 }
27200
27201 @@ -76400,7 +77401,7 @@ index 9a796c4..e2c9724 100644
27202 /*
27203 * Return true if the calling process may expand its vm space by the passed
27204 * number of pages
27205 -@@ -2461,6 +2957,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
27206 +@@ -2786,6 +3283,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
27207
27208 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
27209
27210 @@ -76413,7 +77414,7 @@ index 9a796c4..e2c9724 100644
27211 if (cur + npages > lim)
27212 return 0;
27213 return 1;
27214 -@@ -2531,6 +3033,22 @@ int install_special_mapping(struct mm_struct *mm,
27215 +@@ -2856,6 +3359,22 @@ int install_special_mapping(struct mm_struct *mm,
27216 vma->vm_start = addr;
27217 vma->vm_end = addr + len;
27218
27219 @@ -76437,7 +77438,7 @@ index 9a796c4..e2c9724 100644
27220 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
27221
27222 diff --git a/mm/mprotect.c b/mm/mprotect.c
27223 -index a409926..8b32e6d 100644
27224 +index 94722a4..9837984 100644
27225 --- a/mm/mprotect.c
27226 +++ b/mm/mprotect.c
27227 @@ -23,10 +23,17 @@
27228 @@ -76458,8 +77459,8 @@ index a409926..8b32e6d 100644
27229
27230 #ifndef pgprot_modify
27231 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
27232 -@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
27233 - flush_tlb_range(vma, start, end);
27234 +@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
27235 + return pages;
27236 }
27237
27238 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
27239 @@ -76499,7 +77500,7 @@ index a409926..8b32e6d 100644
27240 + if (is_vm_hugetlb_page(vma))
27241 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
27242 + else
27243 -+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
27244 ++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
27245 + }
27246 +}
27247 +#endif
27248 @@ -76507,7 +77508,7 @@ index a409926..8b32e6d 100644
27249 int
27250 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
27251 unsigned long start, unsigned long end, unsigned long newflags)
27252 -@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
27253 +@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
27254 int error;
27255 int dirty_accountable = 0;
27256
27257 @@ -76537,7 +77538,7 @@ index a409926..8b32e6d 100644
27258 /*
27259 * If we make a private mapping writable we increase our commit;
27260 * but (without finer accounting) cannot reduce our commit if we
27261 -@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
27262 +@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
27263 }
27264 }
27265
27266 @@ -76580,7 +77581,7 @@ index a409926..8b32e6d 100644
27267 /*
27268 * First try to merge with previous and/or next vma.
27269 */
27270 -@@ -204,9 +307,21 @@ success:
27271 +@@ -296,9 +399,21 @@ success:
27272 * vm_flags and vm_page_prot are protected by the mmap_sem
27273 * held in write mode.
27274 */
27275 @@ -76603,7 +77604,7 @@ index a409926..8b32e6d 100644
27276
27277 if (vma_wants_writenotify(vma)) {
27278 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
27279 -@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27280 +@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27281 end = start + len;
27282 if (end <= start)
27283 return -ENOMEM;
27284 @@ -76621,7 +77622,7 @@ index a409926..8b32e6d 100644
27285 if (!arch_validate_prot(prot))
27286 return -EINVAL;
27287
27288 -@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27289 +@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27290 /*
27291 * Does the application expect PROT_READ to imply PROT_EXEC:
27292 */
27293 @@ -76630,7 +77631,7 @@ index a409926..8b32e6d 100644
27294 prot |= PROT_EXEC;
27295
27296 vm_flags = calc_vm_prot_bits(prot);
27297 -@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27298 +@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27299 if (start > vma->vm_start)
27300 prev = vma;
27301
27302 @@ -76642,7 +77643,7 @@ index a409926..8b32e6d 100644
27303 for (nstart = start ; ; ) {
27304 unsigned long newflags;
27305
27306 -@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27307 +@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27308
27309 /* newflags >> 4 shift VM_MAY% in place of VM_% */
27310 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
27311 @@ -76657,7 +77658,7 @@ index a409926..8b32e6d 100644
27312 error = -EACCES;
27313 goto out;
27314 }
27315 -@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27316 +@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
27317 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
27318 if (error)
27319 goto out;
27320 @@ -76668,7 +77669,7 @@ index a409926..8b32e6d 100644
27321
27322 if (nstart < prev->vm_end)
27323 diff --git a/mm/mremap.c b/mm/mremap.c
27324 -index 1b61c2d..1cc0e3c 100644
27325 +index e1031e1..1f2a0a1 100644
27326 --- a/mm/mremap.c
27327 +++ b/mm/mremap.c
27328 @@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
27329 @@ -76775,7 +77776,7 @@ index 1b61c2d..1cc0e3c 100644
27330 out:
27331 if (ret & ~PAGE_MASK)
27332 diff --git a/mm/nommu.c b/mm/nommu.c
27333 -index 45131b4..c521665 100644
27334 +index 79c3cac..4d357e0 100644
27335 --- a/mm/nommu.c
27336 +++ b/mm/nommu.c
27337 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
27338 @@ -76786,7 +77787,7 @@ index 45131b4..c521665 100644
27339
27340 atomic_long_t mmap_pages_allocated;
27341
27342 -@@ -824,15 +823,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
27343 +@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
27344 EXPORT_SYMBOL(find_vma);
27345
27346 /*
27347 @@ -76802,7 +77803,7 @@ index 45131b4..c521665 100644
27348 * expand a stack to a given address
27349 * - not supported under NOMMU conditions
27350 */
27351 -@@ -1540,6 +1530,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
27352 +@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
27353
27354 /* most fields are the same, copy all, and then fixup */
27355 *new = *vma;
27356 @@ -76810,11 +77811,24 @@ index 45131b4..c521665 100644
27357 *region = *vma->vm_region;
27358 new->vm_region = region;
27359
27360 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
27361 +index 0713bfb..e3774e0 100644
27362 +--- a/mm/page-writeback.c
27363 ++++ b/mm/page-writeback.c
27364 +@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
27365 + }
27366 + }
27367 +
27368 +-static struct notifier_block __cpuinitdata ratelimit_nb = {
27369 ++static struct notifier_block ratelimit_nb = {
27370 + .notifier_call = ratelimit_handler,
27371 + .next = NULL,
27372 + };
27373 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
27374 -index ceb4168..d7774f2 100644
27375 +index 6a83cd3..bc2dcb6 100644
27376 --- a/mm/page_alloc.c
27377 +++ b/mm/page_alloc.c
27378 -@@ -340,7 +340,7 @@ out:
27379 +@@ -338,7 +338,7 @@ out:
27380 * This usage means that zero-order pages may not be compound.
27381 */
27382
27383 @@ -76847,7 +77861,7 @@ index ceb4168..d7774f2 100644
27384 arch_free_page(page, order);
27385 kernel_map_pages(page, 1 << order, 0);
27386
27387 -@@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
27388 +@@ -861,8 +871,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
27389 arch_alloc_page(page, order);
27390 kernel_map_pages(page, 1 << order, 1);
27391
27392 @@ -76858,7 +77872,7 @@ index ceb4168..d7774f2 100644
27393
27394 if (order && (gfp_flags & __GFP_COMP))
27395 prep_compound_page(page, order);
27396 -@@ -3684,7 +3696,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
27397 +@@ -3752,7 +3764,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
27398 unsigned long pfn;
27399
27400 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
27401 @@ -76873,7 +77887,7 @@ index ceb4168..d7774f2 100644
27402 }
27403 return 0;
27404 diff --git a/mm/percpu.c b/mm/percpu.c
27405 -index ddc5efb..f632d2c 100644
27406 +index 8c8e08f..73a5cda 100644
27407 --- a/mm/percpu.c
27408 +++ b/mm/percpu.c
27409 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
27410 @@ -76937,7 +77951,7 @@ index 926b466..b23df53 100644
27411 if (!mm || IS_ERR(mm)) {
27412 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
27413 diff --git a/mm/rmap.c b/mm/rmap.c
27414 -index 2ee1ef0..2e175ba 100644
27415 +index 2c78f8c..9e9c624 100644
27416 --- a/mm/rmap.c
27417 +++ b/mm/rmap.c
27418 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
27419 @@ -77027,7 +78041,7 @@ index 2ee1ef0..2e175ba 100644
27420 struct anon_vma_chain *avc;
27421 struct anon_vma *anon_vma;
27422 diff --git a/mm/shmem.c b/mm/shmem.c
27423 -index 50c5b8f..0bc87f7 100644
27424 +index 5dd56f6..7c51725 100644
27425 --- a/mm/shmem.c
27426 +++ b/mm/shmem.c
27427 @@ -31,7 +31,7 @@
27428 @@ -77048,7 +78062,7 @@ index 50c5b8f..0bc87f7 100644
27429
27430 /*
27431 * shmem_fallocate and shmem_writepage communicate via inode->i_private
27432 -@@ -2112,6 +2112,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
27433 +@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
27434 static int shmem_xattr_validate(const char *name)
27435 {
27436 struct { const char *prefix; size_t len; } arr[] = {
27437 @@ -77060,7 +78074,7 @@ index 50c5b8f..0bc87f7 100644
27438 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
27439 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
27440 };
27441 -@@ -2167,6 +2172,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
27442 +@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
27443 if (err)
27444 return err;
27445
27446 @@ -77076,7 +78090,7 @@ index 50c5b8f..0bc87f7 100644
27447 return simple_xattr_set(&info->xattrs, name, value, size, flags);
27448 }
27449
27450 -@@ -2466,8 +2480,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
27451 +@@ -2556,8 +2570,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
27452 int err = -ENOMEM;
27453
27454 /* Round up to L1_CACHE_BYTES to resist false sharing */
27455 @@ -77087,28 +78101,10 @@ index 50c5b8f..0bc87f7 100644
27456 return -ENOMEM;
27457
27458 diff --git a/mm/slab.c b/mm/slab.c
27459 -index 33d3363..3851c61 100644
27460 +index e7667a3..b62c169 100644
27461 --- a/mm/slab.c
27462 +++ b/mm/slab.c
27463 -@@ -164,7 +164,7 @@ static bool pfmemalloc_active __read_mostly;
27464 -
27465 - /* Legal flag mask for kmem_cache_create(). */
27466 - #if DEBUG
27467 --# define CREATE_MASK (SLAB_RED_ZONE | \
27468 -+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
27469 - SLAB_POISON | SLAB_HWCACHE_ALIGN | \
27470 - SLAB_CACHE_DMA | \
27471 - SLAB_STORE_USER | \
27472 -@@ -172,7 +172,7 @@ static bool pfmemalloc_active __read_mostly;
27473 - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
27474 - SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
27475 - #else
27476 --# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
27477 -+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
27478 - SLAB_CACHE_DMA | \
27479 - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
27480 - SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
27481 -@@ -322,7 +322,7 @@ struct kmem_list3 {
27482 +@@ -306,7 +306,7 @@ struct kmem_list3 {
27483 * Need this for bootstrapping a per node allocator.
27484 */
27485 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
27486 @@ -77117,7 +78113,7 @@ index 33d3363..3851c61 100644
27487 #define CACHE_CACHE 0
27488 #define SIZE_AC MAX_NUMNODES
27489 #define SIZE_L3 (2 * MAX_NUMNODES)
27490 -@@ -423,10 +423,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
27491 +@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
27492 if ((x)->max_freeable < i) \
27493 (x)->max_freeable = i; \
27494 } while (0)
27495 @@ -77132,7 +78128,7 @@ index 33d3363..3851c61 100644
27496 #else
27497 #define STATS_INC_ACTIVE(x) do { } while (0)
27498 #define STATS_DEC_ACTIVE(x) do { } while (0)
27499 -@@ -534,7 +534,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
27500 +@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
27501 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
27502 */
27503 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
27504 @@ -77141,7 +78137,7 @@ index 33d3363..3851c61 100644
27505 {
27506 u32 offset = (obj - slab->s_mem);
27507 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
27508 -@@ -555,12 +555,13 @@ EXPORT_SYMBOL(malloc_sizes);
27509 +@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
27510 struct cache_names {
27511 char *name;
27512 char *name_dma;
27513 @@ -77157,7 +78153,7 @@ index 33d3363..3851c61 100644
27514 #undef CACHE
27515 };
27516
27517 -@@ -721,6 +722,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
27518 +@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
27519 if (unlikely(gfpflags & GFP_DMA))
27520 return csizep->cs_dmacachep;
27521 #endif
27522 @@ -77170,52 +78166,53 @@ index 33d3363..3851c61 100644
27523 return csizep->cs_cachep;
27524 }
27525
27526 -@@ -1676,7 +1683,7 @@ void __init kmem_cache_init(void)
27527 - sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
27528 - sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
27529 - sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
27530 -- __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
27531 -+ __kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
27532 - list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
27533 -
27534 - if (INDEX_AC != INDEX_L3) {
27535 -@@ -1685,7 +1692,7 @@ void __init kmem_cache_init(void)
27536 - sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
27537 - sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
27538 - sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
27539 -- __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
27540 -+ __kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
27541 - list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
27542 - }
27543 -
27544 -@@ -1705,7 +1712,7 @@ void __init kmem_cache_init(void)
27545 - sizes->cs_cachep->size = sizes->cs_size;
27546 - sizes->cs_cachep->object_size = sizes->cs_size;
27547 - sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
27548 -- __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
27549 -+ __kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY);
27550 - list_add(&sizes->cs_cachep->list, &slab_caches);
27551 - }
27552 +@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
27553 + return notifier_from_errno(err);
27554 + }
27555 +
27556 +-static struct notifier_block __cpuinitdata cpucache_notifier = {
27557 ++static struct notifier_block cpucache_notifier = {
27558 + &cpuup_callback, NULL, 0
27559 + };
27560 +
27561 +@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
27562 + */
27563 +
27564 + sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
27565 +- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
27566 ++ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
27567 +
27568 + if (INDEX_AC != INDEX_L3)
27569 + sizes[INDEX_L3].cs_cachep =
27570 + create_kmalloc_cache(names[INDEX_L3].name,
27571 +- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
27572 ++ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
27573 +
27574 + slab_early_init = 0;
27575 +
27576 +@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
27577 + */
27578 + if (!sizes->cs_cachep)
27579 + sizes->cs_cachep = create_kmalloc_cache(names->name,
27580 +- sizes->cs_size, ARCH_KMALLOC_FLAGS);
27581 ++ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
27582 +
27583 #ifdef CONFIG_ZONE_DMA
27584 -@@ -1718,6 +1725,17 @@ void __init kmem_cache_init(void)
27585 - ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
27586 - list_add(&sizes->cs_dmacachep->list, &slab_caches);
27587 + sizes->cs_dmacachep = create_kmalloc_cache(
27588 + names->name_dma, sizes->cs_size,
27589 + SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
27590 #endif
27591 +
27592 +#ifdef CONFIG_PAX_USERCOPY_SLABS
27593 -+ sizes->cs_usercopycachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
27594 -+ sizes->cs_usercopycachep->name = names->name_usercopy;
27595 -+ sizes->cs_usercopycachep->size = sizes->cs_size;
27596 -+ sizes->cs_usercopycachep->object_size = sizes->cs_size;
27597 -+ sizes->cs_usercopycachep->align = ARCH_KMALLOC_MINALIGN;
27598 -+ __kmem_cache_create(sizes->cs_usercopycachep, ARCH_KMALLOC_FLAGS| SLAB_PANIC|SLAB_USERCOPY);
27599 -+ list_add(&sizes->cs_usercopycachep->list, &slab_caches);
27600 ++ sizes->cs_usercopycachep = create_kmalloc_cache(
27601 ++ names->name_usercopy, sizes->cs_size,
27602 ++ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
27603 +#endif
27604 +
27605 sizes++;
27606 names++;
27607 }
27608 -@@ -4405,10 +4423,10 @@ static int s_show(struct seq_file *m, void *p)
27609 +@@ -4365,10 +4379,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
27610 }
27611 /* cpu stats */
27612 {
27613 @@ -77230,9 +78227,9 @@ index 33d3363..3851c61 100644
27614
27615 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
27616 allochit, allocmiss, freehit, freemiss);
27617 -@@ -4667,13 +4685,71 @@ static int __init slab_proc_init(void)
27618 +@@ -4600,13 +4614,71 @@ static const struct file_operations proc_slabstats_operations = {
27619 + static int __init slab_proc_init(void)
27620 {
27621 - proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
27622 #ifdef CONFIG_DEBUG_SLAB_LEAK
27623 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
27624 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
27625 @@ -77303,21 +78300,44 @@ index 33d3363..3851c61 100644
27626 /**
27627 * ksize - get the actual amount of memory allocated for a given object
27628 * @objp: Pointer to the object
27629 +diff --git a/mm/slab.h b/mm/slab.h
27630 +index 34a98d6..73633d1 100644
27631 +--- a/mm/slab.h
27632 ++++ b/mm/slab.h
27633 +@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
27634 +
27635 + /* Legal flag mask for kmem_cache_create(), for various configurations */
27636 + #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
27637 +- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
27638 ++ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
27639 +
27640 + #if defined(CONFIG_DEBUG_SLAB)
27641 + #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
27642 +@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
27643 + return s;
27644 +
27645 + page = virt_to_head_page(x);
27646 ++
27647 ++ BUG_ON(!PageSlab(page));
27648 ++
27649 + cachep = page->slab_cache;
27650 + if (slab_equal_or_root(cachep, s))
27651 + return cachep;
27652 diff --git a/mm/slab_common.c b/mm/slab_common.c
27653 -index 069a24e6..226a310 100644
27654 +index 3f3cd97..e050794 100644
27655 --- a/mm/slab_common.c
27656 +++ b/mm/slab_common.c
27657 -@@ -127,7 +127,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
27658 +@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
27659 +
27660 err = __kmem_cache_create(s, flags);
27661 if (!err) {
27662 -
27663 - s->refcount = 1;
27664 + atomic_set(&s->refcount, 1);
27665 list_add(&s->list, &slab_caches);
27666 -
27667 + memcg_cache_list_add(memcg, s);
27668 } else {
27669 -@@ -163,8 +163,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
27670 - {
27671 +@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
27672 +
27673 get_online_cpus();
27674 mutex_lock(&slab_mutex);
27675 - s->refcount--;
27676 @@ -77326,11 +78346,29 @@ index 069a24e6..226a310 100644
27677 list_del(&s->list);
27678
27679 if (!__kmem_cache_shutdown(s)) {
27680 +@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
27681 + panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
27682 + name, size, err);
27683 +
27684 +- s->refcount = -1; /* Exempt from merging for now */
27685 ++ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
27686 + }
27687 +
27688 + struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
27689 +@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
27690 +
27691 + create_boot_cache(s, name, size, flags);
27692 + list_add(&s->list, &slab_caches);
27693 +- s->refcount = 1;
27694 ++ atomic_set(&s->refcount, 1);
27695 + return s;
27696 + }
27697 +
27698 diff --git a/mm/slob.c b/mm/slob.c
27699 -index 1e921c5..1ce12c2 100644
27700 +index a99fdf7..f5b6577 100644
27701 --- a/mm/slob.c
27702 +++ b/mm/slob.c
27703 -@@ -159,7 +159,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
27704 +@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
27705 /*
27706 * Return the size of a slob block.
27707 */
27708 @@ -77339,7 +78377,7 @@ index 1e921c5..1ce12c2 100644
27709 {
27710 if (s->units > 0)
27711 return s->units;
27712 -@@ -169,7 +169,7 @@ static slobidx_t slob_units(slob_t *s)
27713 +@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
27714 /*
27715 * Return the next free slob block pointer after this one.
27716 */
27717 @@ -77348,7 +78386,7 @@ index 1e921c5..1ce12c2 100644
27718 {
27719 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
27720 slobidx_t next;
27721 -@@ -184,14 +184,14 @@ static slob_t *slob_next(slob_t *s)
27722 +@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
27723 /*
27724 * Returns true if s is the last free block in its page.
27725 */
27726 @@ -77366,7 +78404,7 @@ index 1e921c5..1ce12c2 100644
27727
27728 #ifdef CONFIG_NUMA
27729 if (node != NUMA_NO_NODE)
27730 -@@ -203,14 +203,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
27731 +@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
27732 if (!page)
27733 return NULL;
27734
27735 @@ -77388,7 +78426,7 @@ index 1e921c5..1ce12c2 100644
27736 }
27737
27738 /*
27739 -@@ -315,15 +319,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
27740 +@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
27741
27742 /* Not enough space: must allocate a new page */
27743 if (!b) {
27744 @@ -77408,7 +78446,7 @@ index 1e921c5..1ce12c2 100644
27745 INIT_LIST_HEAD(&sp->list);
27746 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
27747 set_slob_page_free(sp, slob_list);
27748 -@@ -361,9 +365,7 @@ static void slob_free(void *block, int size)
27749 +@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
27750 if (slob_page_free(sp))
27751 clear_slob_page_free(sp);
27752 spin_unlock_irqrestore(&slob_lock, flags);
27753 @@ -77419,7 +78457,7 @@ index 1e921c5..1ce12c2 100644
27754 return;
27755 }
27756
27757 -@@ -426,11 +428,10 @@ out:
27758 +@@ -424,11 +426,10 @@ out:
27759 */
27760
27761 static __always_inline void *
27762 @@ -77434,7 +78472,7 @@ index 1e921c5..1ce12c2 100644
27763
27764 gfp &= gfp_allowed_mask;
27765
27766 -@@ -444,20 +445,23 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
27767 +@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
27768
27769 if (!m)
27770 return NULL;
27771 @@ -77454,16 +78492,13 @@ index 1e921c5..1ce12c2 100644
27772 if (likely(order))
27773 gfp |= __GFP_COMP;
27774 - ret = slob_new_pages(gfp, order, node);
27775 -- if (ret) {
27776 -- struct page *page;
27777 -- page = virt_to_page(ret);
27778 + page = slob_new_pages(gfp, order, node);
27779 + if (page) {
27780 + ret = page_address(page);
27781 - page->private = size;
27782 - }
27783 ++ page->private = size;
27784 ++ }
27785
27786 -@@ -465,7 +469,17 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
27787 + trace_kmalloc_node(caller, ret,
27788 size, PAGE_SIZE << order, gfp, node);
27789 }
27790
27791 @@ -77482,7 +78517,7 @@ index 1e921c5..1ce12c2 100644
27792 return ret;
27793 }
27794
27795 -@@ -501,15 +515,91 @@ void kfree(const void *block)
27796 +@@ -494,33 +513,110 @@ void kfree(const void *block)
27797 kmemleak_free(block);
27798
27799 sp = virt_to_page(block);
27800 @@ -77499,7 +78534,7 @@ index 1e921c5..1ce12c2 100644
27801 + __ClearPageSlab(sp);
27802 + reset_page_mapcount(sp);
27803 + sp->private = 0;
27804 - put_page(sp);
27805 + __free_pages(sp, compound_order(sp));
27806 + }
27807 }
27808 EXPORT_SYMBOL(kfree);
27809 @@ -77578,22 +78613,31 @@ index 1e921c5..1ce12c2 100644
27810 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
27811 size_t ksize(const void *block)
27812 {
27813 -@@ -520,10 +610,11 @@ size_t ksize(const void *block)
27814 + struct page *sp;
27815 + int align;
27816 +- unsigned int *m;
27817 ++ slob_t *m;
27818 +
27819 + BUG_ON(!block);
27820 + if (unlikely(block == ZERO_SIZE_PTR))
27821 return 0;
27822
27823 sp = virt_to_page(block);
27824 -- if (PageSlab(sp)) {
27825 +- if (unlikely(!PageSlab(sp)))
27826 +- return PAGE_SIZE << compound_order(sp);
27827 + VM_BUG_ON(!PageSlab(sp));
27828 -+ if (!sp->private) {
27829 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
27830 -- unsigned int *m = (unsigned int *)(block - align);
27831 -- return SLOB_UNITS(*m) * SLOB_UNIT;
27832 -+ slob_t *m = (slob_t *)(block - align);
27833 -+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
27834 - } else
27835 - return sp->private;
27836 ++ if (sp->private)
27837 ++ return sp->private;
27838 +
27839 + align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
27840 +- m = (unsigned int *)(block - align);
27841 +- return SLOB_UNITS(*m) * SLOB_UNIT;
27842 ++ m = (slob_t *)(block - align);
27843 ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
27844 }
27845 -@@ -550,23 +641,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
27846 + EXPORT_SYMBOL(ksize);
27847 +
27848 +@@ -536,23 +632,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
27849
27850 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
27851 {
27852 @@ -77609,7 +78653,7 @@ index 1e921c5..1ce12c2 100644
27853 +#else
27854 if (c->size < PAGE_SIZE) {
27855 b = slob_alloc(c->size, flags, c->align, node);
27856 - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
27857 + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
27858 SLOB_UNITS(c->size) * SLOB_UNIT,
27859 flags, node);
27860 } else {
27861 @@ -77621,7 +78665,7 @@ index 1e921c5..1ce12c2 100644
27862 + b = page_address(sp);
27863 + sp->private = c->size;
27864 + }
27865 - trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
27866 + trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
27867 PAGE_SIZE << get_order(c->size),
27868 flags, node);
27869 }
27870 @@ -77629,7 +78673,7 @@ index 1e921c5..1ce12c2 100644
27871
27872 if (c->ctor)
27873 c->ctor(b);
27874 -@@ -578,10 +679,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
27875 +@@ -564,10 +670,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
27876
27877 static void __kmem_cache_free(void *b, int size)
27878 {
27879 @@ -77646,7 +78690,7 @@ index 1e921c5..1ce12c2 100644
27880 }
27881
27882 static void kmem_rcu_free(struct rcu_head *head)
27883 -@@ -594,17 +699,31 @@ static void kmem_rcu_free(struct rcu_head *head)
27884 +@@ -580,17 +690,31 @@ static void kmem_rcu_free(struct rcu_head *head)
27885
27886 void kmem_cache_free(struct kmem_cache *c, void *b)
27887 {
27888 @@ -77682,10 +78726,10 @@ index 1e921c5..1ce12c2 100644
27889 EXPORT_SYMBOL(kmem_cache_free);
27890
27891 diff --git a/mm/slub.c b/mm/slub.c
27892 -index 321afab..9595170 100644
27893 +index ba2ca53..00b1f4e 100644
27894 --- a/mm/slub.c
27895 +++ b/mm/slub.c
27896 -@@ -201,7 +201,7 @@ struct track {
27897 +@@ -197,7 +197,7 @@ struct track {
27898
27899 enum track_item { TRACK_ALLOC, TRACK_FREE };
27900
27901 @@ -77694,7 +78738,7 @@ index 321afab..9595170 100644
27902 static int sysfs_slab_add(struct kmem_cache *);
27903 static int sysfs_slab_alias(struct kmem_cache *, const char *);
27904 static void sysfs_slab_remove(struct kmem_cache *);
27905 -@@ -521,7 +521,7 @@ static void print_track(const char *s, struct track *t)
27906 +@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
27907 if (!t->addr)
27908 return;
27909
27910 @@ -77703,16 +78747,7 @@ index 321afab..9595170 100644
27911 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
27912 #ifdef CONFIG_STACKTRACE
27913 {
27914 -@@ -2623,6 +2623,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
27915 -
27916 - page = virt_to_head_page(x);
27917 -
27918 -+ BUG_ON(!PageSlab(page));
27919 -+
27920 - if (kmem_cache_debug(s) && page->slab != s) {
27921 - pr_err("kmem_cache_free: Wrong slab cache. %s but object"
27922 - " is from %s\n", page->slab->name, s->name);
27923 -@@ -2663,7 +2665,7 @@ static int slub_min_objects;
27924 +@@ -2653,7 +2653,7 @@ static int slub_min_objects;
27925 * Merge control. If this is set then no merging of slab caches will occur.
27926 * (Could be removed. This was introduced to pacify the merge skeptics.)
27927 */
27928 @@ -77721,7 +78756,7 @@ index 321afab..9595170 100644
27929
27930 /*
27931 * Calculate the order of allocation given an slab object size.
27932 -@@ -3225,6 +3227,10 @@ EXPORT_SYMBOL(kmalloc_caches);
27933 +@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
27934 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
27935 #endif
27936
27937 @@ -77732,16 +78767,7 @@ index 321afab..9595170 100644
27938 static int __init setup_slub_min_order(char *str)
27939 {
27940 get_option(&str, &slub_min_order);
27941 -@@ -3279,7 +3285,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
27942 - if (kmem_cache_open(s, flags))
27943 - goto panic;
27944 -
27945 -- s->refcount = 1;
27946 -+ atomic_set(&s->refcount, 1);
27947 - list_add(&s->list, &slab_caches);
27948 - return s;
27949 -
27950 -@@ -3343,6 +3349,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
27951 +@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
27952 return kmalloc_dma_caches[index];
27953
27954 #endif
27955 @@ -77755,7 +78781,7 @@ index 321afab..9595170 100644
27956 return kmalloc_caches[index];
27957 }
27958
27959 -@@ -3411,6 +3424,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
27960 +@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
27961 EXPORT_SYMBOL(__kmalloc_node);
27962 #endif
27963
27964 @@ -77778,7 +78804,7 @@ index 321afab..9595170 100644
27965 + if (!PageSlab(page))
27966 + return false;
27967 +
27968 -+ s = page->slab;
27969 ++ s = page->slab_cache;
27970 + return s->flags & SLAB_USERCOPY;
27971 +}
27972 +
27973 @@ -77800,7 +78826,7 @@ index 321afab..9595170 100644
27974 + if (!PageSlab(page))
27975 + return NULL;
27976 +
27977 -+ s = page->slab;
27978 ++ s = page->slab_cache;
27979 + if (!(s->flags & SLAB_USERCOPY))
27980 + return s->name;
27981 +
27982 @@ -77815,16 +78841,7 @@ index 321afab..9595170 100644
27983 size_t ksize(const void *object)
27984 {
27985 struct page *page;
27986 -@@ -3685,7 +3751,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
27987 - int node;
27988 -
27989 - list_add(&s->list, &slab_caches);
27990 -- s->refcount = -1;
27991 -+ atomic_set(&s->refcount, -1);
27992 -
27993 - for_each_node_state(node, N_NORMAL_MEMORY) {
27994 - struct kmem_cache_node *n = get_node(s, node);
27995 -@@ -3808,17 +3874,17 @@ void __init kmem_cache_init(void)
27996 +@@ -3712,17 +3776,17 @@ void __init kmem_cache_init(void)
27997
27998 /* Caches that are not of the two-to-the-power-of size */
27999 if (KMALLOC_MIN_SIZE <= 32) {
28000 @@ -77845,7 +78862,7 @@ index 321afab..9595170 100644
28001 caches++;
28002 }
28003
28004 -@@ -3860,6 +3926,22 @@ void __init kmem_cache_init(void)
28005 +@@ -3764,6 +3828,22 @@ void __init kmem_cache_init(void)
28006 }
28007 }
28008 #endif
28009 @@ -77868,7 +78885,7 @@ index 321afab..9595170 100644
28010 printk(KERN_INFO
28011 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
28012 " CPUs=%d, Nodes=%d\n",
28013 -@@ -3886,7 +3968,7 @@ static int slab_unmergeable(struct kmem_cache *s)
28014 +@@ -3790,7 +3870,7 @@ static int slab_unmergeable(struct kmem_cache *s)
28015 /*
28016 * We may have set a slab to be unmergeable during bootstrap.
28017 */
28018 @@ -77877,16 +78894,16 @@ index 321afab..9595170 100644
28019 return 1;
28020
28021 return 0;
28022 -@@ -3940,7 +4022,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
28023 +@@ -3848,7 +3928,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
28024
28025 - s = find_mergeable(size, align, flags, name, ctor);
28026 + s = find_mergeable(memcg, size, align, flags, name, ctor);
28027 if (s) {
28028 - s->refcount++;
28029 + atomic_inc(&s->refcount);
28030 /*
28031 * Adjust the object sizes so that we clear
28032 * the complete object on kzalloc.
28033 -@@ -3949,7 +4031,7 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
28034 +@@ -3857,7 +3937,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
28035 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
28036
28037 if (sysfs_slab_alias(s, name)) {
28038 @@ -77895,7 +78912,16 @@ index 321afab..9595170 100644
28039 s = NULL;
28040 }
28041 }
28042 -@@ -4064,7 +4146,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
28043 +@@ -3919,7 +3999,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
28044 + return NOTIFY_OK;
28045 + }
28046 +
28047 +-static struct notifier_block __cpuinitdata slab_notifier = {
28048 ++static struct notifier_block slab_notifier = {
28049 + .notifier_call = slab_cpuup_callback
28050 + };
28051 +
28052 +@@ -3977,7 +4057,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
28053 }
28054 #endif
28055
28056 @@ -77904,7 +78930,7 @@ index 321afab..9595170 100644
28057 static int count_inuse(struct page *page)
28058 {
28059 return page->inuse;
28060 -@@ -4451,12 +4533,12 @@ static void resiliency_test(void)
28061 +@@ -4364,12 +4444,12 @@ static void resiliency_test(void)
28062 validate_slab_cache(kmalloc_caches[9]);
28063 }
28064 #else
28065 @@ -77919,7 +78945,7 @@ index 321afab..9595170 100644
28066 enum slab_stat_type {
28067 SL_ALL, /* All slabs */
28068 SL_PARTIAL, /* Only partially allocated slabs */
28069 -@@ -4700,7 +4782,7 @@ SLAB_ATTR_RO(ctor);
28070 +@@ -4613,7 +4693,7 @@ SLAB_ATTR_RO(ctor);
28071
28072 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
28073 {
28074 @@ -77928,7 +78954,7 @@ index 321afab..9595170 100644
28075 }
28076 SLAB_ATTR_RO(aliases);
28077
28078 -@@ -5262,6 +5344,7 @@ static char *create_unique_id(struct kmem_cache *s)
28079 +@@ -5266,6 +5346,7 @@ static char *create_unique_id(struct kmem_cache *s)
28080 return name;
28081 }
28082
28083 @@ -77936,7 +78962,7 @@ index 321afab..9595170 100644
28084 static int sysfs_slab_add(struct kmem_cache *s)
28085 {
28086 int err;
28087 -@@ -5324,6 +5407,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
28088 +@@ -5323,6 +5404,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
28089 kobject_del(&s->kobj);
28090 kobject_put(&s->kobj);
28091 }
28092 @@ -77944,7 +78970,7 @@ index 321afab..9595170 100644
28093
28094 /*
28095 * Need to buffer aliases during bootup until sysfs becomes
28096 -@@ -5337,6 +5421,7 @@ struct saved_alias {
28097 +@@ -5336,6 +5418,7 @@ struct saved_alias {
28098
28099 static struct saved_alias *alias_list;
28100
28101 @@ -77952,7 +78978,7 @@ index 321afab..9595170 100644
28102 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
28103 {
28104 struct saved_alias *al;
28105 -@@ -5359,6 +5444,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
28106 +@@ -5358,6 +5441,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
28107 alias_list = al;
28108 return 0;
28109 }
28110 @@ -77982,6 +79008,19 @@ index 1b7e22a..3fcd4f3 100644
28111 }
28112 return pgd;
28113 }
28114 +diff --git a/mm/sparse.c b/mm/sparse.c
28115 +index 6b5fb76..db0c190 100644
28116 +--- a/mm/sparse.c
28117 ++++ b/mm/sparse.c
28118 +@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
28119 +
28120 + for (i = 0; i < PAGES_PER_SECTION; i++) {
28121 + if (PageHWPoison(&memmap[i])) {
28122 +- atomic_long_sub(1, &mce_bad_pages);
28123 ++ atomic_long_sub_unchecked(1, &mce_bad_pages);
28124 + ClearPageHWPoison(&memmap[i]);
28125 + }
28126 + }
28127 diff --git a/mm/swap.c b/mm/swap.c
28128 index 6310dc2..3662b3f 100644
28129 --- a/mm/swap.c
28130 @@ -78004,7 +79043,7 @@ index 6310dc2..3662b3f 100644
28131 }
28132
28133 diff --git a/mm/swapfile.c b/mm/swapfile.c
28134 -index f91a255..9dcac21 100644
28135 +index e97a0e5..b50e796 100644
28136 --- a/mm/swapfile.c
28137 +++ b/mm/swapfile.c
28138 @@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
28139 @@ -78016,7 +79055,7 @@ index f91a255..9dcac21 100644
28140
28141 static inline unsigned char swap_count(unsigned char ent)
28142 {
28143 -@@ -1601,7 +1601,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
28144 +@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
28145 }
28146 filp_close(swap_file, NULL);
28147 err = 0;
28148 @@ -78025,7 +79064,7 @@ index f91a255..9dcac21 100644
28149 wake_up_interruptible(&proc_poll_wait);
28150
28151 out_dput:
28152 -@@ -1618,8 +1618,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
28153 +@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
28154
28155 poll_wait(file, &proc_poll_wait, wait);
28156
28157 @@ -78036,7 +79075,7 @@ index f91a255..9dcac21 100644
28158 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
28159 }
28160
28161 -@@ -1717,7 +1717,7 @@ static int swaps_open(struct inode *inode, struct file *file)
28162 +@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
28163 return ret;
28164
28165 seq = file->private_data;
28166 @@ -78045,7 +79084,7 @@ index f91a255..9dcac21 100644
28167 return 0;
28168 }
28169
28170 -@@ -2059,7 +2059,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
28171 +@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
28172 (frontswap_map) ? "FS" : "");
28173
28174 mutex_unlock(&swapon_mutex);
28175 @@ -78055,7 +79094,7 @@ index f91a255..9dcac21 100644
28176
28177 if (S_ISREG(inode->i_mode))
28178 diff --git a/mm/util.c b/mm/util.c
28179 -index dc3036c..b6c7c9d 100644
28180 +index c55e26b..3f913a9 100644
28181 --- a/mm/util.c
28182 +++ b/mm/util.c
28183 @@ -292,6 +292,12 @@ done:
28184 @@ -78072,7 +79111,7 @@ index dc3036c..b6c7c9d 100644
28185 mm->unmap_area = arch_unmap_area;
28186 }
28187 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
28188 -index 78e0830..bc6bbd8 100644
28189 +index 5123a16..f234a48 100644
28190 --- a/mm/vmalloc.c
28191 +++ b/mm/vmalloc.c
28192 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
28193 @@ -78270,7 +79309,7 @@ index 78e0830..bc6bbd8 100644
28194 if (v->nr_pages)
28195 seq_printf(m, " pages=%d", v->nr_pages);
28196 diff --git a/mm/vmstat.c b/mm/vmstat.c
28197 -index c737057..a49753a 100644
28198 +index 9800306..76b4b27 100644
28199 --- a/mm/vmstat.c
28200 +++ b/mm/vmstat.c
28201 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
28202 @@ -78311,7 +79350,16 @@ index c737057..a49753a 100644
28203 }
28204 }
28205 #endif
28206 -@@ -1224,10 +1224,20 @@ static int __init setup_vmstat(void)
28207 +@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
28208 + return NOTIFY_OK;
28209 + }
28210 +
28211 +-static struct notifier_block __cpuinitdata vmstat_notifier =
28212 ++static struct notifier_block vmstat_notifier =
28213 + { &vmstat_cpuup_callback, NULL, 0 };
28214 + #endif
28215 +
28216 +@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
28217 start_cpu_timer(cpu);
28218 #endif
28219 #ifdef CONFIG_PROC_FS
28220 @@ -78337,10 +79385,10 @@ index c737057..a49753a 100644
28221 return 0;
28222 }
28223 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
28224 -index ee07072..593e3fd 100644
28225 +index a292e80..785ee68 100644
28226 --- a/net/8021q/vlan.c
28227 +++ b/net/8021q/vlan.c
28228 -@@ -484,7 +484,7 @@ out:
28229 +@@ -485,7 +485,7 @@ out:
28230 return NOTIFY_DONE;
28231 }
28232
28233 @@ -78349,9 +79397,9 @@ index ee07072..593e3fd 100644
28234 .notifier_call = vlan_device_event,
28235 };
28236
28237 -@@ -559,8 +559,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
28238 +@@ -560,8 +560,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
28239 err = -EPERM;
28240 - if (!capable(CAP_NET_ADMIN))
28241 + if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
28242 break;
28243 - if ((args.u.name_type >= 0) &&
28244 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
28245 @@ -78465,44 +79513,44 @@ index 0447d5d..3cf4728 100644
28246 #undef __HANDLE_ITEM
28247 }
28248 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
28249 -index c6fcc76..1270d14 100644
28250 +index 7d02ebd..4d4cc01 100644
28251 --- a/net/batman-adv/bat_iv_ogm.c
28252 +++ b/net/batman-adv/bat_iv_ogm.c
28253 -@@ -62,7 +62,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
28254 +@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
28255
28256 /* randomize initial seqno to avoid collision */
28257 get_random_bytes(&random_seqno, sizeof(random_seqno));
28258 -- atomic_set(&hard_iface->seqno, random_seqno);
28259 -+ atomic_set_unchecked(&hard_iface->seqno, random_seqno);
28260 +- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
28261 ++ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
28262
28263 - hard_iface->packet_len = BATADV_OGM_HLEN;
28264 - hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
28265 -@@ -608,9 +608,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
28266 - batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
28267 + hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
28268 + ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
28269 +@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
28270 + batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
28271
28272 /* change sequence number to network order */
28273 -- seqno = (uint32_t)atomic_read(&hard_iface->seqno);
28274 -+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->seqno);
28275 +- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
28276 ++ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
28277 batadv_ogm_packet->seqno = htonl(seqno);
28278 -- atomic_inc(&hard_iface->seqno);
28279 -+ atomic_inc_unchecked(&hard_iface->seqno);
28280 +- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
28281 ++ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
28282
28283 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
28284 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
28285 -@@ -1015,7 +1015,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
28286 +@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
28287 return;
28288
28289 /* could be changed by schedule_own_packet() */
28290 -- if_incoming_seqno = atomic_read(&if_incoming->seqno);
28291 -+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
28292 +- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
28293 ++ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
28294
28295 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
28296 has_directlink_flag = 1;
28297 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
28298 -index d112fd6..686a447 100644
28299 +index f1d37cd..4190879 100644
28300 --- a/net/batman-adv/hard-interface.c
28301 +++ b/net/batman-adv/hard-interface.c
28302 -@@ -327,7 +327,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
28303 +@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
28304 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
28305 dev_add_pack(&hard_iface->batman_adv_ptype);
28306
28307 @@ -78511,20 +79559,20 @@ index d112fd6..686a447 100644
28308 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
28309 hard_iface->net_dev->name);
28310
28311 -@@ -450,7 +450,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
28312 +@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
28313 /* This can't be called via a bat_priv callback because
28314 * we have no bat_priv yet.
28315 */
28316 -- atomic_set(&hard_iface->seqno, 1);
28317 -+ atomic_set_unchecked(&hard_iface->seqno, 1);
28318 - hard_iface->packet_buff = NULL;
28319 +- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
28320 ++ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
28321 + hard_iface->bat_iv.ogm_buff = NULL;
28322
28323 return hard_iface;
28324 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
28325 -index ce0684a..4a0cbf1 100644
28326 +index 6b548fd..fc32c8d 100644
28327 --- a/net/batman-adv/soft-interface.c
28328 +++ b/net/batman-adv/soft-interface.c
28329 -@@ -234,7 +234,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
28330 +@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
28331 primary_if->net_dev->dev_addr, ETH_ALEN);
28332
28333 /* set broadcast sequence number */
28334 @@ -78532,8 +79580,8 @@ index ce0684a..4a0cbf1 100644
28335 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
28336 bcast_packet->seqno = htonl(seqno);
28337
28338 - batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
28339 -@@ -427,7 +427,7 @@ struct net_device *batadv_softif_create(const char *name)
28340 + batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
28341 +@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
28342 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
28343
28344 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
28345 @@ -78543,21 +79591,28 @@ index ce0684a..4a0cbf1 100644
28346 atomic_set(&bat_priv->tt.local_changes, 0);
28347 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
28348 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
28349 -index ac1e07a..4c846e2 100644
28350 +index ae9ac9a..11e0fe7 100644
28351 --- a/net/batman-adv/types.h
28352 +++ b/net/batman-adv/types.h
28353 -@@ -33,8 +33,8 @@ struct batadv_hard_iface {
28354 +@@ -48,7 +48,7 @@
28355 + struct batadv_hard_iface_bat_iv {
28356 + unsigned char *ogm_buff;
28357 + int ogm_buff_len;
28358 +- atomic_t ogm_seqno;
28359 ++ atomic_unchecked_t ogm_seqno;
28360 + };
28361 +
28362 + struct batadv_hard_iface {
28363 +@@ -56,7 +56,7 @@ struct batadv_hard_iface {
28364 int16_t if_num;
28365 char if_status;
28366 struct net_device *net_dev;
28367 -- atomic_t seqno;
28368 - atomic_t frag_seqno;
28369 -+ atomic_unchecked_t seqno;
28370 + atomic_unchecked_t frag_seqno;
28371 - unsigned char *packet_buff;
28372 - int packet_len;
28373 struct kobject *hardif_obj;
28374 -@@ -244,7 +244,7 @@ struct batadv_priv {
28375 + atomic_t refcount;
28376 + struct packet_type batman_adv_ptype;
28377 +@@ -284,7 +284,7 @@ struct batadv_priv {
28378 atomic_t orig_interval; /* uint */
28379 atomic_t hop_penalty; /* uint */
28380 atomic_t log_level; /* uint */
28381 @@ -78567,7 +79622,7 @@ index ac1e07a..4c846e2 100644
28382 atomic_t batman_queue_left;
28383 char num_ifaces;
28384 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
28385 -index f397232..3206a33 100644
28386 +index 10aff49..ea8e021 100644
28387 --- a/net/batman-adv/unicast.c
28388 +++ b/net/batman-adv/unicast.c
28389 @@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
28390 @@ -78593,10 +79648,10 @@ index 07f0739..3c42e34 100644
28391 err = -EFAULT;
28392 break;
28393 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
28394 -index a91239d..d7ed533 100644
28395 +index 22e6583..426e2f3 100644
28396 --- a/net/bluetooth/l2cap_core.c
28397 +++ b/net/bluetooth/l2cap_core.c
28398 -@@ -3183,8 +3183,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
28399 +@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
28400 break;
28401
28402 case L2CAP_CONF_RFC:
28403 @@ -78608,12 +79663,12 @@ index a91239d..d7ed533 100644
28404 + memcpy(&rfc, (void *)val, olen);
28405
28406 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
28407 - rfc.mode != chan->mode)
28408 + rfc.mode != chan->mode)
28409 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
28410 -index 083f2bf..799f9448 100644
28411 +index 1bcfb84..dad9f98 100644
28412 --- a/net/bluetooth/l2cap_sock.c
28413 +++ b/net/bluetooth/l2cap_sock.c
28414 -@@ -471,7 +471,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
28415 +@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
28416 struct sock *sk = sock->sk;
28417 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
28418 struct l2cap_options opts;
28419 @@ -78623,7 +79678,7 @@ index 083f2bf..799f9448 100644
28420 u32 opt;
28421
28422 BT_DBG("sk %p", sk);
28423 -@@ -493,7 +494,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
28424 +@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
28425 opts.max_tx = chan->max_tx;
28426 opts.txwin_size = chan->tx_win;
28427
28428 @@ -78632,7 +79687,7 @@ index 083f2bf..799f9448 100644
28429 if (copy_from_user((char *) &opts, optval, len)) {
28430 err = -EFAULT;
28431 break;
28432 -@@ -571,7 +572,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
28433 +@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
28434 struct bt_security sec;
28435 struct bt_power pwr;
28436 struct l2cap_conn *conn;
28437 @@ -78642,7 +79697,7 @@ index 083f2bf..799f9448 100644
28438 u32 opt;
28439
28440 BT_DBG("sk %p", sk);
28441 -@@ -594,7 +596,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
28442 +@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
28443
28444 sec.level = BT_SECURITY_LOW;
28445
28446 @@ -78651,7 +79706,7 @@ index 083f2bf..799f9448 100644
28447 if (copy_from_user((char *) &sec, optval, len)) {
28448 err = -EFAULT;
28449 break;
28450 -@@ -691,7 +693,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
28451 +@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
28452
28453 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
28454
28455 @@ -78661,7 +79716,7 @@ index 083f2bf..799f9448 100644
28456 err = -EFAULT;
28457 break;
28458 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
28459 -index 868a909..d044bc3 100644
28460 +index ce3f665..2c7d08f 100644
28461 --- a/net/bluetooth/rfcomm/sock.c
28462 +++ b/net/bluetooth/rfcomm/sock.c
28463 @@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
28464 @@ -78683,7 +79738,7 @@ index 868a909..d044bc3 100644
28465 err = -EFAULT;
28466 break;
28467 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
28468 -index ccc2487..921073d 100644
28469 +index bd6fd0f..6492cba 100644
28470 --- a/net/bluetooth/rfcomm/tty.c
28471 +++ b/net/bluetooth/rfcomm/tty.c
28472 @@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
28473 @@ -78698,9 +79753,9 @@ index ccc2487..921073d 100644
28474 @@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
28475 return -ENODEV;
28476
28477 - BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
28478 -- dev->channel, dev->port.count);
28479 -+ dev->channel, atomic_read(&dev->port.count));
28480 + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
28481 +- dev->channel, dev->port.count);
28482 ++ dev->channel, atomic_read(&dev->port.count));
28483
28484 spin_lock_irqsave(&dev->port.lock, flags);
28485 - if (++dev->port.count > 1) {
28486 @@ -78753,7 +79808,7 @@ index 5fe2ff3..121d696 100644
28487 break;
28488 }
28489 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
28490 -index 44f270f..1f5602d 100644
28491 +index a376ec1..1fbd6be 100644
28492 --- a/net/caif/cfctrl.c
28493 +++ b/net/caif/cfctrl.c
28494 @@ -10,6 +10,7 @@
28495 @@ -78809,7 +79864,7 @@ index ddac1ee..3ee0a78 100644
28496 };
28497
28498 diff --git a/net/can/gw.c b/net/can/gw.c
28499 -index 1f5c978..ef714c7 100644
28500 +index 574dda78e..3d2b3da 100644
28501 --- a/net/can/gw.c
28502 +++ b/net/can/gw.c
28503 @@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@××××××××××.de>");
28504 @@ -78820,7 +79875,7 @@ index 1f5c978..ef714c7 100644
28505
28506 static struct kmem_cache *cgw_cache __read_mostly;
28507
28508 -@@ -887,6 +886,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
28509 +@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
28510 return err;
28511 }
28512
28513 @@ -78831,7 +79886,7 @@ index 1f5c978..ef714c7 100644
28514 static __init int cgw_module_init(void)
28515 {
28516 printk(banner);
28517 -@@ -898,7 +901,6 @@ static __init int cgw_module_init(void)
28518 +@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
28519 return -ENOMEM;
28520
28521 /* set notifier */
28522 @@ -78980,18 +80035,9 @@ index 79ae884..17c5c09 100644
28523 a0 = a[0];
28524 a1 = a[1];
28525 diff --git a/net/core/datagram.c b/net/core/datagram.c
28526 -index 0337e2b..f82d4a3 100644
28527 +index 368f9c3..f82d4a3 100644
28528 --- a/net/core/datagram.c
28529 +++ b/net/core/datagram.c
28530 -@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
28531 - skb_queue_walk(queue, skb) {
28532 - *peeked = skb->peeked;
28533 - if (flags & MSG_PEEK) {
28534 -- if (*off >= skb->len) {
28535 -+ if (*off >= skb->len && skb->len) {
28536 - *off -= skb->len;
28537 - continue;
28538 - }
28539 @@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
28540 }
28541
28542 @@ -79002,10 +80048,10 @@ index 0337e2b..f82d4a3 100644
28543
28544 return err;
28545 diff --git a/net/core/dev.c b/net/core/dev.c
28546 -index 3470794..eb5008c 100644
28547 +index f64e439..8f959e6 100644
28548 --- a/net/core/dev.c
28549 +++ b/net/core/dev.c
28550 -@@ -1162,9 +1162,13 @@ void dev_load(struct net *net, const char *name)
28551 +@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
28552 if (no_module && capable(CAP_NET_ADMIN))
28553 no_module = request_module("netdev-%s", name);
28554 if (no_module && capable(CAP_SYS_MODULE)) {
28555 @@ -79019,7 +80065,7 @@ index 3470794..eb5008c 100644
28556 }
28557 }
28558 EXPORT_SYMBOL(dev_load);
28559 -@@ -1627,7 +1631,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
28560 +@@ -1715,7 +1719,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
28561 {
28562 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
28563 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
28564 @@ -79028,7 +80074,7 @@ index 3470794..eb5008c 100644
28565 kfree_skb(skb);
28566 return NET_RX_DROP;
28567 }
28568 -@@ -1637,7 +1641,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
28569 +@@ -1725,7 +1729,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
28570 nf_reset(skb);
28571
28572 if (unlikely(!is_skb_forwardable(dev, skb))) {
28573 @@ -79037,7 +80083,7 @@ index 3470794..eb5008c 100644
28574 kfree_skb(skb);
28575 return NET_RX_DROP;
28576 }
28577 -@@ -2093,7 +2097,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
28578 +@@ -2180,7 +2184,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
28579
28580 struct dev_gso_cb {
28581 void (*destructor)(struct sk_buff *skb);
28582 @@ -79046,7 +80092,7 @@ index 3470794..eb5008c 100644
28583
28584 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
28585
28586 -@@ -2955,7 +2959,7 @@ enqueue:
28587 +@@ -3053,7 +3057,7 @@ enqueue:
28588
28589 local_irq_restore(flags);
28590
28591 @@ -79055,7 +80101,7 @@ index 3470794..eb5008c 100644
28592 kfree_skb(skb);
28593 return NET_RX_DROP;
28594 }
28595 -@@ -3027,7 +3031,7 @@ int netif_rx_ni(struct sk_buff *skb)
28596 +@@ -3125,7 +3129,7 @@ int netif_rx_ni(struct sk_buff *skb)
28597 }
28598 EXPORT_SYMBOL(netif_rx_ni);
28599
28600 @@ -79064,7 +80110,7 @@ index 3470794..eb5008c 100644
28601 {
28602 struct softnet_data *sd = &__get_cpu_var(softnet_data);
28603
28604 -@@ -3358,7 +3362,7 @@ ncls:
28605 +@@ -3456,7 +3460,7 @@ ncls:
28606 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
28607 } else {
28608 drop:
28609 @@ -79073,7 +80119,7 @@ index 3470794..eb5008c 100644
28610 kfree_skb(skb);
28611 /* Jamal, now you will not able to escape explaining
28612 * me how you were going to use this. :-)
28613 -@@ -3944,7 +3948,7 @@ void netif_napi_del(struct napi_struct *napi)
28614 +@@ -4039,7 +4043,7 @@ void netif_napi_del(struct napi_struct *napi)
28615 }
28616 EXPORT_SYMBOL(netif_napi_del);
28617
28618 @@ -79082,7 +80128,7 @@ index 3470794..eb5008c 100644
28619 {
28620 struct softnet_data *sd = &__get_cpu_var(softnet_data);
28621 unsigned long time_limit = jiffies + 2;
28622 -@@ -4423,8 +4427,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
28623 +@@ -4523,8 +4527,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
28624 else
28625 seq_printf(seq, "%04x", ntohs(pt->type));
28626
28627 @@ -79096,7 +80142,7 @@ index 3470794..eb5008c 100644
28628 }
28629
28630 return 0;
28631 -@@ -5987,7 +5996,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
28632 +@@ -6096,7 +6105,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
28633 } else {
28634 netdev_stats_to_stats64(storage, &dev->stats);
28635 }
28636 @@ -79106,7 +80152,7 @@ index 3470794..eb5008c 100644
28637 }
28638 EXPORT_SYMBOL(dev_get_stats);
28639 diff --git a/net/core/flow.c b/net/core/flow.c
28640 -index e318c7e..168b1d0 100644
28641 +index b0901ee..7d3c2ca 100644
28642 --- a/net/core/flow.c
28643 +++ b/net/core/flow.c
28644 @@ -61,7 +61,7 @@ struct flow_cache {
28645 @@ -79168,7 +80214,7 @@ index 7e7aeb0..2a998cb 100644
28646
28647 m->msg_iov = iov;
28648 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
28649 -index fad649a..df5891e 100644
28650 +index 1868625..5f4de62 100644
28651 --- a/net/core/rtnetlink.c
28652 +++ b/net/core/rtnetlink.c
28653 @@ -58,7 +58,7 @@ struct rtnl_link {
28654 @@ -79181,10 +80227,10 @@ index fad649a..df5891e 100644
28655 static DEFINE_MUTEX(rtnl_mutex);
28656
28657 diff --git a/net/core/scm.c b/net/core/scm.c
28658 -index ab57084..0190c8f 100644
28659 +index 905dcc6..14ee2d6 100644
28660 --- a/net/core/scm.c
28661 +++ b/net/core/scm.c
28662 -@@ -223,7 +223,7 @@ EXPORT_SYMBOL(__scm_send);
28663 +@@ -224,7 +224,7 @@ EXPORT_SYMBOL(__scm_send);
28664 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
28665 {
28666 struct cmsghdr __user *cm
28667 @@ -79193,7 +80239,7 @@ index ab57084..0190c8f 100644
28668 struct cmsghdr cmhdr;
28669 int cmlen = CMSG_LEN(len);
28670 int err;
28671 -@@ -246,7 +246,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
28672 +@@ -247,7 +247,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
28673 err = -EFAULT;
28674 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
28675 goto out;
28676 @@ -79202,7 +80248,7 @@ index ab57084..0190c8f 100644
28677 goto out;
28678 cmlen = CMSG_SPACE(len);
28679 if (msg->msg_controllen < cmlen)
28680 -@@ -262,7 +262,7 @@ EXPORT_SYMBOL(put_cmsg);
28681 +@@ -263,7 +263,7 @@ EXPORT_SYMBOL(put_cmsg);
28682 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
28683 {
28684 struct cmsghdr __user *cm
28685 @@ -79211,7 +80257,7 @@ index ab57084..0190c8f 100644
28686
28687 int fdmax = 0;
28688 int fdnum = scm->fp->count;
28689 -@@ -282,7 +282,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
28690 +@@ -283,7 +283,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
28691 if (fdnum < fdmax)
28692 fdmax = fdnum;
28693
28694 @@ -79221,7 +80267,7 @@ index ab57084..0190c8f 100644
28695 {
28696 struct socket *sock;
28697 diff --git a/net/core/sock.c b/net/core/sock.c
28698 -index 8a146cf..ee08914d 100644
28699 +index bc131d4..029e378 100644
28700 --- a/net/core/sock.c
28701 +++ b/net/core/sock.c
28702 @@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
28703 @@ -79269,7 +80315,7 @@ index 8a146cf..ee08914d 100644
28704 goto discard_and_relse;
28705 }
28706
28707 -@@ -875,12 +875,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28708 +@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28709 struct timeval tm;
28710 } v;
28711
28712 @@ -79285,7 +80331,7 @@ index 8a146cf..ee08914d 100644
28713 return -EINVAL;
28714
28715 memset(&v, 0, sizeof(v));
28716 -@@ -1028,11 +1028,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28717 +@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28718
28719 case SO_PEERNAME:
28720 {
28721 @@ -79299,7 +80345,7 @@ index 8a146cf..ee08914d 100644
28722 return -EINVAL;
28723 if (copy_to_user(optval, address, len))
28724 return -EFAULT;
28725 -@@ -1080,7 +1080,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28726 +@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
28727
28728 if (len > lv)
28729 len = lv;
28730 @@ -79308,7 +80354,7 @@ index 8a146cf..ee08914d 100644
28731 return -EFAULT;
28732 lenout:
28733 if (put_user(len, optlen))
28734 -@@ -2212,7 +2212,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
28735 +@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
28736 */
28737 smp_wmb();
28738 atomic_set(&sk->sk_refcnt, 1);
28739 @@ -79318,7 +80364,7 @@ index 8a146cf..ee08914d 100644
28740 EXPORT_SYMBOL(sock_init_data);
28741
28742 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
28743 -index 602cd63..05c6c60 100644
28744 +index 602cd63..0a699b1 100644
28745 --- a/net/core/sock_diag.c
28746 +++ b/net/core/sock_diag.c
28747 @@ -15,20 +15,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
28748 @@ -79349,6 +80395,51 @@ index 602cd63..05c6c60 100644
28749 }
28750 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
28751
28752 +@@ -97,21 +104,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
28753 + }
28754 + EXPORT_SYMBOL_GPL(sock_diag_unregister);
28755 +
28756 +-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
28757 +-{
28758 +- if (sock_diag_handlers[family] == NULL)
28759 +- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
28760 +- NETLINK_SOCK_DIAG, family);
28761 +-
28762 +- mutex_lock(&sock_diag_table_mutex);
28763 +- return sock_diag_handlers[family];
28764 +-}
28765 +-
28766 +-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
28767 +-{
28768 +- mutex_unlock(&sock_diag_table_mutex);
28769 +-}
28770 +-
28771 + static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
28772 + {
28773 + int err;
28774 +@@ -121,12 +113,20 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
28775 + if (nlmsg_len(nlh) < sizeof(*req))
28776 + return -EINVAL;
28777 +
28778 +- hndl = sock_diag_lock_handler(req->sdiag_family);
28779 ++ if (req->sdiag_family >= AF_MAX)
28780 ++ return -EINVAL;
28781 ++
28782 ++ if (sock_diag_handlers[req->sdiag_family] == NULL)
28783 ++ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
28784 ++ NETLINK_SOCK_DIAG, req->sdiag_family);
28785 ++
28786 ++ mutex_lock(&sock_diag_table_mutex);
28787 ++ hndl = sock_diag_handlers[req->sdiag_family];
28788 + if (hndl == NULL)
28789 + err = -ENOENT;
28790 + else
28791 + err = hndl->dump(skb, nlh);
28792 +- sock_diag_unlock_handler(hndl);
28793 ++ mutex_unlock(&sock_diag_table_mutex);
28794 +
28795 + return err;
28796 + }
28797 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
28798 index a55eecc..dd8428c 100644
28799 --- a/net/decnet/sysctl_net_decnet.c
28800 @@ -79371,24 +80462,34 @@ index a55eecc..dd8428c 100644
28801 return -EFAULT;
28802
28803 *lenp = len;
28804 -diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
28805 -index 2a6abc1..c379ba7 100644
28806 ---- a/net/ipv4/devinet.c
28807 -+++ b/net/ipv4/devinet.c
28808 -@@ -822,9 +822,9 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
28809 - if (!ifa) {
28810 - ret = -ENOBUFS;
28811 - ifa = inet_alloc_ifa();
28812 -+ if (!ifa)
28813 -+ break;
28814 - INIT_HLIST_NODE(&ifa->hash);
28815 -- if (!ifa)
28816 -- break;
28817 - if (colon)
28818 - memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
28819 - else
28820 +diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
28821 +index a69b4e4..dbccba5 100644
28822 +--- a/net/ipv4/ah4.c
28823 ++++ b/net/ipv4/ah4.c
28824 +@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
28825 + return;
28826 +
28827 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
28828 +- atomic_inc(&flow_cache_genid);
28829 ++ atomic_inc_unchecked(&flow_cache_genid);
28830 + rt_genid_bump(net);
28831 +
28832 + ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
28833 +diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
28834 +index 3b4f0cd..8cb864c 100644
28835 +--- a/net/ipv4/esp4.c
28836 ++++ b/net/ipv4/esp4.c
28837 +@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
28838 + return;
28839 +
28840 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
28841 +- atomic_inc(&flow_cache_genid);
28842 ++ atomic_inc_unchecked(&flow_cache_genid);
28843 + rt_genid_bump(net);
28844 +
28845 + ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
28846 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
28847 -index 825c608..750ff29 100644
28848 +index 5cd75e2..f57ef39 100644
28849 --- a/net/ipv4/fib_frontend.c
28850 +++ b/net/ipv4/fib_frontend.c
28851 @@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
28852 @@ -79416,7 +80517,7 @@ index 825c608..750ff29 100644
28853 break;
28854 case NETDEV_DOWN:
28855 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
28856 -index 71b125c..f4c70b0 100644
28857 +index 4797a80..2bd54e9 100644
28858 --- a/net/ipv4/fib_semantics.c
28859 +++ b/net/ipv4/fib_semantics.c
28860 @@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
28861 @@ -79429,7 +80530,7 @@ index 71b125c..f4c70b0 100644
28862 return nh->nh_saddr;
28863 }
28864 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
28865 -index 7880af9..70f92a3 100644
28866 +index fa3ae81..0dbe6b8 100644
28867 --- a/net/ipv4/inet_hashtables.c
28868 +++ b/net/ipv4/inet_hashtables.c
28869 @@ -18,12 +18,15 @@
28870 @@ -79448,7 +80549,7 @@ index 7880af9..70f92a3 100644
28871 /*
28872 * Allocate and initialize a new local port bind bucket.
28873 * The bindhash mutex for snum's hash chain must be held here.
28874 -@@ -530,6 +533,8 @@ ok:
28875 +@@ -540,6 +543,8 @@ ok:
28876 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
28877 spin_unlock(&head->lock);
28878
28879 @@ -79473,7 +80574,7 @@ index 000e3d2..5472da3 100644
28880 secure_ip_id(daddr->addr.a4) :
28881 secure_ipv6_id(daddr->addr.a6));
28882 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
28883 -index 8d5cc75..821fd11 100644
28884 +index eb9d63a..50babc1 100644
28885 --- a/net/ipv4/ip_fragment.c
28886 +++ b/net/ipv4/ip_fragment.c
28887 @@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
28888 @@ -79486,10 +80587,10 @@ index 8d5cc75..821fd11 100644
28889
28890 rc = qp->q.fragments && (end - start) > max;
28891 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
28892 -index e95d72b..5268ac0 100644
28893 +index d9c4f11..02b82db 100644
28894 --- a/net/ipv4/ip_sockglue.c
28895 +++ b/net/ipv4/ip_sockglue.c
28896 -@@ -1151,7 +1151,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
28897 +@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
28898 len = min_t(unsigned int, len, opt->optlen);
28899 if (put_user(len, optlen))
28900 return -EFAULT;
28901 @@ -79499,7 +80600,7 @@ index e95d72b..5268ac0 100644
28902 return -EFAULT;
28903 return 0;
28904 }
28905 -@@ -1282,7 +1283,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
28906 +@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
28907 if (sk->sk_type != SOCK_STREAM)
28908 return -ENOPROTOOPT;
28909
28910 @@ -79508,11 +80609,24 @@ index e95d72b..5268ac0 100644
28911 msg.msg_controllen = len;
28912 msg.msg_flags = flags;
28913
28914 +diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
28915 +index 9a46dae..5f793a0 100644
28916 +--- a/net/ipv4/ipcomp.c
28917 ++++ b/net/ipv4/ipcomp.c
28918 +@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
28919 + return;
28920 +
28921 + if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
28922 +- atomic_inc(&flow_cache_genid);
28923 ++ atomic_inc_unchecked(&flow_cache_genid);
28924 + rt_genid_bump(net);
28925 +
28926 + ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
28927 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
28928 -index 798358b..73570b7 100644
28929 +index a2e50ae..e152b7c 100644
28930 --- a/net/ipv4/ipconfig.c
28931 +++ b/net/ipv4/ipconfig.c
28932 -@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
28933 +@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
28934
28935 mm_segment_t oldfs = get_fs();
28936 set_fs(get_ds());
28937 @@ -79521,7 +80635,7 @@ index 798358b..73570b7 100644
28938 set_fs(oldfs);
28939 return res;
28940 }
28941 -@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
28942 +@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
28943
28944 mm_segment_t oldfs = get_fs();
28945 set_fs(get_ds());
28946 @@ -79530,7 +80644,7 @@ index 798358b..73570b7 100644
28947 set_fs(oldfs);
28948 return res;
28949 }
28950 -@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
28951 +@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
28952
28953 mm_segment_t oldfs = get_fs();
28954 set_fs(get_ds());
28955 @@ -79540,7 +80654,7 @@ index 798358b..73570b7 100644
28956 return res;
28957 }
28958 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
28959 -index 97e61ea..cac1bbb 100644
28960 +index 3ea4127..849297b 100644
28961 --- a/net/ipv4/netfilter/arp_tables.c
28962 +++ b/net/ipv4/netfilter/arp_tables.c
28963 @@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
28964 @@ -79589,7 +80703,7 @@ index 97e61ea..cac1bbb 100644
28965
28966 case ARPT_SO_GET_ENTRIES:
28967 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
28968 -index 170b1fd..6105b91 100644
28969 +index 17c5e06..1b91206 100644
28970 --- a/net/ipv4/netfilter/ip_tables.c
28971 +++ b/net/ipv4/netfilter/ip_tables.c
28972 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
28973 @@ -79706,10 +80820,10 @@ index 6f08991..55867ad 100644
28974
28975 static int raw_seq_show(struct seq_file *seq, void *v)
28976 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
28977 -index 0fdfe4c..e7ea542 100644
28978 +index a0fcc47..5949bba1 100644
28979 --- a/net/ipv4/route.c
28980 +++ b/net/ipv4/route.c
28981 -@@ -2579,7 +2579,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
28982 +@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
28983
28984 static __net_init int rt_genid_init(struct net *net)
28985 {
28986 @@ -79719,10 +80833,10 @@ index 0fdfe4c..e7ea542 100644
28987 sizeof(net->ipv4.dev_addr_genid));
28988 return 0;
28989 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
28990 -index beabc80..48a6a10 100644
28991 +index ad70a96..50cb55b 100644
28992 --- a/net/ipv4/tcp_input.c
28993 +++ b/net/ipv4/tcp_input.c
28994 -@@ -4709,7 +4709,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
28995 +@@ -4733,7 +4733,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
28996 * simplifies code)
28997 */
28998 static void
28999 @@ -79731,26 +80845,7 @@ index beabc80..48a6a10 100644
29000 struct sk_buff *head, struct sk_buff *tail,
29001 u32 start, u32 end)
29002 {
29003 -@@ -5541,6 +5541,9 @@ slow_path:
29004 - if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
29005 - goto csum_error;
29006 -
29007 -+ if (!th->ack)
29008 -+ goto discard;
29009 -+
29010 - /*
29011 - * Standard slow path.
29012 - */
29013 -@@ -5549,7 +5552,7 @@ slow_path:
29014 - return 0;
29015 -
29016 - step5:
29017 -- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
29018 -+ if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
29019 - goto discard;
29020 -
29021 - /* ts_recent update must be made after we are sure that the packet
29022 -@@ -5840,6 +5843,7 @@ discard:
29023 +@@ -5850,6 +5850,7 @@ discard:
29024 tcp_paws_reject(&tp->rx_opt, 0))
29025 goto discard_and_undo;
29026
29027 @@ -79758,7 +80853,7 @@ index beabc80..48a6a10 100644
29028 if (th->syn) {
29029 /* We see SYN without ACK. It is attempt of
29030 * simultaneous connect with crossed SYNs.
29031 -@@ -5890,6 +5894,7 @@ discard:
29032 +@@ -5900,6 +5901,7 @@ discard:
29033 goto discard;
29034 #endif
29035 }
29036 @@ -79766,7 +80861,7 @@ index beabc80..48a6a10 100644
29037 /* "fifth, if neither of the SYN or RST bits is set then
29038 * drop the segment and return."
29039 */
29040 -@@ -5934,7 +5939,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
29041 +@@ -5944,7 +5946,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
29042 goto discard;
29043
29044 if (th->syn) {
29045 @@ -79775,35 +80870,8 @@ index beabc80..48a6a10 100644
29046 goto discard;
29047 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
29048 return 1;
29049 -@@ -5981,11 +5986,15 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
29050 - if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
29051 - goto discard;
29052 - }
29053 -+
29054 -+ if (!th->ack)
29055 -+ goto discard;
29056 -+
29057 - if (!tcp_validate_incoming(sk, skb, th, 0))
29058 - return 0;
29059 -
29060 - /* step 5: check the ACK field */
29061 -- if (th->ack) {
29062 -+ if (true) {
29063 - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
29064 -
29065 - switch (sk->sk_state) {
29066 -@@ -6135,8 +6144,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
29067 - }
29068 - break;
29069 - }
29070 -- } else
29071 -- goto discard;
29072 -+ }
29073 -
29074 - /* ts_recent update must be made after we are sure that the packet
29075 - * is in window.
29076 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
29077 -index e637770..364ff02 100644
29078 +index eadb693..e8f7251 100644
29079 --- a/net/ipv4/tcp_ipv4.c
29080 +++ b/net/ipv4/tcp_ipv4.c
29081 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
29082 @@ -79817,7 +80885,7 @@ index e637770..364ff02 100644
29083 #ifdef CONFIG_TCP_MD5SIG
29084 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
29085 __be32 daddr, __be32 saddr, const struct tcphdr *th);
29086 -@@ -1898,6 +1902,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
29087 +@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
29088 return 0;
29089
29090 reset:
29091 @@ -79827,7 +80895,7 @@ index e637770..364ff02 100644
29092 tcp_v4_send_reset(rsk, skb);
29093 discard:
29094 kfree_skb(skb);
29095 -@@ -1998,12 +2005,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
29096 +@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
29097 TCP_SKB_CB(skb)->sacked = 0;
29098
29099 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
29100 @@ -79850,7 +80918,7 @@ index e637770..364ff02 100644
29101
29102 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
29103 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
29104 -@@ -2054,6 +2068,10 @@ no_tcp_socket:
29105 +@@ -2050,6 +2064,10 @@ no_tcp_socket:
29106 bad_packet:
29107 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
29108 } else {
29109 @@ -79862,7 +80930,7 @@ index e637770..364ff02 100644
29110 }
29111
29112 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
29113 -index a7302d9..e3ec754 100644
29114 +index f35f2df..ccb5ca6 100644
29115 --- a/net/ipv4/tcp_minisocks.c
29116 +++ b/net/ipv4/tcp_minisocks.c
29117 @@ -27,6 +27,10 @@
29118 @@ -79902,7 +80970,7 @@ index 4526fe6..1a34e43 100644
29119 cnt += width;
29120 }
29121 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
29122 -index d47c1b4..b0584de 100644
29123 +index b78aac3..e18230b 100644
29124 --- a/net/ipv4/tcp_timer.c
29125 +++ b/net/ipv4/tcp_timer.c
29126 @@ -22,6 +22,10 @@
29127 @@ -80049,10 +81117,10 @@ index 1f4d405..3524677 100644
29128
29129 int udp4_seq_show(struct seq_file *seq, void *v)
29130 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
29131 -index a468a36..b50ffde 100644
29132 +index 1b5d8cb..2e8c2d9 100644
29133 --- a/net/ipv6/addrconf.c
29134 +++ b/net/ipv6/addrconf.c
29135 -@@ -2121,7 +2121,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
29136 +@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
29137 p.iph.ihl = 5;
29138 p.iph.protocol = IPPROTO_IPV6;
29139 p.iph.ttl = 64;
29140 @@ -80062,10 +81130,10 @@ index a468a36..b50ffde 100644
29141 if (ops->ndo_do_ioctl) {
29142 mm_segment_t oldfs = get_fs();
29143 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
29144 -index a23350c..899c62c 100644
29145 +index 131dd09..7647ada 100644
29146 --- a/net/ipv6/ip6_gre.c
29147 +++ b/net/ipv6/ip6_gre.c
29148 -@@ -1353,7 +1353,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
29149 +@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
29150 }
29151
29152
29153 @@ -80075,10 +81143,10 @@ index a23350c..899c62c 100644
29154 .err_handler = ip6gre_err,
29155 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
29156 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
29157 -index e02faed..9780f28 100644
29158 +index d1e2e8e..51c19ae 100644
29159 --- a/net/ipv6/ipv6_sockglue.c
29160 +++ b/net/ipv6/ipv6_sockglue.c
29161 -@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
29162 +@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
29163 if (sk->sk_type != SOCK_STREAM)
29164 return -ENOPROTOOPT;
29165
29166 @@ -80088,10 +81156,10 @@ index e02faed..9780f28 100644
29167 msg.msg_flags = flags;
29168
29169 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
29170 -index d7cb045..8c0ded6 100644
29171 +index 125a90d..2a11f36 100644
29172 --- a/net/ipv6/netfilter/ip6_tables.c
29173 +++ b/net/ipv6/netfilter/ip6_tables.c
29174 -@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
29175 +@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
29176 #endif
29177
29178 static int get_info(struct net *net, void __user *user,
29179 @@ -80109,7 +81177,7 @@ index d7cb045..8c0ded6 100644
29180 sizeof(struct ip6t_getinfo));
29181 return -EINVAL;
29182 }
29183 -@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
29184 +@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
29185 info.size = private->size;
29186 strcpy(info.name, name);
29187
29188 @@ -80118,7 +81186,7 @@ index d7cb045..8c0ded6 100644
29189 ret = -EFAULT;
29190 else
29191 ret = 0;
29192 -@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
29193 +@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
29194
29195 switch (cmd) {
29196 case IP6T_SO_GET_INFO:
29197 @@ -80127,7 +81195,7 @@ index d7cb045..8c0ded6 100644
29198 break;
29199 case IP6T_SO_GET_ENTRIES:
29200 ret = compat_get_entries(sock_net(sk), user, len);
29201 -@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
29202 +@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
29203
29204 switch (cmd) {
29205 case IP6T_SO_GET_INFO:
29206 @@ -80137,7 +81205,7 @@ index d7cb045..8c0ded6 100644
29207
29208 case IP6T_SO_GET_ENTRIES:
29209 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
29210 -index d8e95c7..81422bc 100644
29211 +index 70fa814..d70c28c 100644
29212 --- a/net/ipv6/raw.c
29213 +++ b/net/ipv6/raw.c
29214 @@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
29215 @@ -80221,10 +81289,10 @@ index d8e95c7..81422bc 100644
29216
29217 static int raw6_seq_show(struct seq_file *seq, void *v)
29218 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
29219 -index 73f2a6b..f8049a1 100644
29220 +index 4f435371..5de9da7 100644
29221 --- a/net/ipv6/tcp_ipv6.c
29222 +++ b/net/ipv6/tcp_ipv6.c
29223 -@@ -106,6 +106,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
29224 +@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
29225 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
29226 }
29227
29228 @@ -80235,7 +81303,7 @@ index 73f2a6b..f8049a1 100644
29229 static void tcp_v6_hash(struct sock *sk)
29230 {
29231 if (sk->sk_state != TCP_CLOSE) {
29232 -@@ -1525,6 +1529,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
29233 +@@ -1433,6 +1437,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
29234 return 0;
29235
29236 reset:
29237 @@ -80245,7 +81313,7 @@ index 73f2a6b..f8049a1 100644
29238 tcp_v6_send_reset(sk, skb);
29239 discard:
29240 if (opt_skb)
29241 -@@ -1606,12 +1613,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
29242 +@@ -1514,12 +1521,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
29243 TCP_SKB_CB(skb)->sacked = 0;
29244
29245 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
29246 @@ -80268,7 +81336,7 @@ index 73f2a6b..f8049a1 100644
29247
29248 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
29249 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
29250 -@@ -1660,6 +1675,10 @@ no_tcp_socket:
29251 +@@ -1568,6 +1583,10 @@ no_tcp_socket:
29252 bad_packet:
29253 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
29254 } else {
29255 @@ -80280,7 +81348,7 @@ index 73f2a6b..f8049a1 100644
29256 }
29257
29258 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
29259 -index fc99972..69397e8 100644
29260 +index fb08329..2d6919e 100644
29261 --- a/net/ipv6/udp.c
29262 +++ b/net/ipv6/udp.c
29263 @@ -51,6 +51,10 @@
29264 @@ -80331,7 +81399,7 @@ index fc99972..69397e8 100644
29265 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
29266
29267 kfree_skb(skb);
29268 -@@ -1473,7 +1480,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
29269 +@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
29270 0,
29271 sock_i_ino(sp),
29272 atomic_read(&sp->sk_refcnt), sp,
29273 @@ -80341,10 +81409,10 @@ index fc99972..69397e8 100644
29274
29275 int udp6_seq_show(struct seq_file *seq, void *v)
29276 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
29277 -index 496ce2c..f79fac8 100644
29278 +index a68c88c..d55b0c5 100644
29279 --- a/net/irda/ircomm/ircomm_tty.c
29280 +++ b/net/irda/ircomm/ircomm_tty.c
29281 -@@ -311,12 +311,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29282 +@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29283 add_wait_queue(&port->open_wait, &wait);
29284
29285 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
29286 @@ -80359,7 +81427,7 @@ index 496ce2c..f79fac8 100644
29287 }
29288 spin_unlock_irqrestore(&port->lock, flags);
29289 port->blocked_open++;
29290 -@@ -352,7 +352,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29291 +@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29292 }
29293
29294 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
29295 @@ -80368,7 +81436,7 @@ index 496ce2c..f79fac8 100644
29296
29297 schedule();
29298 }
29299 -@@ -363,13 +363,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29300 +@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
29301 if (extra_count) {
29302 /* ++ is not atomic, so this should be protected - Jean II */
29303 spin_lock_irqsave(&port->lock, flags);
29304 @@ -80384,7 +81452,7 @@ index 496ce2c..f79fac8 100644
29305
29306 if (!retval)
29307 port->flags |= ASYNC_NORMAL_ACTIVE;
29308 -@@ -443,12 +443,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
29309 +@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
29310
29311 /* ++ is not atomic, so this should be protected - Jean II */
29312 spin_lock_irqsave(&self->port.lock, flags);
29313 @@ -80399,7 +81467,7 @@ index 496ce2c..f79fac8 100644
29314
29315 /* Not really used by us, but lets do it anyway */
29316 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
29317 -@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
29318 +@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
29319 tty_kref_put(port->tty);
29320 }
29321 port->tty = NULL;
29322 @@ -80408,7 +81476,7 @@ index 496ce2c..f79fac8 100644
29323 spin_unlock_irqrestore(&port->lock, flags);
29324
29325 wake_up_interruptible(&port->open_wait);
29326 -@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
29327 +@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
29328 seq_putc(m, '\n');
29329
29330 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
29331 @@ -80434,8 +81502,21 @@ index cd6f7a9..e63fe89 100644
29332 }
29333
29334 write_unlock_bh(&iucv_sk_list.lock);
29335 +diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
29336 +index df08250..02021fe 100644
29337 +--- a/net/iucv/iucv.c
29338 ++++ b/net/iucv/iucv.c
29339 +@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
29340 + return NOTIFY_OK;
29341 + }
29342 +
29343 +-static struct notifier_block __refdata iucv_cpu_notifier = {
29344 ++static struct notifier_block iucv_cpu_notifier = {
29345 + .notifier_call = iucv_cpu_notify,
29346 + };
29347 +
29348 diff --git a/net/key/af_key.c b/net/key/af_key.c
29349 -index 08897a3..0b812ab 100644
29350 +index 5b426a6..970032b 100644
29351 --- a/net/key/af_key.c
29352 +++ b/net/key/af_key.c
29353 @@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
29354 @@ -80452,10 +81533,19 @@ index 08897a3..0b812ab 100644
29355 return res;
29356 }
29357 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
29358 -index 494da7f..6ce2ffd 100644
29359 +index 0479c64..d031db6 100644
29360 --- a/net/mac80211/cfg.c
29361 +++ b/net/mac80211/cfg.c
29362 -@@ -2604,7 +2604,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
29363 +@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
29364 + ret = ieee80211_vif_use_channel(sdata, chandef,
29365 + IEEE80211_CHANCTX_EXCLUSIVE);
29366 + }
29367 +- } else if (local->open_count == local->monitors) {
29368 ++ } else if (local_read(&local->open_count) == local->monitors) {
29369 + local->_oper_channel = chandef->chan;
29370 + local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
29371 + ieee80211_hw_config(local, 0);
29372 +@@ -2716,7 +2716,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
29373 else
29374 local->probe_req_reg--;
29375
29376 @@ -80465,7 +81555,7 @@ index 494da7f..6ce2ffd 100644
29377
29378 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
29379 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
29380 -index 493e2e8..be76574 100644
29381 +index 2ed065c..948177f 100644
29382 --- a/net/mac80211/ieee80211_i.h
29383 +++ b/net/mac80211/ieee80211_i.h
29384 @@ -28,6 +28,7 @@
29385 @@ -80476,7 +81566,7 @@ index 493e2e8..be76574 100644
29386 #include "key.h"
29387 #include "sta_info.h"
29388 #include "debug.h"
29389 -@@ -852,7 +853,7 @@ struct ieee80211_local {
29390 +@@ -909,7 +910,7 @@ struct ieee80211_local {
29391 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
29392 spinlock_t queue_stop_reason_lock;
29393
29394 @@ -80486,10 +81576,10 @@ index 493e2e8..be76574 100644
29395 /* number of interfaces with corresponding FIF_ flags */
29396 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
29397 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
29398 -index 0f5af91..4dba9e7 100644
29399 +index 8be854e..ad72a69 100644
29400 --- a/net/mac80211/iface.c
29401 +++ b/net/mac80211/iface.c
29402 -@@ -465,7 +465,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29403 +@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29404 break;
29405 }
29406
29407 @@ -80498,7 +81588,7 @@ index 0f5af91..4dba9e7 100644
29408 res = drv_start(local);
29409 if (res)
29410 goto err_del_bss;
29411 -@@ -508,7 +508,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29412 +@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29413 break;
29414 }
29415
29416 @@ -80507,7 +81597,7 @@ index 0f5af91..4dba9e7 100644
29417 res = ieee80211_add_virtual_monitor(local);
29418 if (res)
29419 goto err_stop;
29420 -@@ -616,7 +616,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29421 +@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29422 mutex_unlock(&local->mtx);
29423
29424 if (coming_up)
29425 @@ -80516,7 +81606,7 @@ index 0f5af91..4dba9e7 100644
29426
29427 if (hw_reconf_flags)
29428 ieee80211_hw_config(local, hw_reconf_flags);
29429 -@@ -630,7 +630,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29430 +@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
29431 err_del_interface:
29432 drv_remove_interface(local, sdata);
29433 err_stop:
29434 @@ -80525,7 +81615,7 @@ index 0f5af91..4dba9e7 100644
29435 drv_stop(local);
29436 err_del_bss:
29437 sdata->bss = NULL;
29438 -@@ -762,7 +762,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29439 +@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29440 }
29441
29442 if (going_down)
29443 @@ -80534,7 +81624,7 @@ index 0f5af91..4dba9e7 100644
29444
29445 switch (sdata->vif.type) {
29446 case NL80211_IFTYPE_AP_VLAN:
29447 -@@ -818,7 +818,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29448 +@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29449
29450 ieee80211_recalc_ps(local, -1);
29451
29452 @@ -80543,7 +81633,7 @@ index 0f5af91..4dba9e7 100644
29453 if (local->ops->napi_poll)
29454 napi_disable(&local->napi);
29455 ieee80211_clear_tx_pending(local);
29456 -@@ -850,7 +850,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29457 +@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
29458 }
29459 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
29460
29461 @@ -80553,12 +81643,12 @@ index 0f5af91..4dba9e7 100644
29462 }
29463
29464 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
29465 -index f57f597..e0a7c03 100644
29466 +index 1b087ff..bf600e9 100644
29467 --- a/net/mac80211/main.c
29468 +++ b/net/mac80211/main.c
29469 -@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
29470 - local->hw.conf.power_level = power;
29471 - }
29472 +@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
29473 + changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
29474 + IEEE80211_CONF_CHANGE_POWER);
29475
29476 - if (changed && local->open_count) {
29477 + if (changed && local_read(&local->open_count)) {
29478 @@ -80566,19 +81656,19 @@ index f57f597..e0a7c03 100644
29479 /*
29480 * Goal:
29481 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
29482 -index 5c572e7..ecf75ce 100644
29483 +index 79a48f3..5e185c9 100644
29484 --- a/net/mac80211/pm.c
29485 +++ b/net/mac80211/pm.c
29486 -@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29487 - struct ieee80211_sub_if_data *sdata;
29488 +@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29489 struct sta_info *sta;
29490 + struct ieee80211_chanctx *ctx;
29491
29492 - if (!local->open_count)
29493 + if (!local_read(&local->open_count))
29494 goto suspend;
29495
29496 ieee80211_scan_cancel(local);
29497 -@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29498 +@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29499 cancel_work_sync(&local->dynamic_ps_enable_work);
29500 del_timer_sync(&local->dynamic_ps_timer);
29501
29502 @@ -80587,8 +81677,8 @@ index 5c572e7..ecf75ce 100644
29503 if (local->wowlan) {
29504 int err = drv_suspend(local, wowlan);
29505 if (err < 0) {
29506 -@@ -143,7 +143,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29507 - drv_remove_interface(local, sdata);
29508 +@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
29509 + mutex_unlock(&local->chanctx_mtx);
29510
29511 /* stop hardware - this must stop RX */
29512 - if (local->open_count)
29513 @@ -80597,10 +81687,10 @@ index 5c572e7..ecf75ce 100644
29514
29515 suspend:
29516 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
29517 -index 3313c11..bec9f17 100644
29518 +index dd88381..eef4dd6 100644
29519 --- a/net/mac80211/rate.c
29520 +++ b/net/mac80211/rate.c
29521 -@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
29522 +@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
29523
29524 ASSERT_RTNL();
29525
29526 @@ -80623,10 +81713,10 @@ index c97a065..ff61928 100644
29527
29528 return p;
29529 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
29530 -index 0151ae3..26709d3 100644
29531 +index f11e8c5..08d0013 100644
29532 --- a/net/mac80211/util.c
29533 +++ b/net/mac80211/util.c
29534 -@@ -1332,7 +1332,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
29535 +@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
29536 }
29537 #endif
29538 /* everything else happens only if HW was up & running */
29539 @@ -80669,10 +81759,10 @@ index 3259697..54d5393 100644
29540 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
29541 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
29542 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
29543 -index 1548df9..98ad9b4 100644
29544 +index 30e764a..c3b6a9d 100644
29545 --- a/net/netfilter/ipvs/ip_vs_conn.c
29546 +++ b/net/netfilter/ipvs/ip_vs_conn.c
29547 -@@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
29548 +@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
29549 /* Increase the refcnt counter of the dest */
29550 atomic_inc(&dest->refcnt);
29551
29552 @@ -80681,7 +81771,7 @@ index 1548df9..98ad9b4 100644
29553 if (cp->protocol != IPPROTO_UDP)
29554 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
29555 flags = cp->flags;
29556 -@@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
29557 +@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
29558 atomic_set(&cp->refcnt, 1);
29559
29560 atomic_set(&cp->n_control, 0);
29561 @@ -80690,7 +81780,7 @@ index 1548df9..98ad9b4 100644
29562
29563 atomic_inc(&ipvs->conn_count);
29564 if (flags & IP_VS_CONN_F_NO_CPORT)
29565 -@@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
29566 +@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
29567
29568 /* Don't drop the entry if its number of incoming packets is not
29569 located in [0, 8] */
29570 @@ -80700,11 +81790,11 @@ index 1548df9..98ad9b4 100644
29571
29572 if (!todrop_rate[i]) return 0;
29573 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
29574 -index 58918e2..4d177a9 100644
29575 +index 47edf5a..235b07d 100644
29576 --- a/net/netfilter/ipvs/ip_vs_core.c
29577 +++ b/net/netfilter/ipvs/ip_vs_core.c
29578 -@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
29579 - ret = cp->packet_xmit(skb, cp, pd->pp);
29580 +@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
29581 + ret = cp->packet_xmit(skb, cp, pd->pp, iph);
29582 /* do not touch skb anymore */
29583
29584 - atomic_inc(&cp->in_pkts);
29585 @@ -80712,7 +81802,7 @@ index 58918e2..4d177a9 100644
29586 ip_vs_conn_put(cp);
29587 return ret;
29588 }
29589 -@@ -1681,7 +1681,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
29590 +@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
29591 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
29592 pkts = sysctl_sync_threshold(ipvs);
29593 else
29594 @@ -80722,7 +81812,7 @@ index 58918e2..4d177a9 100644
29595 if (ipvs->sync_state & IP_VS_STATE_MASTER)
29596 ip_vs_sync_conn(net, cp, pkts);
29597 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
29598 -index c4ee437..a774a74 100644
29599 +index ec664cb..cd576ab 100644
29600 --- a/net/netfilter/ipvs/ip_vs_ctl.c
29601 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
29602 @@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
29603 @@ -80771,7 +81861,7 @@ index c4ee437..a774a74 100644
29604 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
29605 atomic_read(&dest->weight)) ||
29606 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
29607 -index effa10c..9058928 100644
29608 +index 44fd10c..2a163b3 100644
29609 --- a/net/netfilter/ipvs/ip_vs_sync.c
29610 +++ b/net/netfilter/ipvs/ip_vs_sync.c
29611 @@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
29612 @@ -80802,10 +81892,10 @@ index effa10c..9058928 100644
29613 cp->old_state = cp->state;
29614 /*
29615 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
29616 -index cc4c809..50f8fe5 100644
29617 +index ee6b7a9..f9a89f6 100644
29618 --- a/net/netfilter/ipvs/ip_vs_xmit.c
29619 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
29620 -@@ -1202,7 +1202,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
29621 +@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
29622 else
29623 rc = NF_ACCEPT;
29624 /* do not touch skb anymore */
29625 @@ -80814,7 +81904,7 @@ index cc4c809..50f8fe5 100644
29626 goto out;
29627 }
29628
29629 -@@ -1323,7 +1323,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
29630 +@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
29631 else
29632 rc = NF_ACCEPT;
29633 /* do not touch skb anymore */
29634 @@ -80824,12 +81914,12 @@ index cc4c809..50f8fe5 100644
29635 }
29636
29637 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
29638 -index ec02168..f0caab6 100644
29639 +index e4a0c4f..c263f28 100644
29640 --- a/net/netfilter/nf_conntrack_core.c
29641 +++ b/net/netfilter/nf_conntrack_core.c
29642 -@@ -1533,6 +1533,10 @@ err_extend:
29643 - #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
29644 +@@ -1529,6 +1529,10 @@ err_extend:
29645 #define DYING_NULLS_VAL ((1<<30)+1)
29646 + #define TEMPLATE_NULLS_VAL ((1<<30)+2)
29647
29648 +#ifdef CONFIG_GRKERNSEC_HIDESYM
29649 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
29650 @@ -80838,7 +81928,7 @@ index ec02168..f0caab6 100644
29651 static int nf_conntrack_init_net(struct net *net)
29652 {
29653 int ret;
29654 -@@ -1546,7 +1550,11 @@ static int nf_conntrack_init_net(struct net *net)
29655 +@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
29656 goto err_stat;
29657 }
29658
29659 @@ -80964,10 +82054,10 @@ index 4fe4fb4..87a89e5 100644
29660 return 0;
29661 }
29662 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
29663 -index 4da797f..eb1df70 100644
29664 +index c0353d5..fcb0270 100644
29665 --- a/net/netlink/af_netlink.c
29666 +++ b/net/netlink/af_netlink.c
29667 -@@ -782,7 +782,7 @@ static void netlink_overrun(struct sock *sk)
29668 +@@ -785,7 +785,7 @@ static void netlink_overrun(struct sock *sk)
29669 sk->sk_error_report(sk);
29670 }
29671 }
29672 @@ -80976,7 +82066,7 @@ index 4da797f..eb1df70 100644
29673 }
29674
29675 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
29676 -@@ -2068,7 +2068,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
29677 +@@ -2071,7 +2071,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
29678 sk_wmem_alloc_get(s),
29679 nlk->cb,
29680 atomic_read(&s->sk_refcnt),
29681 @@ -81006,7 +82096,7 @@ index 7261eb8..44e8ac6 100644
29682 *uaddr_len = sizeof(struct sockaddr_ax25);
29683 }
29684 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
29685 -index 5db6316..c9bf90e 100644
29686 +index c111bd0..7788ff7 100644
29687 --- a/net/packet/af_packet.c
29688 +++ b/net/packet/af_packet.c
29689 @@ -1578,7 +1578,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
29690 @@ -81027,7 +82117,7 @@ index 5db6316..c9bf90e 100644
29691 spin_unlock(&sk->sk_receive_queue.lock);
29692
29693 drop_n_restore:
29694 -@@ -2539,6 +2539,7 @@ out:
29695 +@@ -2565,6 +2565,7 @@ out:
29696
29697 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
29698 {
29699 @@ -81035,7 +82125,7 @@ index 5db6316..c9bf90e 100644
29700 struct sock_exterr_skb *serr;
29701 struct sk_buff *skb, *skb2;
29702 int copied, err;
29703 -@@ -2560,8 +2561,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
29704 +@@ -2586,8 +2587,9 @@ static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
29705 sock_recv_timestamp(msg, sk, skb);
29706
29707 serr = SKB_EXT_ERR(skb);
29708 @@ -81046,7 +82136,7 @@ index 5db6316..c9bf90e 100644
29709
29710 msg->msg_flags |= MSG_ERRQUEUE;
29711 err = copied;
29712 -@@ -3173,7 +3175,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
29713 +@@ -3212,7 +3214,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
29714 case PACKET_HDRLEN:
29715 if (len > sizeof(int))
29716 len = sizeof(int);
29717 @@ -81055,7 +82145,7 @@ index 5db6316..c9bf90e 100644
29718 return -EFAULT;
29719 switch (val) {
29720 case TPACKET_V1:
29721 -@@ -3212,7 +3214,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
29722 +@@ -3254,7 +3256,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
29723 len = lv;
29724 if (put_user(len, optlen))
29725 return -EFAULT;
29726 @@ -81162,7 +82252,7 @@ index e5b65ac..f3b6fb7 100644
29727 if (likely(*recent == gen))
29728 return 0;
29729 diff --git a/net/rds/ib.h b/net/rds/ib.h
29730 -index 8d2b3d5..227ec5b 100644
29731 +index 7280ab8..e04f4ea 100644
29732 --- a/net/rds/ib.h
29733 +++ b/net/rds/ib.h
29734 @@ -128,7 +128,7 @@ struct rds_ib_connection {
29735 @@ -81175,10 +82265,10 @@ index 8d2b3d5..227ec5b 100644
29736 spinlock_t i_ack_lock; /* protect i_ack_next */
29737 u64 i_ack_next; /* next ACK to send */
29738 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
29739 -index a1e1162..265e129 100644
29740 +index 31b74f5..dc1fbfa 100644
29741 --- a/net/rds/ib_cm.c
29742 +++ b/net/rds/ib_cm.c
29743 -@@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
29744 +@@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
29745 /* Clear the ACK state */
29746 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
29747 #ifdef KERNEL_HAS_ATOMIC64
29748 @@ -81188,10 +82278,10 @@ index a1e1162..265e129 100644
29749 ic->i_ack_next = 0;
29750 #endif
29751 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
29752 -index 8d19491..05a3e65 100644
29753 +index 8eb9501..0c386ff 100644
29754 --- a/net/rds/ib_recv.c
29755 +++ b/net/rds/ib_recv.c
29756 -@@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
29757 +@@ -597,7 +597,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
29758 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
29759 int ack_required)
29760 {
29761 @@ -81200,7 +82290,7 @@ index 8d19491..05a3e65 100644
29762 if (ack_required) {
29763 smp_mb__before_clear_bit();
29764 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
29765 -@@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
29766 +@@ -609,7 +609,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
29767 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
29768 smp_mb__after_clear_bit();
29769
29770 @@ -81543,24 +82633,11 @@ index f226709..0e735a8 100644
29771 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
29772
29773 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
29774 -diff --git a/net/sctp/auth.c b/net/sctp/auth.c
29775 -index 159b9bc..d8420ae 100644
29776 ---- a/net/sctp/auth.c
29777 -+++ b/net/sctp/auth.c
29778 -@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
29779 - return;
29780 -
29781 - if (atomic_dec_and_test(&key->refcnt)) {
29782 -- kfree(key);
29783 -+ kzfree(key);
29784 - SCTP_DBG_OBJCNT_DEC(keys);
29785 - }
29786 - }
29787 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
29788 -index ea14cb4..834e8e4 100644
29789 +index 391a245..8f6a898 100644
29790 --- a/net/sctp/ipv6.c
29791 +++ b/net/sctp/ipv6.c
29792 -@@ -1037,7 +1037,7 @@ void sctp_v6_pf_init(void)
29793 +@@ -1038,7 +1038,7 @@ void sctp_v6_pf_init(void)
29794
29795 void sctp_v6_pf_exit(void)
29796 {
29797 @@ -81570,10 +82647,10 @@ index ea14cb4..834e8e4 100644
29798
29799 /* Initialize IPv6 support and register with socket layer. */
29800 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
29801 -index 9966e7b..540c575 100644
29802 +index 8c19e97..16264b8 100644
29803 --- a/net/sctp/proc.c
29804 +++ b/net/sctp/proc.c
29805 -@@ -328,7 +328,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
29806 +@@ -338,7 +338,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
29807 seq_printf(seq,
29808 "%8pK %8pK %-3d %-3d %-2d %-4d "
29809 "%4d %8d %8d %7d %5lu %-5d %5d ",
29810 @@ -81584,7 +82661,7 @@ index 9966e7b..540c575 100644
29811 assoc->assoc_id,
29812 assoc->sndbuf_used,
29813 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
29814 -index 2d51842..150ba5c 100644
29815 +index f898b1c..60bf8f2 100644
29816 --- a/net/sctp/protocol.c
29817 +++ b/net/sctp/protocol.c
29818 @@ -834,8 +834,10 @@ int sctp_register_af(struct sctp_af *af)
29819 @@ -81610,10 +82687,10 @@ index 2d51842..150ba5c 100644
29820
29821 static int sctp_v4_protosw_init(void)
29822 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
29823 -index 9261d9a..0a6ae623 100644
29824 +index cedd9bf..b1fddeb 100644
29825 --- a/net/sctp/socket.c
29826 +++ b/net/sctp/socket.c
29827 -@@ -4661,6 +4661,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
29828 +@@ -4665,6 +4665,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
29829 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
29830 if (space_left < addrlen)
29831 return -ENOMEM;
29832 @@ -81623,7 +82700,7 @@ index 9261d9a..0a6ae623 100644
29833 return -EFAULT;
29834 to += addrlen;
29835 diff --git a/net/socket.c b/net/socket.c
29836 -index d92c490..b4bc863 100644
29837 +index 2ca51c7..45d0b31 100644
29838 --- a/net/socket.c
29839 +++ b/net/socket.c
29840 @@ -89,6 +89,7 @@
29841 @@ -81652,7 +82729,7 @@ index d92c490..b4bc863 100644
29842
29843 static struct file_system_type sock_fs_type = {
29844 .name = "sockfs",
29845 -@@ -1276,6 +1279,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
29846 +@@ -1270,6 +1273,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
29847 return -EAFNOSUPPORT;
29848 if (type < 0 || type >= SOCK_MAX)
29849 return -EINVAL;
29850 @@ -81661,7 +82738,7 @@ index d92c490..b4bc863 100644
29851
29852 /* Compatibility.
29853
29854 -@@ -1407,6 +1412,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
29855 +@@ -1401,6 +1406,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
29856 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
29857 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
29858
29859 @@ -81678,7 +82755,7 @@ index d92c490..b4bc863 100644
29860 retval = sock_create(family, type, protocol, &sock);
29861 if (retval < 0)
29862 goto out;
29863 -@@ -1534,6 +1549,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
29864 +@@ -1528,6 +1543,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
29865 if (sock) {
29866 err = move_addr_to_kernel(umyaddr, addrlen, &address);
29867 if (err >= 0) {
29868 @@ -81693,7 +82770,7 @@ index d92c490..b4bc863 100644
29869 err = security_socket_bind(sock,
29870 (struct sockaddr *)&address,
29871 addrlen);
29872 -@@ -1542,6 +1565,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
29873 +@@ -1536,6 +1559,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
29874 (struct sockaddr *)
29875 &address, addrlen);
29876 }
29877 @@ -81701,7 +82778,7 @@ index d92c490..b4bc863 100644
29878 fput_light(sock->file, fput_needed);
29879 }
29880 return err;
29881 -@@ -1565,10 +1589,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
29882 +@@ -1559,10 +1583,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
29883 if ((unsigned int)backlog > somaxconn)
29884 backlog = somaxconn;
29885
29886 @@ -81722,7 +82799,7 @@ index d92c490..b4bc863 100644
29887 fput_light(sock->file, fput_needed);
29888 }
29889 return err;
29890 -@@ -1612,6 +1646,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
29891 +@@ -1606,6 +1640,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
29892 newsock->type = sock->type;
29893 newsock->ops = sock->ops;
29894
29895 @@ -81741,7 +82818,7 @@ index d92c490..b4bc863 100644
29896 /*
29897 * We don't need try_module_get here, as the listening socket (sock)
29898 * has the protocol module (sock->ops->owner) held.
29899 -@@ -1657,6 +1703,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
29900 +@@ -1651,6 +1697,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
29901 fd_install(newfd, newfile);
29902 err = newfd;
29903
29904 @@ -81750,7 +82827,7 @@ index d92c490..b4bc863 100644
29905 out_put:
29906 fput_light(sock->file, fput_needed);
29907 out:
29908 -@@ -1689,6 +1737,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
29909 +@@ -1683,6 +1731,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
29910 int, addrlen)
29911 {
29912 struct socket *sock;
29913 @@ -81758,7 +82835,7 @@ index d92c490..b4bc863 100644
29914 struct sockaddr_storage address;
29915 int err, fput_needed;
29916
29917 -@@ -1699,6 +1748,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
29918 +@@ -1693,6 +1742,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
29919 if (err < 0)
29920 goto out_put;
29921
29922 @@ -81776,7 +82853,7 @@ index d92c490..b4bc863 100644
29923 err =
29924 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
29925 if (err)
29926 -@@ -2053,7 +2113,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
29927 +@@ -2047,7 +2107,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
29928 * checking falls down on this.
29929 */
29930 if (copy_from_user(ctl_buf,
29931 @@ -81785,7 +82862,7 @@ index d92c490..b4bc863 100644
29932 ctl_len))
29933 goto out_freectl;
29934 msg_sys->msg_control = ctl_buf;
29935 -@@ -2221,7 +2281,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
29936 +@@ -2215,7 +2275,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
29937 * kernel msghdr to use the kernel address space)
29938 */
29939
29940 @@ -81794,7 +82871,7 @@ index d92c490..b4bc863 100644
29941 uaddr_len = COMPAT_NAMELEN(msg);
29942 if (MSG_CMSG_COMPAT & flags) {
29943 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
29944 -@@ -2844,7 +2904,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29945 +@@ -2838,7 +2898,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29946 }
29947
29948 ifr = compat_alloc_user_space(buf_size);
29949 @@ -81803,7 +82880,7 @@ index d92c490..b4bc863 100644
29950
29951 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
29952 return -EFAULT;
29953 -@@ -2868,12 +2928,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29954 +@@ -2862,12 +2922,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29955 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
29956
29957 if (copy_in_user(rxnfc, compat_rxnfc,
29958 @@ -81820,7 +82897,7 @@ index d92c490..b4bc863 100644
29959 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
29960 sizeof(rxnfc->rule_cnt)))
29961 return -EFAULT;
29962 -@@ -2885,12 +2945,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29963 +@@ -2879,12 +2939,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
29964
29965 if (convert_out) {
29966 if (copy_in_user(compat_rxnfc, rxnfc,
29967 @@ -81837,7 +82914,7 @@ index d92c490..b4bc863 100644
29968 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
29969 sizeof(rxnfc->rule_cnt)))
29970 return -EFAULT;
29971 -@@ -2960,7 +3020,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
29972 +@@ -2954,7 +3014,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
29973 old_fs = get_fs();
29974 set_fs(KERNEL_DS);
29975 err = dev_ioctl(net, cmd,
29976 @@ -81846,7 +82923,7 @@ index d92c490..b4bc863 100644
29977 set_fs(old_fs);
29978
29979 return err;
29980 -@@ -3069,7 +3129,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
29981 +@@ -3063,7 +3123,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
29982
29983 old_fs = get_fs();
29984 set_fs(KERNEL_DS);
29985 @@ -81855,7 +82932,7 @@ index d92c490..b4bc863 100644
29986 set_fs(old_fs);
29987
29988 if (cmd == SIOCGIFMAP && !err) {
29989 -@@ -3174,7 +3234,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
29990 +@@ -3168,7 +3228,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
29991 ret |= __get_user(rtdev, &(ur4->rt_dev));
29992 if (rtdev) {
29993 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
29994 @@ -81864,7 +82941,7 @@ index d92c490..b4bc863 100644
29995 devname[15] = 0;
29996 } else
29997 r4.rt_dev = NULL;
29998 -@@ -3400,8 +3460,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
29999 +@@ -3394,8 +3454,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
30000 int __user *uoptlen;
30001 int err;
30002
30003 @@ -81875,7 +82952,7 @@ index d92c490..b4bc863 100644
30004
30005 set_fs(KERNEL_DS);
30006 if (level == SOL_SOCKET)
30007 -@@ -3421,7 +3481,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
30008 +@@ -3415,7 +3475,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
30009 char __user *uoptval;
30010 int err;
30011
30012 @@ -81885,10 +82962,10 @@ index d92c490..b4bc863 100644
30013 set_fs(KERNEL_DS);
30014 if (level == SOL_SOCKET)
30015 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
30016 -index 7865b44..174662e 100644
30017 +index fb20f25..e3ba316 100644
30018 --- a/net/sunrpc/sched.c
30019 +++ b/net/sunrpc/sched.c
30020 -@@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
30021 +@@ -259,9 +259,9 @@ static int rpc_wait_bit_killable(void *word)
30022 #ifdef RPC_DEBUG
30023 static void rpc_task_set_debuginfo(struct rpc_task *task)
30024 {
30025 @@ -82012,7 +83089,7 @@ index 8343737..677025e 100644
30026 .proc_handler = read_reset_stat,
30027 },
30028 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
30029 -index 41cb63b..c4a1489 100644
30030 +index 0ce7552..d074459 100644
30031 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
30032 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
30033 @@ -501,7 +501,7 @@ next_sge:
30034 @@ -82043,7 +83120,7 @@ index 41cb63b..c4a1489 100644
30035 /* Build up the XDR from the receive buffers. */
30036 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
30037 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
30038 -index 42eb7ba..c887c45 100644
30039 +index c1d124d..acfc59e 100644
30040 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
30041 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
30042 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
30043 @@ -82105,23 +83182,23 @@ index 62e4f9b..dd3f2d7 100644
30044 /* See if we can opportunistically reap SQ WR to make room */
30045 sq_cq_reap(xprt);
30046 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
30047 -index e3a6e37..be2ea77 100644
30048 +index 9bc6db0..47ac8c0 100644
30049 --- a/net/sysctl_net.c
30050 +++ b/net/sysctl_net.c
30051 -@@ -43,7 +43,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
30052 - struct ctl_table *table)
30053 - {
30054 +@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
30055 + kgid_t root_gid = make_kgid(net->user_ns, 0);
30056 +
30057 /* Allow network administrator to have same access as root. */
30058 -- if (capable(CAP_NET_ADMIN)) {
30059 -+ if (capable_nolog(CAP_NET_ADMIN)) {
30060 +- if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
30061 ++ if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
30062 + uid_eq(root_uid, current_uid())) {
30063 int mode = (table->mode >> 6) & 7;
30064 return (mode << 6) | (mode << 3) | mode;
30065 - }
30066 diff --git a/net/tipc/link.c b/net/tipc/link.c
30067 -index a79c755..eca357d 100644
30068 +index daa6080..02d357f 100644
30069 --- a/net/tipc/link.c
30070 +++ b/net/tipc/link.c
30071 -@@ -1169,7 +1169,7 @@ static int link_send_sections_long(struct tipc_port *sender,
30072 +@@ -1201,7 +1201,7 @@ static int link_send_sections_long(struct tipc_port *sender,
30073 struct tipc_msg fragm_hdr;
30074 struct sk_buff *buf, *buf_chain, *prev;
30075 u32 fragm_crs, fragm_rest, hsz, sect_rest;
30076 @@ -82130,7 +83207,7 @@ index a79c755..eca357d 100644
30077 int curr_sect;
30078 u32 fragm_no;
30079
30080 -@@ -1210,7 +1210,7 @@ again:
30081 +@@ -1242,7 +1242,7 @@ again:
30082
30083 if (!sect_rest) {
30084 sect_rest = msg_sect[++curr_sect].iov_len;
30085 @@ -82139,7 +83216,7 @@ index a79c755..eca357d 100644
30086 }
30087
30088 if (sect_rest < fragm_rest)
30089 -@@ -1229,7 +1229,7 @@ error:
30090 +@@ -1261,7 +1261,7 @@ error:
30091 }
30092 } else
30093 skb_copy_to_linear_data_offset(buf, fragm_crs,
30094 @@ -82162,7 +83239,7 @@ index f2db8a8..9245aa4 100644
30095 pos += msg_sect[cnt].iov_len;
30096 }
30097 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
30098 -index 0f7d0d0..00f89bf 100644
30099 +index 6b42d47..2ac24d5 100644
30100 --- a/net/tipc/subscr.c
30101 +++ b/net/tipc/subscr.c
30102 @@ -96,7 +96,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
30103 @@ -82262,7 +83339,7 @@ index c8717c1..08539f5 100644
30104
30105 iwp->length += essid_compat;
30106 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
30107 -index 41eabc4..8d4e6d6 100644
30108 +index 07c5857..edc6dc0 100644
30109 --- a/net/xfrm/xfrm_policy.c
30110 +++ b/net/xfrm/xfrm_policy.c
30111 @@ -317,7 +317,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
30112 @@ -82499,12 +83576,12 @@ index cb1f50c..cef2a7c 100644
30113 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
30114 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
30115 new file mode 100644
30116 -index 0000000..008ac1a
30117 +index 0000000..5e0222d
30118 --- /dev/null
30119 +++ b/scripts/gcc-plugin.sh
30120 @@ -0,0 +1,17 @@
30121 +#!/bin/bash
30122 -+plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
30123 ++plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
30124 +#include "gcc-plugin.h"
30125 +#include "tree.h"
30126 +#include "tm.h"
30127 @@ -82601,10 +83678,10 @@ index df4fc23..0ea719d 100644
30128 sprintf(alias, "dmi*");
30129
30130 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
30131 -index 0d93856..e828363 100644
30132 +index ff36c50..7ab4fa9 100644
30133 --- a/scripts/mod/modpost.c
30134 +++ b/scripts/mod/modpost.c
30135 -@@ -933,6 +933,7 @@ enum mismatch {
30136 +@@ -929,6 +929,7 @@ enum mismatch {
30137 ANY_INIT_TO_ANY_EXIT,
30138 ANY_EXIT_TO_ANY_INIT,
30139 EXPORT_TO_INIT_EXIT,
30140 @@ -82612,7 +83689,7 @@ index 0d93856..e828363 100644
30141 };
30142
30143 struct sectioncheck {
30144 -@@ -1047,6 +1048,12 @@ const struct sectioncheck sectioncheck[] = {
30145 +@@ -1043,6 +1044,12 @@ const struct sectioncheck sectioncheck[] = {
30146 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
30147 .mismatch = EXPORT_TO_INIT_EXIT,
30148 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
30149 @@ -82625,7 +83702,7 @@ index 0d93856..e828363 100644
30150 }
30151 };
30152
30153 -@@ -1169,10 +1176,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
30154 +@@ -1165,10 +1172,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
30155 continue;
30156 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
30157 continue;
30158 @@ -82638,7 +83715,7 @@ index 0d93856..e828363 100644
30159 if (d < 0)
30160 d = addr - sym->st_value;
30161 if (d < distance) {
30162 -@@ -1451,6 +1458,14 @@ static void report_sec_mismatch(const char *modname,
30163 +@@ -1447,6 +1454,14 @@ static void report_sec_mismatch(const char *modname,
30164 tosym, prl_to, prl_to, tosym);
30165 free(prl_to);
30166 break;
30167 @@ -82653,7 +83730,7 @@ index 0d93856..e828363 100644
30168 }
30169 fprintf(stderr, "\n");
30170 }
30171 -@@ -1685,7 +1700,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
30172 +@@ -1681,7 +1696,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
30173 static void check_sec_ref(struct module *mod, const char *modname,
30174 struct elf_info *elf)
30175 {
30176 @@ -82662,7 +83739,7 @@ index 0d93856..e828363 100644
30177 Elf_Shdr *sechdrs = elf->sechdrs;
30178
30179 /* Walk through all sections */
30180 -@@ -1783,7 +1798,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
30181 +@@ -1779,7 +1794,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
30182 va_end(ap);
30183 }
30184
30185 @@ -82671,7 +83748,7 @@ index 0d93856..e828363 100644
30186 {
30187 if (buf->size - buf->pos < len) {
30188 buf->size += len + SZ;
30189 -@@ -2001,7 +2016,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
30190 +@@ -1997,7 +2012,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
30191 if (fstat(fileno(file), &st) < 0)
30192 goto close_write;
30193
30194 @@ -82717,10 +83794,10 @@ index 9dfcd6d..099068e 100644
30195 filename, strerror(errno));
30196 goto out;
30197 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
30198 -index 5c11312..72742b5 100644
30199 +index 68bb4ef..2f419e1 100644
30200 --- a/scripts/pnmtologo.c
30201 +++ b/scripts/pnmtologo.c
30202 -@@ -237,14 +237,14 @@ static void write_header(void)
30203 +@@ -244,14 +244,14 @@ static void write_header(void)
30204 fprintf(out, " * Linux logo %s\n", logoname);
30205 fputs(" */\n\n", out);
30206 fputs("#include <linux/linux_logo.h>\n\n", out);
30207 @@ -82737,7 +83814,7 @@ index 5c11312..72742b5 100644
30208 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
30209 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
30210 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
30211 -@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
30212 +@@ -381,7 +381,7 @@ static void write_logo_clut224(void)
30213 fputs("\n};\n\n", out);
30214
30215 /* write logo clut */
30216 @@ -82747,7 +83824,7 @@ index 5c11312..72742b5 100644
30217 write_hex_cnt = 0;
30218 for (i = 0; i < logo_clutsize; i++) {
30219 diff --git a/security/Kconfig b/security/Kconfig
30220 -index e9c6ac7..01c698c 100644
30221 +index e9c6ac7..ab9590d 100644
30222 --- a/security/Kconfig
30223 +++ b/security/Kconfig
30224 @@ -4,6 +4,902 @@
30225 @@ -83328,7 +84405,7 @@ index e9c6ac7..01c698c 100644
30226 +config PAX_KERNEXEC
30227 + bool "Enforce non-executable kernel pages"
30228 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
30229 -+ depends on (X86 || ARM_LPAE) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
30230 ++ depends on ((X86 && (!X86_32 || X86_WP_WORKS_OK)) || (ARM && (CPU_V6 || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
30231 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
30232 + select PAX_KERNEXEC_PLUGIN if X86_64
30233 + help
30234 @@ -83530,8 +84607,8 @@ index e9c6ac7..01c698c 100644
30235 +
30236 +config PAX_MEMORY_UDEREF
30237 + bool "Prevent invalid userland pointer dereference"
30238 -+ default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
30239 -+ depends on X86 && !UML_X86 && !XEN
30240 ++ default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
30241 ++ depends on (X86 || (ARM && (CPU_V6 || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
30242 + select PAX_PER_CPU_PGD if X86_64
30243 + help
30244 + By saying Y here the kernel will be prevented from dereferencing
30245 @@ -83676,10 +84753,10 @@ index 8c2a7f6..b133ac9 100644
30246
30247 .ptrace_access_check = apparmor_ptrace_access_check,
30248 diff --git a/security/commoncap.c b/security/commoncap.c
30249 -index 6dbae46..d5611fd 100644
30250 +index 7ee08c7..8d1a9d6 100644
30251 --- a/security/commoncap.c
30252 +++ b/security/commoncap.c
30253 -@@ -415,6 +415,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
30254 +@@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
30255 return 0;
30256 }
30257
30258 @@ -83712,7 +84789,7 @@ index 6dbae46..d5611fd 100644
30259 /*
30260 * Attempt to get the on-exec apply capability sets for an executable file from
30261 * its xattrs and, if present, apply them to the proposed credentials being
30262 -@@ -583,6 +609,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
30263 +@@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
30264 const struct cred *cred = current_cred();
30265 kuid_t root_uid = make_kuid(cred->user_ns, 0);
30266
30267 @@ -83723,7 +84800,7 @@ index 6dbae46..d5611fd 100644
30268 if (bprm->cap_effective)
30269 return 1;
30270 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
30271 -index 6ee8826..6350060 100644
30272 +index 079a85d..12e93f8 100644
30273 --- a/security/integrity/ima/ima.h
30274 +++ b/security/integrity/ima/ima.h
30275 @@ -96,8 +96,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
30276 @@ -83738,7 +84815,7 @@ index 6ee8826..6350060 100644
30277 };
30278 extern struct ima_h_table ima_htable;
30279 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
30280 -index b356884..fd9676e 100644
30281 +index 0cea3db..2f0ef77 100644
30282 --- a/security/integrity/ima/ima_api.c
30283 +++ b/security/integrity/ima/ima_api.c
30284 @@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
30285 @@ -83796,7 +84873,7 @@ index 1c26176..64a1ba2 100644
30286 if (iov != iovstack)
30287 kfree(iov);
30288 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
30289 -index 5d34b4e..2456674 100644
30290 +index 4b5c948..2054dc1 100644
30291 --- a/security/keys/keyctl.c
30292 +++ b/security/keys/keyctl.c
30293 @@ -986,7 +986,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
30294 @@ -83832,11 +84909,11 @@ index 5d34b4e..2456674 100644
30295
30296 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
30297 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
30298 -
30299 + err:
30300 if (iov != iovstack)
30301 kfree(iov);
30302 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
30303 -index 6e42df1..aba52bd 100644
30304 +index 6ece7f2..ecdb55c 100644
30305 --- a/security/keys/keyring.c
30306 +++ b/security/keys/keyring.c
30307 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
30308 @@ -83880,7 +84957,7 @@ index f728728..6457a0c 100644
30309
30310 /*
30311 diff --git a/security/security.c b/security/security.c
30312 -index 8dcd4ae..1124de7 100644
30313 +index 7b88c6a..1e3ea8f 100644
30314 --- a/security/security.c
30315 +++ b/security/security.c
30316 @@ -20,6 +20,7 @@
30317 @@ -83913,7 +84990,7 @@ index 8dcd4ae..1124de7 100644
30318
30319 /* Save user chosen LSM */
30320 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
30321 -index 61a5336..27215d8 100644
30322 +index ef26e96..642fb78 100644
30323 --- a/security/selinux/hooks.c
30324 +++ b/security/selinux/hooks.c
30325 @@ -95,8 +95,6 @@
30326 @@ -83925,7 +85002,7 @@ index 61a5336..27215d8 100644
30327 /* SECMARK reference count */
30328 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
30329
30330 -@@ -5476,7 +5474,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
30331 +@@ -5501,7 +5499,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
30332
30333 #endif
30334
30335 @@ -84105,7 +85182,7 @@ index 4c1cc51..16040040 100644
30336 }
30337 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
30338 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
30339 -index 91cdf943..4085161 100644
30340 +index af49721..e85058e 100644
30341 --- a/sound/core/pcm_compat.c
30342 +++ b/sound/core/pcm_compat.c
30343 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
30344 @@ -84118,10 +85195,10 @@ index 91cdf943..4085161 100644
30345 if (err < 0)
30346 return err;
30347 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
30348 -index f9ddecf..e27404d 100644
30349 +index 09b4286..8620fac 100644
30350 --- a/sound/core/pcm_native.c
30351 +++ b/sound/core/pcm_native.c
30352 -@@ -2804,11 +2804,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
30353 +@@ -2806,11 +2806,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
30354 switch (substream->stream) {
30355 case SNDRV_PCM_STREAM_PLAYBACK:
30356 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
30357 @@ -84136,7 +85213,7 @@ index f9ddecf..e27404d 100644
30358 default:
30359 result = -EINVAL;
30360 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
30361 -index 60e8fc1..786abcb 100644
30362 +index 040c60e..989a19a 100644
30363 --- a/sound/core/seq/seq_device.c
30364 +++ b/sound/core/seq/seq_device.c
30365 @@ -64,7 +64,7 @@ struct ops_list {
30366 @@ -84146,7 +85223,7 @@ index 60e8fc1..786abcb 100644
30367 - struct snd_seq_dev_ops ops;
30368 + struct snd_seq_dev_ops *ops;
30369
30370 - /* registred devices */
30371 + /* registered devices */
30372 struct list_head dev_list; /* list of devices */
30373 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
30374
30375 @@ -84176,7 +85253,7 @@ index 60e8fc1..786abcb 100644
30376 dev->driver_data = NULL;
30377 ops->num_init_devices--;
30378 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
30379 -index 2d5514b..3afae9c 100644
30380 +index 4e0dd22..7a1f32c 100644
30381 --- a/sound/drivers/mts64.c
30382 +++ b/sound/drivers/mts64.c
30383 @@ -29,6 +29,7 @@
30384 @@ -84249,7 +85326,7 @@ index b953fb4..1999c01 100644
30385 int timeout = 10;
30386 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
30387 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
30388 -index 8364855..59f2e2b 100644
30389 +index 991018d..8984740 100644
30390 --- a/sound/drivers/portman2x4.c
30391 +++ b/sound/drivers/portman2x4.c
30392 @@ -48,6 +48,7 @@
30393 @@ -84348,11 +85425,44 @@ index d428ffe..751ef78 100644
30394 break;
30395 default:
30396 return -EINVAL;
30397 +diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
30398 +index 844a555..985ab83 100644
30399 +--- a/sound/firewire/scs1x.c
30400 ++++ b/sound/firewire/scs1x.c
30401 +@@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
30402 + {
30403 + struct scs *scs = stream->rmidi->private_data;
30404 +
30405 +- ACCESS_ONCE(scs->output) = up ? stream : NULL;
30406 ++ ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
30407 + if (up) {
30408 + scs->output_idle = false;
30409 + tasklet_schedule(&scs->tasklet);
30410 +@@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
30411 + {
30412 + struct scs *scs = stream->rmidi->private_data;
30413 +
30414 +- ACCESS_ONCE(scs->input) = up ? stream : NULL;
30415 ++ ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
30416 + }
30417 +
30418 + static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
30419 +@@ -457,8 +457,8 @@ static int scs_remove(struct device *dev)
30420 +
30421 + snd_card_disconnect(scs->card);
30422 +
30423 +- ACCESS_ONCE(scs->output) = NULL;
30424 +- ACCESS_ONCE(scs->input) = NULL;
30425 ++ ACCESS_ONCE_RW(scs->output) = NULL;
30426 ++ ACCESS_ONCE_RW(scs->input) = NULL;
30427 +
30428 + wait_event(scs->idle_wait, scs->output_idle);
30429 +
30430 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
30431 -index b2b3c01..e1c1e1f 100644
30432 +index 048439a..3be9f6f 100644
30433 --- a/sound/oss/sb_audio.c
30434 +++ b/sound/oss/sb_audio.c
30435 -@@ -903,7 +903,7 @@ sb16_copy_from_user(int dev,
30436 +@@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
30437 buf16 = (signed short *)(localbuf + localoffs);
30438 while (c)
30439 {
30440 @@ -84413,10 +85523,10 @@ index 4631a23..001ae57 100644
30441 const struct firmware *dsp_microcode;
30442 const struct firmware *controller_microcode;
30443 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
30444 -index 3a6f03f..bc5c86c 100644
30445 +index 22056c5..25d3244 100644
30446 --- a/sound/pci/ymfpci/ymfpci_main.c
30447 +++ b/sound/pci/ymfpci/ymfpci_main.c
30448 -@@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
30449 +@@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
30450 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
30451 break;
30452 }
30453 @@ -84427,7 +85537,7 @@ index 3a6f03f..bc5c86c 100644
30454 wake_up(&chip->interrupt_sleep);
30455 }
30456 __end:
30457 -@@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
30458 +@@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
30459 continue;
30460 init_waitqueue_entry(&wait, current);
30461 add_wait_queue(&chip->interrupt_sleep, &wait);
30462 @@ -84436,7 +85546,7 @@ index 3a6f03f..bc5c86c 100644
30463 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
30464 remove_wait_queue(&chip->interrupt_sleep, &wait);
30465 }
30466 -@@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
30467 +@@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
30468 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
30469 spin_unlock(&chip->reg_lock);
30470
30471 @@ -84447,7 +85557,7 @@ index 3a6f03f..bc5c86c 100644
30472 wake_up(&chip->interrupt_sleep);
30473 }
30474 }
30475 -@@ -2420,7 +2420,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
30476 +@@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
30477 spin_lock_init(&chip->reg_lock);
30478 spin_lock_init(&chip->voice_lock);
30479 init_waitqueue_head(&chip->interrupt_sleep);
30480 @@ -84848,7 +85958,7 @@ index 0000000..414fe5e
30481 +}
30482 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
30483 new file mode 100644
30484 -index 0000000..c415c9d
30485 +index 0000000..8bd6f995
30486 --- /dev/null
30487 +++ b/tools/gcc/constify_plugin.c
30488 @@ -0,0 +1,359 @@
30489 @@ -84901,7 +86011,7 @@ index 0000000..c415c9d
30490 + return strip_array_types(TREE_TYPE(field));
30491 +}
30492 +
30493 -+static bool walk_struct(tree node);
30494 ++static bool walk_struct(tree node, bool all);
30495 +static void deconstify_tree(tree node);
30496 +
30497 +static void deconstify_type(tree type)
30498 @@ -84915,7 +86025,7 @@ index 0000000..c415c9d
30499 + continue;
30500 + if (!TYPE_READONLY(fieldtype))
30501 + continue;
30502 -+ if (!walk_struct(fieldtype))
30503 ++ if (!walk_struct(fieldtype, true))
30504 + continue;
30505 +
30506 + deconstify_tree(field);
30507 @@ -85063,7 +86173,7 @@ index 0000000..c415c9d
30508 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
30509 +}
30510 +
30511 -+static bool walk_struct(tree node)
30512 ++static bool walk_struct(tree node, bool all)
30513 +{
30514 + tree field;
30515 +
30516 @@ -85086,9 +86196,9 @@ index 0000000..c415c9d
30517 + if (node == type)
30518 + return false;
30519 + if (code == RECORD_TYPE || code == UNION_TYPE) {
30520 -+ if (!(walk_struct(type)))
30521 ++ if (!(walk_struct(type, all)))
30522 + return false;
30523 -+ } else if (!is_fptr(field) && !TREE_READONLY(field))
30524 ++ } else if (!is_fptr(field) && (!all || !TREE_READONLY(field)))
30525 + return false;
30526 + }
30527 + return true;
30528 @@ -85104,7 +86214,7 @@ index 0000000..c415c9d
30529 + if (TYPE_READONLY(type))
30530 + return;
30531 +
30532 -+ if (walk_struct(type))
30533 ++ if (walk_struct(type, true))
30534 + constify_type(type);
30535 + else
30536 + deconstify_type(type);
30537 @@ -85145,7 +86255,7 @@ index 0000000..c415c9d
30538 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
30539 + continue;
30540 +
30541 -+ if (walk_struct(type)) {
30542 ++ if (walk_struct(type, false)) {
30543 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
30544 + ret = 1;
30545 + }
30546 @@ -92297,7 +93407,7 @@ index 6789d78..4afd019e 100644
30547 +
30548 #endif
30549 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
30550 -index 6e8fa7e..f0ec393 100644
30551 +index 1cd693a..f4a7b20 100644
30552 --- a/virt/kvm/kvm_main.c
30553 +++ b/virt/kvm/kvm_main.c
30554 @@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
30555 @@ -92320,7 +93430,7 @@ index 6e8fa7e..f0ec393 100644
30556
30557 struct dentry *kvm_debugfs_dir;
30558
30559 -@@ -726,7 +731,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
30560 +@@ -731,7 +736,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
30561 /* We can read the guest memory with __xxx_user() later on. */
30562 if (user_alloc &&
30563 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
30564 @@ -92329,7 +93439,7 @@ index 6e8fa7e..f0ec393 100644
30565 (void __user *)(unsigned long)mem->userspace_addr,
30566 mem->memory_size)))
30567 goto out;
30568 -@@ -1778,7 +1783,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
30569 +@@ -1783,7 +1788,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
30570 return 0;
30571 }
30572
30573 @@ -92338,7 +93448,7 @@ index 6e8fa7e..f0ec393 100644
30574 .release = kvm_vcpu_release,
30575 .unlocked_ioctl = kvm_vcpu_ioctl,
30576 #ifdef CONFIG_COMPAT
30577 -@@ -2326,7 +2331,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
30578 +@@ -2304,7 +2309,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
30579 return 0;
30580 }
30581
30582 @@ -92347,7 +93457,7 @@ index 6e8fa7e..f0ec393 100644
30583 .release = kvm_vm_release,
30584 .unlocked_ioctl = kvm_vm_ioctl,
30585 #ifdef CONFIG_COMPAT
30586 -@@ -2424,7 +2429,7 @@ out:
30587 +@@ -2402,7 +2407,7 @@ out:
30588 return r;
30589 }
30590
30591 @@ -92356,7 +93466,7 @@ index 6e8fa7e..f0ec393 100644
30592 .unlocked_ioctl = kvm_dev_ioctl,
30593 .compat_ioctl = kvm_dev_ioctl,
30594 .llseek = noop_llseek,
30595 -@@ -2450,7 +2455,7 @@ static void hardware_enable_nolock(void *junk)
30596 +@@ -2428,7 +2433,7 @@ static void hardware_enable_nolock(void *junk)
30597
30598 if (r) {
30599 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
30600 @@ -92365,7 +93475,7 @@ index 6e8fa7e..f0ec393 100644
30601 printk(KERN_INFO "kvm: enabling virtualization on "
30602 "CPU%d failed\n", cpu);
30603 }
30604 -@@ -2504,10 +2509,10 @@ static int hardware_enable_all(void)
30605 +@@ -2482,10 +2487,10 @@ static int hardware_enable_all(void)
30606
30607 kvm_usage_count++;
30608 if (kvm_usage_count == 1) {
30609 @@ -92378,7 +93488,7 @@ index 6e8fa7e..f0ec393 100644
30610 hardware_disable_all_nolock();
30611 r = -EBUSY;
30612 }
30613 -@@ -2865,7 +2870,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
30614 +@@ -2843,7 +2848,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
30615 kvm_arch_vcpu_put(vcpu);
30616 }
30617
30618 @@ -92387,7 +93497,7 @@ index 6e8fa7e..f0ec393 100644
30619 struct module *module)
30620 {
30621 int r;
30622 -@@ -2901,7 +2906,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30623 +@@ -2879,7 +2884,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30624 if (!vcpu_align)
30625 vcpu_align = __alignof__(struct kvm_vcpu);
30626 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
30627 @@ -92396,7 +93506,7 @@ index 6e8fa7e..f0ec393 100644
30628 if (!kvm_vcpu_cache) {
30629 r = -ENOMEM;
30630 goto out_free_3;
30631 -@@ -2911,9 +2916,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30632 +@@ -2889,9 +2894,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30633 if (r)
30634 goto out_free;
30635
30636 @@ -92408,7 +93518,7 @@ index 6e8fa7e..f0ec393 100644
30637
30638 r = misc_register(&kvm_dev);
30639 if (r) {
30640 -@@ -2923,9 +2930,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30641 +@@ -2901,9 +2908,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
30642
30643 register_syscore_ops(&kvm_syscore_ops);
30644
30645
30646 diff --git a/3.7.9/4425_grsec_remove_EI_PAX.patch b/3.8.0/4425_grsec_remove_EI_PAX.patch
30647 similarity index 100%
30648 rename from 3.7.9/4425_grsec_remove_EI_PAX.patch
30649 rename to 3.8.0/4425_grsec_remove_EI_PAX.patch
30650
30651 diff --git a/3.7.9/4430_grsec-remove-localversion-grsec.patch b/3.8.0/4430_grsec-remove-localversion-grsec.patch
30652 similarity index 100%
30653 rename from 3.7.9/4430_grsec-remove-localversion-grsec.patch
30654 rename to 3.8.0/4430_grsec-remove-localversion-grsec.patch
30655
30656 diff --git a/3.7.9/4435_grsec-mute-warnings.patch b/3.8.0/4435_grsec-mute-warnings.patch
30657 similarity index 100%
30658 rename from 3.7.9/4435_grsec-mute-warnings.patch
30659 rename to 3.8.0/4435_grsec-mute-warnings.patch
30660
30661 diff --git a/3.7.9/4440_grsec-remove-protected-paths.patch b/3.8.0/4440_grsec-remove-protected-paths.patch
30662 similarity index 100%
30663 rename from 3.7.9/4440_grsec-remove-protected-paths.patch
30664 rename to 3.8.0/4440_grsec-remove-protected-paths.patch
30665
30666 diff --git a/3.7.9/4450_grsec-kconfig-default-gids.patch b/3.8.0/4450_grsec-kconfig-default-gids.patch
30667 similarity index 100%
30668 rename from 3.7.9/4450_grsec-kconfig-default-gids.patch
30669 rename to 3.8.0/4450_grsec-kconfig-default-gids.patch
30670
30671 diff --git a/3.7.9/4465_selinux-avc_audit-log-curr_ip.patch b/3.8.0/4465_selinux-avc_audit-log-curr_ip.patch
30672 similarity index 100%
30673 rename from 3.7.9/4465_selinux-avc_audit-log-curr_ip.patch
30674 rename to 3.8.0/4465_selinux-avc_audit-log-curr_ip.patch
30675
30676 diff --git a/3.7.9/4470_disable-compat_vdso.patch b/3.8.0/4470_disable-compat_vdso.patch
30677 similarity index 100%
30678 rename from 3.7.9/4470_disable-compat_vdso.patch
30679 rename to 3.8.0/4470_disable-compat_vdso.patch