Gentoo Archives: gentoo-commits

From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.4.6/, 3.2.24/, 2.6.32/, 3.2.23/
Date: Mon, 30 Jul 2012 00:15:19
Message-Id: 1343607282.186635ac6f72fb55b072ad23c93c102dd65e50d6.blueness@gentoo
1 commit: 186635ac6f72fb55b072ad23c93c102dd65e50d6
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Mon Jul 30 00:14:42 2012 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Mon Jul 30 00:14:42 2012 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=186635ac
7
8 Grsec/PaX: 2.9.1-{2.6.32.59,3.2.24,3.4.6}-201207281946
9
10 ---
11 2.6.32/0000_README | 2 +-
12 ..._grsecurity-2.9.1-2.6.32.59-201207281944.patch} | 1612 +++++--
13 2.6.32/4450_grsec-kconfig-default-gids.patch | 2 +-
14 {3.2.23 => 3.2.24}/0000_README | 6 +-
15 {3.2.23 => 3.2.24}/1021_linux-3.2.22.patch | 0
16 {3.2.23 => 3.2.24}/1022_linux-3.2.23.patch | 0
17 3.2.24/1023_linux-3.2.24.patch | 4684 ++++++++++++++++++++
18 ...4420_grsecurity-2.9.1-3.2.24-201207281946.patch | 1872 +++++----
19 .../4430_grsec-remove-localversion-grsec.patch | 0
20 {3.2.23 => 3.2.24}/4435_grsec-mute-warnings.patch | 0
21 .../4440_grsec-remove-protected-paths.patch | 0
22 .../4450_grsec-kconfig-default-gids.patch | 0
23 .../4465_selinux-avc_audit-log-curr_ip.patch | 0
24 {3.2.23 => 3.2.24}/4470_disable-compat_vdso.patch | 0
25 3.4.6/0000_README | 2 +-
26 ...4420_grsecurity-2.9.1-3.4.6-201207281946.patch} | 294 +-
27 16 files changed, 7212 insertions(+), 1262 deletions(-)
28
29 diff --git a/2.6.32/0000_README b/2.6.32/0000_README
30 index 9b8dd9a..d4f6601 100644
31 --- a/2.6.32/0000_README
32 +++ b/2.6.32/0000_README
33 @@ -30,7 +30,7 @@ Patch: 1058_linux-2.6.32.59.patch
34 From: http://www.kernel.org
35 Desc: Linux 2.6.32.59
36
37 -Patch: 4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch
38 +Patch: 4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
39 From: http://www.grsecurity.net
40 Desc: hardened-sources base patch from upstream grsecurity
41
42
43 diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
44 similarity index 98%
45 rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch
46 rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
47 index adbc4d5..227df5e 100644
48 --- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207242236.patch
49 +++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.59-201207281944.patch
50 @@ -1,5 +1,5 @@
51 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
52 -index e1efc40..e7a5667 100644
53 +index e1efc40..3569a2f 100644
54 --- a/Documentation/dontdiff
55 +++ b/Documentation/dontdiff
56 @@ -1,15 +1,20 @@
57 @@ -23,7 +23,7 @@ index e1efc40..e7a5667 100644
58 *.grep
59 *.grp
60 *.gz
61 -@@ -38,8 +43,10 @@
62 +@@ -38,22 +43,30 @@
63 *.tab.h
64 *.tex
65 *.ver
66 @@ -34,7 +34,11 @@ index e1efc40..e7a5667 100644
67 *_vga16.c
68 *~
69 *.9
70 -@@ -49,11 +56,16 @@
71 + *.9.gz
72 +-.*
73 ++.[^g]*
74 ++.gen*
75 + .mm
76 53c700_d.h
77 CVS
78 ChangeSet
79 @@ -51,7 +55,7 @@ index e1efc40..e7a5667 100644
80 SCCS
81 System.map*
82 TAGS
83 -@@ -62,6 +74,7 @@ aic7*reg_print.c*
84 +@@ -62,6 +75,7 @@ aic7*reg_print.c*
85 aic7*seq.h*
86 aicasm
87 aicdb.h*
88 @@ -59,7 +63,7 @@ index e1efc40..e7a5667 100644
89 asm-offsets.h
90 asm_offsets.h
91 autoconf.h*
92 -@@ -76,7 +89,11 @@ btfixupprep
93 +@@ -76,7 +90,11 @@ btfixupprep
94 build
95 bvmlinux
96 bzImage*
97 @@ -71,7 +75,7 @@ index e1efc40..e7a5667 100644
98 comp*.log
99 compile.h*
100 conf
101 -@@ -84,6 +101,8 @@ config
102 +@@ -84,6 +102,8 @@ config
103 config-*
104 config_data.h*
105 config_data.gz*
106 @@ -80,7 +84,7 @@ index e1efc40..e7a5667 100644
107 conmakehash
108 consolemap_deftbl.c*
109 cpustr.h
110 -@@ -97,19 +116,23 @@ elfconfig.h*
111 +@@ -97,19 +117,23 @@ elfconfig.h*
112 fixdep
113 fore200e_mkfirm
114 fore200e_pca_fw.c*
115 @@ -105,7 +109,7 @@ index e1efc40..e7a5667 100644
116 keywords.c
117 ksym.c*
118 ksym.h*
119 -@@ -117,6 +140,7 @@ kxgettext
120 +@@ -117,6 +141,7 @@ kxgettext
121 lkc_defs.h
122 lex.c
123 lex.*.c
124 @@ -113,7 +117,7 @@ index e1efc40..e7a5667 100644
125 logo_*.c
126 logo_*_clut224.c
127 logo_*_mono.c
128 -@@ -127,13 +151,16 @@ machtypes.h
129 +@@ -127,13 +152,16 @@ machtypes.h
130 map
131 maui_boot.h
132 mconf
133 @@ -130,7 +134,7 @@ index e1efc40..e7a5667 100644
134 mktables
135 mktree
136 modpost
137 -@@ -149,6 +176,7 @@ patches*
138 +@@ -149,6 +177,7 @@ patches*
139 pca200e.bin
140 pca200e_ecd.bin2
141 piggy.gz
142 @@ -138,7 +142,7 @@ index e1efc40..e7a5667 100644
143 piggyback
144 pnmtologo
145 ppc_defs.h*
146 -@@ -157,12 +185,16 @@ qconf
147 +@@ -157,12 +186,16 @@ qconf
148 raid6altivec*.c
149 raid6int*.c
150 raid6tables.c
151 @@ -155,7 +159,7 @@ index e1efc40..e7a5667 100644
152 sm_tbl*
153 split-include
154 syscalltab.h
155 -@@ -171,6 +203,7 @@ tftpboot.img
156 +@@ -171,6 +204,7 @@ tftpboot.img
157 timeconst.h
158 times.h*
159 trix_boot.h
160 @@ -163,7 +167,7 @@ index e1efc40..e7a5667 100644
161 utsrelease.h*
162 vdso-syms.lds
163 vdso.lds
164 -@@ -186,14 +219,20 @@ version.h*
165 +@@ -186,14 +220,20 @@ version.h*
166 vmlinux
167 vmlinux-*
168 vmlinux.aout
169 @@ -185,10 +189,23 @@ index e1efc40..e7a5667 100644
170 zconf.hash.c
171 +zoffset.h
172 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
173 -index c840e7d..ad11cac 100644
174 +index c840e7d..30f0efe 100644
175 --- a/Documentation/kernel-parameters.txt
176 +++ b/Documentation/kernel-parameters.txt
177 -@@ -1725,6 +1725,11 @@ and is between 256 and 4096 characters. It is defined in the file
178 +@@ -856,6 +856,12 @@ and is between 256 and 4096 characters. It is defined in the file
179 + If specified, z/VM IUCV HVC accepts connections
180 + from listed z/VM user IDs only.
181 +
182 ++ keep_bootcon [KNL]
183 ++ Do not unregister boot console at start. This is only
184 ++ useful for debugging when something happens in the window
185 ++ between unregistering the boot console and initializing
186 ++ the real console.
187 ++
188 + i2c_bus= [HW] Override the default board specific I2C bus speed
189 + or register an additional I2C bus that is not
190 + registered from board initialization code.
191 +@@ -1725,6 +1731,11 @@ and is between 256 and 4096 characters. It is defined in the file
192
193 noresidual [PPC] Don't use residual data on PReP machines.
194
195 @@ -200,7 +217,7 @@ index c840e7d..ad11cac 100644
196 noresume [SWSUSP] Disables resume and restores original swap
197 space.
198
199 -@@ -1837,6 +1842,13 @@ and is between 256 and 4096 characters. It is defined in the file
200 +@@ -1837,6 +1848,13 @@ and is between 256 and 4096 characters. It is defined in the file
201 the specified number of seconds. This is to be used if
202 your oopses keep scrolling off the screen.
203
204 @@ -234,7 +251,7 @@ index 613da5d..4fe3eda 100644
205 M: Liam Girdwood <lrg@××××××××××××.uk>
206 M: Mark Brown <broonie@×××××××××××××××××××××××.com>
207 diff --git a/Makefile b/Makefile
208 -index 3a9a721..b81a4d5 100644
209 +index 3a9a721..20e2d81 100644
210 --- a/Makefile
211 +++ b/Makefile
212 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
213 @@ -269,7 +286,7 @@ index 3a9a721..b81a4d5 100644
214 include/linux/version.h headers_% \
215 kernelrelease kernelversion
216
217 -@@ -526,6 +527,56 @@ else
218 +@@ -526,6 +527,60 @@ else
219 KBUILD_CFLAGS += -O2
220 endif
221
222 @@ -302,10 +319,14 @@ index 3a9a721..b81a4d5 100644
223 +ifdef CONFIG_PAX_SIZE_OVERFLOW
224 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
225 +endif
226 ++ifdef CONFIG_PAX_LATENT_ENTROPY
227 ++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
228 ++endif
229 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
230 -+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
231 ++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
232 ++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
233 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
234 -+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
235 ++export PLUGINCC CONSTIFY_PLUGIN
236 +ifeq ($(KBUILD_EXTMOD),)
237 +gcc-plugins:
238 + $(Q)$(MAKE) $(build)=tools/gcc
239 @@ -326,7 +347,7 @@ index 3a9a721..b81a4d5 100644
240 include $(srctree)/arch/$(SRCARCH)/Makefile
241
242 ifneq ($(CONFIG_FRAME_WARN),0)
243 -@@ -647,7 +698,7 @@ export mod_strip_cmd
244 +@@ -647,7 +702,7 @@ export mod_strip_cmd
245
246
247 ifeq ($(KBUILD_EXTMOD),)
248 @@ -335,7 +356,7 @@ index 3a9a721..b81a4d5 100644
249
250 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
251 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
252 -@@ -868,6 +919,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
253 +@@ -868,6 +923,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
254
255 # The actual objects are generated when descending,
256 # make sure no implicit rule kicks in
257 @@ -344,7 +365,7 @@ index 3a9a721..b81a4d5 100644
258 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
259
260 # Handle descending into subdirectories listed in $(vmlinux-dirs)
261 -@@ -877,7 +930,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
262 +@@ -877,7 +934,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
263 # Error messages still appears in the original language
264
265 PHONY += $(vmlinux-dirs)
266 @@ -353,7 +374,7 @@ index 3a9a721..b81a4d5 100644
267 $(Q)$(MAKE) $(build)=$@
268
269 # Build the kernel release string
270 -@@ -986,6 +1039,7 @@ prepare0: archprepare FORCE
271 +@@ -986,6 +1043,7 @@ prepare0: archprepare FORCE
272 $(Q)$(MAKE) $(build)=. missing-syscalls
273
274 # All the preparing..
275 @@ -361,7 +382,7 @@ index 3a9a721..b81a4d5 100644
276 prepare: prepare0
277
278 # The asm symlink changes when $(ARCH) changes.
279 -@@ -1127,6 +1181,8 @@ all: modules
280 +@@ -1127,6 +1185,8 @@ all: modules
281 # using awk while concatenating to the final file.
282
283 PHONY += modules
284 @@ -370,7 +391,7 @@ index 3a9a721..b81a4d5 100644
285 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
286 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
287 @$(kecho) ' Building modules, stage 2.';
288 -@@ -1136,7 +1192,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
289 +@@ -1136,7 +1196,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
290
291 # Target to prepare building external modules
292 PHONY += modules_prepare
293 @@ -379,7 +400,7 @@ index 3a9a721..b81a4d5 100644
294
295 # Target to install modules
296 PHONY += modules_install
297 -@@ -1199,9 +1255,9 @@ CLEAN_FILES += vmlinux System.map \
298 +@@ -1199,9 +1259,9 @@ CLEAN_FILES += vmlinux System.map \
299 MRPROPER_DIRS += include/config include2 usr/include include/generated
300 MRPROPER_FILES += .config .config.old include/asm .version .old_version \
301 include/linux/autoconf.h include/linux/version.h \
302 @@ -391,7 +412,7 @@ index 3a9a721..b81a4d5 100644
303
304 # clean - Delete most, but leave enough to build external modules
305 #
306 -@@ -1245,7 +1301,7 @@ distclean: mrproper
307 +@@ -1245,7 +1305,7 @@ distclean: mrproper
308 @find $(srctree) $(RCS_FIND_IGNORE) \
309 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
310 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
311 @@ -400,7 +421,7 @@ index 3a9a721..b81a4d5 100644
312 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
313 -type f -print | xargs rm -f
314
315 -@@ -1292,6 +1348,7 @@ help:
316 +@@ -1292,6 +1352,7 @@ help:
317 @echo ' modules_prepare - Set up for building external modules'
318 @echo ' tags/TAGS - Generate tags file for editors'
319 @echo ' cscope - Generate cscope index'
320 @@ -408,7 +429,7 @@ index 3a9a721..b81a4d5 100644
321 @echo ' kernelrelease - Output the release version string'
322 @echo ' kernelversion - Output the version stored in Makefile'
323 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
324 -@@ -1393,6 +1450,8 @@ PHONY += $(module-dirs) modules
325 +@@ -1393,6 +1454,8 @@ PHONY += $(module-dirs) modules
326 $(module-dirs): crmodverdir $(objtree)/Module.symvers
327 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
328
329 @@ -417,7 +438,7 @@ index 3a9a721..b81a4d5 100644
330 modules: $(module-dirs)
331 @$(kecho) ' Building modules, stage 2.';
332 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
333 -@@ -1448,7 +1507,7 @@ endif # KBUILD_EXTMOD
334 +@@ -1448,7 +1511,7 @@ endif # KBUILD_EXTMOD
335 quiet_cmd_tags = GEN $@
336 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
337
338 @@ -426,7 +447,7 @@ index 3a9a721..b81a4d5 100644
339 $(call cmd,tags)
340
341 # Scripts to check various things for consistency
342 -@@ -1513,17 +1572,21 @@ else
343 +@@ -1513,17 +1576,21 @@ else
344 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
345 endif
346
347 @@ -452,7 +473,7 @@ index 3a9a721..b81a4d5 100644
348 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
349 %.symtypes: %.c prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 -@@ -1533,11 +1596,15 @@ endif
352 +@@ -1533,11 +1600,15 @@ endif
353 $(cmd_crmodverdir)
354 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
355 $(build)=$(build-dir)
356 @@ -7481,7 +7502,7 @@ index 79836a7..62f47a2 100644
357 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
358 obj-y += fault_$(BITS).o
359 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
360 -index b99f81c..3453e93 100644
361 +index b99f81c..16c0132 100644
362 --- a/arch/sparc/mm/fault_32.c
363 +++ b/arch/sparc/mm/fault_32.c
364 @@ -21,6 +21,9 @@
365 @@ -7494,7 +7515,7 @@ index b99f81c..3453e93 100644
366
367 #include <asm/system.h>
368 #include <asm/page.h>
369 -@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
370 +@@ -167,6 +170,276 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
371 return safe_compute_effective_address(regs, insn);
372 }
373
374 @@ -7584,40 +7605,49 @@ index b99f81c..3453e93 100644
375 + }
376 + } while (0);
377 +
378 -+ { /* PaX: patched PLT emulation #2 */
379 ++ do { /* PaX: patched PLT emulation #2 */
380 + unsigned int ba;
381 +
382 + err = get_user(ba, (unsigned int *)regs->pc);
383 +
384 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
385 ++ if (err)
386 ++ break;
387 ++
388 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
389 + unsigned int addr;
390 +
391 -+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
392 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
393 ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
394 ++ else
395 ++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
396 + regs->pc = addr;
397 + regs->npc = addr+4;
398 + return 2;
399 + }
400 -+ }
401 ++ } while (0);
402 +
403 + do { /* PaX: patched PLT emulation #3 */
404 -+ unsigned int sethi, jmpl, nop;
405 ++ unsigned int sethi, bajmpl, nop;
406 +
407 + err = get_user(sethi, (unsigned int *)regs->pc);
408 -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
409 ++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
410 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
411 +
412 + if (err)
413 + break;
414 +
415 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
416 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
417 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
418 + nop == 0x01000000U)
419 + {
420 + unsigned int addr;
421 +
422 + addr = (sethi & 0x003FFFFFU) << 10;
423 + regs->u_regs[UREG_G1] = addr;
424 -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
425 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
426 ++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
427 ++ else
428 ++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
429 + regs->pc = addr;
430 + regs->npc = addr+4;
431 + return 2;
432 @@ -7762,7 +7792,7 @@ index b99f81c..3453e93 100644
433 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
434 unsigned long address)
435 {
436 -@@ -231,6 +495,24 @@ good_area:
437 +@@ -231,6 +504,24 @@ good_area:
438 if(!(vma->vm_flags & VM_WRITE))
439 goto bad_area;
440 } else {
441 @@ -7788,7 +7818,7 @@ index b99f81c..3453e93 100644
442 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
443 goto bad_area;
444 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
445 -index 43b0da9..a0b78f9 100644
446 +index 43b0da9..f9f9985 100644
447 --- a/arch/sparc/mm/fault_64.c
448 +++ b/arch/sparc/mm/fault_64.c
449 @@ -20,6 +20,9 @@
450 @@ -7810,7 +7840,7 @@ index 43b0da9..a0b78f9 100644
451 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
452 dump_stack();
453 unhandled_fault(regs->tpc, current, regs);
454 -@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
455 +@@ -249,6 +252,465 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
456 show_regs(regs);
457 }
458
459 @@ -7904,15 +7934,21 @@ index 43b0da9..a0b78f9 100644
460 + }
461 + } while (0);
462 +
463 -+ { /* PaX: patched PLT emulation #2 */
464 ++ do { /* PaX: patched PLT emulation #2 */
465 + unsigned int ba;
466 +
467 + err = get_user(ba, (unsigned int *)regs->tpc);
468 +
469 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
470 ++ if (err)
471 ++ break;
472 ++
473 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
474 + unsigned long addr;
475 +
476 -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
477 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
478 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
479 ++ else
480 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
481 +
482 + if (test_thread_flag(TIF_32BIT))
483 + addr &= 0xFFFFFFFFUL;
484 @@ -7921,27 +7957,30 @@ index 43b0da9..a0b78f9 100644
485 + regs->tnpc = addr+4;
486 + return 2;
487 + }
488 -+ }
489 ++ } while (0);
490 +
491 + do { /* PaX: patched PLT emulation #3 */
492 -+ unsigned int sethi, jmpl, nop;
493 ++ unsigned int sethi, bajmpl, nop;
494 +
495 + err = get_user(sethi, (unsigned int *)regs->tpc);
496 -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
497 ++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
498 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
499 +
500 + if (err)
501 + break;
502 +
503 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
504 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
505 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
506 + nop == 0x01000000U)
507 + {
508 + unsigned long addr;
509 +
510 + addr = (sethi & 0x003FFFFFU) << 10;
511 + regs->u_regs[UREG_G1] = addr;
512 -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
513 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
514 ++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
515 ++ else
516 ++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
517 +
518 + if (test_thread_flag(TIF_32BIT))
519 + addr &= 0xFFFFFFFFUL;
520 @@ -8267,7 +8306,7 @@ index 43b0da9..a0b78f9 100644
521 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
522 {
523 struct mm_struct *mm = current->mm;
524 -@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
525 +@@ -315,6 +777,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
526 if (!vma)
527 goto bad_area;
528
529 @@ -10177,7 +10216,7 @@ index 0000000..0d9ec77
530 +
531 +#endif /* ASM_X86_ARCHRANDOM_H */
532 diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
533 -index dc5a667..939040c 100644
534 +index dc5a667..7a2470f 100644
535 --- a/arch/x86/include/asm/atomic_32.h
536 +++ b/arch/x86/include/asm/atomic_32.h
537 @@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
538 @@ -10506,7 +10545,7 @@ index dc5a667..939040c 100644
539 /**
540 * atomic_add_unless - add unless the number is already a given value
541 * @v: pointer of type atomic_t
542 -@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
543 +@@ -227,32 +439,73 @@ static inline int atomic_xchg(atomic_t *v, int new)
544 */
545 static inline int atomic_add_unless(atomic_t *v, int a, int u)
546 {
547 @@ -10550,7 +10589,47 @@ index dc5a667..939040c 100644
548 #define atomic_dec_return(v) (atomic_sub_return(1, v))
549
550 /* These are x86-specific, used by some header files */
551 -@@ -266,9 +495,18 @@ typedef struct {
552 +-#define atomic_clear_mask(mask, addr) \
553 +- asm volatile(LOCK_PREFIX "andl %0,%1" \
554 +- : : "r" (~(mask)), "m" (*(addr)) : "memory")
555 ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
556 ++{
557 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
558 ++ : "+m" (v->counter)
559 ++ : "r" (~(mask))
560 ++ : "memory");
561 ++}
562 +
563 +-#define atomic_set_mask(mask, addr) \
564 +- asm volatile(LOCK_PREFIX "orl %0,%1" \
565 +- : : "r" (mask), "m" (*(addr)) : "memory")
566 ++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
567 ++{
568 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
569 ++ : "+m" (v->counter)
570 ++ : "r" (~(mask))
571 ++ : "memory");
572 ++}
573 ++
574 ++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
575 ++{
576 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
577 ++ : "+m" (v->counter)
578 ++ : "r" (mask)
579 ++ : "memory");
580 ++}
581 ++
582 ++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
583 ++{
584 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
585 ++ : "+m" (v->counter)
586 ++ : "r" (mask)
587 ++ : "memory");
588 ++}
589 +
590 + /* Atomic operations are already serializing on x86 */
591 + #define smp_mb__before_atomic_dec() barrier()
592 +@@ -266,9 +519,18 @@ typedef struct {
593 u64 __aligned(8) counter;
594 } atomic64_t;
595
596 @@ -10569,7 +10648,7 @@ index dc5a667..939040c 100644
597
598 /**
599 * atomic64_xchg - xchg atomic64 variable
600 -@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
601 +@@ -279,6 +541,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
602 * the old value.
603 */
604 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
605 @@ -10577,7 +10656,7 @@ index dc5a667..939040c 100644
606
607 /**
608 * atomic64_set - set atomic64 variable
609 -@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
610 +@@ -290,6 +553,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
611 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
612
613 /**
614 @@ -10593,7 +10672,7 @@ index dc5a667..939040c 100644
615 * atomic64_read - read atomic64 variable
616 * @ptr: pointer to type atomic64_t
617 *
618 -@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
619 +@@ -317,7 +589,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
620 return res;
621 }
622
623 @@ -10628,7 +10707,7 @@ index dc5a667..939040c 100644
624
625 /**
626 * atomic64_add_return - add and return
627 -@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
628 +@@ -332,8 +630,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
629 * Other variants with different arithmetic operators:
630 */
631 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
632 @@ -10640,7 +10719,7 @@ index dc5a667..939040c 100644
633
634 /**
635 * atomic64_add - add integer to atomic64 variable
636 -@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
637 +@@ -345,6 +646,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
638 extern void atomic64_add(u64 delta, atomic64_t *ptr);
639
640 /**
641 @@ -10656,7 +10735,7 @@ index dc5a667..939040c 100644
642 * atomic64_sub - subtract the atomic64 variable
643 * @delta: integer value to subtract
644 * @ptr: pointer to type atomic64_t
645 -@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
646 +@@ -354,6 +664,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
647 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
648
649 /**
650 @@ -10672,7 +10751,7 @@ index dc5a667..939040c 100644
651 * atomic64_sub_and_test - subtract value from variable and test result
652 * @delta: integer value to subtract
653 * @ptr: pointer to type atomic64_t
654 -@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
655 +@@ -373,6 +692,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
656 extern void atomic64_inc(atomic64_t *ptr);
657
658 /**
659 @@ -10687,7 +10766,7 @@ index dc5a667..939040c 100644
660 * atomic64_dec - decrement atomic64 variable
661 * @ptr: pointer to type atomic64_t
662 *
663 -@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
664 +@@ -381,6 +708,14 @@ extern void atomic64_inc(atomic64_t *ptr);
665 extern void atomic64_dec(atomic64_t *ptr);
666
667 /**
668 @@ -10703,7 +10782,7 @@ index dc5a667..939040c 100644
669 * @ptr: pointer to type atomic64_t
670 *
671 diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
672 -index d605dc2..fafd7bd 100644
673 +index d605dc2..72cb5cd 100644
674 --- a/arch/x86/include/asm/atomic_64.h
675 +++ b/arch/x86/include/asm/atomic_64.h
676 @@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
677 @@ -11370,6 +11449,51 @@ index d605dc2..fafd7bd 100644
678 }
679
680 /**
681 +@@ -466,14 +864,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
682 + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
683 +
684 + /* These are x86-specific, used by some header files */
685 +-#define atomic_clear_mask(mask, addr) \
686 +- asm volatile(LOCK_PREFIX "andl %0,%1" \
687 +- : : "r" (~(mask)), "m" (*(addr)) : "memory")
688 ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
689 ++{
690 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
691 ++ : "+m" (v->counter)
692 ++ : "r" (~(mask))
693 ++ : "memory");
694 ++}
695 +
696 +-#define atomic_set_mask(mask, addr) \
697 +- asm volatile(LOCK_PREFIX "orl %0,%1" \
698 +- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
699 +- : "memory")
700 ++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
701 ++{
702 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
703 ++ : "+m" (v->counter)
704 ++ : "r" (~(mask))
705 ++ : "memory");
706 ++}
707 ++
708 ++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
709 ++{
710 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
711 ++ : "+m" (v->counter)
712 ++ : "r" (mask)
713 ++ : "memory");
714 ++}
715 ++
716 ++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
717 ++{
718 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
719 ++ : "+m" (v->counter)
720 ++ : "r" (mask)
721 ++ : "memory");
722 ++}
723 +
724 + /* Atomic operations are already serializing on x86 */
725 + #define smp_mb__before_atomic_dec() barrier()
726 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
727 index 02b47a6..d5c4b15 100644
728 --- a/arch/x86/include/asm/bitops.h
729 @@ -33762,7 +33886,7 @@ index 87c67b4..230527a 100644
730 .part_num = MBCS_PART_NUM,
731 .mfg_num = MBCS_MFG_NUM,
732 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
733 -index 1270f64..8495f49 100644
734 +index 1270f64..3b87405 100644
735 --- a/drivers/char/mem.c
736 +++ b/drivers/char/mem.c
737 @@ -18,6 +18,7 @@
738 @@ -33825,7 +33949,7 @@ index 1270f64..8495f49 100644
739
740 - if (copy_to_user(buf, ptr, sz)) {
741 +#ifdef CONFIG_PAX_USERCOPY
742 -+ temp = kmalloc(sz, GFP_KERNEL);
743 ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
744 + if (!temp) {
745 + unxlate_dev_mem_ptr(p, ptr);
746 + return -ENOMEM;
747 @@ -33878,7 +34002,7 @@ index 1270f64..8495f49 100644
748
749 - if (copy_to_user(buf, kbuf, sz))
750 +#ifdef CONFIG_PAX_USERCOPY
751 -+ temp = kmalloc(sz, GFP_KERNEL);
752 ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
753 + if (!temp)
754 + return -ENOMEM;
755 + memcpy(temp, kbuf, sz);
756 @@ -34062,7 +34186,7 @@ index 62f282e..e45c45c 100644
757 cdev_init(&ptmx_cdev, &ptmx_fops);
758 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
759 diff --git a/drivers/char/random.c b/drivers/char/random.c
760 -index 3a19e2d..8eb80fc 100644
761 +index 3a19e2d..7d9aaad 100644
762 --- a/drivers/char/random.c
763 +++ b/drivers/char/random.c
764 @@ -125,20 +125,32 @@
765 @@ -34114,7 +34238,7 @@ index 3a19e2d..8eb80fc 100644
766
767 #ifdef CONFIG_GENERIC_HARDIRQS
768 # include <linux/irq.h>
769 -@@ -249,14 +262,21 @@
770 +@@ -249,14 +262,23 @@
771 #include <asm/processor.h>
772 #include <asm/uaccess.h>
773 #include <asm/irq.h>
774 @@ -34133,10 +34257,12 @@ index 3a19e2d..8eb80fc 100644
775 +#endif
776 #define SEC_XFER_SIZE 512
777 +#define EXTRACT_SIZE 10
778 ++
779 ++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
780
781 /*
782 * The minimum number of bits of entropy before we wake up a read on
783 -@@ -292,10 +312,17 @@ static struct poolinfo {
784 +@@ -292,10 +314,17 @@ static struct poolinfo {
785 int poolwords;
786 int tap1, tap2, tap3, tap4, tap5;
787 } poolinfo_table[] = {
788 @@ -34154,7 +34280,7 @@ index 3a19e2d..8eb80fc 100644
789 #if 0
790 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
791 { 2048, 1638, 1231, 819, 411, 1 },
792 -@@ -412,9 +439,11 @@ struct entropy_store {
793 +@@ -412,9 +441,11 @@ struct entropy_store {
794 /* read-write data: */
795 spinlock_t lock;
796 unsigned add_ptr;
797 @@ -34168,7 +34294,7 @@ index 3a19e2d..8eb80fc 100644
798 };
799
800 static __u32 input_pool_data[INPUT_POOL_WORDS];
801 -@@ -446,6 +475,10 @@ static struct entropy_store nonblocking_pool = {
802 +@@ -446,6 +477,10 @@ static struct entropy_store nonblocking_pool = {
803 .pool = nonblocking_pool_data
804 };
805
806 @@ -34179,7 +34305,7 @@ index 3a19e2d..8eb80fc 100644
807 /*
808 * This function adds bytes into the entropy "pool". It does not
809 * update the entropy estimate. The caller should call
810 -@@ -456,29 +489,24 @@ static struct entropy_store nonblocking_pool = {
811 +@@ -456,29 +491,24 @@ static struct entropy_store nonblocking_pool = {
812 * it's cheap to do so and helps slightly in the expected case where
813 * the entropy is concentrated in the low-order bits.
814 */
815 @@ -34214,7 +34340,7 @@ index 3a19e2d..8eb80fc 100644
816
817 /* mix one byte at a time to simplify size handling and churn faster */
818 while (nbytes--) {
819 -@@ -505,19 +533,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
820 +@@ -505,19 +535,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
821 input_rotate += i ? 7 : 14;
822 }
823
824 @@ -34272,7 +34398,7 @@ index 3a19e2d..8eb80fc 100644
825 }
826
827 /*
828 -@@ -525,30 +587,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
829 +@@ -525,30 +589,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
830 */
831 static void credit_entropy_bits(struct entropy_store *r, int nbits)
832 {
833 @@ -34314,7 +34440,7 @@ index 3a19e2d..8eb80fc 100644
834 }
835
836 /*********************************************************************
837 -@@ -601,6 +667,25 @@ static void set_timer_rand_state(unsigned int irq,
838 +@@ -601,6 +669,25 @@ static void set_timer_rand_state(unsigned int irq,
839 }
840 #endif
841
842 @@ -34340,7 +34466,7 @@ index 3a19e2d..8eb80fc 100644
843 static struct timer_rand_state input_timer_state;
844
845 /*
846 -@@ -631,7 +716,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
847 +@@ -631,7 +718,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
848 sample.jiffies = jiffies;
849 sample.cycles = get_cycles();
850 sample.num = num;
851 @@ -34349,7 +34475,7 @@ index 3a19e2d..8eb80fc 100644
852
853 /*
854 * Calculate number of bits of randomness we probably added.
855 -@@ -688,17 +773,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
856 +@@ -688,17 +775,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
857 }
858 EXPORT_SYMBOL_GPL(add_input_randomness);
859
860 @@ -34404,77 +34530,108 @@ index 3a19e2d..8eb80fc 100644
861 }
862
863 #ifdef CONFIG_BLOCK
864 -@@ -714,8 +830,6 @@ void add_disk_randomness(struct gendisk *disk)
865 +@@ -714,7 +832,16 @@ void add_disk_randomness(struct gendisk *disk)
866 }
867 #endif
868
869 -#define EXTRACT_SIZE 10
870 --
871 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
872 ++u64 latent_entropy;
873 ++
874 ++__init void transfer_latent_entropy(void)
875 ++{
876 ++ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
877 ++ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
878 ++// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
879 ++}
880 ++#endif
881 +
882 /*********************************************************************
883 *
884 - * Entropy extraction routines
885 -@@ -732,7 +846,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
886 +@@ -732,7 +859,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
887 */
888 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
889 {
890 - __u32 tmp[OUTPUT_POOL_WORDS];
891 -+ union {
892 -+ __u32 tmp[OUTPUT_POOL_WORDS];
893 -+ long hwrand[4];
894 -+ } u;
895 -+ int i;
896 ++ __u32 tmp[OUTPUT_POOL_WORDS];
897
898 if (r->pull && r->entropy_count < nbytes * 8 &&
899 r->entropy_count < r->poolinfo->POOLBITS) {
900 -@@ -743,17 +861,22 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
901 - /* pull at least as many as BYTES as wakeup BITS */
902 - bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
903 - /* but never more than the buffer size */
904 -- bytes = min_t(int, bytes, sizeof(tmp));
905 -+ bytes = min_t(int, bytes, sizeof(u.tmp));
906 -
907 - DEBUG_ENT("going to reseed %s with %d bits "
908 - "(%d of %d requested)\n",
909 - r->name, bytes * 8, nbytes * 8, r->entropy_count);
910 -
911 -- bytes = extract_entropy(r->pull, tmp, bytes,
912 -+ bytes = extract_entropy(r->pull, u.tmp, bytes,
913 +@@ -751,7 +878,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
914 +
915 + bytes = extract_entropy(r->pull, tmp, bytes,
916 random_read_wakeup_thresh / 8, rsvd);
917 - mix_pool_bytes(r, tmp, bytes);
918 -+ mix_pool_bytes(r, u.tmp, bytes, NULL);
919 ++ mix_pool_bytes(r, tmp, bytes, NULL);
920 credit_entropy_bits(r, bytes*8);
921 }
922 -+ for (i = 0; i < 4; i++)
923 -+ if (arch_get_random_long(&u.hwrand[i]))
924 -+ break;
925 -+ if (i)
926 -+ mix_pool_bytes(r, &u.hwrand, sizeof(u.hwrand), 0);
927 }
928 -
929 - /*
930 -@@ -812,9 +935,11 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
931 +@@ -810,13 +937,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
932 + static void extract_buf(struct entropy_store *r, __u8 *out)
933 + {
934 int i;
935 - __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
936 +- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
937 ++ union {
938 ++ __u32 w[5];
939 ++ unsigned long l[LONGS(EXTRACT_SIZE)];
940 ++ } hash;
941 ++ __u32 workspace[SHA_WORKSPACE_WORDS];
942 __u8 extract[64];
943 + unsigned long flags;
944
945 /* Generate a hash across the pool, 16 words (512 bits) at a time */
946 - sha_init(hash);
947 +- sha_init(hash);
948 ++ sha_init(hash.w);
949 + spin_lock_irqsave(&r->lock, flags);
950 for (i = 0; i < r->poolinfo->poolwords; i += 16)
951 - sha_transform(hash, (__u8 *)(r->pool + i), workspace);
952 +- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
953 ++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
954
955 -@@ -827,7 +952,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
956 + /*
957 + * We mix the hash back into the pool to prevent backtracking
958 +@@ -827,13 +960,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
959 * brute-forcing the feedback as hard as brute-forcing the
960 * hash.
961 */
962 - mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
963 -+ __mix_pool_bytes(r, hash, sizeof(hash), extract);
964 ++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
965 + spin_unlock_irqrestore(&r->lock, flags);
966
967 /*
968 * To avoid duplicates, we atomically extract a portion of the
969 -@@ -850,11 +976,10 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
970 + * pool while mixing, and hash one final time.
971 + */
972 +- sha_transform(hash, extract, workspace);
973 ++ sha_transform(hash.w, extract, workspace);
974 + memset(extract, 0, sizeof(extract));
975 + memset(workspace, 0, sizeof(workspace));
976 +
977 +@@ -842,19 +976,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
978 + * pattern, we fold it in half. Thus, we always feed back
979 + * twice as much data as we output.
980 + */
981 +- hash[0] ^= hash[3];
982 +- hash[1] ^= hash[4];
983 +- hash[2] ^= rol32(hash[2], 16);
984 +- memcpy(out, hash, EXTRACT_SIZE);
985 +- memset(hash, 0, sizeof(hash));
986 ++ hash.w[0] ^= hash.w[3];
987 ++ hash.w[1] ^= hash.w[4];
988 ++ hash.w[2] ^= rol32(hash.w[2], 16);
989 ++
990 ++ /*
991 ++ * If we have a architectural hardware random number
992 ++ * generator, mix that in, too.
993 ++ */
994 ++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
995 ++ unsigned long v;
996 ++ if (!arch_get_random_long(&v))
997 ++ break;
998 ++ hash.l[i] ^= v;
999 ++ }
1000 ++
1001 ++ memcpy(out, &hash, EXTRACT_SIZE);
1002 ++ memset(&hash, 0, sizeof(hash));
1003 }
1004
1005 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1006 @@ -34487,7 +34644,7 @@ index 3a19e2d..8eb80fc 100644
1007
1008 xfer_secondary_pool(r, nbytes);
1009 nbytes = account(r, nbytes, min, reserved);
1010 -@@ -862,7 +987,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1011 +@@ -862,7 +1007,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1012 while (nbytes) {
1013 extract_buf(r, tmp);
1014
1015 @@ -34498,7 +34655,7 @@ index 3a19e2d..8eb80fc 100644
1016 spin_lock_irqsave(&r->lock, flags);
1017 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1018 panic("Hardware RNG duplicated output!\n");
1019 -@@ -926,7 +1053,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1020 +@@ -926,7 +1073,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1021 */
1022 void get_random_bytes(void *buf, int nbytes)
1023 {
1024 @@ -34521,7 +34678,7 @@ index 3a19e2d..8eb80fc 100644
1025 }
1026 EXPORT_SYMBOL(get_random_bytes);
1027
1028 -@@ -941,19 +1082,19 @@ EXPORT_SYMBOL(get_random_bytes);
1029 +@@ -941,19 +1102,19 @@ EXPORT_SYMBOL(get_random_bytes);
1030 */
1031 static void init_std_data(struct entropy_store *r)
1032 {
1033 @@ -34552,7 +34709,7 @@ index 3a19e2d..8eb80fc 100644
1034 }
1035
1036 static int rand_initialize(void)
1037 -@@ -1090,7 +1231,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1038 +@@ -1090,7 +1251,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1039 count -= bytes;
1040 p += bytes;
1041
1042 @@ -34561,7 +34718,7 @@ index 3a19e2d..8eb80fc 100644
1043 cond_resched();
1044 }
1045
1046 -@@ -1209,7 +1350,7 @@ EXPORT_SYMBOL(generate_random_uuid);
1047 +@@ -1209,7 +1370,7 @@ EXPORT_SYMBOL(generate_random_uuid);
1048 #include <linux/sysctl.h>
1049
1050 static int min_read_thresh = 8, min_write_thresh;
1051 @@ -34570,7 +34727,7 @@ index 3a19e2d..8eb80fc 100644
1052 static int max_write_thresh = INPUT_POOL_WORDS * 32;
1053 static char sysctl_bootid[16];
1054
1055 -@@ -1231,10 +1372,15 @@ static int proc_do_uuid(ctl_table *table, int write,
1056 +@@ -1231,10 +1392,15 @@ static int proc_do_uuid(ctl_table *table, int write,
1057 uuid = table->data;
1058 if (!uuid) {
1059 uuid = tmp_uuid;
1060 @@ -34589,7 +34746,7 @@ index 3a19e2d..8eb80fc 100644
1061
1062 sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
1063 "%02x%02x%02x%02x%02x%02x",
1064 -@@ -1279,6 +1425,7 @@ static int uuid_strategy(ctl_table *table,
1065 +@@ -1279,6 +1445,7 @@ static int uuid_strategy(ctl_table *table,
1066 }
1067
1068 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
1069 @@ -34597,7 +34754,7 @@ index 3a19e2d..8eb80fc 100644
1070 ctl_table random_table[] = {
1071 {
1072 .ctl_name = RANDOM_POOLSIZE,
1073 -@@ -1354,12 +1501,17 @@ late_initcall(random_int_secret_init);
1074 +@@ -1354,12 +1521,17 @@ late_initcall(random_int_secret_init);
1075 * value is not cryptographically secure but for several uses the cost of
1076 * depleting entropy is too high
1077 */
1078 @@ -35490,10 +35647,27 @@ index 7ff6e75..a2965d9 100644
1079 void fw_card_initialize(struct fw_card *card,
1080 const struct fw_card_driver *driver, struct device *device);
1081 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1082 -index 3a2ccb0..82fd7c4 100644
1083 +index 3a2ccb0..8365cd1 100644
1084 --- a/drivers/firmware/dmi_scan.c
1085 +++ b/drivers/firmware/dmi_scan.c
1086 -@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
1087 +@@ -6,6 +6,7 @@
1088 + #include <linux/efi.h>
1089 + #include <linux/bootmem.h>
1090 + #include <linux/slab.h>
1091 ++#include <linux/random.h>
1092 + #include <asm/dmi.h>
1093 +
1094 + /*
1095 +@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
1096 +
1097 + dmi_table(buf, dmi_len, dmi_num, decode, NULL);
1098 +
1099 ++ add_device_randomness(buf, dmi_len);
1100 ++
1101 + dmi_iounmap(buf, dmi_len);
1102 + return 0;
1103 + }
1104 +@@ -391,11 +394,6 @@ void __init dmi_scan_machine(void)
1105 }
1106 }
1107 else {
1108 @@ -35505,7 +35679,7 @@ index 3a2ccb0..82fd7c4 100644
1109 p = dmi_ioremap(0xF0000, 0x10000);
1110 if (p == NULL)
1111 goto error;
1112 -@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
1113 +@@ -667,7 +665,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
1114 if (buf == NULL)
1115 return -1;
1116
1117 @@ -62700,7 +62874,7 @@ index e4e4d43..66bcbcc 100644
1118 .update_status = aty128_bl_update_status,
1119 };
1120 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
1121 -index 913b4a4..9295a38 100644
1122 +index 913b4a4..4de325a9 100644
1123 --- a/drivers/video/aty/atyfb_base.c
1124 +++ b/drivers/video/aty/atyfb_base.c
1125 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
1126 @@ -62712,6 +62886,55 @@ index 913b4a4..9295a38 100644
1127 .get_brightness = aty_bl_get_brightness,
1128 .update_status = aty_bl_update_status,
1129 };
1130 +@@ -2970,9 +2970,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
1131 + {
1132 + struct atyfb_par *par = info->par;
1133 + struct device_node *dp;
1134 +- char prop[128];
1135 +- int node, len, i, j, ret;
1136 + u32 mem, chip_id;
1137 ++ int i, j, ret;
1138 +
1139 + /*
1140 + * Map memory-mapped registers.
1141 +@@ -3088,23 +3087,8 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
1142 + aty_st_le32(MEM_CNTL, mem, par);
1143 + }
1144 +
1145 +- /*
1146 +- * If this is the console device, we will set default video
1147 +- * settings to what the PROM left us with.
1148 +- */
1149 +- node = prom_getchild(prom_root_node);
1150 +- node = prom_searchsiblings(node, "aliases");
1151 +- if (node) {
1152 +- len = prom_getproperty(node, "screen", prop, sizeof(prop));
1153 +- if (len > 0) {
1154 +- prop[len] = '\0';
1155 +- node = prom_finddevice(prop);
1156 +- } else
1157 +- node = 0;
1158 +- }
1159 +-
1160 + dp = pci_device_to_OF_node(pdev);
1161 +- if (node == dp->node) {
1162 ++ if (dp == of_console_device) {
1163 + struct fb_var_screeninfo *var = &default_var;
1164 + unsigned int N, P, Q, M, T, R;
1165 + u32 v_total, h_total;
1166 +@@ -3112,9 +3096,9 @@ static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
1167 + u8 pll_regs[16];
1168 + u8 clock_cntl;
1169 +
1170 +- crtc.vxres = prom_getintdefault(node, "width", 1024);
1171 +- crtc.vyres = prom_getintdefault(node, "height", 768);
1172 +- var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
1173 ++ crtc.vxres = of_getintprop_default(dp, "width", 1024);
1174 ++ crtc.vyres = of_getintprop_default(dp, "height", 768);
1175 ++ var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
1176 + var->xoffset = var->yoffset = 0;
1177 + crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
1178 + crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
1179 diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
1180 index 1a056ad..221bd6a 100644
1181 --- a/drivers/video/aty/radeon_backlight.c
1182 @@ -66305,7 +66528,7 @@ index 0133b5a..3710d09 100644
1183 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
1184 #ifdef __alpha__
1185 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
1186 -index a64fde6..36d9464 100644
1187 +index a64fde6..6583da2 100644
1188 --- a/fs/binfmt_elf.c
1189 +++ b/fs/binfmt_elf.c
1190 @@ -31,6 +31,7 @@
1191 @@ -66438,7 +66661,7 @@ index a64fde6..36d9464 100644
1192 error = -ENOMEM;
1193 goto out_close;
1194 }
1195 -@@ -532,6 +558,349 @@ out:
1196 +@@ -532,6 +558,311 @@ out:
1197 return error;
1198 }
1199
1200 @@ -66458,15 +66681,6 @@ index a64fde6..36d9464 100644
1201 + pax_flags |= MF_PAX_SEGMEXEC;
1202 +#endif
1203 +
1204 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1205 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1206 -+ if (nx_enabled)
1207 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
1208 -+ else
1209 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1210 -+ }
1211 -+#endif
1212 -+
1213 +#ifdef CONFIG_PAX_EMUTRAMP
1214 + if (elf_phdata->p_flags & PF_EMUTRAMP)
1215 + pax_flags |= MF_PAX_EMUTRAMP;
1216 @@ -66500,15 +66714,6 @@ index a64fde6..36d9464 100644
1217 + pax_flags |= MF_PAX_SEGMEXEC;
1218 +#endif
1219 +
1220 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1221 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1222 -+ if (nx_enabled)
1223 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
1224 -+ else
1225 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1226 -+ }
1227 -+#endif
1228 -+
1229 +#ifdef CONFIG_PAX_EMUTRAMP
1230 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
1231 + pax_flags |= MF_PAX_EMUTRAMP;
1232 @@ -66544,15 +66749,6 @@ index a64fde6..36d9464 100644
1233 + pax_flags |= MF_PAX_SEGMEXEC;
1234 +#endif
1235 +
1236 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1237 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1238 -+ if (nx_enabled)
1239 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
1240 -+ else
1241 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1242 -+ }
1243 -+#endif
1244 -+
1245 +#ifdef CONFIG_PAX_EMUTRAMP
1246 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
1247 + pax_flags |= MF_PAX_EMUTRAMP;
1248 @@ -66586,15 +66782,6 @@ index a64fde6..36d9464 100644
1249 + pax_flags |= MF_PAX_SEGMEXEC;
1250 +#endif
1251 +
1252 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1253 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1254 -+ if ((__supported_pte_mask & _PAGE_NX))
1255 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
1256 -+ else
1257 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1258 -+ }
1259 -+#endif
1260 -+
1261 +#ifdef CONFIG_PAX_EMUTRAMP
1262 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
1263 + pax_flags |= MF_PAX_EMUTRAMP;
1264 @@ -66614,7 +66801,7 @@ index a64fde6..36d9464 100644
1265 +}
1266 +#endif
1267 +
1268 -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
1269 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
1270 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
1271 +{
1272 + unsigned long pax_flags = 0UL;
1273 @@ -66631,15 +66818,6 @@ index a64fde6..36d9464 100644
1274 + pax_flags |= MF_PAX_SEGMEXEC;
1275 +#endif
1276 +
1277 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1278 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1279 -+ if ((__supported_pte_mask & _PAGE_NX))
1280 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
1281 -+ else
1282 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1283 -+ }
1284 -+#endif
1285 -+
1286 +#ifdef CONFIG_PAX_EMUTRAMP
1287 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
1288 + pax_flags |= MF_PAX_EMUTRAMP;
1289 @@ -66661,19 +66839,17 @@ index a64fde6..36d9464 100644
1290 + pax_flags |= MF_PAX_PAGEEXEC;
1291 +#endif
1292 +
1293 ++#ifdef CONFIG_PAX_SEGMEXEC
1294 ++ pax_flags |= MF_PAX_SEGMEXEC;
1295 ++#endif
1296 ++
1297 +#ifdef CONFIG_PAX_MPROTECT
1298 + pax_flags |= MF_PAX_MPROTECT;
1299 +#endif
1300 +
1301 +#ifdef CONFIG_PAX_RANDMMAP
1302 -+ pax_flags |= MF_PAX_RANDMMAP;
1303 -+#endif
1304 -+
1305 -+#ifdef CONFIG_PAX_SEGMEXEC
1306 -+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
1307 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
1308 -+ pax_flags |= MF_PAX_SEGMEXEC;
1309 -+ }
1310 ++ if (randomize_va_space)
1311 ++ pax_flags |= MF_PAX_RANDMMAP;
1312 +#endif
1313 +
1314 +#endif
1315 @@ -66777,6 +66953,15 @@ index a64fde6..36d9464 100644
1316 + if (pt_pax_flags != ~0UL)
1317 + pax_flags = pt_pax_flags;
1318 +
1319 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
1320 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
1321 ++ if ((__supported_pte_mask & _PAGE_NX))
1322 ++ pax_flags &= ~MF_PAX_SEGMEXEC;
1323 ++ else
1324 ++ pax_flags &= ~MF_PAX_PAGEEXEC;
1325 ++ }
1326 ++#endif
1327 ++
1328 + if (0 > pax_check_flags(&pax_flags))
1329 + return -EINVAL;
1330 +
1331 @@ -66788,7 +66973,7 @@ index a64fde6..36d9464 100644
1332 /*
1333 * These are the functions used to load ELF style executables and shared
1334 * libraries. There is no binary dependent code anywhere else.
1335 -@@ -548,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
1336 +@@ -548,6 +879,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
1337 {
1338 unsigned int random_variable = 0;
1339
1340 @@ -66800,7 +66985,7 @@ index a64fde6..36d9464 100644
1341 if ((current->flags & PF_RANDOMIZE) &&
1342 !(current->personality & ADDR_NO_RANDOMIZE)) {
1343 random_variable = get_random_int() & STACK_RND_MASK;
1344 -@@ -566,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1345 +@@ -566,7 +902,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1346 unsigned long load_addr = 0, load_bias = 0;
1347 int load_addr_set = 0;
1348 char * elf_interpreter = NULL;
1349 @@ -66809,7 +66994,7 @@ index a64fde6..36d9464 100644
1350 struct elf_phdr *elf_ppnt, *elf_phdata;
1351 unsigned long elf_bss, elf_brk;
1352 int retval, i;
1353 -@@ -576,11 +950,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1354 +@@ -576,11 +912,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1355 unsigned long start_code, end_code, start_data, end_data;
1356 unsigned long reloc_func_desc = 0;
1357 int executable_stack = EXSTACK_DEFAULT;
1358 @@ -66822,7 +67007,7 @@ index a64fde6..36d9464 100644
1359
1360 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
1361 if (!loc) {
1362 -@@ -718,11 +1092,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1363 +@@ -718,11 +1054,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1364
1365 /* OK, This is the point of no return */
1366 current->flags &= ~PF_FORKNOEXEC;
1367 @@ -66847,7 +67032,7 @@ index a64fde6..36d9464 100644
1368 +
1369 + current->mm->def_flags = 0;
1370 +
1371 -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
1372 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
1373 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
1374 + send_sig(SIGKILL, current, 0);
1375 + goto out_free_dentry;
1376 @@ -66904,7 +67089,7 @@ index a64fde6..36d9464 100644
1377 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
1378 current->personality |= READ_IMPLIES_EXEC;
1379
1380 -@@ -800,10 +1243,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1381 +@@ -800,10 +1205,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1382 * might try to exec. This is because the brk will
1383 * follow the loader, and is not movable. */
1384 #ifdef CONFIG_X86
1385 @@ -66933,7 +67118,7 @@ index a64fde6..36d9464 100644
1386 }
1387
1388 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
1389 -@@ -836,9 +1296,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1390 +@@ -836,9 +1258,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1391 * allowed task size. Note that p_filesz must always be
1392 * <= p_memsz so it is only necessary to check p_memsz.
1393 */
1394 @@ -66946,7 +67131,7 @@ index a64fde6..36d9464 100644
1395 /* set_brk can never work. Avoid overflows. */
1396 send_sig(SIGKILL, current, 0);
1397 retval = -EINVAL;
1398 -@@ -877,11 +1337,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1399 +@@ -877,11 +1299,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
1400 goto out_free_dentry;
1401 }
1402 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1403 @@ -66990,7 +67175,7 @@ index a64fde6..36d9464 100644
1404 if (elf_interpreter) {
1405 unsigned long uninitialized_var(interp_map_addr);
1406
1407 -@@ -1112,8 +1601,10 @@ static int dump_seek(struct file *file, loff_t off)
1408 +@@ -1112,8 +1563,10 @@ static int dump_seek(struct file *file, loff_t off)
1409 unsigned long n = off;
1410 if (n > PAGE_SIZE)
1411 n = PAGE_SIZE;
1412 @@ -67002,7 +67187,7 @@ index a64fde6..36d9464 100644
1413 off -= n;
1414 }
1415 free_page((unsigned long)buf);
1416 -@@ -1125,7 +1616,7 @@ static int dump_seek(struct file *file, loff_t off)
1417 +@@ -1125,7 +1578,7 @@ static int dump_seek(struct file *file, loff_t off)
1418 * Decide what to dump of a segment, part, all or none.
1419 */
1420 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1421 @@ -67011,7 +67196,7 @@ index a64fde6..36d9464 100644
1422 {
1423 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1424
1425 -@@ -1159,7 +1650,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
1426 +@@ -1159,7 +1612,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
1427 if (vma->vm_file == NULL)
1428 return 0;
1429
1430 @@ -67020,7 +67205,7 @@ index a64fde6..36d9464 100644
1431 goto whole;
1432
1433 /*
1434 -@@ -1255,8 +1746,11 @@ static int writenote(struct memelfnote *men, struct file *file,
1435 +@@ -1255,8 +1708,11 @@ static int writenote(struct memelfnote *men, struct file *file,
1436 #undef DUMP_WRITE
1437
1438 #define DUMP_WRITE(addr, nr) \
1439 @@ -67033,7 +67218,7 @@ index a64fde6..36d9464 100644
1440
1441 static void fill_elf_header(struct elfhdr *elf, int segs,
1442 u16 machine, u32 flags, u8 osabi)
1443 -@@ -1385,9 +1879,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1444 +@@ -1385,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1445 {
1446 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1447 int i = 0;
1448 @@ -67045,7 +67230,7 @@ index a64fde6..36d9464 100644
1449 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1450 }
1451
1452 -@@ -1973,7 +2467,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1453 +@@ -1973,7 +2429,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1454 phdr.p_offset = offset;
1455 phdr.p_vaddr = vma->vm_start;
1456 phdr.p_paddr = 0;
1457 @@ -67054,7 +67239,7 @@ index a64fde6..36d9464 100644
1458 phdr.p_memsz = vma->vm_end - vma->vm_start;
1459 offset += phdr.p_filesz;
1460 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
1461 -@@ -2006,7 +2500,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1462 +@@ -2006,7 +2462,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1463 unsigned long addr;
1464 unsigned long end;
1465
1466 @@ -67063,7 +67248,7 @@ index a64fde6..36d9464 100644
1467
1468 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
1469 struct page *page;
1470 -@@ -2015,6 +2509,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1471 +@@ -2015,6 +2471,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
1472 page = get_dump_page(addr);
1473 if (page) {
1474 void *kaddr = kmap(page);
1475 @@ -67071,7 +67256,7 @@ index a64fde6..36d9464 100644
1476 stop = ((size += PAGE_SIZE) > limit) ||
1477 !dump_write(file, kaddr, PAGE_SIZE);
1478 kunmap(page);
1479 -@@ -2042,6 +2537,97 @@ out:
1480 +@@ -2042,6 +2499,97 @@ out:
1481
1482 #endif /* USE_ELF_CORE_DUMP */
1483
1484 @@ -68898,7 +69083,7 @@ index f539204..068db1f 100644
1485
1486 fput(tfile);
1487 diff --git a/fs/exec.c b/fs/exec.c
1488 -index 86fafc6..d54d849 100644
1489 +index 86fafc6..a9275f4 100644
1490 --- a/fs/exec.c
1491 +++ b/fs/exec.c
1492 @@ -56,12 +56,33 @@
1493 @@ -69390,7 +69575,7 @@ index 86fafc6..d54d849 100644
1494 out:
1495 if (bprm->mm) {
1496 acct_arg_size(bprm, 0);
1497 -@@ -1591,6 +1746,229 @@ out:
1498 +@@ -1591,6 +1746,251 @@ out:
1499 return ispipe;
1500 }
1501
1502 @@ -69535,7 +69720,7 @@ index 86fafc6..d54d849 100644
1503 +
1504 +#ifdef CONFIG_PAX_USERCOPY
1505 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
1506 -+int object_is_on_stack(const void *obj, unsigned long len)
1507 ++static noinline int check_stack_object(const void *obj, unsigned long len)
1508 +{
1509 + const void * const stack = task_stack_page(current);
1510 + const void * const stackend = stack + THREAD_SIZE;
1511 @@ -69581,7 +69766,7 @@ index 86fafc6..d54d849 100644
1512 +#endif
1513 +}
1514 +
1515 -+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
1516 ++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
1517 +{
1518 + if (current->signal->curr_ip)
1519 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
1520 @@ -69596,6 +69781,28 @@ index 86fafc6..d54d849 100644
1521 +}
1522 +#endif
1523 +
1524 ++void check_object_size(const void *ptr, unsigned long n, bool to)
1525 ++{
1526 ++
1527 ++#ifdef CONFIG_PAX_USERCOPY
1528 ++ const char *type;
1529 ++
1530 ++ if (!n)
1531 ++ return;
1532 ++
1533 ++ type = check_heap_object(ptr, n, to);
1534 ++ if (!type) {
1535 ++ if (check_stack_object(ptr, n) != -1)
1536 ++ return;
1537 ++ type = "<process stack>";
1538 ++ }
1539 ++
1540 ++ pax_report_usercopy(ptr, n, to, type);
1541 ++#endif
1542 ++
1543 ++}
1544 ++EXPORT_SYMBOL(check_object_size);
1545 ++
1546 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
1547 +void pax_track_stack(void)
1548 +{
1549 @@ -69620,7 +69827,7 @@ index 86fafc6..d54d849 100644
1550 static int zap_process(struct task_struct *start)
1551 {
1552 struct task_struct *t;
1553 -@@ -1793,17 +2171,17 @@ static void wait_for_dump_helpers(struct file *file)
1554 +@@ -1793,17 +2193,17 @@ static void wait_for_dump_helpers(struct file *file)
1555 pipe = file->f_path.dentry->d_inode->i_pipe;
1556
1557 pipe_lock(pipe);
1558 @@ -69643,7 +69850,7 @@ index 86fafc6..d54d849 100644
1559 pipe_unlock(pipe);
1560
1561 }
1562 -@@ -1826,10 +2204,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1563 +@@ -1826,10 +2226,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1564 char **helper_argv = NULL;
1565 int helper_argc = 0;
1566 int dump_count = 0;
1567 @@ -69658,7 +69865,7 @@ index 86fafc6..d54d849 100644
1568 binfmt = mm->binfmt;
1569 if (!binfmt || !binfmt->core_dump)
1570 goto fail;
1571 -@@ -1874,6 +2255,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1572 +@@ -1874,6 +2277,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1573 */
1574 clear_thread_flag(TIF_SIGPENDING);
1575
1576 @@ -69667,7 +69874,7 @@ index 86fafc6..d54d849 100644
1577 /*
1578 * lock_kernel() because format_corename() is controlled by sysctl, which
1579 * uses lock_kernel()
1580 -@@ -1908,7 +2291,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1581 +@@ -1908,7 +2313,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1582 goto fail_unlock;
1583 }
1584
1585 @@ -69676,7 +69883,7 @@ index 86fafc6..d54d849 100644
1586 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
1587 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
1588 task_tgid_vnr(current), current->comm);
1589 -@@ -1972,7 +2355,7 @@ close_fail:
1590 +@@ -1972,7 +2377,7 @@ close_fail:
1591 filp_close(file, NULL);
1592 fail_dropcount:
1593 if (dump_count)
1594 @@ -86059,7 +86266,7 @@ index f4906f6..71feb73 100644
1595 {
1596 return -ENODEV;
1597 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
1598 -index b7babf0..a9ac9fc 100644
1599 +index b7babf0..1df7140 100644
1600 --- a/include/asm-generic/atomic-long.h
1601 +++ b/include/asm-generic/atomic-long.h
1602 @@ -22,6 +22,12 @@
1603 @@ -86280,7 +86487,7 @@ index b7babf0..a9ac9fc 100644
1604 static inline long atomic_long_dec_return(atomic_long_t *l)
1605 {
1606 atomic_t *v = (atomic_t *)l;
1607 -@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
1608 +@@ -255,4 +375,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
1609
1610 #endif /* BITS_PER_LONG == 64 */
1611
1612 @@ -86298,6 +86505,10 @@ index b7babf0..a9ac9fc 100644
1613 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
1614 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
1615 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
1616 ++#ifdef CONFIG_X86
1617 ++ atomic_clear_mask_unchecked(0, NULL);
1618 ++ atomic_set_mask_unchecked(0, NULL);
1619 ++#endif
1620 +
1621 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
1622 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
1623 @@ -86318,6 +86529,8 @@ index b7babf0..a9ac9fc 100644
1624 +#define atomic_dec_unchecked(v) atomic_dec(v)
1625 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
1626 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
1627 ++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
1628 ++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
1629 +
1630 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
1631 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
1632 @@ -86328,6 +86541,19 @@ index b7babf0..a9ac9fc 100644
1633 +#endif
1634 +
1635 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
1636 +diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
1637 +index c99c64d..f173e40 100644
1638 +--- a/include/asm-generic/atomic.h
1639 ++++ b/include/asm-generic/atomic.h
1640 +@@ -134,7 +134,7 @@ static inline void atomic_dec(atomic_t *v)
1641 +
1642 + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
1643 +
1644 +-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1645 ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
1646 + {
1647 + unsigned long flags;
1648 +
1649 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
1650 index b18ce4f..2ee2843 100644
1651 --- a/include/asm-generic/atomic64.h
1652 @@ -87030,20 +87256,25 @@ index c8f2a5f7..1618a5c 100644
1653 /* audit system wants to get cap info from files as well */
1654 struct dentry;
1655 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
1656 -index 450fa59..7c875cb 100644
1657 +index 450fa59..b658078 100644
1658 --- a/include/linux/compiler-gcc4.h
1659 +++ b/include/linux/compiler-gcc4.h
1660 -@@ -14,6 +14,9 @@
1661 +@@ -14,6 +14,14 @@
1662 #define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
1663 #define __always_inline inline __attribute__((always_inline))
1664
1665 +#ifdef SIZE_OVERFLOW_PLUGIN
1666 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
1667 +#endif
1668 ++
1669 ++#ifdef LATENT_ENTROPY_PLUGIN
1670 ++#define __latent_entropy __attribute__((latent_entropy))
1671 ++#endif
1672 ++
1673 /*
1674 * A trick to suppress uninitialized variable warning without generating any
1675 * code
1676 -@@ -36,4 +39,23 @@
1677 +@@ -36,4 +44,23 @@
1678 the kernel context */
1679 #define __cold __attribute__((__cold__))
1680
1681 @@ -87068,7 +87299,7 @@ index 450fa59..7c875cb 100644
1682 +#define __compiletime_error(message) __attribute__((error(message)))
1683 #endif
1684 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
1685 -index 04fb513..edaeada 100644
1686 +index 04fb513..7ab44ac 100644
1687 --- a/include/linux/compiler.h
1688 +++ b/include/linux/compiler.h
1689 @@ -5,11 +5,14 @@
1690 @@ -87121,7 +87352,7 @@ index 04fb513..edaeada 100644
1691 # define __chk_user_ptr(x) (void)0
1692 # define __chk_io_ptr(x) (void)0
1693 # define __builtin_warning(x, y...) (1)
1694 -@@ -247,6 +271,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1695 +@@ -247,6 +271,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1696 # define __attribute_const__ /* unimplemented */
1697 #endif
1698
1699 @@ -87136,10 +87367,15 @@ index 04fb513..edaeada 100644
1700 +#ifndef __size_overflow
1701 +# define __size_overflow(...)
1702 +#endif
1703 ++
1704 ++#ifndef __latent_entropy
1705 ++# define __latent_entropy
1706 ++#endif
1707 ++
1708 /*
1709 * Tell gcc if a function is cold. The compiler will assume any path
1710 * directly leading to the call is unlikely.
1711 -@@ -256,6 +291,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1712 +@@ -256,6 +296,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1713 #define __cold
1714 #endif
1715
1716 @@ -87162,7 +87398,7 @@ index 04fb513..edaeada 100644
1717 /* Simple shorthand for a section definition */
1718 #ifndef __section
1719 # define __section(S) __attribute__ ((__section__(#S)))
1720 -@@ -266,6 +317,19 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1721 +@@ -266,6 +322,19 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1722 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
1723 #endif
1724
1725 @@ -87182,7 +87418,7 @@ index 04fb513..edaeada 100644
1726 /*
1727 * Prevent the compiler from merging or refetching accesses. The compiler
1728 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
1729 -@@ -278,6 +342,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1730 +@@ -278,6 +347,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
1731 * use is to mediate communication between process-level code and irq/NMI
1732 * handlers, all running on the same CPU.
1733 */
1734 @@ -87662,6 +87898,41 @@ index 297df45..b6a74ff 100644
1735 struct work_struct async_notify;
1736 #ifdef CONFIG_BLK_DEV_INTEGRITY
1737 struct blk_integrity *integrity;
1738 +diff --git a/include/linux/gfp.h b/include/linux/gfp.h
1739 +index 557bdad..b5e8c98 100644
1740 +--- a/include/linux/gfp.h
1741 ++++ b/include/linux/gfp.h
1742 +@@ -53,6 +53,12 @@ struct vm_area_struct;
1743 + #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
1744 + #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
1745 +
1746 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
1747 ++#define __GFP_USERCOPY ((__force gfp_t)0x1000000u)
1748 ++#else
1749 ++#define __GFP_USERCOPY ((__force gfp_t)0)
1750 ++#endif
1751 ++
1752 + #ifdef CONFIG_KMEMCHECK
1753 + #define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
1754 + #else
1755 +@@ -65,7 +71,7 @@ struct vm_area_struct;
1756 + */
1757 + #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
1758 +
1759 +-#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
1760 ++#define __GFP_BITS_SHIFT 26 /* Room for 26 __GFP_FOO bits */
1761 + #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
1762 +
1763 + /* This equals 0, but use constants in case they ever change */
1764 +@@ -115,6 +121,8 @@ struct vm_area_struct;
1765 + /* 4GB DMA on some platforms */
1766 + #define GFP_DMA32 __GFP_DMA32
1767 +
1768 ++#define GFP_USERCOPY __GFP_USERCOPY
1769 ++
1770 + /* Convert GFP flags to their corresponding migrate type */
1771 + static inline int allocflags_to_migratetype(gfp_t gfp_flags)
1772 + {
1773 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
1774 new file mode 100644
1775 index 0000000..fc80ba3
1776 @@ -88779,6 +89050,54 @@ index 4c4e57d..f3c5303 100644
1777 struct list_head context_list; /* list of context id's
1778 and pointers */
1779 #endif
1780 +diff --git a/include/linux/init.h b/include/linux/init.h
1781 +index ff8bde5..0296174 100644
1782 +--- a/include/linux/init.h
1783 ++++ b/include/linux/init.h
1784 +@@ -38,9 +38,15 @@
1785 + * Also note, that this data cannot be "const".
1786 + */
1787 +
1788 ++#ifdef MODULE
1789 ++#define add_latent_entropy
1790 ++#else
1791 ++#define add_latent_entropy __latent_entropy
1792 ++#endif
1793 ++
1794 + /* These are for everybody (although not all archs will actually
1795 + discard it in modules) */
1796 +-#define __init __section(.init.text) __cold notrace
1797 ++#define __init __section(.init.text) __cold notrace add_latent_entropy
1798 + #define __initdata __section(.init.data)
1799 + #define __initconst __section(.init.rodata)
1800 + #define __exitdata __section(.exit.data)
1801 +@@ -75,7 +81,7 @@
1802 + #define __exit __section(.exit.text) __exitused __cold
1803 +
1804 + /* Used for HOTPLUG */
1805 +-#define __devinit __section(.devinit.text) __cold
1806 ++#define __devinit __section(.devinit.text) __cold add_latent_entropy
1807 + #define __devinitdata __section(.devinit.data)
1808 + #define __devinitconst __section(.devinit.rodata)
1809 + #define __devexit __section(.devexit.text) __exitused __cold
1810 +@@ -83,7 +89,7 @@
1811 + #define __devexitconst __section(.devexit.rodata)
1812 +
1813 + /* Used for HOTPLUG_CPU */
1814 +-#define __cpuinit __section(.cpuinit.text) __cold
1815 ++#define __cpuinit __section(.cpuinit.text) __cold add_latent_entropy
1816 + #define __cpuinitdata __section(.cpuinit.data)
1817 + #define __cpuinitconst __section(.cpuinit.rodata)
1818 + #define __cpuexit __section(.cpuexit.text) __exitused __cold
1819 +@@ -91,7 +97,7 @@
1820 + #define __cpuexitconst __section(.cpuexit.rodata)
1821 +
1822 + /* Used for MEMORY_HOTPLUG */
1823 +-#define __meminit __section(.meminit.text) __cold
1824 ++#define __meminit __section(.meminit.text) __cold add_latent_entropy
1825 + #define __meminitdata __section(.meminit.data)
1826 + #define __meminitconst __section(.meminit.rodata)
1827 + #define __memexit __section(.memexit.text) __exitused __cold
1828 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
1829 index 21a6f5d..7c7d19f 100644
1830 --- a/include/linux/init_task.h
1831 @@ -89837,10 +90156,10 @@ index 7456d7d..6c1cfc9 100644
1832 static inline int ptrace_reparented(struct task_struct *child)
1833 {
1834 diff --git a/include/linux/random.h b/include/linux/random.h
1835 -index 2948046..6fe7065 100644
1836 +index 2948046..16bad29 100644
1837 --- a/include/linux/random.h
1838 +++ b/include/linux/random.h
1839 -@@ -46,9 +46,10 @@ struct rand_pool_info {
1840 +@@ -46,9 +46,14 @@ struct rand_pool_info {
1841
1842 extern void rand_initialize_irq(int irq);
1843
1844 @@ -89849,10 +90168,14 @@ index 2948046..6fe7065 100644
1845 unsigned int value);
1846 -extern void add_interrupt_randomness(int irq);
1847 +extern void add_interrupt_randomness(int irq, int irq_flags);
1848 ++
1849 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
1850 ++extern void transfer_latent_entropy(void);
1851 ++#endif
1852
1853 extern void get_random_bytes(void *buf, int nbytes);
1854 void generate_random_uuid(unsigned char uuid_out[16]);
1855 -@@ -63,6 +64,24 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
1856 +@@ -63,6 +68,24 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
1857 u32 random32(void);
1858 void srandom32(u32 seed);
1859
1860 @@ -90005,7 +90328,7 @@ index 3392c59..a746428 100644
1861 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1862 /**
1863 diff --git a/include/linux/sched.h b/include/linux/sched.h
1864 -index 71849bf..90ac063 100644
1865 +index 71849bf..903514a 100644
1866 --- a/include/linux/sched.h
1867 +++ b/include/linux/sched.h
1868 @@ -101,6 +101,7 @@ struct bio;
1869 @@ -90208,7 +90531,7 @@ index 71849bf..90ac063 100644
1870 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
1871 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
1872 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
1873 -+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
1874 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
1875 +
1876 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
1877 +extern void pax_track_stack(void);
1878 @@ -90255,7 +90578,7 @@ index 71849bf..90ac063 100644
1879
1880 extern void daemonize(const char *, ...);
1881 extern int allow_signal(int);
1882 -@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
1883 +@@ -2284,9 +2384,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
1884
1885 #endif
1886
1887 @@ -90267,15 +90590,7 @@ index 71849bf..90ac063 100644
1888
1889 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
1890 }
1891 -
1892 -+#ifdef CONFIG_PAX_USERCOPY
1893 -+extern int object_is_on_stack(const void *obj, unsigned long len);
1894 -+#endif
1895 -+
1896 - extern void thread_info_cache_init(void);
1897 -
1898 - #ifdef CONFIG_DEBUG_STACK_USAGE
1899 -@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
1900 +@@ -2616,6 +2716,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
1901 return task_rlimit_max(current, limit);
1902 }
1903
1904 @@ -90481,7 +90796,7 @@ index bcdd660..fd2e332 100644
1905
1906 /**
1907 diff --git a/include/linux/slab.h b/include/linux/slab.h
1908 -index 2da8372..96b37db 100644
1909 +index 2da8372..a462292 100644
1910 --- a/include/linux/slab.h
1911 +++ b/include/linux/slab.h
1912 @@ -11,12 +11,20 @@
1913 @@ -90496,7 +90811,7 @@ index 2da8372..96b37db 100644
1914 */
1915 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
1916 +
1917 -+#ifdef CONFIG_PAX_USERCOPY
1918 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
1919 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
1920 +#else
1921 +#define SLAB_USERCOPY 0x00000000UL
1922 @@ -90522,15 +90837,16 @@ index 2da8372..96b37db 100644
1923
1924 /*
1925 * struct kmem_cache related prototypes
1926 -@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
1927 +@@ -138,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
1928 void kfree(const void *);
1929 void kzfree(const void *);
1930 size_t ksize(const void *);
1931 -+void check_object_size(const void *ptr, unsigned long n, bool to);
1932 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to);
1933 ++bool is_usercopy_object(const void *ptr);
1934
1935 /*
1936 * Allocator specific definitions. These are mainly used to establish optimized
1937 -@@ -263,7 +275,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
1938 +@@ -263,7 +276,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
1939 * request comes from.
1940 */
1941 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
1942 @@ -90539,7 +90855,7 @@ index 2da8372..96b37db 100644
1943 #define kmalloc_track_caller(size, flags) \
1944 __kmalloc_track_caller(size, flags, _RET_IP_)
1945 #else
1946 -@@ -281,7 +293,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
1947 +@@ -281,7 +294,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
1948 * allocation request comes from.
1949 */
1950 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
1951 @@ -90549,7 +90865,7 @@ index 2da8372..96b37db 100644
1952 __kmalloc_node_track_caller(size, flags, node, \
1953 _RET_IP_)
1954 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
1955 -index 850d057..6de7888 100644
1956 +index 850d057..aa58075 100644
1957 --- a/include/linux/slab_def.h
1958 +++ b/include/linux/slab_def.h
1959 @@ -69,10 +69,10 @@ struct kmem_cache {
1960 @@ -90567,7 +90883,16 @@ index 850d057..6de7888 100644
1961
1962 /*
1963 * If debugging is enabled, then the allocator can add additional
1964 -@@ -108,7 +108,7 @@ struct cache_sizes {
1965 +@@ -104,11 +104,16 @@ struct cache_sizes {
1966 + #ifdef CONFIG_ZONE_DMA
1967 + struct kmem_cache *cs_dmacachep;
1968 + #endif
1969 ++
1970 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
1971 ++ struct kmem_cache *cs_usercopycachep;
1972 ++#endif
1973 ++
1974 + };
1975 extern struct cache_sizes malloc_sizes[];
1976
1977 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
1978 @@ -90576,7 +90901,21 @@ index 850d057..6de7888 100644
1979
1980 #ifdef CONFIG_KMEMTRACE
1981 extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
1982 -@@ -163,7 +163,7 @@ found:
1983 +@@ -150,6 +155,13 @@ found:
1984 + cachep = malloc_sizes[i].cs_dmacachep;
1985 + else
1986 + #endif
1987 ++
1988 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
1989 ++ if (flags & GFP_USERCOPY)
1990 ++ cachep = malloc_sizes[i].cs_usercopycachep;
1991 ++ else
1992 ++#endif
1993 ++
1994 + cachep = malloc_sizes[i].cs_cachep;
1995 +
1996 + ret = kmem_cache_alloc_notrace(cachep, flags);
1997 +@@ -163,7 +175,7 @@ found:
1998 }
1999
2000 #ifdef CONFIG_NUMA
2001 @@ -90585,6 +90924,20 @@ index 850d057..6de7888 100644
2002 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
2003
2004 #ifdef CONFIG_KMEMTRACE
2005 +@@ -205,6 +217,13 @@ found:
2006 + cachep = malloc_sizes[i].cs_dmacachep;
2007 + else
2008 + #endif
2009 ++
2010 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2011 ++ if (flags & GFP_USERCOPY)
2012 ++ cachep = malloc_sizes[i].cs_usercopycachep;
2013 ++ else
2014 ++#endif
2015 ++
2016 + cachep = malloc_sizes[i].cs_cachep;
2017 +
2018 + ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
2019 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
2020 index 0ec00b3..39cb7fc 100644
2021 --- a/include/linux/slob_def.h
2022 @@ -92254,7 +92607,7 @@ index 1fd59b8..a01b079 100644
2023 next_state = Reset;
2024 return 0;
2025 diff --git a/init/main.c b/init/main.c
2026 -index 1eb4bd5..fea5bbe 100644
2027 +index 1eb4bd5..7bc6316 100644
2028 --- a/init/main.c
2029 +++ b/init/main.c
2030 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
2031 @@ -92385,7 +92738,40 @@ index 1eb4bd5..fea5bbe 100644
2032 }
2033
2034
2035 -@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
2036 +@@ -760,9 +805,15 @@ static void __init do_initcalls(void)
2037 + {
2038 + initcall_t *call;
2039 +
2040 +- for (call = __early_initcall_end; call < __initcall_end; call++)
2041 ++ for (call = __early_initcall_end; call < __initcall_end; call++) {
2042 + do_one_initcall(*call);
2043 +
2044 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
2045 ++ transfer_latent_entropy();
2046 ++#endif
2047 ++
2048 ++ }
2049 ++
2050 + /* Make sure there is no pending stuff from the initcall sequence */
2051 + flush_scheduled_work();
2052 + }
2053 +@@ -790,8 +841,14 @@ static void __init do_pre_smp_initcalls(void)
2054 + {
2055 + initcall_t *call;
2056 +
2057 +- for (call = __initcall_start; call < __early_initcall_end; call++)
2058 ++ for (call = __initcall_start; call < __early_initcall_end; call++) {
2059 + do_one_initcall(*call);
2060 ++
2061 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
2062 ++ transfer_latent_entropy();
2063 ++#endif
2064 ++
2065 ++ }
2066 + }
2067 +
2068 + static void run_init_process(char *init_filename)
2069 +@@ -893,11 +950,13 @@ static int __init kernel_init(void * unused)
2070 if (!ramdisk_execute_command)
2071 ramdisk_execute_command = "/init";
2072
2073 @@ -95662,7 +96048,7 @@ index 40dd021..fb30ceb 100644
2074 mutex_lock(&pm_mutex);
2075 suspend_ops = ops;
2076 diff --git a/kernel/printk.c b/kernel/printk.c
2077 -index 4cade47..4d17900 100644
2078 +index 4cade47..4ddd097 100644
2079 --- a/kernel/printk.c
2080 +++ b/kernel/printk.c
2081 @@ -33,6 +33,7 @@
2082 @@ -95793,6 +96179,36 @@ index 4cade47..4d17900 100644
2083 }
2084
2085 /*
2086 +@@ -1153,6 +1154,18 @@ void console_start(struct console *console)
2087 + }
2088 + EXPORT_SYMBOL(console_start);
2089 +
2090 ++static int __read_mostly keep_bootcon = 0;
2091 ++
2092 ++static int __init keep_bootcon_setup(char *str)
2093 ++{
2094 ++ keep_bootcon = 1;
2095 ++ printk(KERN_INFO "debug: skip boot console de-registration.\n");
2096 ++
2097 ++ return 0;
2098 ++}
2099 ++
2100 ++early_param("keep_bootcon", keep_bootcon_setup);
2101 ++
2102 + /*
2103 + * The console driver calls this routine during kernel initialization
2104 + * to register the console printing procedure with printk() and to
2105 +@@ -1299,7 +1312,9 @@ void register_console(struct console *newcon)
2106 + * users know there might be something in the kernel's log buffer that
2107 + * went to the bootconsole (that they do not see on the real console)
2108 + */
2109 +- if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
2110 ++ if (bcon &&
2111 ++ ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
2112 ++ !keep_bootcon) {
2113 + /* we need to iterate through twice, to make sure we print
2114 + * everything out, before we unregister the console(s)
2115 + */
2116 diff --git a/kernel/profile.c b/kernel/profile.c
2117 index dfadc5b..7f59404 100644
2118 --- a/kernel/profile.c
2119 @@ -99244,7 +99660,7 @@ index aaca868..2ebecdc 100644
2120 err = -EPERM;
2121 goto out;
2122 diff --git a/mm/mlock.c b/mm/mlock.c
2123 -index 2d846cf..98134d2 100644
2124 +index 2d846cf..8d5cdd8 100644
2125 --- a/mm/mlock.c
2126 +++ b/mm/mlock.c
2127 @@ -13,6 +13,7 @@
2128 @@ -99287,7 +99703,7 @@ index 2d846cf..98134d2 100644
2129 unsigned long nstart, end, tmp;
2130 struct vm_area_struct * vma, * prev;
2131 - int error;
2132 -+ int error = -EINVAL;
2133 ++ int error = 0;
2134
2135 len = PAGE_ALIGN(len);
2136 end = start + len;
2137 @@ -101176,7 +101592,7 @@ index 3e0005b..1d659a8 100644
2138 return -ENOMEM;
2139
2140 diff --git a/mm/slab.c b/mm/slab.c
2141 -index c8d466a..909e01e 100644
2142 +index c8d466a..60546da 100644
2143 --- a/mm/slab.c
2144 +++ b/mm/slab.c
2145 @@ -174,7 +174,7 @@
2146 @@ -101230,7 +101646,33 @@ index c8d466a..909e01e 100644
2147 {
2148 u32 offset = (obj - slab->s_mem);
2149 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
2150 -@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
2151 +@@ -579,10 +579,11 @@ EXPORT_SYMBOL(malloc_sizes);
2152 + struct cache_names {
2153 + char *name;
2154 + char *name_dma;
2155 ++ char *name_usercopy;
2156 + };
2157 +
2158 + static struct cache_names __initdata cache_names[] = {
2159 +-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
2160 ++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
2161 + #include <linux/kmalloc_sizes.h>
2162 + {NULL,}
2163 + #undef CACHE
2164 +@@ -719,6 +720,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
2165 + if (unlikely(gfpflags & GFP_DMA))
2166 + return csizep->cs_dmacachep;
2167 + #endif
2168 ++
2169 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2170 ++ if (unlikely(gfpflags & GFP_USERCOPY))
2171 ++ return csizep->cs_usercopycachep;
2172 ++#endif
2173 ++
2174 + return csizep->cs_cachep;
2175 + }
2176 +
2177 +@@ -1453,7 +1460,7 @@ void __init kmem_cache_init(void)
2178 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
2179 sizes[INDEX_AC].cs_size,
2180 ARCH_KMALLOC_MINALIGN,
2181 @@ -101239,7 +101681,7 @@ index c8d466a..909e01e 100644
2182 NULL);
2183
2184 if (INDEX_AC != INDEX_L3) {
2185 -@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
2186 +@@ -1461,7 +1468,7 @@ void __init kmem_cache_init(void)
2187 kmem_cache_create(names[INDEX_L3].name,
2188 sizes[INDEX_L3].cs_size,
2189 ARCH_KMALLOC_MINALIGN,
2190 @@ -101248,7 +101690,7 @@ index c8d466a..909e01e 100644
2191 NULL);
2192 }
2193
2194 -@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
2195 +@@ -1479,7 +1486,7 @@ void __init kmem_cache_init(void)
2196 sizes->cs_cachep = kmem_cache_create(names->name,
2197 sizes->cs_size,
2198 ARCH_KMALLOC_MINALIGN,
2199 @@ -101257,7 +101699,24 @@ index c8d466a..909e01e 100644
2200 NULL);
2201 }
2202 #ifdef CONFIG_ZONE_DMA
2203 -@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
2204 +@@ -1491,6 +1498,16 @@ void __init kmem_cache_init(void)
2205 + SLAB_PANIC,
2206 + NULL);
2207 + #endif
2208 ++
2209 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2210 ++ sizes->cs_usercopycachep = kmem_cache_create(
2211 ++ names->name_usercopy,
2212 ++ sizes->cs_size,
2213 ++ ARCH_KMALLOC_MINALIGN,
2214 ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
2215 ++ NULL);
2216 ++#endif
2217 ++
2218 + sizes++;
2219 + names++;
2220 + }
2221 +@@ -4211,10 +4228,10 @@ static int s_show(struct seq_file *m, void *p)
2222 }
2223 /* cpu stats */
2224 {
2225 @@ -101272,7 +101731,7 @@ index c8d466a..909e01e 100644
2226
2227 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
2228 allochit, allocmiss, freehit, freemiss);
2229 -@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
2230 +@@ -4471,15 +4488,76 @@ static const struct file_operations proc_slabstats_operations = {
2231
2232 static int __init slab_proc_init(void)
2233 {
2234 @@ -101293,60 +101752,66 @@ index c8d466a..909e01e 100644
2235 module_init(slab_proc_init);
2236 #endif
2237
2238 -+void check_object_size(const void *ptr, unsigned long n, bool to)
2239 ++bool is_usercopy_object(const void *ptr)
2240 +{
2241 ++ struct page *page;
2242 ++ struct kmem_cache *cachep;
2243 ++
2244 ++ if (ZERO_OR_NULL_PTR(ptr))
2245 ++ return false;
2246 ++
2247 ++ if (!virt_addr_valid(ptr))
2248 ++ return false;
2249 ++
2250 ++ page = virt_to_head_page(ptr);
2251 ++
2252 ++ if (!PageSlab(page))
2253 ++ return false;
2254 ++
2255 ++ cachep = page_get_cache(page);
2256 ++ return cachep->flags & SLAB_USERCOPY;
2257 ++}
2258 +
2259 +#ifdef CONFIG_PAX_USERCOPY
2260 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
2261 ++{
2262 + struct page *page;
2263 -+ struct kmem_cache *cachep = NULL;
2264 ++ struct kmem_cache *cachep;
2265 + struct slab *slabp;
2266 + unsigned int objnr;
2267 + unsigned long offset;
2268 -+ const char *type;
2269 -+
2270 -+ if (!n)
2271 -+ return;
2272 +
2273 -+ type = "<null>";
2274 + if (ZERO_OR_NULL_PTR(ptr))
2275 -+ goto report;
2276 ++ return "<null>";
2277 +
2278 + if (!virt_addr_valid(ptr))
2279 -+ return;
2280 ++ return NULL;
2281 +
2282 + page = virt_to_head_page(ptr);
2283 +
2284 -+ type = "<process stack>";
2285 -+ if (!PageSlab(page)) {
2286 -+ if (object_is_on_stack(ptr, n) == -1)
2287 -+ goto report;
2288 -+ return;
2289 -+ }
2290 ++ if (!PageSlab(page))
2291 ++ return NULL;
2292 +
2293 + cachep = page_get_cache(page);
2294 -+ type = cachep->name;
2295 + if (!(cachep->flags & SLAB_USERCOPY))
2296 -+ goto report;
2297 ++ return cachep->name;
2298 +
2299 + slabp = page_get_slab(page);
2300 + objnr = obj_to_index(cachep, slabp, ptr);
2301 + BUG_ON(objnr >= cachep->num);
2302 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
2303 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
2304 -+ return;
2305 -+
2306 -+report:
2307 -+ pax_report_usercopy(ptr, n, to, type);
2308 -+#endif
2309 ++ return NULL;
2310 +
2311 ++ return cachep->name;
2312 +}
2313 -+EXPORT_SYMBOL(check_object_size);
2314 ++#endif
2315 +
2316 /**
2317 * ksize - get the actual amount of memory allocated for a given object
2318 * @objp: Pointer to the object
2319 diff --git a/mm/slob.c b/mm/slob.c
2320 -index 837ebd6..0bd23bc 100644
2321 +index 837ebd6..d24d63b 100644
2322 --- a/mm/slob.c
2323 +++ b/mm/slob.c
2324 @@ -29,7 +29,7 @@
2325 @@ -101497,7 +101962,7 @@ index 837ebd6..0bd23bc 100644
2326 return ret;
2327 }
2328 EXPORT_SYMBOL(__kmalloc_node);
2329 -@@ -528,13 +542,92 @@ void kfree(const void *block)
2330 +@@ -528,13 +542,83 @@ void kfree(const void *block)
2331 sp = slob_page(block);
2332 if (is_slob_page(sp)) {
2333 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
2334 @@ -101515,40 +101980,34 @@ index 837ebd6..0bd23bc 100644
2335 }
2336 EXPORT_SYMBOL(kfree);
2337
2338 -+void check_object_size(const void *ptr, unsigned long n, bool to)
2339 ++bool is_usercopy_object(const void *ptr)
2340 +{
2341 ++ return false;
2342 ++}
2343 +
2344 +#ifdef CONFIG_PAX_USERCOPY
2345 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
2346 ++{
2347 + struct slob_page *sp;
2348 + const slob_t *free;
2349 + const void *base;
2350 + unsigned long flags;
2351 -+ const char *type;
2352 +
2353 -+ if (!n)
2354 -+ return;
2355 -+
2356 -+ type = "<null>";
2357 + if (ZERO_OR_NULL_PTR(ptr))
2358 -+ goto report;
2359 ++ return "<null>";
2360 +
2361 + if (!virt_addr_valid(ptr))
2362 -+ return;
2363 ++ return NULL;
2364 +
2365 -+ type = "<process stack>";
2366 + sp = slob_page(ptr);
2367 -+ if (!PageSlab((struct page *)sp)) {
2368 -+ if (object_is_on_stack(ptr, n) == -1)
2369 -+ goto report;
2370 -+ return;
2371 -+ }
2372 ++ if (!PageSlab((struct page *)sp))
2373 ++ return NULL;
2374 +
2375 -+ type = "<slob>";
2376 + if (sp->size) {
2377 + base = page_address(&sp->page);
2378 + if (base <= ptr && n <= sp->size - (ptr - base))
2379 -+ return;
2380 -+ goto report;
2381 ++ return NULL;
2382 ++ return "<slob>";
2383 + }
2384 +
2385 + /* some tricky double walking to find the chunk */
2386 @@ -101579,21 +102038,18 @@ index 837ebd6..0bd23bc 100644
2387 + break;
2388 +
2389 + spin_unlock_irqrestore(&slob_lock, flags);
2390 -+ return;
2391 ++ return NULL;
2392 + }
2393 +
2394 + spin_unlock_irqrestore(&slob_lock, flags);
2395 -+report:
2396 -+ pax_report_usercopy(ptr, n, to, type);
2397 -+#endif
2398 -+
2399 ++ return "<slob>";
2400 +}
2401 -+EXPORT_SYMBOL(check_object_size);
2402 ++#endif
2403 +
2404 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
2405 size_t ksize(const void *block)
2406 {
2407 -@@ -547,10 +640,10 @@ size_t ksize(const void *block)
2408 +@@ -547,10 +631,10 @@ size_t ksize(const void *block)
2409 sp = slob_page(block);
2410 if (is_slob_page(sp)) {
2411 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
2412 @@ -101607,11 +102063,11 @@ index 837ebd6..0bd23bc 100644
2413 }
2414 EXPORT_SYMBOL(ksize);
2415
2416 -@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2417 +@@ -566,8 +650,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2418 {
2419 struct kmem_cache *c;
2420
2421 -+#ifdef CONFIG_PAX_USERCOPY
2422 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2423 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
2424 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
2425 +#else
2426 @@ -101621,11 +102077,11 @@ index 837ebd6..0bd23bc 100644
2427
2428 if (c) {
2429 c->name = name;
2430 -@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
2431 +@@ -605,17 +694,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
2432 {
2433 void *b;
2434
2435 -+#ifdef CONFIG_PAX_USERCOPY
2436 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2437 + b = __kmalloc_node_align(c->size, flags, node, c->align);
2438 +#else
2439 if (c->size < PAGE_SIZE) {
2440 @@ -101647,7 +102103,7 @@ index 837ebd6..0bd23bc 100644
2441
2442 if (c->ctor)
2443 c->ctor(b);
2444 -@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
2445 +@@ -627,10 +724,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
2446
2447 static void __kmem_cache_free(void *b, int size)
2448 {
2449 @@ -101666,13 +102122,13 @@ index 837ebd6..0bd23bc 100644
2450 }
2451
2452 static void kmem_rcu_free(struct rcu_head *head)
2453 -@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
2454 +@@ -643,18 +746,32 @@ static void kmem_rcu_free(struct rcu_head *head)
2455
2456 void kmem_cache_free(struct kmem_cache *c, void *b)
2457 {
2458 + int size = c->size;
2459 +
2460 -+#ifdef CONFIG_PAX_USERCOPY
2461 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2462 + if (size + c->align < PAGE_SIZE) {
2463 + size += c->align;
2464 + b -= c->align;
2465 @@ -101693,7 +102149,7 @@ index 837ebd6..0bd23bc 100644
2466 + __kmem_cache_free(b, size);
2467 }
2468
2469 -+#ifdef CONFIG_PAX_USERCOPY
2470 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2471 + trace_kfree(_RET_IP_, b);
2472 +#else
2473 trace_kmem_cache_free(_RET_IP_, b);
2474 @@ -101703,7 +102159,7 @@ index 837ebd6..0bd23bc 100644
2475 EXPORT_SYMBOL(kmem_cache_free);
2476
2477 diff --git a/mm/slub.c b/mm/slub.c
2478 -index 4996fc7..87e01d0 100644
2479 +index 4996fc7..38850dd 100644
2480 --- a/mm/slub.c
2481 +++ b/mm/slub.c
2482 @@ -201,7 +201,7 @@ struct track {
2483 @@ -101776,58 +102232,89 @@ index 4996fc7..87e01d0 100644
2484
2485 /*
2486 * This function is called with IRQs disabled during early-boot on
2487 -@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
2488 +@@ -2792,6 +2791,10 @@ out:
2489 + }
2490 + #endif
2491 +
2492 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2493 ++static struct kmem_cache kmalloc_caches_usercopy[SLUB_PAGE_SHIFT];
2494 ++#endif
2495 ++
2496 + /*
2497 + * Conversion table for small slabs sizes / 8 to the index in the
2498 + * kmalloc array. This is necessary for slabs < 192 since we have non power
2499 +@@ -2847,6 +2850,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2500 + return dma_kmalloc_cache(index, flags);
2501 +
2502 + #endif
2503 ++
2504 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2505 ++ if (flags & SLAB_USERCOPY)
2506 ++ return &kmalloc_caches_usercopy[index];
2507 ++
2508 ++#endif
2509 ++
2510 + return &kmalloc_caches[index];
2511 + }
2512 +
2513 +@@ -2915,6 +2925,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
2514 EXPORT_SYMBOL(__kmalloc_node);
2515 #endif
2516
2517 -+void check_object_size(const void *ptr, unsigned long n, bool to)
2518 ++bool is_usercopy_object(const void *ptr)
2519 +{
2520 ++ struct page *page;
2521 ++ struct kmem_cache *s;
2522 ++
2523 ++ if (ZERO_OR_NULL_PTR(ptr))
2524 ++ return false;
2525 ++
2526 ++ if (!virt_addr_valid(ptr))
2527 ++ return false;
2528 ++
2529 ++ page = virt_to_head_page(ptr);
2530 ++
2531 ++ if (!PageSlab(page))
2532 ++ return false;
2533 ++
2534 ++ s = page->slab;
2535 ++ return s->flags & SLAB_USERCOPY;
2536 ++}
2537 +
2538 +#ifdef CONFIG_PAX_USERCOPY
2539 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
2540 ++{
2541 + struct page *page;
2542 -+ struct kmem_cache *s = NULL;
2543 ++ struct kmem_cache *s;
2544 + unsigned long offset;
2545 -+ const char *type;
2546 +
2547 -+ if (!n)
2548 -+ return;
2549 -+
2550 -+ type = "<null>";
2551 + if (ZERO_OR_NULL_PTR(ptr))
2552 -+ goto report;
2553 ++ return "<null>";
2554 +
2555 + if (!virt_addr_valid(ptr))
2556 -+ return;
2557 ++ return NULL;
2558 +
2559 + page = get_object_page(ptr);
2560 +
2561 -+ type = "<process stack>";
2562 -+ if (!page) {
2563 -+ if (object_is_on_stack(ptr, n) == -1)
2564 -+ goto report;
2565 -+ return;
2566 -+ }
2567 ++ if (!page)
2568 ++ return NULL;
2569 +
2570 + s = page->slab;
2571 -+ type = s->name;
2572 + if (!(s->flags & SLAB_USERCOPY))
2573 -+ goto report;
2574 ++ return s->name;
2575 +
2576 + offset = (ptr - page_address(page)) % s->size;
2577 + if (offset <= s->objsize && n <= s->objsize - offset)
2578 -+ return;
2579 -+
2580 -+report:
2581 -+ pax_report_usercopy(ptr, n, to, type);
2582 -+#endif
2583 ++ return NULL;
2584 +
2585 ++ return s->name;
2586 +}
2587 -+EXPORT_SYMBOL(check_object_size);
2588 ++#endif
2589 +
2590 size_t ksize(const void *object)
2591 {
2592 struct page *page;
2593 -@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
2594 +@@ -3185,8 +3245,8 @@ void __init kmem_cache_init(void)
2595 * kmem_cache_open for slab_state == DOWN.
2596 */
2597 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2598 @@ -101838,7 +102325,7 @@ index 4996fc7..87e01d0 100644
2599 caches++;
2600
2601 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
2602 -@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
2603 +@@ -3198,18 +3258,18 @@ void __init kmem_cache_init(void)
2604 /* Caches that are not of the two-to-the-power-of size */
2605 if (KMALLOC_MIN_SIZE <= 32) {
2606 create_kmalloc_cache(&kmalloc_caches[1],
2607 @@ -101860,7 +102347,28 @@ index 4996fc7..87e01d0 100644
2608 caches++;
2609 }
2610
2611 -@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
2612 +@@ -3267,6 +3327,20 @@ void __init kmem_cache_init(void)
2613 + kmem_size = sizeof(struct kmem_cache);
2614 + #endif
2615 +
2616 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
2617 ++ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
2618 ++ struct kmem_cache *s = &kmalloc_caches[i];
2619 ++
2620 ++ if (s->size) {
2621 ++ char *name = kasprintf(GFP_NOWAIT, "kmalloc-usercopy-%d", s->objsize);
2622 ++
2623 ++ BUG_ON(!name);
2624 ++ create_kmalloc_cache(&kmalloc_caches_usercopy[i], name,
2625 ++ s->objsize, GFP_NOWAIT, SLAB_USERCOPY);
2626 ++ }
2627 ++ }
2628 ++#endif
2629 ++
2630 + printk(KERN_INFO
2631 + "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
2632 + " CPUs=%d, Nodes=%d\n",
2633 +@@ -3293,7 +3367,7 @@ static int slab_unmergeable(struct kmem_cache *s)
2634 /*
2635 * We may have set a slab to be unmergeable during bootstrap.
2636 */
2637 @@ -101869,7 +102377,7 @@ index 4996fc7..87e01d0 100644
2638 return 1;
2639
2640 return 0;
2641 -@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2642 +@@ -3353,7 +3427,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2643 if (s) {
2644 int cpu;
2645
2646 @@ -101878,7 +102386,7 @@ index 4996fc7..87e01d0 100644
2647 /*
2648 * Adjust the object sizes so that we clear
2649 * the complete object on kzalloc.
2650 -@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2651 +@@ -3372,7 +3446,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2652
2653 if (sysfs_slab_alias(s, name)) {
2654 down_write(&slub_lock);
2655 @@ -101887,7 +102395,7 @@ index 4996fc7..87e01d0 100644
2656 up_write(&slub_lock);
2657 goto err;
2658 }
2659 -@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
2660 +@@ -4101,7 +4175,7 @@ SLAB_ATTR_RO(ctor);
2661
2662 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
2663 {
2664 @@ -101896,7 +102404,7 @@ index 4996fc7..87e01d0 100644
2665 }
2666 SLAB_ATTR_RO(aliases);
2667
2668 -@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
2669 +@@ -4503,7 +4577,7 @@ static void kmem_cache_release(struct kobject *kobj)
2670 kfree(s);
2671 }
2672
2673 @@ -101905,7 +102413,7 @@ index 4996fc7..87e01d0 100644
2674 .show = slab_attr_show,
2675 .store = slab_attr_store,
2676 };
2677 -@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
2678 +@@ -4522,7 +4596,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
2679 return 0;
2680 }
2681
2682 @@ -101914,7 +102422,7 @@ index 4996fc7..87e01d0 100644
2683 .filter = uevent_filter,
2684 };
2685
2686 -@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
2687 +@@ -4564,6 +4638,7 @@ static char *create_unique_id(struct kmem_cache *s)
2688 return name;
2689 }
2690
2691 @@ -101922,7 +102430,7 @@ index 4996fc7..87e01d0 100644
2692 static int sysfs_slab_add(struct kmem_cache *s)
2693 {
2694 int err;
2695 -@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
2696 +@@ -4619,6 +4694,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
2697 kobject_del(&s->kobj);
2698 kobject_put(&s->kobj);
2699 }
2700 @@ -101930,7 +102438,7 @@ index 4996fc7..87e01d0 100644
2701
2702 /*
2703 * Need to buffer aliases during bootup until sysfs becomes
2704 -@@ -4632,6 +4677,7 @@ struct saved_alias {
2705 +@@ -4632,6 +4708,7 @@ struct saved_alias {
2706
2707 static struct saved_alias *alias_list;
2708
2709 @@ -101938,7 +102446,7 @@ index 4996fc7..87e01d0 100644
2710 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
2711 {
2712 struct saved_alias *al;
2713 -@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
2714 +@@ -4654,6 +4731,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
2715 alias_list = al;
2716 return 0;
2717 }
2718 @@ -101946,7 +102454,7 @@ index 4996fc7..87e01d0 100644
2719
2720 static int __init slab_sysfs_init(void)
2721 {
2722 -@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
2723 +@@ -4785,7 +4863,13 @@ static const struct file_operations proc_slabinfo_operations = {
2724
2725 static int __init slab_proc_init(void)
2726 {
2727 @@ -106452,7 +106960,7 @@ index 62a9025..65b82ad 100644
2728 sprintf(alias, "dmi*");
2729
2730 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
2731 -index 03efeab..f65608f 100644
2732 +index 03efeab..35e35ff 100644
2733 --- a/scripts/mod/modpost.c
2734 +++ b/scripts/mod/modpost.c
2735 @@ -764,7 +764,7 @@ static void check_section(const char *modname, struct elf_info *elf,
2736 @@ -106503,12 +107011,12 @@ index 03efeab..f65608f 100644
2737 "or drop the export.\n",
2738 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
2739 + case DATA_TO_TEXT:
2740 -+/*
2741 ++#if 0
2742 + fprintf(stderr,
2743 -+ "The variable %s references\n"
2744 -+ "the %s %s%s%s\n",
2745 -+ fromsym, to, sec2annotation(tosec), tosym, to_p);
2746 -+*/
2747 ++ "The %s %s:%s references\n"
2748 ++ "the %s %s:%s%s\n",
2749 ++ from, fromsec, fromsym, to, tosec, tosym, to_p);
2750 ++#endif
2751 + break;
2752 case NO_MISMATCH:
2753 /* To get warnings on missing members */
2754 @@ -106647,10 +107155,10 @@ index d52f7a0..b66cdd9 100755
2755 rm -f tags
2756 xtags ctags
2757 diff --git a/security/Kconfig b/security/Kconfig
2758 -index fb363cd..b6ce7c6 100644
2759 +index fb363cd..6426142 100644
2760 --- a/security/Kconfig
2761 +++ b/security/Kconfig
2762 -@@ -4,6 +4,855 @@
2763 +@@ -4,6 +4,869 @@
2764
2765 menu "Security options"
2766
2767 @@ -106675,6 +107183,9 @@ index fb363cd..b6ce7c6 100644
2768 + bool
2769 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
2770 +
2771 ++ config PAX_USERCOPY_SLABS
2772 ++ bool
2773 ++
2774 +config GRKERNSEC
2775 + bool "Grsecurity"
2776 + select CRYPTO
2777 @@ -106909,13 +107420,12 @@ index fb363cd..b6ce7c6 100644
2778 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
2779 + support.
2780 +
2781 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
2782 -+ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
2783 -+ option otherwise they will not get any protection.
2784 -+
2785 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
2786 + support as well, they will override the legacy EI_PAX marks.
2787 +
2788 ++ If you enable none of the marking options then all applications
2789 ++ will run with PaX enabled on them by default.
2790 ++
2791 +config PAX_PT_PAX_FLAGS
2792 + bool 'Use ELF program header marking'
2793 + default y if GRKERNSEC_CONFIG_AUTO
2794 @@ -106928,15 +107438,14 @@ index fb363cd..b6ce7c6 100644
2795 + integrated into the toolchain (the binutils patch is available
2796 + from http://pax.grsecurity.net).
2797 +
2798 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
2799 -+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
2800 -+ support otherwise they will not get any protection.
2801 ++ Note that if you enable the legacy EI_PAX marking support as well,
2802 ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
2803 +
2804 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
2805 + must make sure that the marks are the same if a binary has both marks.
2806 +
2807 -+ Note that if you enable the legacy EI_PAX marking support as well,
2808 -+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
2809 ++ If you enable none of the marking options then all applications
2810 ++ will run with PaX enabled on them by default.
2811 +
2812 +config PAX_XATTR_PAX_FLAGS
2813 + bool 'Use filesystem extended attributes marking'
2814 @@ -106959,15 +107468,14 @@ index fb363cd..b6ce7c6 100644
2815 + isofs, squashfs, tmpfs, udf, vfat) so copying files through such
2816 + filesystems will lose the extended attributes and these PaX markings.
2817 +
2818 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
2819 -+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
2820 -+ support otherwise they will not get any protection.
2821 ++ Note that if you enable the legacy EI_PAX marking support as well,
2822 ++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
2823 +
2824 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
2825 + must make sure that the marks are the same if a binary has both marks.
2826 +
2827 -+ Note that if you enable the legacy EI_PAX marking support as well,
2828 -+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
2829 ++ If you enable none of the marking options then all applications
2830 ++ will run with PaX enabled on them by default.
2831 +
2832 +choice
2833 + prompt 'MAC system integration'
2834 @@ -107457,6 +107965,7 @@ index fb363cd..b6ce7c6 100644
2835 + default y if GRKERNSEC_CONFIG_AUTO
2836 + depends on X86 || PPC || SPARC || ARM
2837 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
2838 ++ select PAX_USERCOPY_SLABS
2839 + help
2840 + By saying Y here the kernel will enforce the size of heap objects
2841 + when they are copied in either direction between the kernel and
2842 @@ -107493,6 +108002,19 @@ index fb363cd..b6ce7c6 100644
2843 + Homepage:
2844 + http://www.grsecurity.net/~ephox/overflow_plugin/
2845 +
2846 ++config PAX_LATENT_ENTROPY
2847 ++ bool "Generate some entropy during boot"
2848 ++ default y if GRKERNSEC_CONFIG_AUTO
2849 ++ help
2850 ++ By saying Y here the kernel will instrument early boot code to
2851 ++ extract some entropy from both original and artificially created
2852 ++ program state. This will help especially embedded systems where
2853 ++ there is little 'natural' source of entropy normally. The cost
2854 ++ is some slowdown of the boot process.
2855 ++
2856 ++ Note that entropy extracted this way is not cryptographically
2857 ++ secure!
2858 ++
2859 +endmenu
2860 +
2861 +endmenu
2862 @@ -107506,7 +108028,7 @@ index fb363cd..b6ce7c6 100644
2863 config KEYS
2864 bool "Enable access key retention support"
2865 help
2866 -@@ -146,7 +995,7 @@ config INTEL_TXT
2867 +@@ -146,7 +1009,7 @@ config INTEL_TXT
2868 config LSM_MMAP_MIN_ADDR
2869 int "Low address space for LSM to protect from user allocation"
2870 depends on SECURITY && SECURITY_SELINUX
2871 @@ -108798,12 +109320,19 @@ index 79633ea..9732e90 100644
2872 break;
2873 }
2874 }
2875 +diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
2876 +new file mode 100644
2877 +index 0000000..50f2f2f
2878 +--- /dev/null
2879 ++++ b/tools/gcc/.gitignore
2880 +@@ -0,0 +1 @@
2881 ++size_overflow_hash.h
2882 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
2883 new file mode 100644
2884 -index 0000000..f4f9986
2885 +index 0000000..1d09b7e
2886 --- /dev/null
2887 +++ b/tools/gcc/Makefile
2888 -@@ -0,0 +1,41 @@
2889 +@@ -0,0 +1,43 @@
2890 +#CC := gcc
2891 +#PLUGIN_SOURCE_FILES := pax_plugin.c
2892 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
2893 @@ -108825,6 +109354,7 @@ index 0000000..f4f9986
2894 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
2895 +$(HOSTLIBS)-y += colorize_plugin.so
2896 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
2897 ++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
2898 +
2899 +always := $($(HOSTLIBS)-y)
2900 +
2901 @@ -108835,6 +109365,7 @@ index 0000000..f4f9986
2902 +checker_plugin-objs := checker_plugin.o
2903 +colorize_plugin-objs := colorize_plugin.o
2904 +size_overflow_plugin-objs := size_overflow_plugin.o
2905 ++latent_entropy_plugin-objs := latent_entropy_plugin.o
2906 +
2907 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
2908 +
2909 @@ -109024,7 +109555,7 @@ index 0000000..d41b5af
2910 +}
2911 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
2912 new file mode 100644
2913 -index 0000000..7a5e311
2914 +index 0000000..846aeb0
2915 --- /dev/null
2916 +++ b/tools/gcc/colorize_plugin.c
2917 @@ -0,0 +1,148 @@
2918 @@ -109162,7 +109693,7 @@ index 0000000..7a5e311
2919 + struct register_pass_info colorize_rearm_pass_info = {
2920 + .pass = &pass_ipa_colorize_rearm.pass,
2921 + .reference_pass_name = "*free_lang_data",
2922 -+ .ref_pass_instance_number = 0,
2923 ++ .ref_pass_instance_number = 1,
2924 + .pos_op = PASS_POS_INSERT_AFTER
2925 + };
2926 +
2927 @@ -109178,7 +109709,7 @@ index 0000000..7a5e311
2928 +}
2929 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
2930 new file mode 100644
2931 -index 0000000..89b7f56
2932 +index 0000000..048d4ff
2933 --- /dev/null
2934 +++ b/tools/gcc/constify_plugin.c
2935 @@ -0,0 +1,328 @@
2936 @@ -109484,7 +110015,7 @@ index 0000000..89b7f56
2937 + struct register_pass_info local_variable_pass_info = {
2938 + .pass = &pass_local_variable.pass,
2939 + .reference_pass_name = "*referenced_vars",
2940 -+ .ref_pass_instance_number = 0,
2941 ++ .ref_pass_instance_number = 1,
2942 + .pos_op = PASS_POS_INSERT_AFTER
2943 + };
2944 +
2945 @@ -109612,7 +110143,7 @@ index 0000000..a0fe8b2
2946 +exit 0
2947 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
2948 new file mode 100644
2949 -index 0000000..a5eabce
2950 +index 0000000..a86e422
2951 --- /dev/null
2952 +++ b/tools/gcc/kallocstat_plugin.c
2953 @@ -0,0 +1,167 @@
2954 @@ -109769,7 +110300,7 @@ index 0000000..a5eabce
2955 + struct register_pass_info kallocstat_pass_info = {
2956 + .pass = &kallocstat_pass.pass,
2957 + .reference_pass_name = "ssa",
2958 -+ .ref_pass_instance_number = 0,
2959 ++ .ref_pass_instance_number = 1,
2960 + .pos_op = PASS_POS_INSERT_AFTER
2961 + };
2962 +
2963 @@ -109785,7 +110316,7 @@ index 0000000..a5eabce
2964 +}
2965 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
2966 new file mode 100644
2967 -index 0000000..d8a8da2
2968 +index 0000000..98011fa
2969 --- /dev/null
2970 +++ b/tools/gcc/kernexec_plugin.c
2971 @@ -0,0 +1,427 @@
2972 @@ -110161,19 +110692,19 @@ index 0000000..d8a8da2
2973 + struct register_pass_info kernexec_reload_pass_info = {
2974 + .pass = &kernexec_reload_pass.pass,
2975 + .reference_pass_name = "ssa",
2976 -+ .ref_pass_instance_number = 0,
2977 ++ .ref_pass_instance_number = 1,
2978 + .pos_op = PASS_POS_INSERT_AFTER
2979 + };
2980 + struct register_pass_info kernexec_fptr_pass_info = {
2981 + .pass = &kernexec_fptr_pass.pass,
2982 + .reference_pass_name = "ssa",
2983 -+ .ref_pass_instance_number = 0,
2984 ++ .ref_pass_instance_number = 1,
2985 + .pos_op = PASS_POS_INSERT_AFTER
2986 + };
2987 + struct register_pass_info kernexec_retaddr_pass_info = {
2988 + .pass = &kernexec_retaddr_pass.pass,
2989 + .reference_pass_name = "pro_and_epilogue",
2990 -+ .ref_pass_instance_number = 0,
2991 ++ .ref_pass_instance_number = 1,
2992 + .pos_op = PASS_POS_INSERT_AFTER
2993 + };
2994 +
2995 @@ -110216,6 +110747,307 @@ index 0000000..d8a8da2
2996 +
2997 + return 0;
2998 +}
2999 +diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
3000 +new file mode 100644
3001 +index 0000000..b8008f7
3002 +--- /dev/null
3003 ++++ b/tools/gcc/latent_entropy_plugin.c
3004 +@@ -0,0 +1,295 @@
3005 ++/*
3006 ++ * Copyright 2012 by the PaX Team <pageexec@××××××××.hu>
3007 ++ * Licensed under the GPL v2
3008 ++ *
3009 ++ * Note: the choice of the license means that the compilation process is
3010 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
3011 ++ * but for the kernel it doesn't matter since it doesn't link against
3012 ++ * any of the gcc libraries
3013 ++ *
3014 ++ * gcc plugin to help generate a little bit of entropy from program state,
3015 ++ * used during boot in the kernel
3016 ++ *
3017 ++ * TODO:
3018 ++ * - add ipa pass to identify not explicitly marked candidate functions
3019 ++ * - mix in more program state (function arguments/return values, loop variables, etc)
3020 ++ * - more instrumentation control via attribute parameters
3021 ++ *
3022 ++ * BUGS:
3023 ++ * - LTO needs -flto-partition=none for now
3024 ++ */
3025 ++#include "gcc-plugin.h"
3026 ++#include "config.h"
3027 ++#include "system.h"
3028 ++#include "coretypes.h"
3029 ++#include "tree.h"
3030 ++#include "tree-pass.h"
3031 ++#include "flags.h"
3032 ++#include "intl.h"
3033 ++#include "toplev.h"
3034 ++#include "plugin.h"
3035 ++//#include "expr.h" where are you...
3036 ++#include "diagnostic.h"
3037 ++#include "plugin-version.h"
3038 ++#include "tm.h"
3039 ++#include "function.h"
3040 ++#include "basic-block.h"
3041 ++#include "gimple.h"
3042 ++#include "rtl.h"
3043 ++#include "emit-rtl.h"
3044 ++#include "tree-flow.h"
3045 ++
3046 ++int plugin_is_GPL_compatible;
3047 ++
3048 ++static tree latent_entropy_decl;
3049 ++
3050 ++static struct plugin_info latent_entropy_plugin_info = {
3051 ++ .version = "201207271820",
3052 ++ .help = NULL
3053 ++};
3054 ++
3055 ++static unsigned int execute_latent_entropy(void);
3056 ++static bool gate_latent_entropy(void);
3057 ++
3058 ++static struct gimple_opt_pass latent_entropy_pass = {
3059 ++ .pass = {
3060 ++ .type = GIMPLE_PASS,
3061 ++ .name = "latent_entropy",
3062 ++ .gate = gate_latent_entropy,
3063 ++ .execute = execute_latent_entropy,
3064 ++ .sub = NULL,
3065 ++ .next = NULL,
3066 ++ .static_pass_number = 0,
3067 ++ .tv_id = TV_NONE,
3068 ++ .properties_required = PROP_gimple_leh | PROP_cfg,
3069 ++ .properties_provided = 0,
3070 ++ .properties_destroyed = 0,
3071 ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
3072 ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
3073 ++ }
3074 ++};
3075 ++
3076 ++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
3077 ++{
3078 ++ if (TREE_CODE(*node) != FUNCTION_DECL) {
3079 ++ *no_add_attrs = true;
3080 ++ error("%qE attribute only applies to functions", name);
3081 ++ }
3082 ++ return NULL_TREE;
3083 ++}
3084 ++
3085 ++static struct attribute_spec latent_entropy_attr = {
3086 ++ .name = "latent_entropy",
3087 ++ .min_length = 0,
3088 ++ .max_length = 0,
3089 ++ .decl_required = true,
3090 ++ .type_required = false,
3091 ++ .function_type_required = false,
3092 ++ .handler = handle_latent_entropy_attribute,
3093 ++#if BUILDING_GCC_VERSION >= 4007
3094 ++ .affects_type_identity = false
3095 ++#endif
3096 ++};
3097 ++
3098 ++static void register_attributes(void *event_data, void *data)
3099 ++{
3100 ++ register_attribute(&latent_entropy_attr);
3101 ++}
3102 ++
3103 ++static bool gate_latent_entropy(void)
3104 ++{
3105 ++ tree latent_entropy_attr;
3106 ++
3107 ++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
3108 ++ return latent_entropy_attr != NULL_TREE;
3109 ++}
3110 ++
3111 ++static unsigned HOST_WIDE_INT seed;
3112 ++static unsigned HOST_WIDE_INT get_random_const(void)
3113 ++{
3114 ++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
3115 ++ return seed;
3116 ++}
3117 ++
3118 ++static enum tree_code get_op(tree *rhs)
3119 ++{
3120 ++ static enum tree_code op;
3121 ++ unsigned HOST_WIDE_INT random_const;
3122 ++
3123 ++ random_const = get_random_const();
3124 ++
3125 ++ switch (op) {
3126 ++ case BIT_XOR_EXPR:
3127 ++ op = PLUS_EXPR;
3128 ++ break;
3129 ++
3130 ++ case PLUS_EXPR:
3131 ++ if (rhs) {
3132 ++ op = LROTATE_EXPR;
3133 ++ random_const &= HOST_BITS_PER_WIDE_INT - 1;
3134 ++ break;
3135 ++ }
3136 ++
3137 ++ case LROTATE_EXPR:
3138 ++ default:
3139 ++ op = BIT_XOR_EXPR;
3140 ++ break;
3141 ++ }
3142 ++ if (rhs)
3143 ++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
3144 ++ return op;
3145 ++}
3146 ++
3147 ++static void perturb_local_entropy(basic_block bb, tree local_entropy)
3148 ++{
3149 ++ gimple_stmt_iterator gsi;
3150 ++ gimple assign;
3151 ++ tree addxorrol, rhs;
3152 ++ enum tree_code op;
3153 ++
3154 ++ op = get_op(&rhs);
3155 ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
3156 ++ assign = gimple_build_assign(local_entropy, addxorrol);
3157 ++ find_referenced_vars_in(assign);
3158 ++//debug_bb(bb);
3159 ++ gsi = gsi_after_labels(bb);
3160 ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
3161 ++ update_stmt(assign);
3162 ++}
3163 ++
3164 ++static void perturb_latent_entropy(basic_block bb, tree rhs)
3165 ++{
3166 ++ gimple_stmt_iterator gsi;
3167 ++ gimple assign;
3168 ++ tree addxorrol, temp;
3169 ++
3170 ++ // 1. create temporary copy of latent_entropy
3171 ++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
3172 ++ add_referenced_var(temp);
3173 ++ mark_sym_for_renaming(temp);
3174 ++
3175 ++ // 2. read...
3176 ++ assign = gimple_build_assign(temp, latent_entropy_decl);
3177 ++ find_referenced_vars_in(assign);
3178 ++ gsi = gsi_after_labels(bb);
3179 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
3180 ++ update_stmt(assign);
3181 ++
3182 ++ // 3. ...modify...
3183 ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
3184 ++ assign = gimple_build_assign(temp, addxorrol);
3185 ++ find_referenced_vars_in(assign);
3186 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
3187 ++ update_stmt(assign);
3188 ++
3189 ++ // 4. ...write latent_entropy
3190 ++ assign = gimple_build_assign(latent_entropy_decl, temp);
3191 ++ find_referenced_vars_in(assign);
3192 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
3193 ++ update_stmt(assign);
3194 ++}
3195 ++
3196 ++static unsigned int execute_latent_entropy(void)
3197 ++{
3198 ++ basic_block bb;
3199 ++ gimple assign;
3200 ++ gimple_stmt_iterator gsi;
3201 ++ tree local_entropy;
3202 ++
3203 ++ if (!latent_entropy_decl) {
3204 ++ struct varpool_node *node;
3205 ++
3206 ++ for (node = varpool_nodes; node; node = node->next) {
3207 ++ tree var = node->decl;
3208 ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
3209 ++ continue;
3210 ++ latent_entropy_decl = var;
3211 ++// debug_tree(var);
3212 ++ break;
3213 ++ }
3214 ++ if (!latent_entropy_decl) {
3215 ++// debug_tree(current_function_decl);
3216 ++ return 0;
3217 ++ }
3218 ++ }
3219 ++
3220 ++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
3221 ++
3222 ++ // 1. create local entropy variable
3223 ++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
3224 ++ add_referenced_var(local_entropy);
3225 ++ mark_sym_for_renaming(local_entropy);
3226 ++
3227 ++ // 2. initialize local entropy variable
3228 ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
3229 ++ if (dom_info_available_p(CDI_DOMINATORS))
3230 ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
3231 ++ gsi = gsi_start_bb(bb);
3232 ++
3233 ++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
3234 ++// gimple_set_location(assign, loc);
3235 ++ find_referenced_vars_in(assign);
3236 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
3237 ++ update_stmt(assign);
3238 ++ bb = bb->next_bb;
3239 ++
3240 ++ // 3. instrument each BB with an operation on the local entropy variable
3241 ++ while (bb != EXIT_BLOCK_PTR) {
3242 ++ perturb_local_entropy(bb, local_entropy);
3243 ++ bb = bb->next_bb;
3244 ++ };
3245 ++
3246 ++ // 4. mix local entropy into the global entropy variable
3247 ++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
3248 ++ return 0;
3249 ++}
3250 ++
3251 ++static void start_unit_callback(void *gcc_data, void *user_data)
3252 ++{
3253 ++#if BUILDING_GCC_VERSION >= 4007
3254 ++ seed = get_random_seed(false);
3255 ++#else
3256 ++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
3257 ++ seed *= seed;
3258 ++#endif
3259 ++
3260 ++ if (in_lto_p)
3261 ++ return;
3262 ++
3263 ++ // extern u64 latent_entropy
3264 ++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
3265 ++
3266 ++ TREE_STATIC(latent_entropy_decl) = 1;
3267 ++ TREE_PUBLIC(latent_entropy_decl) = 1;
3268 ++ TREE_USED(latent_entropy_decl) = 1;
3269 ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
3270 ++ DECL_EXTERNAL(latent_entropy_decl) = 1;
3271 ++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
3272 ++ DECL_INITIAL(latent_entropy_decl) = NULL;
3273 ++// DECL_ASSEMBLER_NAME(latent_entropy_decl);
3274 ++// varpool_finalize_decl(latent_entropy_decl);
3275 ++// varpool_mark_needed_node(latent_entropy_decl);
3276 ++}
3277 ++
3278 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
3279 ++{
3280 ++ const char * const plugin_name = plugin_info->base_name;
3281 ++ struct register_pass_info latent_entropy_pass_info = {
3282 ++ .pass = &latent_entropy_pass.pass,
3283 ++ .reference_pass_name = "optimized",
3284 ++ .ref_pass_instance_number = 1,
3285 ++ .pos_op = PASS_POS_INSERT_BEFORE
3286 ++ };
3287 ++
3288 ++ if (!plugin_default_version_check(version, &gcc_version)) {
3289 ++ error(G_("incompatible gcc/plugin versions"));
3290 ++ return 1;
3291 ++ }
3292 ++
3293 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
3294 ++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
3295 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
3296 ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
3297 ++
3298 ++ return 0;
3299 ++}
3300 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
3301 new file mode 100644
3302 index 0000000..eb35e4a
3303 @@ -113285,7 +114117,7 @@ index 0000000..cc96254
3304 +}
3305 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
3306 new file mode 100644
3307 -index 0000000..b87ec9d
3308 +index 0000000..38d2014
3309 --- /dev/null
3310 +++ b/tools/gcc/stackleak_plugin.c
3311 @@ -0,0 +1,313 @@
3312 @@ -113558,13 +114390,13 @@ index 0000000..b87ec9d
3313 + .pass = &stackleak_tree_instrument_pass.pass,
3314 +// .reference_pass_name = "tree_profile",
3315 + .reference_pass_name = "optimized",
3316 -+ .ref_pass_instance_number = 0,
3317 ++ .ref_pass_instance_number = 1,
3318 + .pos_op = PASS_POS_INSERT_BEFORE
3319 + };
3320 + struct register_pass_info stackleak_final_pass_info = {
3321 + .pass = &stackleak_final_rtl_opt_pass.pass,
3322 + .reference_pass_name = "final",
3323 -+ .ref_pass_instance_number = 0,
3324 ++ .ref_pass_instance_number = 1,
3325 + .pos_op = PASS_POS_INSERT_BEFORE
3326 + };
3327 +
3328
3329 diff --git a/2.6.32/4450_grsec-kconfig-default-gids.patch b/2.6.32/4450_grsec-kconfig-default-gids.patch
3330 index 3bf6bd2..e7b920b 100644
3331 --- a/2.6.32/4450_grsec-kconfig-default-gids.patch
3332 +++ b/2.6.32/4450_grsec-kconfig-default-gids.patch
3333 @@ -73,7 +73,7 @@ diff -Nuar a/grsecurity/Kconfig b/Kconfig
3334 diff -Nuar a/security/Kconfig b/security/Kconfig
3335 --- a/security/Kconfig 2012-07-01 12:51:41.000000000 -0400
3336 +++ b/security/Kconfig 2012-07-01 13:00:23.000000000 -0400
3337 -@@ -187,7 +187,7 @@
3338 +@@ -190,7 +190,7 @@
3339
3340 config GRKERNSEC_PROC_GID
3341 int "GID exempted from /proc restrictions"
3342
3343 diff --git a/3.2.23/0000_README b/3.2.24/0000_README
3344 similarity index 91%
3345 rename from 3.2.23/0000_README
3346 rename to 3.2.24/0000_README
3347 index 998a3bc..51bc4a5 100644
3348 --- a/3.2.23/0000_README
3349 +++ b/3.2.24/0000_README
3350 @@ -10,7 +10,11 @@ Patch: 1022_linux-3.2.23.patch
3351 From: http://www.kernel.org
3352 Desc: Linux 3.2.23
3353
3354 -Patch: 4420_grsecurity-2.9.1-3.2.23-201207242236.patch
3355 +Patch: 1023_linux-3.2.24.patch
3356 +From: http://www.kernel.org
3357 +Desc: Linux 3.2.24
3358 +
3359 +Patch: 4420_grsecurity-2.9.1-3.2.24-201207281946.patch
3360 From: http://www.grsecurity.net
3361 Desc: hardened-sources base patch from upstream grsecurity
3362
3363
3364 diff --git a/3.2.23/1021_linux-3.2.22.patch b/3.2.24/1021_linux-3.2.22.patch
3365 similarity index 100%
3366 rename from 3.2.23/1021_linux-3.2.22.patch
3367 rename to 3.2.24/1021_linux-3.2.22.patch
3368
3369 diff --git a/3.2.23/1022_linux-3.2.23.patch b/3.2.24/1022_linux-3.2.23.patch
3370 similarity index 100%
3371 rename from 3.2.23/1022_linux-3.2.23.patch
3372 rename to 3.2.24/1022_linux-3.2.23.patch
3373
3374 diff --git a/3.2.24/1023_linux-3.2.24.patch b/3.2.24/1023_linux-3.2.24.patch
3375 new file mode 100644
3376 index 0000000..4692eb4
3377 --- /dev/null
3378 +++ b/3.2.24/1023_linux-3.2.24.patch
3379 @@ -0,0 +1,4684 @@
3380 +diff --git a/Makefile b/Makefile
3381 +index 40d1e3b..80bb4fd 100644
3382 +--- a/Makefile
3383 ++++ b/Makefile
3384 +@@ -1,6 +1,6 @@
3385 + VERSION = 3
3386 + PATCHLEVEL = 2
3387 +-SUBLEVEL = 23
3388 ++SUBLEVEL = 24
3389 + EXTRAVERSION =
3390 + NAME = Saber-toothed Squirrel
3391 +
3392 +diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
3393 +index 33ecd0c..b1e05cc 100644
3394 +--- a/arch/arm/plat-samsung/adc.c
3395 ++++ b/arch/arm/plat-samsung/adc.c
3396 +@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
3397 + return -EINVAL;
3398 + }
3399 +
3400 +- if (client->is_ts && adc->ts_pend)
3401 +- return -EAGAIN;
3402 +-
3403 + spin_lock_irqsave(&adc->lock, flags);
3404 +
3405 ++ if (client->is_ts && adc->ts_pend) {
3406 ++ spin_unlock_irqrestore(&adc->lock, flags);
3407 ++ return -EAGAIN;
3408 ++ }
3409 ++
3410 + client->channel = channel;
3411 + client->nr_samples = nr_samples;
3412 +
3413 +diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
3414 +index 97f8bf6..adda036 100644
3415 +--- a/arch/mips/include/asm/thread_info.h
3416 ++++ b/arch/mips/include/asm/thread_info.h
3417 +@@ -60,6 +60,8 @@ struct thread_info {
3418 + register struct thread_info *__current_thread_info __asm__("$28");
3419 + #define current_thread_info() __current_thread_info
3420 +
3421 ++#endif /* !__ASSEMBLY__ */
3422 ++
3423 + /* thread information allocation */
3424 + #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
3425 + #define THREAD_SIZE_ORDER (1)
3426 +@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
3427 +
3428 + #define free_thread_info(info) kfree(info)
3429 +
3430 +-#endif /* !__ASSEMBLY__ */
3431 +-
3432 + #define PREEMPT_ACTIVE 0x10000000
3433 +
3434 + /*
3435 +diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
3436 +index a81176f..be281c6 100644
3437 +--- a/arch/mips/kernel/vmlinux.lds.S
3438 ++++ b/arch/mips/kernel/vmlinux.lds.S
3439 +@@ -1,5 +1,6 @@
3440 + #include <asm/asm-offsets.h>
3441 + #include <asm/page.h>
3442 ++#include <asm/thread_info.h>
3443 + #include <asm-generic/vmlinux.lds.h>
3444 +
3445 + #undef mips
3446 +@@ -73,7 +74,7 @@ SECTIONS
3447 + .data : { /* Data */
3448 + . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
3449 +
3450 +- INIT_TASK_DATA(PAGE_SIZE)
3451 ++ INIT_TASK_DATA(THREAD_SIZE)
3452 + NOSAVE_DATA
3453 + CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
3454 + READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
3455 +diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
3456 +index 98b7c4b..fa3f921 100644
3457 +--- a/arch/powerpc/include/asm/cputime.h
3458 ++++ b/arch/powerpc/include/asm/cputime.h
3459 +@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
3460 + /*
3461 + * Convert cputime <-> microseconds
3462 + */
3463 +-extern u64 __cputime_msec_factor;
3464 ++extern u64 __cputime_usec_factor;
3465 +
3466 + static inline unsigned long cputime_to_usecs(const cputime_t ct)
3467 + {
3468 +- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
3469 ++ return mulhdu(ct, __cputime_usec_factor);
3470 + }
3471 +
3472 + static inline cputime_t usecs_to_cputime(const unsigned long us)
3473 +@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
3474 + sec = us / 1000000;
3475 + if (ct) {
3476 + ct *= tb_ticks_per_sec;
3477 +- do_div(ct, 1000);
3478 ++ do_div(ct, 1000000);
3479 + }
3480 + if (sec)
3481 + ct += (cputime_t) sec * tb_ticks_per_sec;
3482 +diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
3483 +index 5db163c..ec8affe 100644
3484 +--- a/arch/powerpc/kernel/time.c
3485 ++++ b/arch/powerpc/kernel/time.c
3486 +@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
3487 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING
3488 + /*
3489 + * Factors for converting from cputime_t (timebase ticks) to
3490 +- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
3491 ++ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
3492 + * These are all stored as 0.64 fixed-point binary fractions.
3493 + */
3494 + u64 __cputime_jiffies_factor;
3495 + EXPORT_SYMBOL(__cputime_jiffies_factor);
3496 +-u64 __cputime_msec_factor;
3497 +-EXPORT_SYMBOL(__cputime_msec_factor);
3498 ++u64 __cputime_usec_factor;
3499 ++EXPORT_SYMBOL(__cputime_usec_factor);
3500 + u64 __cputime_sec_factor;
3501 + EXPORT_SYMBOL(__cputime_sec_factor);
3502 + u64 __cputime_clockt_factor;
3503 +@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
3504 +
3505 + div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
3506 + __cputime_jiffies_factor = res.result_low;
3507 +- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
3508 +- __cputime_msec_factor = res.result_low;
3509 ++ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
3510 ++ __cputime_usec_factor = res.result_low;
3511 + div128_by_32(1, 0, tb_ticks_per_sec, &res);
3512 + __cputime_sec_factor = res.result_low;
3513 + div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
3514 +diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
3515 +index 4558f0d..479d03c 100644
3516 +--- a/arch/x86/kernel/acpi/boot.c
3517 ++++ b/arch/x86/kernel/acpi/boot.c
3518 +@@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
3519 + return 0;
3520 + }
3521 +
3522 +- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
3523 ++ if (intsrc->source_irq == 0) {
3524 + if (acpi_skip_timer_override) {
3525 +- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
3526 ++ printk(PREFIX "BIOS IRQ0 override ignored.\n");
3527 + return 0;
3528 + }
3529 +- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
3530 ++
3531 ++ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
3532 ++ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
3533 + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
3534 + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
3535 + }
3536 +@@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
3537 + }
3538 +
3539 + /*
3540 +- * Force ignoring BIOS IRQ0 pin2 override
3541 ++ * Force ignoring BIOS IRQ0 override
3542 + */
3543 + static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
3544 + {
3545 +- /*
3546 +- * The ati_ixp4x0_rev() early PCI quirk should have set
3547 +- * the acpi_skip_timer_override flag already:
3548 +- */
3549 + if (!acpi_skip_timer_override) {
3550 +- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
3551 +- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
3552 ++ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
3553 + d->ident);
3554 + acpi_skip_timer_override = 1;
3555 + }
3556 +@@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
3557 + * is enabled. This input is incorrectly designated the
3558 + * ISA IRQ 0 via an interrupt source override even though
3559 + * it is wired to the output of the master 8259A and INTIN0
3560 +- * is not connected at all. Force ignoring BIOS IRQ0 pin2
3561 ++ * is not connected at all. Force ignoring BIOS IRQ0
3562 + * override in that cases.
3563 + */
3564 + {
3565 +@@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
3566 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
3567 + },
3568 + },
3569 ++ {
3570 ++ .callback = dmi_ignore_irq0_timer_override,
3571 ++ .ident = "FUJITSU SIEMENS",
3572 ++ .matches = {
3573 ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
3574 ++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
3575 ++ },
3576 ++ },
3577 + {}
3578 + };
3579 +
3580 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
3581 +index 37a458b..e61f79c 100644
3582 +--- a/arch/x86/kernel/reboot.c
3583 ++++ b/arch/x86/kernel/reboot.c
3584 +@@ -460,6 +460,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
3585 + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
3586 + },
3587 + },
3588 ++ { /* Handle problems with rebooting on the Precision M6600. */
3589 ++ .callback = set_pci_reboot,
3590 ++ .ident = "Dell OptiPlex 990",
3591 ++ .matches = {
3592 ++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3593 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
3594 ++ },
3595 ++ },
3596 + { }
3597 + };
3598 +
3599 +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
3600 +index 688be8a..9e76a32 100644
3601 +--- a/block/scsi_ioctl.c
3602 ++++ b/block/scsi_ioctl.c
3603 +@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
3604 + break;
3605 + }
3606 +
3607 ++ if (capable(CAP_SYS_RAWIO))
3608 ++ return 0;
3609 ++
3610 + /* In particular, rule out all resets and host-specific ioctls. */
3611 + printk_ratelimited(KERN_WARNING
3612 + "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
3613 +
3614 +- return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
3615 ++ return -ENOTTY;
3616 + }
3617 + EXPORT_SYMBOL(scsi_verify_blk_ioctl);
3618 +
3619 +diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
3620 +index c850de4..eff7222 100644
3621 +--- a/drivers/acpi/processor_core.c
3622 ++++ b/drivers/acpi/processor_core.c
3623 +@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
3624 + * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
3625 + * }
3626 + *
3627 +- * Ignores apic_id and always return 0 for CPU0's handle.
3628 ++ * Ignores apic_id and always returns 0 for the processor
3629 ++ * handle with acpi id 0 if nr_cpu_ids is 1.
3630 ++ * This should be the case if SMP tables are not found.
3631 + * Return -1 for other CPU's handle.
3632 + */
3633 +- if (acpi_id == 0)
3634 ++ if (nr_cpu_ids <= 1 && acpi_id == 0)
3635 + return acpi_id;
3636 + else
3637 + return apic_id;
3638 +diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
3639 +index ca191ff..ed6bc52 100644
3640 +--- a/drivers/acpi/sleep.c
3641 ++++ b/drivers/acpi/sleep.c
3642 +@@ -702,8 +702,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
3643 + * can wake the system. _S0W may be valid, too.
3644 + */
3645 + if (acpi_target_sleep_state == ACPI_STATE_S0 ||
3646 +- (device_may_wakeup(dev) &&
3647 +- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
3648 ++ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
3649 ++ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
3650 + acpi_status status;
3651 +
3652 + acpi_method[3] = 'W';
3653 +diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
3654 +index 9f66181..240a244 100644
3655 +--- a/drivers/acpi/sysfs.c
3656 ++++ b/drivers/acpi/sysfs.c
3657 +@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
3658 + {
3659 + int result = 0;
3660 +
3661 +- if (!strncmp(val, "enable", strlen("enable") - 1)) {
3662 ++ if (!strncmp(val, "enable", strlen("enable"))) {
3663 + result = acpi_debug_trace(trace_method_name, trace_debug_level,
3664 + trace_debug_layer, 0);
3665 + if (result)
3666 +@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
3667 + goto exit;
3668 + }
3669 +
3670 +- if (!strncmp(val, "disable", strlen("disable") - 1)) {
3671 ++ if (!strncmp(val, "disable", strlen("disable"))) {
3672 + int name = 0;
3673 + result = acpi_debug_trace((char *)&name, trace_debug_level,
3674 + trace_debug_layer, 0);
3675 +diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
3676 +index 96198f3..a2da8f2 100644
3677 +--- a/drivers/gpio/gpio-wm8994.c
3678 ++++ b/drivers/gpio/gpio-wm8994.c
3679 +@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
3680 + struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
3681 + struct wm8994 *wm8994 = wm8994_gpio->wm8994;
3682 +
3683 ++ if (value)
3684 ++ value = WM8994_GPN_LVL;
3685 ++
3686 + return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
3687 +- WM8994_GPN_DIR, 0);
3688 ++ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
3689 + }
3690 +
3691 + static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
3692 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3693 +index 6aa7716..cc75c4b 100644
3694 +--- a/drivers/gpu/drm/i915/intel_display.c
3695 ++++ b/drivers/gpu/drm/i915/intel_display.c
3696 +@@ -8043,8 +8043,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
3697 + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3698 +
3699 + if (intel_enable_rc6(dev_priv->dev))
3700 +- rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
3701 +- GEN6_RC_CTL_RC6_ENABLE;
3702 ++ rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
3703 ++ ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
3704 +
3705 + I915_WRITE(GEN6_RC_CONTROL,
3706 + rc6_mask |
3707 +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
3708 +index 299d238..899c712 100644
3709 +--- a/drivers/hid/hid-apple.c
3710 ++++ b/drivers/hid/hid-apple.c
3711 +@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
3712 + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
3713 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
3714 + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
3715 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
3716 ++ .driver_data = APPLE_HAS_FN },
3717 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
3718 ++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
3719 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
3720 ++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
3721 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
3722 + .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
3723 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
3724 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
3725 +index c27b402..95430a0 100644
3726 +--- a/drivers/hid/hid-core.c
3727 ++++ b/drivers/hid/hid-core.c
3728 +@@ -1374,6 +1374,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
3729 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
3730 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
3731 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
3732 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
3733 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
3734 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
3735 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
3736 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
3737 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
3738 +@@ -1884,6 +1887,7 @@ static const struct hid_device_id hid_ignore_list[] = {
3739 + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
3740 + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
3741 + { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
3742 ++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
3743 + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
3744 + { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
3745 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
3746 +@@ -1968,6 +1972,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
3747 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
3748 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
3749 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
3750 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
3751 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
3752 ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
3753 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
3754 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
3755 + { }
3756 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
3757 +index fba3fc4..7db934d 100644
3758 +--- a/drivers/hid/hid-ids.h
3759 ++++ b/drivers/hid/hid-ids.h
3760 +@@ -125,6 +125,9 @@
3761 + #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
3762 + #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
3763 + #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
3764 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
3765 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
3766 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
3767 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
3768 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
3769 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
3770 +@@ -491,6 +494,9 @@
3771 + #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
3772 + #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
3773 +
3774 ++#define USB_VENDOR_ID_MADCATZ 0x0738
3775 ++#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
3776 ++
3777 + #define USB_VENDOR_ID_MCC 0x09db
3778 + #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
3779 + #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
3780 +diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
3781 +index d912649..1ba7af2 100644
3782 +--- a/drivers/hwmon/it87.c
3783 ++++ b/drivers/hwmon/it87.c
3784 +@@ -2086,7 +2086,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
3785 +
3786 + /* Start monitoring */
3787 + it87_write_value(data, IT87_REG_CONFIG,
3788 +- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
3789 ++ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
3790 + | (update_vbat ? 0x41 : 0x01));
3791 + }
3792 +
3793 +diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
3794 +index 61c9cf1..1201a15 100644
3795 +--- a/drivers/hwspinlock/hwspinlock_core.c
3796 ++++ b/drivers/hwspinlock/hwspinlock_core.c
3797 +@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
3798 + spin_lock_init(&hwlock->lock);
3799 + hwlock->bank = bank;
3800 +
3801 +- ret = hwspin_lock_register_single(hwlock, i);
3802 ++ ret = hwspin_lock_register_single(hwlock, base_id + i);
3803 + if (ret)
3804 + goto reg_failed;
3805 + }
3806 +@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
3807 +
3808 + reg_failed:
3809 + while (--i >= 0)
3810 +- hwspin_lock_unregister_single(i);
3811 ++ hwspin_lock_unregister_single(base_id + i);
3812 + return ret;
3813 + }
3814 + EXPORT_SYMBOL_GPL(hwspin_lock_register);
3815 +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
3816 +index d728875..2189cbf 100644
3817 +--- a/drivers/input/joystick/xpad.c
3818 ++++ b/drivers/input/joystick/xpad.c
3819 +@@ -142,6 +142,7 @@ static const struct xpad_device {
3820 + { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
3821 + { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
3822 + { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
3823 ++ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
3824 + { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
3825 + { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
3826 + { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
3827 +@@ -164,6 +165,7 @@ static const struct xpad_device {
3828 + { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
3829 + { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
3830 + { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
3831 ++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
3832 + { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
3833 + { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
3834 + };
3835 +@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
3836 + XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
3837 + XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
3838 + XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
3839 ++ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
3840 + XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
3841 + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
3842 + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
3843 + XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
3844 + XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
3845 +- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
3846 ++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
3847 ++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
3848 + { }
3849 + };
3850 +
3851 +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
3852 +index 5ec617e..ec58f48 100644
3853 +--- a/drivers/input/mouse/bcm5974.c
3854 ++++ b/drivers/input/mouse/bcm5974.c
3855 +@@ -79,6 +79,10 @@
3856 + #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
3857 + #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
3858 + #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
3859 ++/* MacbookPro10,1 (unibody, June 2012) */
3860 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
3861 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
3862 ++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
3863 +
3864 + #define BCM5974_DEVICE(prod) { \
3865 + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
3866 +@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
3867 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
3868 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
3869 + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
3870 ++ /* MacbookPro10,1 */
3871 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
3872 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
3873 ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
3874 + /* Terminating entry */
3875 + {}
3876 + };
3877 +@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
3878 + { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
3879 + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
3880 + },
3881 ++ {
3882 ++ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
3883 ++ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
3884 ++ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
3885 ++ HAS_INTEGRATED_BUTTON,
3886 ++ 0x84, sizeof(struct bt_data),
3887 ++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
3888 ++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
3889 ++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
3890 ++ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
3891 ++ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
3892 ++ },
3893 + {}
3894 + };
3895 +
3896 +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
3897 +index f1d5408..a1b8caa 100644
3898 +--- a/drivers/iommu/amd_iommu.c
3899 ++++ b/drivers/iommu/amd_iommu.c
3900 +@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
3901 +
3902 + static struct iommu_ops amd_iommu_ops;
3903 +
3904 ++static struct dma_map_ops amd_iommu_dma_ops;
3905 ++
3906 + /*
3907 + * general struct to manage commands send to an IOMMU
3908 + */
3909 +@@ -1878,6 +1880,11 @@ static int device_change_notifier(struct notifier_block *nb,
3910 + list_add_tail(&dma_domain->list, &iommu_pd_list);
3911 + spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
3912 +
3913 ++ if (!iommu_pass_through)
3914 ++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
3915 ++ else
3916 ++ dev->archdata.dma_ops = &nommu_dma_ops;
3917 ++
3918 + break;
3919 + case BUS_NOTIFY_DEL_DEVICE:
3920 +
3921 +diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
3922 +index 6269eb0..ef2d493 100644
3923 +--- a/drivers/iommu/amd_iommu_init.c
3924 ++++ b/drivers/iommu/amd_iommu_init.c
3925 +@@ -1468,6 +1468,8 @@ static int __init amd_iommu_init(void)
3926 +
3927 + register_syscore_ops(&amd_iommu_syscore_ops);
3928 +
3929 ++ x86_platform.iommu_shutdown = disable_iommus;
3930 ++
3931 + if (iommu_pass_through)
3932 + goto out;
3933 +
3934 +@@ -1476,7 +1478,6 @@ static int __init amd_iommu_init(void)
3935 + else
3936 + printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
3937 +
3938 +- x86_platform.iommu_shutdown = disable_iommus;
3939 + out:
3940 + return ret;
3941 +
3942 +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
3943 +index 9bfd057..dae2b7a 100644
3944 +--- a/drivers/md/dm-raid1.c
3945 ++++ b/drivers/md/dm-raid1.c
3946 +@@ -1080,6 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3947 + ti->split_io = dm_rh_get_region_size(ms->rh);
3948 + ti->num_flush_requests = 1;
3949 + ti->num_discard_requests = 1;
3950 ++ ti->discard_zeroes_data_unsupported = 1;
3951 +
3952 + ms->kmirrord_wq = alloc_workqueue("kmirrord",
3953 + WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
3954 +@@ -1210,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
3955 + * We need to dec pending if this was a write.
3956 + */
3957 + if (rw == WRITE) {
3958 +- if (!(bio->bi_rw & REQ_FLUSH))
3959 ++ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
3960 + dm_rh_dec(ms->rh, map_context->ll);
3961 + return error;
3962 + }
3963 +diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
3964 +index 7771ed2..69732e0 100644
3965 +--- a/drivers/md/dm-region-hash.c
3966 ++++ b/drivers/md/dm-region-hash.c
3967 +@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
3968 + return;
3969 + }
3970 +
3971 ++ if (bio->bi_rw & REQ_DISCARD)
3972 ++ return;
3973 ++
3974 + /* We must inform the log that the sync count has changed. */
3975 + log->type->set_region_sync(log, region, 0);
3976 +
3977 +@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
3978 + struct bio *bio;
3979 +
3980 + for (bio = bios->head; bio; bio = bio->bi_next) {
3981 +- if (bio->bi_rw & REQ_FLUSH)
3982 ++ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
3983 + continue;
3984 + rh_inc(rh, dm_rh_bio_to_region(rh, bio));
3985 + }
3986 +diff --git a/drivers/md/md.c b/drivers/md/md.c
3987 +index 700ecae..d8646d7 100644
3988 +--- a/drivers/md/md.c
3989 ++++ b/drivers/md/md.c
3990 +@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
3991 + return sprintf(page, "%s\n", array_states[st]);
3992 + }
3993 +
3994 +-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
3995 +-static int md_set_readonly(struct mddev * mddev, int is_open);
3996 ++static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
3997 ++static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
3998 + static int do_md_run(struct mddev * mddev);
3999 + static int restart_array(struct mddev *mddev);
4000 +
4001 +@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
4002 + /* stopping an active array */
4003 + if (atomic_read(&mddev->openers) > 0)
4004 + return -EBUSY;
4005 +- err = do_md_stop(mddev, 0, 0);
4006 ++ err = do_md_stop(mddev, 0, NULL);
4007 + break;
4008 + case inactive:
4009 + /* stopping an active array */
4010 + if (mddev->pers) {
4011 + if (atomic_read(&mddev->openers) > 0)
4012 + return -EBUSY;
4013 +- err = do_md_stop(mddev, 2, 0);
4014 ++ err = do_md_stop(mddev, 2, NULL);
4015 + } else
4016 + err = 0; /* already inactive */
4017 + break;
4018 +@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
4019 + break; /* not supported yet */
4020 + case readonly:
4021 + if (mddev->pers)
4022 +- err = md_set_readonly(mddev, 0);
4023 ++ err = md_set_readonly(mddev, NULL);
4024 + else {
4025 + mddev->ro = 1;
4026 + set_disk_ro(mddev->gendisk, 1);
4027 +@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
4028 + case read_auto:
4029 + if (mddev->pers) {
4030 + if (mddev->ro == 0)
4031 +- err = md_set_readonly(mddev, 0);
4032 ++ err = md_set_readonly(mddev, NULL);
4033 + else if (mddev->ro == 1)
4034 + err = restart_array(mddev);
4035 + if (err == 0) {
4036 +@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
4037 + }
4038 + EXPORT_SYMBOL_GPL(md_stop);
4039 +
4040 +-static int md_set_readonly(struct mddev *mddev, int is_open)
4041 ++static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
4042 + {
4043 + int err = 0;
4044 + mutex_lock(&mddev->open_mutex);
4045 +- if (atomic_read(&mddev->openers) > is_open) {
4046 ++ if (atomic_read(&mddev->openers) > !!bdev) {
4047 + printk("md: %s still in use.\n",mdname(mddev));
4048 + err = -EBUSY;
4049 + goto out;
4050 + }
4051 ++ if (bdev)
4052 ++ sync_blockdev(bdev);
4053 + if (mddev->pers) {
4054 + __md_stop_writes(mddev);
4055 +
4056 +@@ -5108,18 +5110,26 @@ out:
4057 + * 0 - completely stop and dis-assemble array
4058 + * 2 - stop but do not disassemble array
4059 + */
4060 +-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
4061 ++static int do_md_stop(struct mddev * mddev, int mode,
4062 ++ struct block_device *bdev)
4063 + {
4064 + struct gendisk *disk = mddev->gendisk;
4065 + struct md_rdev *rdev;
4066 +
4067 + mutex_lock(&mddev->open_mutex);
4068 +- if (atomic_read(&mddev->openers) > is_open ||
4069 ++ if (atomic_read(&mddev->openers) > !!bdev ||
4070 + mddev->sysfs_active) {
4071 + printk("md: %s still in use.\n",mdname(mddev));
4072 + mutex_unlock(&mddev->open_mutex);
4073 + return -EBUSY;
4074 + }
4075 ++ if (bdev)
4076 ++ /* It is possible IO was issued on some other
4077 ++ * open file which was closed before we took ->open_mutex.
4078 ++ * As that was not the last close __blkdev_put will not
4079 ++ * have called sync_blockdev, so we must.
4080 ++ */
4081 ++ sync_blockdev(bdev);
4082 +
4083 + if (mddev->pers) {
4084 + if (mddev->ro)
4085 +@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
4086 + err = do_md_run(mddev);
4087 + if (err) {
4088 + printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
4089 +- do_md_stop(mddev, 0, 0);
4090 ++ do_md_stop(mddev, 0, NULL);
4091 + }
4092 + }
4093 +
4094 +@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
4095 + goto done_unlock;
4096 +
4097 + case STOP_ARRAY:
4098 +- err = do_md_stop(mddev, 0, 1);
4099 ++ err = do_md_stop(mddev, 0, bdev);
4100 + goto done_unlock;
4101 +
4102 + case STOP_ARRAY_RO:
4103 +- err = md_set_readonly(mddev, 1);
4104 ++ err = md_set_readonly(mddev, bdev);
4105 + goto done_unlock;
4106 +
4107 + case BLKROSET:
4108 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
4109 +index 7af60ec..2d97bf0 100644
4110 +--- a/drivers/md/raid1.c
4111 ++++ b/drivers/md/raid1.c
4112 +@@ -1713,8 +1713,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
4113 +
4114 + if (atomic_dec_and_test(&r1_bio->remaining)) {
4115 + /* if we're here, all write(s) have completed, so clean up */
4116 +- md_done_sync(mddev, r1_bio->sectors, 1);
4117 +- put_buf(r1_bio);
4118 ++ int s = r1_bio->sectors;
4119 ++ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
4120 ++ test_bit(R1BIO_WriteError, &r1_bio->state))
4121 ++ reschedule_retry(r1_bio);
4122 ++ else {
4123 ++ put_buf(r1_bio);
4124 ++ md_done_sync(mddev, s, 1);
4125 ++ }
4126 + }
4127 + }
4128 +
4129 +@@ -2378,9 +2384,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
4130 + */
4131 + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
4132 + atomic_set(&r1_bio->remaining, read_targets);
4133 +- for (i=0; i<conf->raid_disks; i++) {
4134 ++ for (i = 0; i < conf->raid_disks && read_targets; i++) {
4135 + bio = r1_bio->bios[i];
4136 + if (bio->bi_end_io == end_sync_read) {
4137 ++ read_targets--;
4138 + md_sync_acct(bio->bi_bdev, nr_sectors);
4139 + generic_make_request(bio);
4140 + }
4141 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
4142 +index 6ba4954..26ef63a 100644
4143 +--- a/drivers/md/raid5.c
4144 ++++ b/drivers/md/raid5.c
4145 +@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
4146 + BUG_ON(!list_empty(&sh->lru));
4147 + BUG_ON(atomic_read(&conf->active_stripes)==0);
4148 + if (test_bit(STRIPE_HANDLE, &sh->state)) {
4149 +- if (test_bit(STRIPE_DELAYED, &sh->state))
4150 ++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
4151 ++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
4152 + list_add_tail(&sh->lru, &conf->delayed_list);
4153 + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
4154 + sh->bm_seq - conf->seq_write > 0)
4155 + list_add_tail(&sh->lru, &conf->bitmap_list);
4156 + else {
4157 ++ clear_bit(STRIPE_DELAYED, &sh->state);
4158 + clear_bit(STRIPE_BIT_DELAY, &sh->state);
4159 + list_add_tail(&sh->lru, &conf->handle_list);
4160 + }
4161 +diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
4162 +index f732877..d5cda35 100644
4163 +--- a/drivers/media/dvb/dvb-core/dvbdev.c
4164 ++++ b/drivers/media/dvb/dvb-core/dvbdev.c
4165 +@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
4166 + if (minor == MAX_DVB_MINORS) {
4167 + kfree(dvbdevfops);
4168 + kfree(dvbdev);
4169 ++ up_write(&minor_rwsem);
4170 + mutex_unlock(&dvbdev_register_lock);
4171 + return -EINVAL;
4172 + }
4173 +diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
4174 +index 34c03be..83e8e1b 100644
4175 +--- a/drivers/mtd/nand/nandsim.c
4176 ++++ b/drivers/mtd/nand/nandsim.c
4177 +@@ -28,7 +28,7 @@
4178 + #include <linux/module.h>
4179 + #include <linux/moduleparam.h>
4180 + #include <linux/vmalloc.h>
4181 +-#include <asm/div64.h>
4182 ++#include <linux/math64.h>
4183 + #include <linux/slab.h>
4184 + #include <linux/errno.h>
4185 + #include <linux/string.h>
4186 +@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
4187 + return kstrdup(buf, GFP_KERNEL);
4188 + }
4189 +
4190 +-static uint64_t divide(uint64_t n, uint32_t d)
4191 +-{
4192 +- do_div(n, d);
4193 +- return n;
4194 +-}
4195 +-
4196 + /*
4197 + * Initialize the nandsim structure.
4198 + *
4199 +@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
4200 + ns->geom.oobsz = mtd->oobsize;
4201 + ns->geom.secsz = mtd->erasesize;
4202 + ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
4203 +- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
4204 ++ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
4205 + ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
4206 + ns->geom.secshift = ffs(ns->geom.secsz) - 1;
4207 + ns->geom.pgshift = chip->page_shift;
4208 +@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
4209 +
4210 + if (!rptwear)
4211 + return 0;
4212 +- wear_eb_count = divide(mtd->size, mtd->erasesize);
4213 ++ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
4214 + mem = wear_eb_count * sizeof(unsigned long);
4215 + if (mem / sizeof(unsigned long) != wear_eb_count) {
4216 + NS_ERR("Too many erase blocks for wear reporting\n");
4217 +diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
4218 +index 3680aa2..2cf084e 100644
4219 +--- a/drivers/net/bonding/bond_debugfs.c
4220 ++++ b/drivers/net/bonding/bond_debugfs.c
4221 +@@ -6,7 +6,7 @@
4222 + #include "bonding.h"
4223 + #include "bond_alb.h"
4224 +
4225 +-#ifdef CONFIG_DEBUG_FS
4226 ++#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
4227 +
4228 + #include <linux/debugfs.h>
4229 + #include <linux/seq_file.h>
4230 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
4231 +index 1a88e38..6c284d1 100644
4232 +--- a/drivers/net/bonding/bond_main.c
4233 ++++ b/drivers/net/bonding/bond_main.c
4234 +@@ -3184,6 +3184,12 @@ static int bond_master_netdev_event(unsigned long event,
4235 + switch (event) {
4236 + case NETDEV_CHANGENAME:
4237 + return bond_event_changename(event_bond);
4238 ++ case NETDEV_UNREGISTER:
4239 ++ bond_remove_proc_entry(event_bond);
4240 ++ break;
4241 ++ case NETDEV_REGISTER:
4242 ++ bond_create_proc_entry(event_bond);
4243 ++ break;
4244 + default:
4245 + break;
4246 + }
4247 +@@ -4391,8 +4397,6 @@ static void bond_uninit(struct net_device *bond_dev)
4248 +
4249 + bond_work_cancel_all(bond);
4250 +
4251 +- bond_remove_proc_entry(bond);
4252 +-
4253 + bond_debug_unregister(bond);
4254 +
4255 + __hw_addr_flush(&bond->mc_list);
4256 +@@ -4794,7 +4798,6 @@ static int bond_init(struct net_device *bond_dev)
4257 +
4258 + bond_set_lockdep_class(bond_dev);
4259 +
4260 +- bond_create_proc_entry(bond);
4261 + list_add_tail(&bond->bond_list, &bn->dev_list);
4262 +
4263 + bond_prepare_sysfs_group(bond);
4264 +diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
4265 +index eccdcff..5ae7df7 100644
4266 +--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
4267 ++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
4268 +@@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
4269 + dev_warn(&pdev->dev, "stop mac failed\n");
4270 + atl1c_set_aspm(hw, false);
4271 + netif_carrier_off(netdev);
4272 +- netif_stop_queue(netdev);
4273 + atl1c_phy_reset(hw);
4274 + atl1c_phy_init(&adapter->hw);
4275 + } else {
4276 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
4277 +index aec7212..8dda46a 100644
4278 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
4279 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
4280 +@@ -723,21 +723,6 @@ struct bnx2x_fastpath {
4281 +
4282 + #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
4283 +
4284 +-#define BNX2X_IP_CSUM_ERR(cqe) \
4285 +- (!((cqe)->fast_path_cqe.status_flags & \
4286 +- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
4287 +- ((cqe)->fast_path_cqe.type_error_flags & \
4288 +- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
4289 +-
4290 +-#define BNX2X_L4_CSUM_ERR(cqe) \
4291 +- (!((cqe)->fast_path_cqe.status_flags & \
4292 +- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
4293 +- ((cqe)->fast_path_cqe.type_error_flags & \
4294 +- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
4295 +-
4296 +-#define BNX2X_RX_CSUM_OK(cqe) \
4297 +- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
4298 +-
4299 + #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
4300 + (((le16_to_cpu(flags) & \
4301 + PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
4302 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4303 +index 580b44e..2c1a5c0 100644
4304 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4305 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
4306 +@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
4307 +
4308 + if ((netif_tx_queue_stopped(txq)) &&
4309 + (bp->state == BNX2X_STATE_OPEN) &&
4310 +- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
4311 ++ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
4312 + netif_tx_wake_queue(txq);
4313 +
4314 + __netif_tx_unlock(txq);
4315 +@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
4316 + le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
4317 + }
4318 +
4319 ++static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
4320 ++ struct bnx2x_fastpath *fp)
4321 ++{
4322 ++ /* Do nothing if no IP/L4 csum validation was done */
4323 ++
4324 ++ if (cqe->fast_path_cqe.status_flags &
4325 ++ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
4326 ++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
4327 ++ return;
4328 ++
4329 ++ /* If both IP/L4 validation were done, check if an error was found. */
4330 ++
4331 ++ if (cqe->fast_path_cqe.type_error_flags &
4332 ++ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
4333 ++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
4334 ++ fp->eth_q_stats.hw_csum_err++;
4335 ++ else
4336 ++ skb->ip_summed = CHECKSUM_UNNECESSARY;
4337 ++}
4338 ++
4339 + int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
4340 + {
4341 + struct bnx2x *bp = fp->bp;
4342 +@@ -746,13 +766,9 @@ reuse_rx:
4343 +
4344 + skb_checksum_none_assert(skb);
4345 +
4346 +- if (bp->dev->features & NETIF_F_RXCSUM) {
4347 ++ if (bp->dev->features & NETIF_F_RXCSUM)
4348 ++ bnx2x_csum_validate(skb, cqe, fp);
4349 +
4350 +- if (likely(BNX2X_RX_CSUM_OK(cqe)))
4351 +- skb->ip_summed = CHECKSUM_UNNECESSARY;
4352 +- else
4353 +- fp->eth_q_stats.hw_csum_err++;
4354 +- }
4355 + }
4356 +
4357 + skb_record_rx_queue(skb, fp->index);
4358 +@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
4359 + /* we split the first BD into headers and data BDs
4360 + * to ease the pain of our fellow microcode engineers
4361 + * we use one mapping for both BDs
4362 +- * So far this has only been observed to happen
4363 +- * in Other Operating Systems(TM)
4364 + */
4365 + static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
4366 + struct bnx2x_fp_txdata *txdata,
4367 +@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
4368 +
4369 + txdata->tx_bd_prod += nbd;
4370 +
4371 +- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
4372 ++ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
4373 + netif_tx_stop_queue(txq);
4374 +
4375 + /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4376 +@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
4377 + smp_mb();
4378 +
4379 + fp->eth_q_stats.driver_xoff++;
4380 +- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
4381 ++ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
4382 + netif_tx_wake_queue(txq);
4383 + }
4384 + txdata->tx_pkt++;
4385 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
4386 +index 2dcac28..6b258d9 100644
4387 +--- a/drivers/net/ethernet/broadcom/tg3.c
4388 ++++ b/drivers/net/ethernet/broadcom/tg3.c
4389 +@@ -14046,7 +14046,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
4390 + }
4391 + }
4392 +
4393 +- if (tg3_flag(tp, 5755_PLUS))
4394 ++ if (tg3_flag(tp, 5755_PLUS) ||
4395 ++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
4396 + tg3_flag_set(tp, SHORT_DMA_BUG);
4397 +
4398 + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
4399 +diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
4400 +index e556fc3..3072d35 100644
4401 +--- a/drivers/net/ethernet/intel/e1000e/82571.c
4402 ++++ b/drivers/net/ethernet/intel/e1000e/82571.c
4403 +@@ -1571,6 +1571,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
4404 + ctrl = er32(CTRL);
4405 + status = er32(STATUS);
4406 + rxcw = er32(RXCW);
4407 ++ /* SYNCH bit and IV bit are sticky */
4408 ++ udelay(10);
4409 ++ rxcw = er32(RXCW);
4410 +
4411 + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
4412 +
4413 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
4414 +index cc2565c..9e61d6b 100644
4415 +--- a/drivers/net/ethernet/realtek/r8169.c
4416 ++++ b/drivers/net/ethernet/realtek/r8169.c
4417 +@@ -4185,6 +4185,7 @@ out:
4418 + return rc;
4419 +
4420 + err_out_msi_4:
4421 ++ netif_napi_del(&tp->napi);
4422 + rtl_disable_msi(pdev, tp);
4423 + iounmap(ioaddr);
4424 + err_out_free_res_3:
4425 +@@ -4210,6 +4211,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
4426 +
4427 + cancel_delayed_work_sync(&tp->task);
4428 +
4429 ++ netif_napi_del(&tp->napi);
4430 ++
4431 + unregister_netdev(dev);
4432 +
4433 + rtl_release_firmware(tp);
4434 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4435 +index 72cd190..d4d2bc1 100644
4436 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4437 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4438 +@@ -1174,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4439 + priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
4440 + wmb();
4441 + priv->hw->desc->set_tx_owner(desc);
4442 ++ wmb();
4443 + }
4444 +
4445 + /* Interrupt on completition only for the latest segment */
4446 +@@ -1189,6 +1190,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4447 +
4448 + /* To avoid raise condition */
4449 + priv->hw->desc->set_tx_owner(first);
4450 ++ wmb();
4451 +
4452 + priv->cur_tx++;
4453 +
4454 +@@ -1252,6 +1254,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
4455 + }
4456 + wmb();
4457 + priv->hw->desc->set_rx_owner(p + entry);
4458 ++ wmb();
4459 + }
4460 + }
4461 +
4462 +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
4463 +index 1b7082d..26106c0 100644
4464 +--- a/drivers/net/macvtap.c
4465 ++++ b/drivers/net/macvtap.c
4466 +@@ -504,10 +504,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
4467 + if (copy > size) {
4468 + ++from;
4469 + --count;
4470 +- }
4471 ++ offset = 0;
4472 ++ } else
4473 ++ offset += size;
4474 + copy -= size;
4475 + offset1 += size;
4476 +- offset = 0;
4477 + }
4478 +
4479 + if (len == offset1)
4480 +@@ -517,24 +518,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
4481 + struct page *page[MAX_SKB_FRAGS];
4482 + int num_pages;
4483 + unsigned long base;
4484 ++ unsigned long truesize;
4485 +
4486 +- len = from->iov_len - offset1;
4487 ++ len = from->iov_len - offset;
4488 + if (!len) {
4489 +- offset1 = 0;
4490 ++ offset = 0;
4491 + ++from;
4492 + continue;
4493 + }
4494 +- base = (unsigned long)from->iov_base + offset1;
4495 ++ base = (unsigned long)from->iov_base + offset;
4496 + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
4497 ++ if (i + size > MAX_SKB_FRAGS)
4498 ++ return -EMSGSIZE;
4499 + num_pages = get_user_pages_fast(base, size, 0, &page[i]);
4500 +- if ((num_pages != size) ||
4501 +- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
4502 +- /* put_page is in skb free */
4503 ++ if (num_pages != size) {
4504 ++ for (i = 0; i < num_pages; i++)
4505 ++ put_page(page[i]);
4506 + return -EFAULT;
4507 ++ }
4508 ++ truesize = size * PAGE_SIZE;
4509 + skb->data_len += len;
4510 + skb->len += len;
4511 +- skb->truesize += len;
4512 +- atomic_add(len, &skb->sk->sk_wmem_alloc);
4513 ++ skb->truesize += truesize;
4514 ++ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
4515 + while (len) {
4516 + int off = base & ~PAGE_MASK;
4517 + int size = min_t(int, len, PAGE_SIZE - off);
4518 +@@ -545,7 +551,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
4519 + len -= size;
4520 + i++;
4521 + }
4522 +- offset1 = 0;
4523 ++ offset = 0;
4524 + ++from;
4525 + }
4526 + return 0;
4527 +@@ -645,7 +651,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4528 + int err;
4529 + struct virtio_net_hdr vnet_hdr = { 0 };
4530 + int vnet_hdr_len = 0;
4531 +- int copylen;
4532 ++ int copylen = 0;
4533 + bool zerocopy = false;
4534 +
4535 + if (q->flags & IFF_VNET_HDR) {
4536 +@@ -674,15 +680,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4537 + if (unlikely(len < ETH_HLEN))
4538 + goto err;
4539 +
4540 ++ err = -EMSGSIZE;
4541 ++ if (unlikely(count > UIO_MAXIOV))
4542 ++ goto err;
4543 ++
4544 + if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
4545 + zerocopy = true;
4546 +
4547 + if (zerocopy) {
4548 ++ /* Userspace may produce vectors with count greater than
4549 ++ * MAX_SKB_FRAGS, so we need to linearize parts of the skb
4550 ++ * to let the rest of data to be fit in the frags.
4551 ++ */
4552 ++ if (count > MAX_SKB_FRAGS) {
4553 ++ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
4554 ++ if (copylen < vnet_hdr_len)
4555 ++ copylen = 0;
4556 ++ else
4557 ++ copylen -= vnet_hdr_len;
4558 ++ }
4559 + /* There are 256 bytes to be copied in skb, so there is enough
4560 + * room for skb expand head in case it is used.
4561 + * The rest buffer is mapped from userspace.
4562 + */
4563 +- copylen = vnet_hdr.hdr_len;
4564 ++ if (copylen < vnet_hdr.hdr_len)
4565 ++ copylen = vnet_hdr.hdr_len;
4566 + if (!copylen)
4567 + copylen = GOODCOPY_LEN;
4568 + } else
4569 +@@ -693,10 +715,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4570 + if (!skb)
4571 + goto err;
4572 +
4573 +- if (zerocopy) {
4574 ++ if (zerocopy)
4575 + err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
4576 +- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
4577 +- } else
4578 ++ else
4579 + err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
4580 + len);
4581 + if (err)
4582 +@@ -715,8 +736,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
4583 + rcu_read_lock_bh();
4584 + vlan = rcu_dereference_bh(q->vlan);
4585 + /* copy skb_ubuf_info for callback when skb has no error */
4586 +- if (zerocopy)
4587 ++ if (zerocopy) {
4588 + skb_shinfo(skb)->destructor_arg = m->msg_control;
4589 ++ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
4590 ++ }
4591 + if (vlan)
4592 + macvlan_start_xmit(skb, vlan->dev);
4593 + else
4594 +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
4595 +index ad96164..00ed9c1 100644
4596 +--- a/drivers/net/usb/ipheth.c
4597 ++++ b/drivers/net/usb/ipheth.c
4598 +@@ -59,6 +59,7 @@
4599 + #define USB_PRODUCT_IPHONE_3G 0x1292
4600 + #define USB_PRODUCT_IPHONE_3GS 0x1294
4601 + #define USB_PRODUCT_IPHONE_4 0x1297
4602 ++#define USB_PRODUCT_IPAD 0x129a
4603 + #define USB_PRODUCT_IPHONE_4_VZW 0x129c
4604 + #define USB_PRODUCT_IPHONE_4S 0x12a0
4605 +
4606 +@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
4607 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
4608 + IPHETH_USBINTF_PROTO) },
4609 + { USB_DEVICE_AND_INTERFACE_INFO(
4610 ++ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
4611 ++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
4612 ++ IPHETH_USBINTF_PROTO) },
4613 ++ { USB_DEVICE_AND_INTERFACE_INFO(
4614 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
4615 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
4616 + IPHETH_USBINTF_PROTO) },
4617 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
4618 +index 833cbef..8a40ff9 100644
4619 +--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
4620 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
4621 +@@ -900,8 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
4622 + */
4623 + if (!(txs->status & TX_STATUS_AMPDU)
4624 + && (txs->status & TX_STATUS_INTERMEDIATE)) {
4625 +- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
4626 +- __func__);
4627 ++ BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
4628 + return false;
4629 + }
4630 +
4631 +diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
4632 +new file mode 100644
4633 +index 0000000..4007bf5
4634 +--- /dev/null
4635 ++++ b/drivers/net/wireless/ipw2x00/ipw.h
4636 +@@ -0,0 +1,23 @@
4637 ++/*
4638 ++ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
4639 ++ *
4640 ++ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@×××××.com>
4641 ++ *
4642 ++ * This program is free software; you can redistribute it and/or modify
4643 ++ * it under the terms of the GNU General Public License version 2 as
4644 ++ * published by the Free Software Foundation.
4645 ++ */
4646 ++
4647 ++#ifndef __IPW_H__
4648 ++#define __IPW_H__
4649 ++
4650 ++#include <linux/ieee80211.h>
4651 ++
4652 ++static const u32 ipw_cipher_suites[] = {
4653 ++ WLAN_CIPHER_SUITE_WEP40,
4654 ++ WLAN_CIPHER_SUITE_WEP104,
4655 ++ WLAN_CIPHER_SUITE_TKIP,
4656 ++ WLAN_CIPHER_SUITE_CCMP,
4657 ++};
4658 ++
4659 ++#endif
4660 +diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
4661 +index 127e9c6..10862d4 100644
4662 +--- a/drivers/net/wireless/ipw2x00/ipw2100.c
4663 ++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
4664 +@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
4665 + #include <net/lib80211.h>
4666 +
4667 + #include "ipw2100.h"
4668 ++#include "ipw.h"
4669 +
4670 + #define IPW2100_VERSION "git-1.2.2"
4671 +
4672 +@@ -1955,6 +1956,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
4673 + wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
4674 + }
4675 +
4676 ++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
4677 ++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
4678 ++
4679 + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
4680 + if (wiphy_register(wdev->wiphy)) {
4681 + ipw2100_down(priv);
4682 +diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
4683 +index 827889b..56bd370 100644
4684 +--- a/drivers/net/wireless/ipw2x00/ipw2200.c
4685 ++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
4686 +@@ -34,6 +34,7 @@
4687 + #include <linux/slab.h>
4688 + #include <net/cfg80211-wext.h>
4689 + #include "ipw2200.h"
4690 ++#include "ipw.h"
4691 +
4692 +
4693 + #ifndef KBUILD_EXTMOD
4694 +@@ -11535,6 +11536,9 @@ static int ipw_wdev_init(struct net_device *dev)
4695 + wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
4696 + }
4697 +
4698 ++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
4699 ++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
4700 ++
4701 + set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
4702 +
4703 + /* With that information in place, we can now register the wiphy... */
4704 +diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
4705 +index a262c23..0116ca8 100644
4706 +--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
4707 ++++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
4708 +@@ -466,7 +466,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
4709 + return 0;
4710 + }
4711 +
4712 +- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
4713 ++ if (priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
4714 + IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
4715 + keyconf->keyidx, key_flags);
4716 + spin_unlock_irqrestore(&priv->sta_lock, flags);
4717 +@@ -483,7 +483,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
4718 + sizeof(struct iwl4965_keyinfo));
4719 + priv->stations[sta_id].sta.key.key_flags =
4720 + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
4721 +- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
4722 ++ priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
4723 + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
4724 + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
4725 +
4726 +diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
4727 +index 2bd5659..1bb64c9 100644
4728 +--- a/drivers/net/wireless/iwlegacy/iwl-core.c
4729 ++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
4730 +@@ -1884,14 +1884,12 @@ void iwl_legacy_bg_watchdog(unsigned long data)
4731 + return;
4732 +
4733 + /* monitor and check for other stuck queues */
4734 +- if (iwl_legacy_is_any_associated(priv)) {
4735 +- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
4736 +- /* skip as we already checked the command queue */
4737 +- if (cnt == priv->cmd_queue)
4738 +- continue;
4739 +- if (iwl_legacy_check_stuck_queue(priv, cnt))
4740 +- return;
4741 +- }
4742 ++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
4743 ++ /* skip as we already checked the command queue */
4744 ++ if (cnt == priv->cmd_queue)
4745 ++ continue;
4746 ++ if (iwl_legacy_check_stuck_queue(priv, cnt))
4747 ++ return;
4748 + }
4749 +
4750 + mod_timer(&priv->watchdog, jiffies +
4751 +diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
4752 +index 1e31050..ba28807 100644
4753 +--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
4754 ++++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
4755 +@@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
4756 + case QID_RX:
4757 + if (!rt2x00queue_full(queue))
4758 + rt2x00queue_for_each_entry(queue,
4759 +- Q_INDEX_DONE,
4760 + Q_INDEX,
4761 ++ Q_INDEX_DONE,
4762 + NULL,
4763 + rt2x00usb_kick_rx_entry);
4764 + break;
4765 +diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
4766 +index 2e0de2f..c2d5b49 100644
4767 +--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
4768 ++++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
4769 +@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
4770 + radio_on = true;
4771 + } else if (radio_on) {
4772 + radio_on = false;
4773 +- cancel_delayed_work_sync(&priv->led_on);
4774 ++ cancel_delayed_work(&priv->led_on);
4775 + ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
4776 + }
4777 + } else if (radio_on) {
4778 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
4779 +index 12d1e81..d024f83 100644
4780 +--- a/drivers/pci/pci-driver.c
4781 ++++ b/drivers/pci/pci-driver.c
4782 +@@ -742,6 +742,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
4783 +
4784 + pci_pm_set_unknown_state(pci_dev);
4785 +
4786 ++ /*
4787 ++ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
4788 ++ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
4789 ++ * hasn't been quiesced and tries to turn it off. If the controller
4790 ++ * is already in D3, this can hang or cause memory corruption.
4791 ++ *
4792 ++ * Since the value of the COMMAND register doesn't matter once the
4793 ++ * device has been suspended, we can safely set it to 0 here.
4794 ++ */
4795 ++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
4796 ++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
4797 ++
4798 + return 0;
4799 + }
4800 +
4801 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4802 +index e5b75eb..6d4a531 100644
4803 +--- a/drivers/pci/pci.c
4804 ++++ b/drivers/pci/pci.c
4805 +@@ -1689,11 +1689,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
4806 + if (target_state == PCI_POWER_ERROR)
4807 + return -EIO;
4808 +
4809 +- /* Some devices mustn't be in D3 during system sleep */
4810 +- if (target_state == PCI_D3hot &&
4811 +- (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
4812 +- return 0;
4813 +-
4814 + pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
4815 +
4816 + error = pci_set_power_state(dev, target_state);
4817 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4818 +index 3c56fec..78fda9c 100644
4819 +--- a/drivers/pci/quirks.c
4820 ++++ b/drivers/pci/quirks.c
4821 +@@ -2940,32 +2940,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
4822 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
4823 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
4824 +
4825 +-/*
4826 +- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
4827 +- * ASUS motherboards will cause memory corruption or a system crash
4828 +- * if they are in D3 while the system is put into S3 sleep.
4829 +- */
4830 +-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
4831 +-{
4832 +- const char *sys_info;
4833 +- static const char good_Asus_board[] = "P8Z68-V";
4834 +-
4835 +- if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
4836 +- return;
4837 +- if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
4838 +- return;
4839 +- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
4840 +- if (sys_info && memcmp(sys_info, good_Asus_board,
4841 +- sizeof(good_Asus_board) - 1) == 0)
4842 +- return;
4843 +-
4844 +- dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
4845 +- dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
4846 +- device_set_wakeup_capable(&dev->dev, false);
4847 +-}
4848 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
4849 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
4850 +-
4851 + static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
4852 + struct pci_fixup *end)
4853 + {
4854 +diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
4855 +index 809a3ae..b46ec11 100644
4856 +--- a/drivers/platform/x86/intel_ips.c
4857 ++++ b/drivers/platform/x86/intel_ips.c
4858 +@@ -72,6 +72,7 @@
4859 + #include <linux/string.h>
4860 + #include <linux/tick.h>
4861 + #include <linux/timer.h>
4862 ++#include <linux/dmi.h>
4863 + #include <drm/i915_drm.h>
4864 + #include <asm/msr.h>
4865 + #include <asm/processor.h>
4866 +@@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
4867 +
4868 + MODULE_DEVICE_TABLE(pci, ips_id_table);
4869 +
4870 ++static int ips_blacklist_callback(const struct dmi_system_id *id)
4871 ++{
4872 ++ pr_info("Blacklisted intel_ips for %s\n", id->ident);
4873 ++ return 1;
4874 ++}
4875 ++
4876 ++static const struct dmi_system_id ips_blacklist[] = {
4877 ++ {
4878 ++ .callback = ips_blacklist_callback,
4879 ++ .ident = "HP ProBook",
4880 ++ .matches = {
4881 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
4882 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
4883 ++ },
4884 ++ },
4885 ++ { } /* terminating entry */
4886 ++};
4887 ++
4888 + static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
4889 + {
4890 + u64 platform_info;
4891 +@@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
4892 + u16 htshi, trc, trc_required_mask;
4893 + u8 tse;
4894 +
4895 ++ if (dmi_check_system(ips_blacklist))
4896 ++ return -ENODEV;
4897 ++
4898 + ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
4899 + if (!ips)
4900 + return -ENOMEM;
4901 +diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
4902 +index 09e26bf..af1e296 100644
4903 +--- a/drivers/platform/x86/samsung-laptop.c
4904 ++++ b/drivers/platform/x86/samsung-laptop.c
4905 +@@ -540,245 +540,34 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
4906 + get_performance_level, set_performance_level);
4907 +
4908 +
4909 +-static int __init dmi_check_cb(const struct dmi_system_id *id)
4910 +-{
4911 +- pr_info("found laptop model '%s'\n",
4912 +- id->ident);
4913 +- return 1;
4914 +-}
4915 +-
4916 + static struct dmi_system_id __initdata samsung_dmi_table[] = {
4917 + {
4918 +- .ident = "N128",
4919 +- .matches = {
4920 +- DMI_MATCH(DMI_SYS_VENDOR,
4921 +- "SAMSUNG ELECTRONICS CO., LTD."),
4922 +- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
4923 +- DMI_MATCH(DMI_BOARD_NAME, "N128"),
4924 +- },
4925 +- .callback = dmi_check_cb,
4926 +- },
4927 +- {
4928 +- .ident = "N130",
4929 + .matches = {
4930 + DMI_MATCH(DMI_SYS_VENDOR,
4931 + "SAMSUNG ELECTRONICS CO., LTD."),
4932 +- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
4933 +- DMI_MATCH(DMI_BOARD_NAME, "N130"),
4934 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
4935 + },
4936 +- .callback = dmi_check_cb,
4937 + },
4938 + {
4939 +- .ident = "N510",
4940 + .matches = {
4941 + DMI_MATCH(DMI_SYS_VENDOR,
4942 + "SAMSUNG ELECTRONICS CO., LTD."),
4943 +- DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
4944 +- DMI_MATCH(DMI_BOARD_NAME, "N510"),
4945 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
4946 + },
4947 +- .callback = dmi_check_cb,
4948 + },
4949 + {
4950 +- .ident = "X125",
4951 + .matches = {
4952 + DMI_MATCH(DMI_SYS_VENDOR,
4953 + "SAMSUNG ELECTRONICS CO., LTD."),
4954 +- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
4955 +- DMI_MATCH(DMI_BOARD_NAME, "X125"),
4956 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
4957 + },
4958 +- .callback = dmi_check_cb,
4959 + },
4960 + {
4961 +- .ident = "X120/X170",
4962 + .matches = {
4963 + DMI_MATCH(DMI_SYS_VENDOR,
4964 + "SAMSUNG ELECTRONICS CO., LTD."),
4965 +- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
4966 +- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
4967 +- },
4968 +- .callback = dmi_check_cb,
4969 +- },
4970 +- {
4971 +- .ident = "NC10",
4972 +- .matches = {
4973 +- DMI_MATCH(DMI_SYS_VENDOR,
4974 +- "SAMSUNG ELECTRONICS CO., LTD."),
4975 +- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
4976 +- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
4977 +- },
4978 +- .callback = dmi_check_cb,
4979 +- },
4980 +- {
4981 +- .ident = "NP-Q45",
4982 +- .matches = {
4983 +- DMI_MATCH(DMI_SYS_VENDOR,
4984 +- "SAMSUNG ELECTRONICS CO., LTD."),
4985 +- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
4986 +- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
4987 +- },
4988 +- .callback = dmi_check_cb,
4989 +- },
4990 +- {
4991 +- .ident = "X360",
4992 +- .matches = {
4993 +- DMI_MATCH(DMI_SYS_VENDOR,
4994 +- "SAMSUNG ELECTRONICS CO., LTD."),
4995 +- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
4996 +- DMI_MATCH(DMI_BOARD_NAME, "X360"),
4997 +- },
4998 +- .callback = dmi_check_cb,
4999 +- },
5000 +- {
5001 +- .ident = "R410 Plus",
5002 +- .matches = {
5003 +- DMI_MATCH(DMI_SYS_VENDOR,
5004 +- "SAMSUNG ELECTRONICS CO., LTD."),
5005 +- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
5006 +- DMI_MATCH(DMI_BOARD_NAME, "R460"),
5007 +- },
5008 +- .callback = dmi_check_cb,
5009 +- },
5010 +- {
5011 +- .ident = "R518",
5012 +- .matches = {
5013 +- DMI_MATCH(DMI_SYS_VENDOR,
5014 +- "SAMSUNG ELECTRONICS CO., LTD."),
5015 +- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
5016 +- DMI_MATCH(DMI_BOARD_NAME, "R518"),
5017 +- },
5018 +- .callback = dmi_check_cb,
5019 +- },
5020 +- {
5021 +- .ident = "R519/R719",
5022 +- .matches = {
5023 +- DMI_MATCH(DMI_SYS_VENDOR,
5024 +- "SAMSUNG ELECTRONICS CO., LTD."),
5025 +- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
5026 +- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
5027 +- },
5028 +- .callback = dmi_check_cb,
5029 +- },
5030 +- {
5031 +- .ident = "N150/N210/N220",
5032 +- .matches = {
5033 +- DMI_MATCH(DMI_SYS_VENDOR,
5034 +- "SAMSUNG ELECTRONICS CO., LTD."),
5035 +- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
5036 +- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
5037 +- },
5038 +- .callback = dmi_check_cb,
5039 +- },
5040 +- {
5041 +- .ident = "N220",
5042 +- .matches = {
5043 +- DMI_MATCH(DMI_SYS_VENDOR,
5044 +- "SAMSUNG ELECTRONICS CO., LTD."),
5045 +- DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
5046 +- DMI_MATCH(DMI_BOARD_NAME, "N220"),
5047 +- },
5048 +- .callback = dmi_check_cb,
5049 +- },
5050 +- {
5051 +- .ident = "N150/N210/N220/N230",
5052 +- .matches = {
5053 +- DMI_MATCH(DMI_SYS_VENDOR,
5054 +- "SAMSUNG ELECTRONICS CO., LTD."),
5055 +- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
5056 +- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
5057 +- },
5058 +- .callback = dmi_check_cb,
5059 +- },
5060 +- {
5061 +- .ident = "N150P/N210P/N220P",
5062 +- .matches = {
5063 +- DMI_MATCH(DMI_SYS_VENDOR,
5064 +- "SAMSUNG ELECTRONICS CO., LTD."),
5065 +- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
5066 +- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
5067 +- },
5068 +- .callback = dmi_check_cb,
5069 +- },
5070 +- {
5071 +- .ident = "R700",
5072 +- .matches = {
5073 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5074 +- DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
5075 +- DMI_MATCH(DMI_BOARD_NAME, "SR700"),
5076 +- },
5077 +- .callback = dmi_check_cb,
5078 +- },
5079 +- {
5080 +- .ident = "R530/R730",
5081 +- .matches = {
5082 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5083 +- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
5084 +- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
5085 +- },
5086 +- .callback = dmi_check_cb,
5087 +- },
5088 +- {
5089 +- .ident = "NF110/NF210/NF310",
5090 +- .matches = {
5091 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5092 +- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
5093 +- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
5094 +- },
5095 +- .callback = dmi_check_cb,
5096 +- },
5097 +- {
5098 +- .ident = "N145P/N250P/N260P",
5099 +- .matches = {
5100 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5101 +- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
5102 +- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
5103 +- },
5104 +- .callback = dmi_check_cb,
5105 +- },
5106 +- {
5107 +- .ident = "R70/R71",
5108 +- .matches = {
5109 +- DMI_MATCH(DMI_SYS_VENDOR,
5110 +- "SAMSUNG ELECTRONICS CO., LTD."),
5111 +- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
5112 +- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
5113 +- },
5114 +- .callback = dmi_check_cb,
5115 +- },
5116 +- {
5117 +- .ident = "P460",
5118 +- .matches = {
5119 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5120 +- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
5121 +- DMI_MATCH(DMI_BOARD_NAME, "P460"),
5122 +- },
5123 +- .callback = dmi_check_cb,
5124 +- },
5125 +- {
5126 +- .ident = "R528/R728",
5127 +- .matches = {
5128 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5129 +- DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
5130 +- DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
5131 +- },
5132 +- .callback = dmi_check_cb,
5133 +- },
5134 +- {
5135 +- .ident = "NC210/NC110",
5136 +- .matches = {
5137 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5138 +- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
5139 +- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
5140 +- },
5141 +- .callback = dmi_check_cb,
5142 +- },
5143 +- {
5144 +- .ident = "X520",
5145 +- .matches = {
5146 +- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
5147 +- DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
5148 +- DMI_MATCH(DMI_BOARD_NAME, "X520"),
5149 ++ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
5150 + },
5151 +- .callback = dmi_check_cb,
5152 + },
5153 + { },
5154 + };
5155 +@@ -819,7 +608,8 @@ static int __init samsung_init(void)
5156 +
5157 + f0000_segment = ioremap_nocache(0xf0000, 0xffff);
5158 + if (!f0000_segment) {
5159 +- pr_err("Can't map the segment at 0xf0000\n");
5160 ++ if (debug || force)
5161 ++ pr_err("Can't map the segment at 0xf0000\n");
5162 + return -EINVAL;
5163 + }
5164 +
5165 +@@ -832,7 +622,8 @@ static int __init samsung_init(void)
5166 + }
5167 +
5168 + if (loca == 0xffff) {
5169 +- pr_err("This computer does not support SABI\n");
5170 ++ if (debug || force)
5171 ++ pr_err("This computer does not support SABI\n");
5172 + goto error_no_signature;
5173 + }
5174 +
5175 +diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
5176 +index 39e41fb..5160354 100644
5177 +--- a/drivers/rtc/rtc-mxc.c
5178 ++++ b/drivers/rtc/rtc-mxc.c
5179 +@@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
5180 + struct platform_device *pdev = dev_id;
5181 + struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
5182 + void __iomem *ioaddr = pdata->ioaddr;
5183 ++ unsigned long flags;
5184 + u32 status;
5185 + u32 events = 0;
5186 +
5187 +- spin_lock_irq(&pdata->rtc->irq_lock);
5188 ++ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
5189 + status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
5190 + /* clear interrupt sources */
5191 + writew(status, ioaddr + RTC_RTCISR);
5192 +@@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
5193 + rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
5194 +
5195 + rtc_update_irq(pdata->rtc, 1, events);
5196 +- spin_unlock_irq(&pdata->rtc->irq_lock);
5197 ++ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
5198 +
5199 + return IRQ_HANDLED;
5200 + }
5201 +diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
5202 +index 532d212..393e7ce 100644
5203 +--- a/drivers/scsi/aic94xx/aic94xx_task.c
5204 ++++ b/drivers/scsi/aic94xx/aic94xx_task.c
5205 +@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
5206 +
5207 + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
5208 + resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
5209 +- memcpy(&resp->ending_fis[0], r+16, 24);
5210 ++ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
5211 + ts->buf_valid_size = sizeof(*resp);
5212 + }
5213 + }
5214 +diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
5215 +index db9238f..4868fc9 100644
5216 +--- a/drivers/scsi/libsas/sas_ata.c
5217 ++++ b/drivers/scsi/libsas/sas_ata.c
5218 +@@ -112,12 +112,12 @@ static void sas_ata_task_done(struct sas_task *task)
5219 + if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
5220 + ((stat->stat == SAM_STAT_CHECK_CONDITION &&
5221 + dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
5222 +- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
5223 ++ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
5224 +
5225 + if (!link->sactive) {
5226 +- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
5227 ++ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
5228 + } else {
5229 +- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
5230 ++ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
5231 + if (unlikely(link->eh_info.err_mask))
5232 + qc->flags |= ATA_QCFLAG_FAILED;
5233 + }
5234 +@@ -138,8 +138,8 @@ static void sas_ata_task_done(struct sas_task *task)
5235 + qc->flags |= ATA_QCFLAG_FAILED;
5236 + }
5237 +
5238 +- dev->sata_dev.tf.feature = 0x04; /* status err */
5239 +- dev->sata_dev.tf.command = ATA_ERR;
5240 ++ dev->sata_dev.fis[3] = 0x04; /* status err */
5241 ++ dev->sata_dev.fis[2] = ATA_ERR;
5242 + }
5243 + }
5244 +
5245 +@@ -252,7 +252,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
5246 + {
5247 + struct domain_device *dev = qc->ap->private_data;
5248 +
5249 +- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
5250 ++ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
5251 + return true;
5252 + }
5253 +
5254 +diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
5255 +index 65ea65a..93b9406 100644
5256 +--- a/drivers/target/target_core_cdb.c
5257 ++++ b/drivers/target/target_core_cdb.c
5258 +@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
5259 + if (num_blocks != 0)
5260 + range = num_blocks;
5261 + else
5262 +- range = (dev->transport->get_blocks(dev) - lba);
5263 ++ range = (dev->transport->get_blocks(dev) - lba) + 1;
5264 +
5265 + pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
5266 + (unsigned long long)lba, (unsigned long long)range);
5267 +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
5268 +index b75bc92..9145141 100644
5269 +--- a/drivers/target/target_core_pr.c
5270 ++++ b/drivers/target/target_core_pr.c
5271 +@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
5272 + if (IS_ERR(file) || !file || !file->f_dentry) {
5273 + pr_err("filp_open(%s) for APTPL metadata"
5274 + " failed\n", path);
5275 +- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
5276 ++ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
5277 + }
5278 +
5279 + iov[0].iov_base = &buf[0];
5280 +@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
5281 + " SPC-2 reservation is held, returning"
5282 + " RESERVATION_CONFLICT\n");
5283 + cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
5284 +- ret = EINVAL;
5285 ++ ret = -EINVAL;
5286 + goto out;
5287 + }
5288 +
5289 +@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
5290 + */
5291 + if (!cmd->se_sess) {
5292 + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5293 +- return -EINVAL;
5294 ++ ret = -EINVAL;
5295 ++ goto out;
5296 + }
5297 +
5298 + if (cmd->data_length < 24) {
5299 +diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
5300 +index d95cfe2..278819c 100644
5301 +--- a/drivers/target/tcm_fc/tfc_cmd.c
5302 ++++ b/drivers/target/tcm_fc/tfc_cmd.c
5303 +@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
5304 + {
5305 + struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
5306 +
5307 ++ if (cmd->aborted)
5308 ++ return ~0;
5309 + return fc_seq_exch(cmd->seq)->rxid;
5310 + }
5311 +
5312 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
5313 +index 19fb5fa..9aaed0d 100644
5314 +--- a/drivers/usb/class/cdc-wdm.c
5315 ++++ b/drivers/usb/class/cdc-wdm.c
5316 +@@ -473,6 +473,8 @@ retry:
5317 + goto retry;
5318 + }
5319 + if (!desc->reslength) { /* zero length read */
5320 ++ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
5321 ++ clear_bit(WDM_READ, &desc->flags);
5322 + spin_unlock_irq(&desc->iuspin);
5323 + goto retry;
5324 + }
5325 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5326 +index 52d27ed..175b6bb 100644
5327 +--- a/drivers/usb/core/hub.c
5328 ++++ b/drivers/usb/core/hub.c
5329 +@@ -2039,12 +2039,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
5330 + static int hub_port_reset(struct usb_hub *hub, int port1,
5331 + struct usb_device *udev, unsigned int delay, bool warm);
5332 +
5333 +-/* Is a USB 3.0 port in the Inactive state? */
5334 +-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
5335 ++/* Is a USB 3.0 port in the Inactive or Complinance Mode state?
5336 ++ * Port worm reset is required to recover
5337 ++ */
5338 ++static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
5339 + {
5340 + return hub_is_superspeed(hub->hdev) &&
5341 +- (portstatus & USB_PORT_STAT_LINK_STATE) ==
5342 +- USB_SS_PORT_LS_SS_INACTIVE;
5343 ++ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
5344 ++ USB_SS_PORT_LS_SS_INACTIVE) ||
5345 ++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
5346 ++ USB_SS_PORT_LS_COMP_MOD)) ;
5347 + }
5348 +
5349 + static int hub_port_wait_reset(struct usb_hub *hub, int port1,
5350 +@@ -2080,7 +2084,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
5351 + *
5352 + * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
5353 + */
5354 +- if (hub_port_inactive(hub, portstatus)) {
5355 ++ if (hub_port_warm_reset_required(hub, portstatus)) {
5356 + int ret;
5357 +
5358 + if ((portchange & USB_PORT_STAT_C_CONNECTION))
5359 +@@ -3646,9 +3650,7 @@ static void hub_events(void)
5360 + /* Warm reset a USB3 protocol port if it's in
5361 + * SS.Inactive state.
5362 + */
5363 +- if (hub_is_superspeed(hub->hdev) &&
5364 +- (portstatus & USB_PORT_STAT_LINK_STATE)
5365 +- == USB_SS_PORT_LS_SS_INACTIVE) {
5366 ++ if (hub_port_warm_reset_required(hub, portstatus)) {
5367 + dev_dbg(hub_dev, "warm reset port %d\n", i);
5368 + hub_port_reset(hub, i, NULL,
5369 + HUB_BH_RESET_TIME, true);
5370 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
5371 +index a8b2980..fd8a2c2 100644
5372 +--- a/drivers/usb/host/xhci-hub.c
5373 ++++ b/drivers/usb/host/xhci-hub.c
5374 +@@ -438,6 +438,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
5375 + }
5376 + }
5377 +
5378 ++/* Updates Link Status for super Speed port */
5379 ++static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
5380 ++{
5381 ++ u32 pls = status_reg & PORT_PLS_MASK;
5382 ++
5383 ++ /* resume state is a xHCI internal state.
5384 ++ * Do not report it to usb core.
5385 ++ */
5386 ++ if (pls == XDEV_RESUME)
5387 ++ return;
5388 ++
5389 ++ /* When the CAS bit is set then warm reset
5390 ++ * should be performed on port
5391 ++ */
5392 ++ if (status_reg & PORT_CAS) {
5393 ++ /* The CAS bit can be set while the port is
5394 ++ * in any link state.
5395 ++ * Only roothubs have CAS bit, so we
5396 ++ * pretend to be in compliance mode
5397 ++ * unless we're already in compliance
5398 ++ * or the inactive state.
5399 ++ */
5400 ++ if (pls != USB_SS_PORT_LS_COMP_MOD &&
5401 ++ pls != USB_SS_PORT_LS_SS_INACTIVE) {
5402 ++ pls = USB_SS_PORT_LS_COMP_MOD;
5403 ++ }
5404 ++ /* Return also connection bit -
5405 ++ * hub state machine resets port
5406 ++ * when this bit is set.
5407 ++ */
5408 ++ pls |= USB_PORT_STAT_CONNECTION;
5409 ++ }
5410 ++ /* update status field */
5411 ++ *status |= pls;
5412 ++}
5413 ++
5414 + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
5415 + u16 wIndex, char *buf, u16 wLength)
5416 + {
5417 +@@ -579,13 +615,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
5418 + else
5419 + status |= USB_PORT_STAT_POWER;
5420 + }
5421 +- /* Port Link State */
5422 ++ /* Update Port Link State for super speed ports*/
5423 + if (hcd->speed == HCD_USB3) {
5424 +- /* resume state is a xHCI internal state.
5425 +- * Do not report it to usb core.
5426 +- */
5427 +- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
5428 +- status |= (temp & PORT_PLS_MASK);
5429 ++ xhci_hub_report_link_state(&status, temp);
5430 + }
5431 + if (bus_state->port_c_suspend & (1 << wIndex))
5432 + status |= 1 << USB_PORT_FEAT_C_SUSPEND;
5433 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
5434 +index 363b141..7a56805 100644
5435 +--- a/drivers/usb/host/xhci.h
5436 ++++ b/drivers/usb/host/xhci.h
5437 +@@ -341,7 +341,11 @@ struct xhci_op_regs {
5438 + #define PORT_PLC (1 << 22)
5439 + /* port configure error change - port failed to configure its link partner */
5440 + #define PORT_CEC (1 << 23)
5441 +-/* bit 24 reserved */
5442 ++/* Cold Attach Status - xHC can set this bit to report device attached during
5443 ++ * Sx state. Warm port reset should be perfomed to clear this bit and move port
5444 ++ * to connected state.
5445 ++ */
5446 ++#define PORT_CAS (1 << 24)
5447 + /* wake on connect (enable) */
5448 + #define PORT_WKCONN_E (1 << 25)
5449 + /* wake on disconnect (enable) */
5450 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5451 +index 21a4734..5971c95 100644
5452 +--- a/drivers/usb/serial/option.c
5453 ++++ b/drivers/usb/serial/option.c
5454 +@@ -496,6 +496,15 @@ static void option_instat_callback(struct urb *urb);
5455 +
5456 + /* MediaTek products */
5457 + #define MEDIATEK_VENDOR_ID 0x0e8d
5458 ++#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
5459 ++#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
5460 ++#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
5461 ++#define MEDIATEK_PRODUCT_7208_1COM 0x7101
5462 ++#define MEDIATEK_PRODUCT_7208_2COM 0x7102
5463 ++#define MEDIATEK_PRODUCT_FP_1COM 0x0003
5464 ++#define MEDIATEK_PRODUCT_FP_2COM 0x0023
5465 ++#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
5466 ++#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
5467 +
5468 + /* Cellient products */
5469 + #define CELLIENT_VENDOR_ID 0x2692
5470 +@@ -553,6 +562,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
5471 + .reserved = BIT(1),
5472 + };
5473 +
5474 ++static const struct option_blacklist_info net_intf2_blacklist = {
5475 ++ .reserved = BIT(2),
5476 ++};
5477 ++
5478 + static const struct option_blacklist_info net_intf3_blacklist = {
5479 + .reserved = BIT(3),
5480 + };
5481 +@@ -1093,6 +1106,8 @@ static const struct usb_device_id option_ids[] = {
5482 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
5483 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
5484 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
5485 ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
5486 ++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
5487 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
5488 + 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
5489 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
5490 +@@ -1234,6 +1249,17 @@ static const struct usb_device_id option_ids[] = {
5491 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
5492 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
5493 + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
5494 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
5495 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
5496 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
5497 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
5498 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
5499 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
5500 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
5501 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
5502 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
5503 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
5504 ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
5505 + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
5506 + { } /* Terminating entry */
5507 + };
5508 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
5509 +index c14c42b..ae66278 100644
5510 +--- a/drivers/vhost/vhost.c
5511 ++++ b/drivers/vhost/vhost.c
5512 +@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
5513 + if (work) {
5514 + __set_current_state(TASK_RUNNING);
5515 + work->fn(work);
5516 ++ if (need_resched())
5517 ++ schedule();
5518 + } else
5519 + schedule();
5520 +
5521 +diff --git a/fs/buffer.c b/fs/buffer.c
5522 +index c807931..4115eca 100644
5523 +--- a/fs/buffer.c
5524 ++++ b/fs/buffer.c
5525 +@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
5526 + static struct buffer_head *
5527 + __getblk_slow(struct block_device *bdev, sector_t block, int size)
5528 + {
5529 ++ int ret;
5530 ++ struct buffer_head *bh;
5531 ++
5532 + /* Size must be multiple of hard sectorsize */
5533 + if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
5534 + (size < 512 || size > PAGE_SIZE))) {
5535 +@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
5536 + return NULL;
5537 + }
5538 +
5539 +- for (;;) {
5540 +- struct buffer_head * bh;
5541 +- int ret;
5542 ++retry:
5543 ++ bh = __find_get_block(bdev, block, size);
5544 ++ if (bh)
5545 ++ return bh;
5546 +
5547 ++ ret = grow_buffers(bdev, block, size);
5548 ++ if (ret == 0) {
5549 ++ free_more_memory();
5550 ++ goto retry;
5551 ++ } else if (ret > 0) {
5552 + bh = __find_get_block(bdev, block, size);
5553 + if (bh)
5554 + return bh;
5555 +-
5556 +- ret = grow_buffers(bdev, block, size);
5557 +- if (ret < 0)
5558 +- return NULL;
5559 +- if (ret == 0)
5560 +- free_more_memory();
5561 + }
5562 ++ return NULL;
5563 + }
5564 +
5565 + /*
5566 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5567 +index b21670c..56c152d 100644
5568 +--- a/fs/cifs/connect.c
5569 ++++ b/fs/cifs/connect.c
5570 +@@ -2925,6 +2925,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
5571 + #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
5572 + #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
5573 +
5574 ++/*
5575 ++ * On hosts with high memory, we can't currently support wsize/rsize that are
5576 ++ * larger than we can kmap at once. Cap the rsize/wsize at
5577 ++ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
5578 ++ * larger than that anyway.
5579 ++ */
5580 ++#ifdef CONFIG_HIGHMEM
5581 ++#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
5582 ++#else /* CONFIG_HIGHMEM */
5583 ++#define CIFS_KMAP_SIZE_LIMIT (1<<24)
5584 ++#endif /* CONFIG_HIGHMEM */
5585 ++
5586 + static unsigned int
5587 + cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
5588 + {
5589 +@@ -2955,6 +2967,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
5590 + wsize = min_t(unsigned int, wsize,
5591 + server->maxBuf - sizeof(WRITE_REQ) + 4);
5592 +
5593 ++ /* limit to the amount that we can kmap at once */
5594 ++ wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
5595 ++
5596 + /* hard limit of CIFS_MAX_WSIZE */
5597 + wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
5598 +
5599 +@@ -2996,6 +3011,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
5600 + if (!(server->capabilities & CAP_LARGE_READ_X))
5601 + rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
5602 +
5603 ++ /* limit to the amount that we can kmap at once */
5604 ++ rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
5605 ++
5606 + /* hard limit of CIFS_MAX_RSIZE */
5607 + rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
5608 +
5609 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
5610 +index db4a138..4c37ed4 100644
5611 +--- a/fs/cifs/readdir.c
5612 ++++ b/fs/cifs/readdir.c
5613 +@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
5614 +
5615 + dentry = d_lookup(parent, name);
5616 + if (dentry) {
5617 +- /* FIXME: check for inode number changes? */
5618 +- if (dentry->d_inode != NULL)
5619 ++ inode = dentry->d_inode;
5620 ++ /* update inode in place if i_ino didn't change */
5621 ++ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
5622 ++ cifs_fattr_to_inode(inode, fattr);
5623 + return dentry;
5624 ++ }
5625 + d_drop(dentry);
5626 + dput(dentry);
5627 + }
5628 +diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
5629 +index 69f994a..0dbe58a 100644
5630 +--- a/fs/ecryptfs/kthread.c
5631 ++++ b/fs/ecryptfs/kthread.c
5632 +@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
5633 + (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
5634 + if (!IS_ERR(*lower_file))
5635 + goto out;
5636 +- if (flags & O_RDONLY) {
5637 ++ if ((flags & O_ACCMODE) == O_RDONLY) {
5638 + rc = PTR_ERR((*lower_file));
5639 + goto out;
5640 + }
5641 +diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
5642 +index 0dc5a3d..de42310 100644
5643 +--- a/fs/ecryptfs/miscdev.c
5644 ++++ b/fs/ecryptfs/miscdev.c
5645 +@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
5646 + mutex_lock(&ecryptfs_daemon_hash_mux);
5647 + /* TODO: Just use file->private_data? */
5648 + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
5649 +- BUG_ON(rc || !daemon);
5650 ++ if (rc || !daemon) {
5651 ++ mutex_unlock(&ecryptfs_daemon_hash_mux);
5652 ++ return -EINVAL;
5653 ++ }
5654 + mutex_lock(&daemon->mux);
5655 + mutex_unlock(&ecryptfs_daemon_hash_mux);
5656 + if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
5657 +@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
5658 + goto out_unlock_daemon;
5659 + }
5660 + daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
5661 ++ file->private_data = daemon;
5662 + atomic_inc(&ecryptfs_num_miscdev_opens);
5663 + out_unlock_daemon:
5664 + mutex_unlock(&daemon->mux);
5665 +@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
5666 +
5667 + mutex_lock(&ecryptfs_daemon_hash_mux);
5668 + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
5669 +- BUG_ON(rc || !daemon);
5670 ++ if (rc || !daemon)
5671 ++ daemon = file->private_data;
5672 + mutex_lock(&daemon->mux);
5673 +- BUG_ON(daemon->pid != task_pid(current));
5674 + BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
5675 + daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
5676 + atomic_dec(&ecryptfs_num_miscdev_opens);
5677 +@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
5678 + struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
5679 + u16 msg_flags, struct ecryptfs_daemon *daemon)
5680 + {
5681 +- int rc = 0;
5682 ++ struct ecryptfs_message *msg;
5683 +
5684 +- mutex_lock(&msg_ctx->mux);
5685 +- msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
5686 +- GFP_KERNEL);
5687 +- if (!msg_ctx->msg) {
5688 +- rc = -ENOMEM;
5689 ++ msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
5690 ++ if (!msg) {
5691 + printk(KERN_ERR "%s: Out of memory whilst attempting "
5692 + "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
5693 +- (sizeof(*msg_ctx->msg) + data_size));
5694 +- goto out_unlock;
5695 ++ (sizeof(*msg) + data_size));
5696 ++ return -ENOMEM;
5697 + }
5698 ++
5699 ++ mutex_lock(&msg_ctx->mux);
5700 ++ msg_ctx->msg = msg;
5701 + msg_ctx->msg->index = msg_ctx->index;
5702 + msg_ctx->msg->data_len = data_size;
5703 + msg_ctx->type = msg_type;
5704 + memcpy(msg_ctx->msg->data, data, data_size);
5705 + msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
5706 +- mutex_lock(&daemon->mux);
5707 + list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
5708 ++ mutex_unlock(&msg_ctx->mux);
5709 ++
5710 ++ mutex_lock(&daemon->mux);
5711 + daemon->num_queued_msg_ctx++;
5712 + wake_up_interruptible(&daemon->wait);
5713 + mutex_unlock(&daemon->mux);
5714 +-out_unlock:
5715 +- mutex_unlock(&msg_ctx->mux);
5716 +- return rc;
5717 ++
5718 ++ return 0;
5719 + }
5720 +
5721 + /**
5722 +@@ -246,8 +251,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
5723 + mutex_lock(&ecryptfs_daemon_hash_mux);
5724 + /* TODO: Just use file->private_data? */
5725 + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
5726 +- BUG_ON(rc || !daemon);
5727 ++ if (rc || !daemon) {
5728 ++ mutex_unlock(&ecryptfs_daemon_hash_mux);
5729 ++ return -EINVAL;
5730 ++ }
5731 + mutex_lock(&daemon->mux);
5732 ++ if (task_pid(current) != daemon->pid) {
5733 ++ mutex_unlock(&daemon->mux);
5734 ++ mutex_unlock(&ecryptfs_daemon_hash_mux);
5735 ++ return -EPERM;
5736 ++ }
5737 + if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
5738 + rc = 0;
5739 + mutex_unlock(&ecryptfs_daemon_hash_mux);
5740 +@@ -284,9 +297,6 @@ check_list:
5741 + * message from the queue; try again */
5742 + goto check_list;
5743 + }
5744 +- BUG_ON(euid != daemon->euid);
5745 +- BUG_ON(current_user_ns() != daemon->user_ns);
5746 +- BUG_ON(task_pid(current) != daemon->pid);
5747 + msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
5748 + struct ecryptfs_msg_ctx, daemon_out_list);
5749 + BUG_ON(!msg_ctx);
5750 +diff --git a/fs/eventpoll.c b/fs/eventpoll.c
5751 +index 4d9d3a4..a6f3763 100644
5752 +--- a/fs/eventpoll.c
5753 ++++ b/fs/eventpoll.c
5754 +@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
5755 + if (op == EPOLL_CTL_ADD) {
5756 + if (is_file_epoll(tfile)) {
5757 + error = -ELOOP;
5758 +- if (ep_loop_check(ep, tfile) != 0)
5759 ++ if (ep_loop_check(ep, tfile) != 0) {
5760 ++ clear_tfile_check_list();
5761 + goto error_tgt_fput;
5762 ++ }
5763 + } else
5764 + list_add(&tfile->f_tfile_llink, &tfile_check_list);
5765 + }
5766 +diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
5767 +index 49cf230..24a49d4 100644
5768 +--- a/fs/exofs/ore.c
5769 ++++ b/fs/exofs/ore.c
5770 +@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
5771 + out:
5772 + ios->numdevs = devs_in_group;
5773 + ios->pages_consumed = cur_pg;
5774 +- if (unlikely(ret)) {
5775 +- if (length == ios->length)
5776 +- return ret;
5777 +- else
5778 +- ios->length -= length;
5779 +- }
5780 +- return 0;
5781 ++ return ret;
5782 + }
5783 +
5784 + int ore_create(struct ore_io_state *ios)
5785 +diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
5786 +index d222c77..fff2070 100644
5787 +--- a/fs/exofs/ore_raid.c
5788 ++++ b/fs/exofs/ore_raid.c
5789 +@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
5790 + * ios->sp2d[p][*], xor is calculated the same way. These pages are
5791 + * allocated/freed and don't go through cache
5792 + */
5793 +-static int _read_4_write(struct ore_io_state *ios)
5794 ++static int _read_4_write_first_stripe(struct ore_io_state *ios)
5795 + {
5796 +- struct ore_io_state *ios_read;
5797 + struct ore_striping_info read_si;
5798 + struct __stripe_pages_2d *sp2d = ios->sp2d;
5799 + u64 offset = ios->si.first_stripe_start;
5800 +- u64 last_stripe_end;
5801 +- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
5802 +- unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
5803 +- int ret;
5804 ++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
5805 +
5806 + if (offset == ios->offset) /* Go to start collect $200 */
5807 + goto read_last_stripe;
5808 +@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
5809 + min_p = _sp2d_min_pg(sp2d);
5810 + max_p = _sp2d_max_pg(sp2d);
5811 +
5812 ++ ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
5813 ++ offset, ios->offset, min_p, max_p);
5814 ++
5815 + for (c = 0; ; c++) {
5816 + ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
5817 + read_si.obj_offset += min_p * PAGE_SIZE;
5818 +@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
5819 + }
5820 +
5821 + read_last_stripe:
5822 ++ return 0;
5823 ++}
5824 ++
5825 ++static int _read_4_write_last_stripe(struct ore_io_state *ios)
5826 ++{
5827 ++ struct ore_striping_info read_si;
5828 ++ struct __stripe_pages_2d *sp2d = ios->sp2d;
5829 ++ u64 offset;
5830 ++ u64 last_stripe_end;
5831 ++ unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
5832 ++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
5833 ++
5834 + offset = ios->offset + ios->length;
5835 + if (offset % PAGE_SIZE)
5836 + _add_to_r4w_last_page(ios, &offset);
5837 +@@ -527,15 +538,15 @@ read_last_stripe:
5838 + c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
5839 + ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
5840 +
5841 +- BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
5842 +- /* unaligned IO must be within a single stripe */
5843 +-
5844 + if (min_p == sp2d->pages_in_unit) {
5845 + /* Didn't do it yet */
5846 + min_p = _sp2d_min_pg(sp2d);
5847 + max_p = _sp2d_max_pg(sp2d);
5848 + }
5849 +
5850 ++ ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
5851 ++ offset, last_stripe_end, min_p, max_p);
5852 ++
5853 + while (offset < last_stripe_end) {
5854 + struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
5855 +
5856 +@@ -568,6 +579,15 @@ read_last_stripe:
5857 + }
5858 +
5859 + read_it:
5860 ++ return 0;
5861 ++}
5862 ++
5863 ++static int _read_4_write_execute(struct ore_io_state *ios)
5864 ++{
5865 ++ struct ore_io_state *ios_read;
5866 ++ unsigned i;
5867 ++ int ret;
5868 ++
5869 + ios_read = ios->ios_read_4_write;
5870 + if (!ios_read)
5871 + return 0;
5872 +@@ -591,6 +611,8 @@ read_it:
5873 + }
5874 +
5875 + _mark_read4write_pages_uptodate(ios_read, ret);
5876 ++ ore_put_io_state(ios_read);
5877 ++ ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
5878 + return 0;
5879 + }
5880 +
5881 +@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
5882 + /* If first stripe, Read in all read4write pages
5883 + * (if needed) before we calculate the first parity.
5884 + */
5885 +- _read_4_write(ios);
5886 ++ _read_4_write_first_stripe(ios);
5887 + }
5888 ++ if (!cur_len) /* If last stripe r4w pages of last stripe */
5889 ++ _read_4_write_last_stripe(ios);
5890 ++ _read_4_write_execute(ios);
5891 +
5892 + for (i = 0; i < num_pages; i++) {
5893 + pages[i] = _raid_page_alloc();
5894 +@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
5895 +
5896 + int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
5897 + {
5898 +- struct ore_layout *layout = ios->layout;
5899 +-
5900 + if (ios->parity_pages) {
5901 ++ struct ore_layout *layout = ios->layout;
5902 + unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
5903 +- unsigned stripe_size = ios->si.bytes_in_stripe;
5904 +- u64 last_stripe, first_stripe;
5905 +
5906 + if (_sp2d_alloc(pages_in_unit, layout->group_width,
5907 + layout->parity, &ios->sp2d)) {
5908 + return -ENOMEM;
5909 + }
5910 +-
5911 +- /* Round io down to last full strip */
5912 +- first_stripe = div_u64(ios->offset, stripe_size);
5913 +- last_stripe = div_u64(ios->offset + ios->length, stripe_size);
5914 +-
5915 +- /* If an IO spans more then a single stripe it must end at
5916 +- * a stripe boundary. The reminder at the end is pushed into the
5917 +- * next IO.
5918 +- */
5919 +- if (last_stripe != first_stripe) {
5920 +- ios->length = last_stripe * stripe_size - ios->offset;
5921 +-
5922 +- BUG_ON(!ios->length);
5923 +- ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
5924 +- PAGE_SIZE;
5925 +- ios->si.length = ios->length; /*make it consistent */
5926 +- }
5927 + }
5928 + return 0;
5929 + }
5930 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5931 +index ab7aa3f..a93486e 100644
5932 +--- a/fs/ext4/super.c
5933 ++++ b/fs/ext4/super.c
5934 +@@ -1097,7 +1097,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
5935 + }
5936 + if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
5937 + seq_printf(seq, ",max_batch_time=%u",
5938 +- (unsigned) sbi->s_min_batch_time);
5939 ++ (unsigned) sbi->s_max_batch_time);
5940 + }
5941 +
5942 + /*
5943 +diff --git a/fs/fifo.c b/fs/fifo.c
5944 +index b1a524d..cf6f434 100644
5945 +--- a/fs/fifo.c
5946 ++++ b/fs/fifo.c
5947 +@@ -14,7 +14,7 @@
5948 + #include <linux/sched.h>
5949 + #include <linux/pipe_fs_i.h>
5950 +
5951 +-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
5952 ++static int wait_for_partner(struct inode* inode, unsigned int *cnt)
5953 + {
5954 + int cur = *cnt;
5955 +
5956 +@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
5957 + if (signal_pending(current))
5958 + break;
5959 + }
5960 ++ return cur == *cnt ? -ERESTARTSYS : 0;
5961 + }
5962 +
5963 + static void wake_up_partner(struct inode* inode)
5964 +@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
5965 + * seen a writer */
5966 + filp->f_version = pipe->w_counter;
5967 + } else {
5968 +- wait_for_partner(inode, &pipe->w_counter);
5969 +- if(signal_pending(current))
5970 ++ if (wait_for_partner(inode, &pipe->w_counter))
5971 + goto err_rd;
5972 + }
5973 + }
5974 +@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
5975 + wake_up_partner(inode);
5976 +
5977 + if (!pipe->readers) {
5978 +- wait_for_partner(inode, &pipe->r_counter);
5979 +- if (signal_pending(current))
5980 ++ if (wait_for_partner(inode, &pipe->r_counter))
5981 + goto err_wr;
5982 + }
5983 + break;
5984 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
5985 +index 2d0ca24..ebc2f4d 100644
5986 +--- a/fs/hugetlbfs/inode.c
5987 ++++ b/fs/hugetlbfs/inode.c
5988 +@@ -592,9 +592,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
5989 + spin_lock(&sbinfo->stat_lock);
5990 + /* If no limits set, just report 0 for max/free/used
5991 + * blocks, like simple_statfs() */
5992 +- if (sbinfo->max_blocks >= 0) {
5993 +- buf->f_blocks = sbinfo->max_blocks;
5994 +- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
5995 ++ if (sbinfo->spool) {
5996 ++ long free_pages;
5997 ++
5998 ++ spin_lock(&sbinfo->spool->lock);
5999 ++ buf->f_blocks = sbinfo->spool->max_hpages;
6000 ++ free_pages = sbinfo->spool->max_hpages
6001 ++ - sbinfo->spool->used_hpages;
6002 ++ buf->f_bavail = buf->f_bfree = free_pages;
6003 ++ spin_unlock(&sbinfo->spool->lock);
6004 + buf->f_files = sbinfo->max_inodes;
6005 + buf->f_ffree = sbinfo->free_inodes;
6006 + }
6007 +@@ -610,6 +616,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
6008 +
6009 + if (sbi) {
6010 + sb->s_fs_info = NULL;
6011 ++
6012 ++ if (sbi->spool)
6013 ++ hugepage_put_subpool(sbi->spool);
6014 ++
6015 + kfree(sbi);
6016 + }
6017 + }
6018 +@@ -841,10 +851,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
6019 + sb->s_fs_info = sbinfo;
6020 + sbinfo->hstate = config.hstate;
6021 + spin_lock_init(&sbinfo->stat_lock);
6022 +- sbinfo->max_blocks = config.nr_blocks;
6023 +- sbinfo->free_blocks = config.nr_blocks;
6024 + sbinfo->max_inodes = config.nr_inodes;
6025 + sbinfo->free_inodes = config.nr_inodes;
6026 ++ sbinfo->spool = NULL;
6027 ++ if (config.nr_blocks != -1) {
6028 ++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
6029 ++ if (!sbinfo->spool)
6030 ++ goto out_free;
6031 ++ }
6032 + sb->s_maxbytes = MAX_LFS_FILESIZE;
6033 + sb->s_blocksize = huge_page_size(config.hstate);
6034 + sb->s_blocksize_bits = huge_page_shift(config.hstate);
6035 +@@ -864,38 +878,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
6036 + sb->s_root = root;
6037 + return 0;
6038 + out_free:
6039 ++ if (sbinfo->spool)
6040 ++ kfree(sbinfo->spool);
6041 + kfree(sbinfo);
6042 + return -ENOMEM;
6043 + }
6044 +
6045 +-int hugetlb_get_quota(struct address_space *mapping, long delta)
6046 +-{
6047 +- int ret = 0;
6048 +- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
6049 +-
6050 +- if (sbinfo->free_blocks > -1) {
6051 +- spin_lock(&sbinfo->stat_lock);
6052 +- if (sbinfo->free_blocks - delta >= 0)
6053 +- sbinfo->free_blocks -= delta;
6054 +- else
6055 +- ret = -ENOMEM;
6056 +- spin_unlock(&sbinfo->stat_lock);
6057 +- }
6058 +-
6059 +- return ret;
6060 +-}
6061 +-
6062 +-void hugetlb_put_quota(struct address_space *mapping, long delta)
6063 +-{
6064 +- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
6065 +-
6066 +- if (sbinfo->free_blocks > -1) {
6067 +- spin_lock(&sbinfo->stat_lock);
6068 +- sbinfo->free_blocks += delta;
6069 +- spin_unlock(&sbinfo->stat_lock);
6070 +- }
6071 +-}
6072 +-
6073 + static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
6074 + int flags, const char *dev_name, void *data)
6075 + {
6076 +diff --git a/fs/locks.c b/fs/locks.c
6077 +index 0d68f1f..6a64f15 100644
6078 +--- a/fs/locks.c
6079 ++++ b/fs/locks.c
6080 +@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
6081 + case F_WRLCK:
6082 + return generic_add_lease(filp, arg, flp);
6083 + default:
6084 +- BUG();
6085 ++ return -EINVAL;
6086 + }
6087 + }
6088 + EXPORT_SYMBOL(generic_setlease);
6089 +diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
6090 +index 47d1c6f..b122af8 100644
6091 +--- a/fs/nfs/idmap.c
6092 ++++ b/fs/nfs/idmap.c
6093 +@@ -318,12 +318,12 @@ struct idmap_hashent {
6094 + unsigned long ih_expires;
6095 + __u32 ih_id;
6096 + size_t ih_namelen;
6097 +- char ih_name[IDMAP_NAMESZ];
6098 ++ const char *ih_name;
6099 + };
6100 +
6101 + struct idmap_hashtable {
6102 + __u8 h_type;
6103 +- struct idmap_hashent h_entries[IDMAP_HASH_SZ];
6104 ++ struct idmap_hashent *h_entries;
6105 + };
6106 +
6107 + struct idmap {
6108 +@@ -378,6 +378,28 @@ nfs_idmap_new(struct nfs_client *clp)
6109 + return 0;
6110 + }
6111 +
6112 ++static void
6113 ++idmap_alloc_hashtable(struct idmap_hashtable *h)
6114 ++{
6115 ++ if (h->h_entries != NULL)
6116 ++ return;
6117 ++ h->h_entries = kcalloc(IDMAP_HASH_SZ,
6118 ++ sizeof(*h->h_entries),
6119 ++ GFP_KERNEL);
6120 ++}
6121 ++
6122 ++static void
6123 ++idmap_free_hashtable(struct idmap_hashtable *h)
6124 ++{
6125 ++ int i;
6126 ++
6127 ++ if (h->h_entries == NULL)
6128 ++ return;
6129 ++ for (i = 0; i < IDMAP_HASH_SZ; i++)
6130 ++ kfree(h->h_entries[i].ih_name);
6131 ++ kfree(h->h_entries);
6132 ++}
6133 ++
6134 + void
6135 + nfs_idmap_delete(struct nfs_client *clp)
6136 + {
6137 +@@ -387,6 +409,8 @@ nfs_idmap_delete(struct nfs_client *clp)
6138 + return;
6139 + rpc_unlink(idmap->idmap_dentry);
6140 + clp->cl_idmap = NULL;
6141 ++ idmap_free_hashtable(&idmap->idmap_user_hash);
6142 ++ idmap_free_hashtable(&idmap->idmap_group_hash);
6143 + kfree(idmap);
6144 + }
6145 +
6146 +@@ -396,6 +420,8 @@ nfs_idmap_delete(struct nfs_client *clp)
6147 + static inline struct idmap_hashent *
6148 + idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
6149 + {
6150 ++ if (h->h_entries == NULL)
6151 ++ return NULL;
6152 + return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
6153 + }
6154 +
6155 +@@ -404,6 +430,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
6156 + {
6157 + struct idmap_hashent *he = idmap_name_hash(h, name, len);
6158 +
6159 ++ if (he == NULL)
6160 ++ return NULL;
6161 + if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
6162 + return NULL;
6163 + if (time_after(jiffies, he->ih_expires))
6164 +@@ -414,6 +442,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
6165 + static inline struct idmap_hashent *
6166 + idmap_id_hash(struct idmap_hashtable* h, __u32 id)
6167 + {
6168 ++ if (h->h_entries == NULL)
6169 ++ return NULL;
6170 + return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
6171 + }
6172 +
6173 +@@ -421,6 +451,9 @@ static struct idmap_hashent *
6174 + idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
6175 + {
6176 + struct idmap_hashent *he = idmap_id_hash(h, id);
6177 ++
6178 ++ if (he == NULL)
6179 ++ return NULL;
6180 + if (he->ih_id != id || he->ih_namelen == 0)
6181 + return NULL;
6182 + if (time_after(jiffies, he->ih_expires))
6183 +@@ -436,12 +469,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
6184 + static inline struct idmap_hashent *
6185 + idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
6186 + {
6187 ++ idmap_alloc_hashtable(h);
6188 + return idmap_name_hash(h, name, len);
6189 + }
6190 +
6191 + static inline struct idmap_hashent *
6192 + idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
6193 + {
6194 ++ idmap_alloc_hashtable(h);
6195 + return idmap_id_hash(h, id);
6196 + }
6197 +
6198 +@@ -449,9 +484,14 @@ static void
6199 + idmap_update_entry(struct idmap_hashent *he, const char *name,
6200 + size_t namelen, __u32 id)
6201 + {
6202 ++ char *str = kmalloc(namelen + 1, GFP_KERNEL);
6203 ++ if (str == NULL)
6204 ++ return;
6205 ++ kfree(he->ih_name);
6206 + he->ih_id = id;
6207 +- memcpy(he->ih_name, name, namelen);
6208 +- he->ih_name[namelen] = '\0';
6209 ++ memcpy(str, name, namelen);
6210 ++ str[namelen] = '\0';
6211 ++ he->ih_name = str;
6212 + he->ih_namelen = namelen;
6213 + he->ih_expires = jiffies + nfs_idmap_cache_timeout;
6214 + }
6215 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
6216 +index 66020ac..07354b7 100644
6217 +--- a/fs/nfs/nfs4state.c
6218 ++++ b/fs/nfs/nfs4state.c
6219 +@@ -1186,8 +1186,9 @@ restart:
6220 + spin_lock(&state->state_lock);
6221 + list_for_each_entry(lock, &state->lock_states, ls_locks) {
6222 + if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
6223 +- printk("%s: Lock reclaim failed!\n",
6224 +- __func__);
6225 ++ pr_warn_ratelimited("NFS: "
6226 ++ "%s: Lock reclaim "
6227 ++ "failed!\n", __func__);
6228 + }
6229 + spin_unlock(&state->state_lock);
6230 + nfs4_put_open_state(state);
6231 +diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
6232 +index 55d0128..a03ee52 100644
6233 +--- a/fs/nfs/objlayout/objio_osd.c
6234 ++++ b/fs/nfs/objlayout/objio_osd.c
6235 +@@ -433,7 +433,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
6236 + objios->ios->done = _read_done;
6237 + dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
6238 + rdata->args.offset, rdata->args.count);
6239 +- return ore_read(objios->ios);
6240 ++ ret = ore_read(objios->ios);
6241 ++ if (unlikely(ret))
6242 ++ objio_free_result(&objios->oir);
6243 ++ return ret;
6244 + }
6245 +
6246 + /*
6247 +@@ -464,8 +467,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
6248 + struct objio_state *objios = priv;
6249 + struct nfs_write_data *wdata = objios->oir.rpcdata;
6250 + pgoff_t index = offset / PAGE_SIZE;
6251 +- struct page *page = find_get_page(wdata->inode->i_mapping, index);
6252 ++ struct page *page;
6253 ++ loff_t i_size = i_size_read(wdata->inode);
6254 ++
6255 ++ if (offset >= i_size) {
6256 ++ *uptodate = true;
6257 ++ dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
6258 ++ return ZERO_PAGE(0);
6259 ++ }
6260 +
6261 ++ page = find_get_page(wdata->inode->i_mapping, index);
6262 + if (!page) {
6263 + page = find_or_create_page(wdata->inode->i_mapping,
6264 + index, GFP_NOFS);
6265 +@@ -486,8 +497,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
6266 +
6267 + static void __r4w_put_page(void *priv, struct page *page)
6268 + {
6269 +- dprintk("%s: index=0x%lx\n", __func__, page->index);
6270 +- page_cache_release(page);
6271 ++ dprintk("%s: index=0x%lx\n", __func__,
6272 ++ (page == ZERO_PAGE(0)) ? -1UL : page->index);
6273 ++ if (ZERO_PAGE(0) != page)
6274 ++ page_cache_release(page);
6275 + return;
6276 + }
6277 +
6278 +@@ -517,8 +530,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
6279 + dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
6280 + wdata->args.offset, wdata->args.count);
6281 + ret = ore_write(objios->ios);
6282 +- if (unlikely(ret))
6283 ++ if (unlikely(ret)) {
6284 ++ objio_free_result(&objios->oir);
6285 + return ret;
6286 ++ }
6287 +
6288 + if (objios->sync)
6289 + _write_done(objios->ios, objios);
6290 +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
6291 +index 07ee5b4..1c7d45e 100644
6292 +--- a/fs/ocfs2/file.c
6293 ++++ b/fs/ocfs2/file.c
6294 +@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
6295 + if (ret < 0)
6296 + mlog_errno(ret);
6297 +
6298 +- if (file->f_flags & O_SYNC)
6299 ++ if (file && (file->f_flags & O_SYNC))
6300 + handle->h_sync = 1;
6301 +
6302 + ocfs2_commit_trans(osb, handle);
6303 +diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
6304 +index fbb0b47..d5378d0 100644
6305 +--- a/fs/ramfs/file-nommu.c
6306 ++++ b/fs/ramfs/file-nommu.c
6307 +@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
6308 +
6309 + /* prevent the page from being discarded on memory pressure */
6310 + SetPageDirty(page);
6311 ++ SetPageUptodate(page);
6312 +
6313 + unlock_page(page);
6314 + put_page(page);
6315 +diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
6316 +index 6094c5a..b73ecd8 100644
6317 +--- a/fs/ubifs/sb.c
6318 ++++ b/fs/ubifs/sb.c
6319 +@@ -715,8 +715,12 @@ static int fixup_free_space(struct ubifs_info *c)
6320 + lnum = ubifs_next_log_lnum(c, lnum);
6321 + }
6322 +
6323 +- /* Fixup the current log head */
6324 +- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
6325 ++ /*
6326 ++ * Fixup the log head which contains the only a CS node at the
6327 ++ * beginning.
6328 ++ */
6329 ++ err = fixup_leb(c, c->lhead_lnum,
6330 ++ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
6331 + if (err)
6332 + goto out;
6333 +
6334 +diff --git a/include/linux/Kbuild b/include/linux/Kbuild
6335 +index bd21ecd..a3ce901 100644
6336 +--- a/include/linux/Kbuild
6337 ++++ b/include/linux/Kbuild
6338 +@@ -268,6 +268,7 @@ header-y += netfilter_ipv4.h
6339 + header-y += netfilter_ipv6.h
6340 + header-y += netlink.h
6341 + header-y += netrom.h
6342 ++header-y += nfc.h
6343 + header-y += nfs.h
6344 + header-y += nfs2.h
6345 + header-y += nfs3.h
6346 +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
6347 +index fd0dc30..cc07d27 100644
6348 +--- a/include/linux/hrtimer.h
6349 ++++ b/include/linux/hrtimer.h
6350 +@@ -165,6 +165,7 @@ enum hrtimer_base_type {
6351 + * @lock: lock protecting the base and associated clock bases
6352 + * and timers
6353 + * @active_bases: Bitfield to mark bases with active timers
6354 ++ * @clock_was_set: Indicates that clock was set from irq context.
6355 + * @expires_next: absolute time of the next event which was scheduled
6356 + * via clock_set_next_event()
6357 + * @hres_active: State of high resolution mode
6358 +@@ -177,7 +178,8 @@ enum hrtimer_base_type {
6359 + */
6360 + struct hrtimer_cpu_base {
6361 + raw_spinlock_t lock;
6362 +- unsigned long active_bases;
6363 ++ unsigned int active_bases;
6364 ++ unsigned int clock_was_set;
6365 + #ifdef CONFIG_HIGH_RES_TIMERS
6366 + ktime_t expires_next;
6367 + int hres_active;
6368 +@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
6369 + # define MONOTONIC_RES_NSEC HIGH_RES_NSEC
6370 + # define KTIME_MONOTONIC_RES KTIME_HIGH_RES
6371 +
6372 ++extern void clock_was_set_delayed(void);
6373 ++
6374 + #else
6375 +
6376 + # define MONOTONIC_RES_NSEC LOW_RES_NSEC
6377 +@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
6378 + {
6379 + return 0;
6380 + }
6381 ++
6382 ++static inline void clock_was_set_delayed(void) { }
6383 ++
6384 + #endif
6385 +
6386 + extern void clock_was_set(void);
6387 +@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
6388 + extern ktime_t ktime_get_real(void);
6389 + extern ktime_t ktime_get_boottime(void);
6390 + extern ktime_t ktime_get_monotonic_offset(void);
6391 ++extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
6392 +
6393 + DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
6394 +
6395 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
6396 +index d9d6c86..c5ed2f1 100644
6397 +--- a/include/linux/hugetlb.h
6398 ++++ b/include/linux/hugetlb.h
6399 +@@ -14,6 +14,15 @@ struct user_struct;
6400 + #include <linux/shm.h>
6401 + #include <asm/tlbflush.h>
6402 +
6403 ++struct hugepage_subpool {
6404 ++ spinlock_t lock;
6405 ++ long count;
6406 ++ long max_hpages, used_hpages;
6407 ++};
6408 ++
6409 ++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
6410 ++void hugepage_put_subpool(struct hugepage_subpool *spool);
6411 ++
6412 + int PageHuge(struct page *page);
6413 +
6414 + void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
6415 +@@ -138,12 +147,11 @@ struct hugetlbfs_config {
6416 + };
6417 +
6418 + struct hugetlbfs_sb_info {
6419 +- long max_blocks; /* blocks allowed */
6420 +- long free_blocks; /* blocks free */
6421 + long max_inodes; /* inodes allowed */
6422 + long free_inodes; /* inodes free */
6423 + spinlock_t stat_lock;
6424 + struct hstate *hstate;
6425 ++ struct hugepage_subpool *spool;
6426 + };
6427 +
6428 +
6429 +@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
6430 + extern const struct vm_operations_struct hugetlb_vm_ops;
6431 + struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
6432 + struct user_struct **user, int creat_flags);
6433 +-int hugetlb_get_quota(struct address_space *mapping, long delta);
6434 +-void hugetlb_put_quota(struct address_space *mapping, long delta);
6435 +
6436 + static inline int is_file_hugepages(struct file *file)
6437 + {
6438 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
6439 +index 188cb2f..905b1e1 100644
6440 +--- a/include/linux/mmzone.h
6441 ++++ b/include/linux/mmzone.h
6442 +@@ -652,7 +652,7 @@ typedef struct pglist_data {
6443 + range, including holes */
6444 + int node_id;
6445 + wait_queue_head_t kswapd_wait;
6446 +- struct task_struct *kswapd;
6447 ++ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
6448 + int kswapd_max_order;
6449 + enum zone_type classzone_idx;
6450 + } pg_data_t;
6451 +diff --git a/include/linux/pci.h b/include/linux/pci.h
6452 +index c0cfa0d..7cda65b 100644
6453 +--- a/include/linux/pci.h
6454 ++++ b/include/linux/pci.h
6455 +@@ -176,8 +176,6 @@ enum pci_dev_flags {
6456 + PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
6457 + /* Provide indication device is assigned by a Virtual Machine Manager */
6458 + PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
6459 +- /* Device causes system crash if in D3 during S3 sleep */
6460 +- PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
6461 + };
6462 +
6463 + enum pci_irq_reroute_variant {
6464 +diff --git a/include/linux/sched.h b/include/linux/sched.h
6465 +index 1c4f3e9..5afa2a3 100644
6466 +--- a/include/linux/sched.h
6467 ++++ b/include/linux/sched.h
6468 +@@ -1892,6 +1892,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
6469 + }
6470 + #endif
6471 +
6472 ++#ifdef CONFIG_NO_HZ
6473 ++void calc_load_enter_idle(void);
6474 ++void calc_load_exit_idle(void);
6475 ++#else
6476 ++static inline void calc_load_enter_idle(void) { }
6477 ++static inline void calc_load_exit_idle(void) { }
6478 ++#endif /* CONFIG_NO_HZ */
6479 ++
6480 + #ifndef CONFIG_CPUMASK_OFFSTACK
6481 + static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
6482 + {
6483 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
6484 +index bdb4590..53dc7e7 100644
6485 +--- a/include/linux/skbuff.h
6486 ++++ b/include/linux/skbuff.h
6487 +@@ -213,11 +213,8 @@ enum {
6488 + /* device driver is going to provide hardware time stamp */
6489 + SKBTX_IN_PROGRESS = 1 << 2,
6490 +
6491 +- /* ensure the originating sk reference is available on driver level */
6492 +- SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
6493 +-
6494 + /* device driver supports TX zero-copy buffers */
6495 +- SKBTX_DEV_ZEROCOPY = 1 << 4,
6496 ++ SKBTX_DEV_ZEROCOPY = 1 << 3,
6497 + };
6498 +
6499 + /*
6500 +diff --git a/include/linux/timex.h b/include/linux/timex.h
6501 +index aa60fe7..08e90fb 100644
6502 +--- a/include/linux/timex.h
6503 ++++ b/include/linux/timex.h
6504 +@@ -266,7 +266,7 @@ static inline int ntp_synced(void)
6505 + /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
6506 + extern u64 tick_length;
6507 +
6508 +-extern void second_overflow(void);
6509 ++extern int second_overflow(unsigned long secs);
6510 + extern void update_ntp_one_tick(void);
6511 + extern int do_adjtimex(struct timex *);
6512 + extern void hardpps(const struct timespec *, const struct timespec *);
6513 +diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
6514 +index 6a308d4..1e100c6 100644
6515 +--- a/include/scsi/libsas.h
6516 ++++ b/include/scsi/libsas.h
6517 +@@ -159,6 +159,8 @@ enum ata_command_set {
6518 + ATAPI_COMMAND_SET = 1,
6519 + };
6520 +
6521 ++#define ATA_RESP_FIS_SIZE 24
6522 ++
6523 + struct sata_device {
6524 + enum ata_command_set command_set;
6525 + struct smp_resp rps_resp; /* report_phy_sata_resp */
6526 +@@ -170,7 +172,7 @@ struct sata_device {
6527 +
6528 + struct ata_port *ap;
6529 + struct ata_host ata_host;
6530 +- struct ata_taskfile tf;
6531 ++ u8 fis[ATA_RESP_FIS_SIZE];
6532 + u32 sstatus;
6533 + u32 serror;
6534 + u32 scontrol;
6535 +@@ -486,7 +488,7 @@ enum exec_status {
6536 + */
6537 + struct ata_task_resp {
6538 + u16 frame_len;
6539 +- u8 ending_fis[24]; /* dev to host or data-in */
6540 ++ u8 ending_fis[ATA_RESP_FIS_SIZE]; /* dev to host or data-in */
6541 + u32 sstatus;
6542 + u32 serror;
6543 + u32 scontrol;
6544 +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
6545 +index ae34bf5..6db7a5e 100644
6546 +--- a/kernel/hrtimer.c
6547 ++++ b/kernel/hrtimer.c
6548 +@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
6549 + return 0;
6550 + }
6551 +
6552 ++static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
6553 ++{
6554 ++ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
6555 ++ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
6556 ++
6557 ++ return ktime_get_update_offsets(offs_real, offs_boot);
6558 ++}
6559 ++
6560 + /*
6561 + * Retrigger next event is called after clock was set
6562 + *
6563 +@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
6564 + static void retrigger_next_event(void *arg)
6565 + {
6566 + struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
6567 +- struct timespec realtime_offset, xtim, wtm, sleep;
6568 +
6569 + if (!hrtimer_hres_active())
6570 + return;
6571 +
6572 +- /* Optimized out for !HIGH_RES */
6573 +- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
6574 +- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
6575 +-
6576 +- /* Adjust CLOCK_REALTIME offset */
6577 + raw_spin_lock(&base->lock);
6578 +- base->clock_base[HRTIMER_BASE_REALTIME].offset =
6579 +- timespec_to_ktime(realtime_offset);
6580 +- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
6581 +- timespec_to_ktime(sleep);
6582 +-
6583 ++ hrtimer_update_base(base);
6584 + hrtimer_force_reprogram(base, 0);
6585 + raw_spin_unlock(&base->lock);
6586 + }
6587 +@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
6588 + base->clock_base[i].resolution = KTIME_HIGH_RES;
6589 +
6590 + tick_setup_sched_timer();
6591 +-
6592 + /* "Retrigger" the interrupt to get things going */
6593 + retrigger_next_event(NULL);
6594 + local_irq_restore(flags);
6595 + return 1;
6596 + }
6597 +
6598 ++/*
6599 ++ * Called from timekeeping code to reprogramm the hrtimer interrupt
6600 ++ * device. If called from the timer interrupt context we defer it to
6601 ++ * softirq context.
6602 ++ */
6603 ++void clock_was_set_delayed(void)
6604 ++{
6605 ++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
6606 ++
6607 ++ cpu_base->clock_was_set = 1;
6608 ++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
6609 ++}
6610 ++
6611 + #else
6612 +
6613 + static inline int hrtimer_hres_active(void) { return 0; }
6614 +@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
6615 + cpu_base->nr_events++;
6616 + dev->next_event.tv64 = KTIME_MAX;
6617 +
6618 +- entry_time = now = ktime_get();
6619 ++ raw_spin_lock(&cpu_base->lock);
6620 ++ entry_time = now = hrtimer_update_base(cpu_base);
6621 + retry:
6622 + expires_next.tv64 = KTIME_MAX;
6623 +-
6624 +- raw_spin_lock(&cpu_base->lock);
6625 + /*
6626 + * We set expires_next to KTIME_MAX here with cpu_base->lock
6627 + * held to prevent that a timer is enqueued in our queue via
6628 +@@ -1330,8 +1339,12 @@ retry:
6629 + * We need to prevent that we loop forever in the hrtimer
6630 + * interrupt routine. We give it 3 attempts to avoid
6631 + * overreacting on some spurious event.
6632 ++ *
6633 ++ * Acquire base lock for updating the offsets and retrieving
6634 ++ * the current time.
6635 + */
6636 +- now = ktime_get();
6637 ++ raw_spin_lock(&cpu_base->lock);
6638 ++ now = hrtimer_update_base(cpu_base);
6639 + cpu_base->nr_retries++;
6640 + if (++retries < 3)
6641 + goto retry;
6642 +@@ -1343,6 +1356,7 @@ retry:
6643 + */
6644 + cpu_base->nr_hangs++;
6645 + cpu_base->hang_detected = 1;
6646 ++ raw_spin_unlock(&cpu_base->lock);
6647 + delta = ktime_sub(now, entry_time);
6648 + if (delta.tv64 > cpu_base->max_hang_time.tv64)
6649 + cpu_base->max_hang_time = delta;
6650 +@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
6651 +
6652 + static void run_hrtimer_softirq(struct softirq_action *h)
6653 + {
6654 ++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
6655 ++
6656 ++ if (cpu_base->clock_was_set) {
6657 ++ cpu_base->clock_was_set = 0;
6658 ++ clock_was_set();
6659 ++ }
6660 ++
6661 + hrtimer_peek_ahead_timers();
6662 + }
6663 +
6664 +diff --git a/kernel/power/swap.c b/kernel/power/swap.c
6665 +index b313086..64f8f97 100644
6666 +--- a/kernel/power/swap.c
6667 ++++ b/kernel/power/swap.c
6668 +@@ -6,7 +6,7 @@
6669 + *
6670 + * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@×××.cz>
6671 + * Copyright (C) 2006 Rafael J. Wysocki <rjw@××××.pl>
6672 +- * Copyright (C) 2010 Bojan Smojver <bojan@×××××××××.com>
6673 ++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@×××××××××.com>
6674 + *
6675 + * This file is released under the GPLv2.
6676 + *
6677 +@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
6678 + return -ENOSPC;
6679 +
6680 + if (bio_chain) {
6681 +- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
6682 ++ src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
6683 ++ __GFP_NORETRY);
6684 + if (src) {
6685 + copy_page(src, buf);
6686 + } else {
6687 + ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
6688 + if (ret)
6689 + return ret;
6690 +- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
6691 ++ src = (void *)__get_free_page(__GFP_WAIT |
6692 ++ __GFP_NOWARN |
6693 ++ __GFP_NORETRY);
6694 + if (src) {
6695 + copy_page(src, buf);
6696 + } else {
6697 +@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
6698 + clear_page(handle->cur);
6699 + handle->cur_swap = offset;
6700 + handle->k = 0;
6701 +- }
6702 +- if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
6703 +- error = hib_wait_on_bio_chain(bio_chain);
6704 +- if (error)
6705 +- goto out;
6706 +- handle->reqd_free_pages = reqd_free_pages();
6707 ++
6708 ++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
6709 ++ error = hib_wait_on_bio_chain(bio_chain);
6710 ++ if (error)
6711 ++ goto out;
6712 ++ /*
6713 ++ * Recalculate the number of required free pages, to
6714 ++ * make sure we never take more than half.
6715 ++ */
6716 ++ handle->reqd_free_pages = reqd_free_pages();
6717 ++ }
6718 + }
6719 + out:
6720 + return error;
6721 +@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
6722 + /* Maximum number of threads for compression/decompression. */
6723 + #define LZO_THREADS 3
6724 +
6725 +-/* Maximum number of pages for read buffering. */
6726 +-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
6727 ++/* Minimum/maximum number of pages for read buffering. */
6728 ++#define LZO_MIN_RD_PAGES 1024
6729 ++#define LZO_MAX_RD_PAGES 8192
6730 +
6731 +
6732 + /**
6733 +@@ -632,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
6734 + }
6735 +
6736 + /*
6737 +- * Adjust number of free pages after all allocations have been done.
6738 +- * We don't want to run out of pages when writing.
6739 +- */
6740 +- handle->reqd_free_pages = reqd_free_pages();
6741 +-
6742 +- /*
6743 + * Start the CRC32 thread.
6744 + */
6745 + init_waitqueue_head(&crc->go);
6746 +@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
6747 + goto out_clean;
6748 + }
6749 +
6750 ++ /*
6751 ++ * Adjust the number of required free pages after all allocations have
6752 ++ * been done. We don't want to run out of pages when writing.
6753 ++ */
6754 ++ handle->reqd_free_pages = reqd_free_pages();
6755 ++
6756 + printk(KERN_INFO
6757 + "PM: Using %u thread(s) for compression.\n"
6758 + "PM: Compressing and saving image data (%u pages) ... ",
6759 +@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
6760 + unsigned i, thr, run_threads, nr_threads;
6761 + unsigned ring = 0, pg = 0, ring_size = 0,
6762 + have = 0, want, need, asked = 0;
6763 +- unsigned long read_pages;
6764 ++ unsigned long read_pages = 0;
6765 + unsigned char **page = NULL;
6766 + struct dec_data *data = NULL;
6767 + struct crc_data *crc = NULL;
6768 +@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
6769 + nr_threads = num_online_cpus() - 1;
6770 + nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
6771 +
6772 +- page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
6773 ++ page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
6774 + if (!page) {
6775 + printk(KERN_ERR "PM: Failed to allocate LZO page\n");
6776 + ret = -ENOMEM;
6777 +@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
6778 + }
6779 +
6780 + /*
6781 +- * Adjust number of pages for read buffering, in case we are short.
6782 ++ * Set the number of pages for read buffering.
6783 ++ * This is complete guesswork, because we'll only know the real
6784 ++ * picture once prepare_image() is called, which is much later on
6785 ++ * during the image load phase. We'll assume the worst case and
6786 ++ * say that none of the image pages are from high memory.
6787 + */
6788 +- read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
6789 +- read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
6790 ++ if (low_free_pages() > snapshot_get_image_size())
6791 ++ read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
6792 ++ read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
6793 +
6794 + for (i = 0; i < read_pages; i++) {
6795 + page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
6796 + __GFP_WAIT | __GFP_HIGH :
6797 +- __GFP_WAIT);
6798 ++ __GFP_WAIT | __GFP_NOWARN |
6799 ++ __GFP_NORETRY);
6800 ++
6801 + if (!page[i]) {
6802 + if (i < LZO_CMP_PAGES) {
6803 + ring_size = i;
6804 +diff --git a/kernel/sched.c b/kernel/sched.c
6805 +index 576a27f..52ac69b 100644
6806 +--- a/kernel/sched.c
6807 ++++ b/kernel/sched.c
6808 +@@ -1885,7 +1885,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
6809 +
6810 + #endif
6811 +
6812 +-static void calc_load_account_idle(struct rq *this_rq);
6813 + static void update_sysctl(void);
6814 + static int get_update_sysctl_factor(void);
6815 + static void update_cpu_load(struct rq *this_rq);
6816 +@@ -3401,11 +3400,73 @@ unsigned long this_cpu_load(void)
6817 + }
6818 +
6819 +
6820 ++/*
6821 ++ * Global load-average calculations
6822 ++ *
6823 ++ * We take a distributed and async approach to calculating the global load-avg
6824 ++ * in order to minimize overhead.
6825 ++ *
6826 ++ * The global load average is an exponentially decaying average of nr_running +
6827 ++ * nr_uninterruptible.
6828 ++ *
6829 ++ * Once every LOAD_FREQ:
6830 ++ *
6831 ++ * nr_active = 0;
6832 ++ * for_each_possible_cpu(cpu)
6833 ++ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
6834 ++ *
6835 ++ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
6836 ++ *
6837 ++ * Due to a number of reasons the above turns in the mess below:
6838 ++ *
6839 ++ * - for_each_possible_cpu() is prohibitively expensive on machines with
6840 ++ * serious number of cpus, therefore we need to take a distributed approach
6841 ++ * to calculating nr_active.
6842 ++ *
6843 ++ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
6844 ++ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
6845 ++ *
6846 ++ * So assuming nr_active := 0 when we start out -- true per definition, we
6847 ++ * can simply take per-cpu deltas and fold those into a global accumulate
6848 ++ * to obtain the same result. See calc_load_fold_active().
6849 ++ *
6850 ++ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
6851 ++ * across the machine, we assume 10 ticks is sufficient time for every
6852 ++ * cpu to have completed this task.
6853 ++ *
6854 ++ * This places an upper-bound on the IRQ-off latency of the machine. Then
6855 ++ * again, being late doesn't loose the delta, just wrecks the sample.
6856 ++ *
6857 ++ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
6858 ++ * this would add another cross-cpu cacheline miss and atomic operation
6859 ++ * to the wakeup path. Instead we increment on whatever cpu the task ran
6860 ++ * when it went into uninterruptible state and decrement on whatever cpu
6861 ++ * did the wakeup. This means that only the sum of nr_uninterruptible over
6862 ++ * all cpus yields the correct result.
6863 ++ *
6864 ++ * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
6865 ++ */
6866 ++
6867 + /* Variables and functions for calc_load */
6868 + static atomic_long_t calc_load_tasks;
6869 + static unsigned long calc_load_update;
6870 + unsigned long avenrun[3];
6871 +-EXPORT_SYMBOL(avenrun);
6872 ++EXPORT_SYMBOL(avenrun); /* should be removed */
6873 ++
6874 ++/**
6875 ++ * get_avenrun - get the load average array
6876 ++ * @loads: pointer to dest load array
6877 ++ * @offset: offset to add
6878 ++ * @shift: shift count to shift the result left
6879 ++ *
6880 ++ * These values are estimates at best, so no need for locking.
6881 ++ */
6882 ++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
6883 ++{
6884 ++ loads[0] = (avenrun[0] + offset) << shift;
6885 ++ loads[1] = (avenrun[1] + offset) << shift;
6886 ++ loads[2] = (avenrun[2] + offset) << shift;
6887 ++}
6888 +
6889 + static long calc_load_fold_active(struct rq *this_rq)
6890 + {
6891 +@@ -3422,6 +3483,9 @@ static long calc_load_fold_active(struct rq *this_rq)
6892 + return delta;
6893 + }
6894 +
6895 ++/*
6896 ++ * a1 = a0 * e + a * (1 - e)
6897 ++ */
6898 + static unsigned long
6899 + calc_load(unsigned long load, unsigned long exp, unsigned long active)
6900 + {
6901 +@@ -3433,30 +3497,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
6902 +
6903 + #ifdef CONFIG_NO_HZ
6904 + /*
6905 +- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
6906 ++ * Handle NO_HZ for the global load-average.
6907 ++ *
6908 ++ * Since the above described distributed algorithm to compute the global
6909 ++ * load-average relies on per-cpu sampling from the tick, it is affected by
6910 ++ * NO_HZ.
6911 ++ *
6912 ++ * The basic idea is to fold the nr_active delta into a global idle-delta upon
6913 ++ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
6914 ++ * when we read the global state.
6915 ++ *
6916 ++ * Obviously reality has to ruin such a delightfully simple scheme:
6917 ++ *
6918 ++ * - When we go NO_HZ idle during the window, we can negate our sample
6919 ++ * contribution, causing under-accounting.
6920 ++ *
6921 ++ * We avoid this by keeping two idle-delta counters and flipping them
6922 ++ * when the window starts, thus separating old and new NO_HZ load.
6923 ++ *
6924 ++ * The only trick is the slight shift in index flip for read vs write.
6925 ++ *
6926 ++ * 0s 5s 10s 15s
6927 ++ * +10 +10 +10 +10
6928 ++ * |-|-----------|-|-----------|-|-----------|-|
6929 ++ * r:0 0 1 1 0 0 1 1 0
6930 ++ * w:0 1 1 0 0 1 1 0 0
6931 ++ *
6932 ++ * This ensures we'll fold the old idle contribution in this window while
6933 ++ * accumlating the new one.
6934 ++ *
6935 ++ * - When we wake up from NO_HZ idle during the window, we push up our
6936 ++ * contribution, since we effectively move our sample point to a known
6937 ++ * busy state.
6938 ++ *
6939 ++ * This is solved by pushing the window forward, and thus skipping the
6940 ++ * sample, for this cpu (effectively using the idle-delta for this cpu which
6941 ++ * was in effect at the time the window opened). This also solves the issue
6942 ++ * of having to deal with a cpu having been in NOHZ idle for multiple
6943 ++ * LOAD_FREQ intervals.
6944 + *
6945 + * When making the ILB scale, we should try to pull this in as well.
6946 + */
6947 +-static atomic_long_t calc_load_tasks_idle;
6948 ++static atomic_long_t calc_load_idle[2];
6949 ++static int calc_load_idx;
6950 +
6951 +-static void calc_load_account_idle(struct rq *this_rq)
6952 ++static inline int calc_load_write_idx(void)
6953 + {
6954 ++ int idx = calc_load_idx;
6955 ++
6956 ++ /*
6957 ++ * See calc_global_nohz(), if we observe the new index, we also
6958 ++ * need to observe the new update time.
6959 ++ */
6960 ++ smp_rmb();
6961 ++
6962 ++ /*
6963 ++ * If the folding window started, make sure we start writing in the
6964 ++ * next idle-delta.
6965 ++ */
6966 ++ if (!time_before(jiffies, calc_load_update))
6967 ++ idx++;
6968 ++
6969 ++ return idx & 1;
6970 ++}
6971 ++
6972 ++static inline int calc_load_read_idx(void)
6973 ++{
6974 ++ return calc_load_idx & 1;
6975 ++}
6976 ++
6977 ++void calc_load_enter_idle(void)
6978 ++{
6979 ++ struct rq *this_rq = this_rq();
6980 + long delta;
6981 +
6982 ++ /*
6983 ++ * We're going into NOHZ mode, if there's any pending delta, fold it
6984 ++ * into the pending idle delta.
6985 ++ */
6986 + delta = calc_load_fold_active(this_rq);
6987 +- if (delta)
6988 +- atomic_long_add(delta, &calc_load_tasks_idle);
6989 ++ if (delta) {
6990 ++ int idx = calc_load_write_idx();
6991 ++ atomic_long_add(delta, &calc_load_idle[idx]);
6992 ++ }
6993 + }
6994 +
6995 +-static long calc_load_fold_idle(void)
6996 ++void calc_load_exit_idle(void)
6997 + {
6998 +- long delta = 0;
6999 ++ struct rq *this_rq = this_rq();
7000 ++
7001 ++ /*
7002 ++ * If we're still before the sample window, we're done.
7003 ++ */
7004 ++ if (time_before(jiffies, this_rq->calc_load_update))
7005 ++ return;
7006 +
7007 + /*
7008 +- * Its got a race, we don't care...
7009 ++ * We woke inside or after the sample window, this means we're already
7010 ++ * accounted through the nohz accounting, so skip the entire deal and
7011 ++ * sync up for the next window.
7012 + */
7013 +- if (atomic_long_read(&calc_load_tasks_idle))
7014 +- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
7015 ++ this_rq->calc_load_update = calc_load_update;
7016 ++ if (time_before(jiffies, this_rq->calc_load_update + 10))
7017 ++ this_rq->calc_load_update += LOAD_FREQ;
7018 ++}
7019 ++
7020 ++static long calc_load_fold_idle(void)
7021 ++{
7022 ++ int idx = calc_load_read_idx();
7023 ++ long delta = 0;
7024 ++
7025 ++ if (atomic_long_read(&calc_load_idle[idx]))
7026 ++ delta = atomic_long_xchg(&calc_load_idle[idx], 0);
7027 +
7028 + return delta;
7029 + }
7030 +@@ -3542,66 +3694,39 @@ static void calc_global_nohz(void)
7031 + {
7032 + long delta, active, n;
7033 +
7034 +- /*
7035 +- * If we crossed a calc_load_update boundary, make sure to fold
7036 +- * any pending idle changes, the respective CPUs might have
7037 +- * missed the tick driven calc_load_account_active() update
7038 +- * due to NO_HZ.
7039 +- */
7040 +- delta = calc_load_fold_idle();
7041 +- if (delta)
7042 +- atomic_long_add(delta, &calc_load_tasks);
7043 +-
7044 +- /*
7045 +- * It could be the one fold was all it took, we done!
7046 +- */
7047 +- if (time_before(jiffies, calc_load_update + 10))
7048 +- return;
7049 +-
7050 +- /*
7051 +- * Catch-up, fold however many we are behind still
7052 +- */
7053 +- delta = jiffies - calc_load_update - 10;
7054 +- n = 1 + (delta / LOAD_FREQ);
7055 ++ if (!time_before(jiffies, calc_load_update + 10)) {
7056 ++ /*
7057 ++ * Catch-up, fold however many we are behind still
7058 ++ */
7059 ++ delta = jiffies - calc_load_update - 10;
7060 ++ n = 1 + (delta / LOAD_FREQ);
7061 +
7062 +- active = atomic_long_read(&calc_load_tasks);
7063 +- active = active > 0 ? active * FIXED_1 : 0;
7064 ++ active = atomic_long_read(&calc_load_tasks);
7065 ++ active = active > 0 ? active * FIXED_1 : 0;
7066 +
7067 +- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
7068 +- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
7069 +- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
7070 ++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
7071 ++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
7072 ++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
7073 +
7074 +- calc_load_update += n * LOAD_FREQ;
7075 +-}
7076 +-#else
7077 +-static void calc_load_account_idle(struct rq *this_rq)
7078 +-{
7079 +-}
7080 ++ calc_load_update += n * LOAD_FREQ;
7081 ++ }
7082 +
7083 +-static inline long calc_load_fold_idle(void)
7084 +-{
7085 +- return 0;
7086 ++ /*
7087 ++ * Flip the idle index...
7088 ++ *
7089 ++ * Make sure we first write the new time then flip the index, so that
7090 ++ * calc_load_write_idx() will see the new time when it reads the new
7091 ++ * index, this avoids a double flip messing things up.
7092 ++ */
7093 ++ smp_wmb();
7094 ++ calc_load_idx++;
7095 + }
7096 ++#else /* !CONFIG_NO_HZ */
7097 +
7098 +-static void calc_global_nohz(void)
7099 +-{
7100 +-}
7101 +-#endif
7102 ++static inline long calc_load_fold_idle(void) { return 0; }
7103 ++static inline void calc_global_nohz(void) { }
7104 +
7105 +-/**
7106 +- * get_avenrun - get the load average array
7107 +- * @loads: pointer to dest load array
7108 +- * @offset: offset to add
7109 +- * @shift: shift count to shift the result left
7110 +- *
7111 +- * These values are estimates at best, so no need for locking.
7112 +- */
7113 +-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
7114 +-{
7115 +- loads[0] = (avenrun[0] + offset) << shift;
7116 +- loads[1] = (avenrun[1] + offset) << shift;
7117 +- loads[2] = (avenrun[2] + offset) << shift;
7118 +-}
7119 ++#endif /* CONFIG_NO_HZ */
7120 +
7121 + /*
7122 + * calc_load - update the avenrun load estimates 10 ticks after the
7123 +@@ -3609,11 +3734,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
7124 + */
7125 + void calc_global_load(unsigned long ticks)
7126 + {
7127 +- long active;
7128 ++ long active, delta;
7129 +
7130 + if (time_before(jiffies, calc_load_update + 10))
7131 + return;
7132 +
7133 ++ /*
7134 ++ * Fold the 'old' idle-delta to include all NO_HZ cpus.
7135 ++ */
7136 ++ delta = calc_load_fold_idle();
7137 ++ if (delta)
7138 ++ atomic_long_add(delta, &calc_load_tasks);
7139 ++
7140 + active = atomic_long_read(&calc_load_tasks);
7141 + active = active > 0 ? active * FIXED_1 : 0;
7142 +
7143 +@@ -3624,12 +3756,7 @@ void calc_global_load(unsigned long ticks)
7144 + calc_load_update += LOAD_FREQ;
7145 +
7146 + /*
7147 +- * Account one period with whatever state we found before
7148 +- * folding in the nohz state and ageing the entire idle period.
7149 +- *
7150 +- * This avoids loosing a sample when we go idle between
7151 +- * calc_load_account_active() (10 ticks ago) and now and thus
7152 +- * under-accounting.
7153 ++ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
7154 + */
7155 + calc_global_nohz();
7156 + }
7157 +@@ -3646,7 +3773,6 @@ static void calc_load_account_active(struct rq *this_rq)
7158 + return;
7159 +
7160 + delta = calc_load_fold_active(this_rq);
7161 +- delta += calc_load_fold_idle();
7162 + if (delta)
7163 + atomic_long_add(delta, &calc_load_tasks);
7164 +
7165 +@@ -3654,6 +3780,10 @@ static void calc_load_account_active(struct rq *this_rq)
7166 + }
7167 +
7168 + /*
7169 ++ * End of global load-average stuff
7170 ++ */
7171 ++
7172 ++/*
7173 + * The exact cpuload at various idx values, calculated at every tick would be
7174 + * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
7175 + *
7176 +diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
7177 +index 0a51882..be92bfe 100644
7178 +--- a/kernel/sched_idletask.c
7179 ++++ b/kernel/sched_idletask.c
7180 +@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
7181 + static struct task_struct *pick_next_task_idle(struct rq *rq)
7182 + {
7183 + schedstat_inc(rq, sched_goidle);
7184 +- calc_load_account_idle(rq);
7185 + return rq->idle;
7186 + }
7187 +
7188 +diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
7189 +index 4b85a7a..f1eb182 100644
7190 +--- a/kernel/time/ntp.c
7191 ++++ b/kernel/time/ntp.c
7192 +@@ -31,8 +31,6 @@ unsigned long tick_nsec;
7193 + u64 tick_length;
7194 + static u64 tick_length_base;
7195 +
7196 +-static struct hrtimer leap_timer;
7197 +-
7198 + #define MAX_TICKADJ 500LL /* usecs */
7199 + #define MAX_TICKADJ_SCALED \
7200 + (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
7201 +@@ -350,60 +348,60 @@ void ntp_clear(void)
7202 + }
7203 +
7204 + /*
7205 +- * Leap second processing. If in leap-insert state at the end of the
7206 +- * day, the system clock is set back one second; if in leap-delete
7207 +- * state, the system clock is set ahead one second.
7208 ++ * this routine handles the overflow of the microsecond field
7209 ++ *
7210 ++ * The tricky bits of code to handle the accurate clock support
7211 ++ * were provided by Dave Mills (Mills@××××.EDU) of NTP fame.
7212 ++ * They were originally developed for SUN and DEC kernels.
7213 ++ * All the kudos should go to Dave for this stuff.
7214 ++ *
7215 ++ * Also handles leap second processing, and returns leap offset
7216 + */
7217 +-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
7218 ++int second_overflow(unsigned long secs)
7219 + {
7220 +- enum hrtimer_restart res = HRTIMER_NORESTART;
7221 +-
7222 +- write_seqlock(&xtime_lock);
7223 ++ int leap = 0;
7224 ++ s64 delta;
7225 +
7226 ++ /*
7227 ++ * Leap second processing. If in leap-insert state at the end of the
7228 ++ * day, the system clock is set back one second; if in leap-delete
7229 ++ * state, the system clock is set ahead one second.
7230 ++ */
7231 + switch (time_state) {
7232 + case TIME_OK:
7233 ++ if (time_status & STA_INS)
7234 ++ time_state = TIME_INS;
7235 ++ else if (time_status & STA_DEL)
7236 ++ time_state = TIME_DEL;
7237 + break;
7238 + case TIME_INS:
7239 +- timekeeping_leap_insert(-1);
7240 +- time_state = TIME_OOP;
7241 +- printk(KERN_NOTICE
7242 +- "Clock: inserting leap second 23:59:60 UTC\n");
7243 +- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
7244 +- res = HRTIMER_RESTART;
7245 ++ if (secs % 86400 == 0) {
7246 ++ leap = -1;
7247 ++ time_state = TIME_OOP;
7248 ++ time_tai++;
7249 ++ printk(KERN_NOTICE
7250 ++ "Clock: inserting leap second 23:59:60 UTC\n");
7251 ++ }
7252 + break;
7253 + case TIME_DEL:
7254 +- timekeeping_leap_insert(1);
7255 +- time_tai--;
7256 +- time_state = TIME_WAIT;
7257 +- printk(KERN_NOTICE
7258 +- "Clock: deleting leap second 23:59:59 UTC\n");
7259 ++ if ((secs + 1) % 86400 == 0) {
7260 ++ leap = 1;
7261 ++ time_tai--;
7262 ++ time_state = TIME_WAIT;
7263 ++ printk(KERN_NOTICE
7264 ++ "Clock: deleting leap second 23:59:59 UTC\n");
7265 ++ }
7266 + break;
7267 + case TIME_OOP:
7268 +- time_tai++;
7269 + time_state = TIME_WAIT;
7270 +- /* fall through */
7271 ++ break;
7272 ++
7273 + case TIME_WAIT:
7274 + if (!(time_status & (STA_INS | STA_DEL)))
7275 + time_state = TIME_OK;
7276 + break;
7277 + }
7278 +
7279 +- write_sequnlock(&xtime_lock);
7280 +-
7281 +- return res;
7282 +-}
7283 +-
7284 +-/*
7285 +- * this routine handles the overflow of the microsecond field
7286 +- *
7287 +- * The tricky bits of code to handle the accurate clock support
7288 +- * were provided by Dave Mills (Mills@××××.EDU) of NTP fame.
7289 +- * They were originally developed for SUN and DEC kernels.
7290 +- * All the kudos should go to Dave for this stuff.
7291 +- */
7292 +-void second_overflow(void)
7293 +-{
7294 +- s64 delta;
7295 +
7296 + /* Bump the maxerror field */
7297 + time_maxerror += MAXFREQ / NSEC_PER_USEC;
7298 +@@ -423,23 +421,25 @@ void second_overflow(void)
7299 + pps_dec_valid();
7300 +
7301 + if (!time_adjust)
7302 +- return;
7303 ++ goto out;
7304 +
7305 + if (time_adjust > MAX_TICKADJ) {
7306 + time_adjust -= MAX_TICKADJ;
7307 + tick_length += MAX_TICKADJ_SCALED;
7308 +- return;
7309 ++ goto out;
7310 + }
7311 +
7312 + if (time_adjust < -MAX_TICKADJ) {
7313 + time_adjust += MAX_TICKADJ;
7314 + tick_length -= MAX_TICKADJ_SCALED;
7315 +- return;
7316 ++ goto out;
7317 + }
7318 +
7319 + tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
7320 + << NTP_SCALE_SHIFT;
7321 + time_adjust = 0;
7322 ++out:
7323 ++ return leap;
7324 + }
7325 +
7326 + #ifdef CONFIG_GENERIC_CMOS_UPDATE
7327 +@@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
7328 + static inline void notify_cmos_timer(void) { }
7329 + #endif
7330 +
7331 +-/*
7332 +- * Start the leap seconds timer:
7333 +- */
7334 +-static inline void ntp_start_leap_timer(struct timespec *ts)
7335 +-{
7336 +- long now = ts->tv_sec;
7337 +-
7338 +- if (time_status & STA_INS) {
7339 +- time_state = TIME_INS;
7340 +- now += 86400 - now % 86400;
7341 +- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
7342 +-
7343 +- return;
7344 +- }
7345 +-
7346 +- if (time_status & STA_DEL) {
7347 +- time_state = TIME_DEL;
7348 +- now += 86400 - (now + 1) % 86400;
7349 +- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
7350 +- }
7351 +-}
7352 +
7353 + /*
7354 + * Propagate a new txc->status value into the NTP state:
7355 +@@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
7356 + time_status &= STA_RONLY;
7357 + time_status |= txc->status & ~STA_RONLY;
7358 +
7359 +- switch (time_state) {
7360 +- case TIME_OK:
7361 +- ntp_start_leap_timer(ts);
7362 +- break;
7363 +- case TIME_INS:
7364 +- case TIME_DEL:
7365 +- time_state = TIME_OK;
7366 +- ntp_start_leap_timer(ts);
7367 +- case TIME_WAIT:
7368 +- if (!(time_status & (STA_INS | STA_DEL)))
7369 +- time_state = TIME_OK;
7370 +- break;
7371 +- case TIME_OOP:
7372 +- hrtimer_restart(&leap_timer);
7373 +- break;
7374 +- }
7375 + }
7376 + /*
7377 + * Called with the xtime lock held, so we can access and modify
7378 +@@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
7379 + (txc->tick < 900000/USER_HZ ||
7380 + txc->tick > 1100000/USER_HZ))
7381 + return -EINVAL;
7382 +-
7383 +- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
7384 +- hrtimer_cancel(&leap_timer);
7385 + }
7386 +
7387 + if (txc->modes & ADJ_SETOFFSET) {
7388 +@@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
7389 + void __init ntp_init(void)
7390 + {
7391 + ntp_clear();
7392 +- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
7393 +- leap_timer.function = ntp_leap_second;
7394 + }
7395 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
7396 +index c923640..9955ebd 100644
7397 +--- a/kernel/time/tick-sched.c
7398 ++++ b/kernel/time/tick-sched.c
7399 +@@ -430,6 +430,7 @@ void tick_nohz_stop_sched_tick(int inidle)
7400 + */
7401 + if (!ts->tick_stopped) {
7402 + select_nohz_load_balancer(1);
7403 ++ calc_load_enter_idle();
7404 +
7405 + ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
7406 + ts->tick_stopped = 1;
7407 +@@ -563,6 +564,7 @@ void tick_nohz_restart_sched_tick(void)
7408 + account_idle_ticks(ticks);
7409 + #endif
7410 +
7411 ++ calc_load_exit_idle();
7412 + touch_softlockup_watchdog();
7413 + /*
7414 + * Cancel the scheduled timer and restore the tick
7415 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
7416 +index 2378413..03e67d4 100644
7417 +--- a/kernel/time/timekeeping.c
7418 ++++ b/kernel/time/timekeeping.c
7419 +@@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
7420 + static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
7421 + static struct timespec total_sleep_time;
7422 +
7423 ++/* Offset clock monotonic -> clock realtime */
7424 ++static ktime_t offs_real;
7425 ++
7426 ++/* Offset clock monotonic -> clock boottime */
7427 ++static ktime_t offs_boot;
7428 ++
7429 + /*
7430 + * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
7431 + */
7432 + static struct timespec raw_time;
7433 +
7434 +-/* flag for if timekeeping is suspended */
7435 +-int __read_mostly timekeeping_suspended;
7436 ++/* must hold write on xtime_lock */
7437 ++static void update_rt_offset(void)
7438 ++{
7439 ++ struct timespec tmp, *wtm = &wall_to_monotonic;
7440 +
7441 +-/* must hold xtime_lock */
7442 +-void timekeeping_leap_insert(int leapsecond)
7443 ++ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
7444 ++ offs_real = timespec_to_ktime(tmp);
7445 ++}
7446 ++
7447 ++/* must hold write on xtime_lock */
7448 ++static void timekeeping_update(bool clearntp)
7449 + {
7450 +- xtime.tv_sec += leapsecond;
7451 +- wall_to_monotonic.tv_sec -= leapsecond;
7452 +- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
7453 +- timekeeper.mult);
7454 ++ if (clearntp) {
7455 ++ timekeeper.ntp_error = 0;
7456 ++ ntp_clear();
7457 ++ }
7458 ++ update_rt_offset();
7459 ++ update_vsyscall(&xtime, &wall_to_monotonic,
7460 ++ timekeeper.clock, timekeeper.mult);
7461 + }
7462 +
7463 ++
7464 ++
7465 ++/* flag for if timekeeping is suspended */
7466 ++int __read_mostly timekeeping_suspended;
7467 ++
7468 + /**
7469 + * timekeeping_forward_now - update clock to the current time
7470 + *
7471 +@@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
7472 +
7473 + xtime = *tv;
7474 +
7475 +- timekeeper.ntp_error = 0;
7476 +- ntp_clear();
7477 +-
7478 +- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
7479 +- timekeeper.mult);
7480 ++ timekeeping_update(true);
7481 +
7482 + write_sequnlock_irqrestore(&xtime_lock, flags);
7483 +
7484 +@@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
7485 + xtime = timespec_add(xtime, *ts);
7486 + wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
7487 +
7488 +- timekeeper.ntp_error = 0;
7489 +- ntp_clear();
7490 +-
7491 +- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
7492 +- timekeeper.mult);
7493 ++ timekeeping_update(true);
7494 +
7495 + write_sequnlock_irqrestore(&xtime_lock, flags);
7496 +
7497 +@@ -591,6 +603,7 @@ void __init timekeeping_init(void)
7498 + }
7499 + set_normalized_timespec(&wall_to_monotonic,
7500 + -boot.tv_sec, -boot.tv_nsec);
7501 ++ update_rt_offset();
7502 + total_sleep_time.tv_sec = 0;
7503 + total_sleep_time.tv_nsec = 0;
7504 + write_sequnlock_irqrestore(&xtime_lock, flags);
7505 +@@ -599,6 +612,12 @@ void __init timekeeping_init(void)
7506 + /* time in seconds when suspend began */
7507 + static struct timespec timekeeping_suspend_time;
7508 +
7509 ++static void update_sleep_time(struct timespec t)
7510 ++{
7511 ++ total_sleep_time = t;
7512 ++ offs_boot = timespec_to_ktime(t);
7513 ++}
7514 ++
7515 + /**
7516 + * __timekeeping_inject_sleeptime - Internal function to add sleep interval
7517 + * @delta: pointer to a timespec delta value
7518 +@@ -616,7 +635,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
7519 +
7520 + xtime = timespec_add(xtime, *delta);
7521 + wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
7522 +- total_sleep_time = timespec_add(total_sleep_time, *delta);
7523 ++ update_sleep_time(timespec_add(total_sleep_time, *delta));
7524 + }
7525 +
7526 +
7527 +@@ -645,10 +664,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
7528 +
7529 + __timekeeping_inject_sleeptime(delta);
7530 +
7531 +- timekeeper.ntp_error = 0;
7532 +- ntp_clear();
7533 +- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
7534 +- timekeeper.mult);
7535 ++ timekeeping_update(true);
7536 +
7537 + write_sequnlock_irqrestore(&xtime_lock, flags);
7538 +
7539 +@@ -683,6 +699,7 @@ static void timekeeping_resume(void)
7540 + timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
7541 + timekeeper.ntp_error = 0;
7542 + timekeeping_suspended = 0;
7543 ++ timekeeping_update(false);
7544 + write_sequnlock_irqrestore(&xtime_lock, flags);
7545 +
7546 + touch_softlockup_watchdog();
7547 +@@ -942,9 +959,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
7548 +
7549 + timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
7550 + while (timekeeper.xtime_nsec >= nsecps) {
7551 ++ int leap;
7552 + timekeeper.xtime_nsec -= nsecps;
7553 + xtime.tv_sec++;
7554 +- second_overflow();
7555 ++ leap = second_overflow(xtime.tv_sec);
7556 ++ xtime.tv_sec += leap;
7557 ++ wall_to_monotonic.tv_sec -= leap;
7558 ++ if (leap)
7559 ++ clock_was_set_delayed();
7560 + }
7561 +
7562 + /* Accumulate raw time */
7563 +@@ -1050,14 +1072,17 @@ static void update_wall_time(void)
7564 + * xtime.tv_nsec isn't larger then NSEC_PER_SEC
7565 + */
7566 + if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
7567 ++ int leap;
7568 + xtime.tv_nsec -= NSEC_PER_SEC;
7569 + xtime.tv_sec++;
7570 +- second_overflow();
7571 ++ leap = second_overflow(xtime.tv_sec);
7572 ++ xtime.tv_sec += leap;
7573 ++ wall_to_monotonic.tv_sec -= leap;
7574 ++ if (leap)
7575 ++ clock_was_set_delayed();
7576 + }
7577 +
7578 +- /* check to see if there is a new clocksource to use */
7579 +- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
7580 +- timekeeper.mult);
7581 ++ timekeeping_update(false);
7582 + }
7583 +
7584 + /**
7585 +@@ -1216,6 +1241,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
7586 + } while (read_seqretry(&xtime_lock, seq));
7587 + }
7588 +
7589 ++#ifdef CONFIG_HIGH_RES_TIMERS
7590 ++/**
7591 ++ * ktime_get_update_offsets - hrtimer helper
7592 ++ * @real: pointer to storage for monotonic -> realtime offset
7593 ++ * @_boot: pointer to storage for monotonic -> boottime offset
7594 ++ *
7595 ++ * Returns current monotonic time and updates the offsets
7596 ++ * Called from hrtimer_interupt() or retrigger_next_event()
7597 ++ */
7598 ++ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
7599 ++{
7600 ++ ktime_t now;
7601 ++ unsigned int seq;
7602 ++ u64 secs, nsecs;
7603 ++
7604 ++ do {
7605 ++ seq = read_seqbegin(&xtime_lock);
7606 ++
7607 ++ secs = xtime.tv_sec;
7608 ++ nsecs = xtime.tv_nsec;
7609 ++ nsecs += timekeeping_get_ns();
7610 ++ /* If arch requires, add in gettimeoffset() */
7611 ++ nsecs += arch_gettimeoffset();
7612 ++
7613 ++ *real = offs_real;
7614 ++ *boot = offs_boot;
7615 ++ } while (read_seqretry(&xtime_lock, seq));
7616 ++
7617 ++ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
7618 ++ now = ktime_sub(now, *real);
7619 ++ return now;
7620 ++}
7621 ++#endif
7622 ++
7623 + /**
7624 + * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
7625 + */
7626 +diff --git a/mm/compaction.c b/mm/compaction.c
7627 +index 8fb8a40..50f1c60 100644
7628 +--- a/mm/compaction.c
7629 ++++ b/mm/compaction.c
7630 +@@ -592,8 +592,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
7631 + if (err) {
7632 + putback_lru_pages(&cc->migratepages);
7633 + cc->nr_migratepages = 0;
7634 ++ if (err == -ENOMEM) {
7635 ++ ret = COMPACT_PARTIAL;
7636 ++ goto out;
7637 ++ }
7638 + }
7639 +-
7640 + }
7641 +
7642 + out:
7643 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
7644 +index 5f5c545..7c535b0 100644
7645 +--- a/mm/hugetlb.c
7646 ++++ b/mm/hugetlb.c
7647 +@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
7648 + */
7649 + static DEFINE_SPINLOCK(hugetlb_lock);
7650 +
7651 ++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
7652 ++{
7653 ++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
7654 ++
7655 ++ spin_unlock(&spool->lock);
7656 ++
7657 ++ /* If no pages are used, and no other handles to the subpool
7658 ++ * remain, free the subpool the subpool remain */
7659 ++ if (free)
7660 ++ kfree(spool);
7661 ++}
7662 ++
7663 ++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
7664 ++{
7665 ++ struct hugepage_subpool *spool;
7666 ++
7667 ++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
7668 ++ if (!spool)
7669 ++ return NULL;
7670 ++
7671 ++ spin_lock_init(&spool->lock);
7672 ++ spool->count = 1;
7673 ++ spool->max_hpages = nr_blocks;
7674 ++ spool->used_hpages = 0;
7675 ++
7676 ++ return spool;
7677 ++}
7678 ++
7679 ++void hugepage_put_subpool(struct hugepage_subpool *spool)
7680 ++{
7681 ++ spin_lock(&spool->lock);
7682 ++ BUG_ON(!spool->count);
7683 ++ spool->count--;
7684 ++ unlock_or_release_subpool(spool);
7685 ++}
7686 ++
7687 ++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
7688 ++ long delta)
7689 ++{
7690 ++ int ret = 0;
7691 ++
7692 ++ if (!spool)
7693 ++ return 0;
7694 ++
7695 ++ spin_lock(&spool->lock);
7696 ++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
7697 ++ spool->used_hpages += delta;
7698 ++ } else {
7699 ++ ret = -ENOMEM;
7700 ++ }
7701 ++ spin_unlock(&spool->lock);
7702 ++
7703 ++ return ret;
7704 ++}
7705 ++
7706 ++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
7707 ++ long delta)
7708 ++{
7709 ++ if (!spool)
7710 ++ return;
7711 ++
7712 ++ spin_lock(&spool->lock);
7713 ++ spool->used_hpages -= delta;
7714 ++ /* If hugetlbfs_put_super couldn't free spool due to
7715 ++ * an outstanding quota reference, free it now. */
7716 ++ unlock_or_release_subpool(spool);
7717 ++}
7718 ++
7719 ++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
7720 ++{
7721 ++ return HUGETLBFS_SB(inode->i_sb)->spool;
7722 ++}
7723 ++
7724 ++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
7725 ++{
7726 ++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
7727 ++}
7728 ++
7729 + /*
7730 + * Region tracking -- allows tracking of reservations and instantiated pages
7731 + * across the pages in a mapping.
7732 +@@ -533,9 +611,9 @@ static void free_huge_page(struct page *page)
7733 + */
7734 + struct hstate *h = page_hstate(page);
7735 + int nid = page_to_nid(page);
7736 +- struct address_space *mapping;
7737 ++ struct hugepage_subpool *spool =
7738 ++ (struct hugepage_subpool *)page_private(page);
7739 +
7740 +- mapping = (struct address_space *) page_private(page);
7741 + set_page_private(page, 0);
7742 + page->mapping = NULL;
7743 + BUG_ON(page_count(page));
7744 +@@ -551,8 +629,7 @@ static void free_huge_page(struct page *page)
7745 + enqueue_huge_page(h, page);
7746 + }
7747 + spin_unlock(&hugetlb_lock);
7748 +- if (mapping)
7749 +- hugetlb_put_quota(mapping, 1);
7750 ++ hugepage_subpool_put_pages(spool, 1);
7751 + }
7752 +
7753 + static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
7754 +@@ -966,11 +1043,12 @@ static void return_unused_surplus_pages(struct hstate *h,
7755 + /*
7756 + * Determine if the huge page at addr within the vma has an associated
7757 + * reservation. Where it does not we will need to logically increase
7758 +- * reservation and actually increase quota before an allocation can occur.
7759 +- * Where any new reservation would be required the reservation change is
7760 +- * prepared, but not committed. Once the page has been quota'd allocated
7761 +- * an instantiated the change should be committed via vma_commit_reservation.
7762 +- * No action is required on failure.
7763 ++ * reservation and actually increase subpool usage before an allocation
7764 ++ * can occur. Where any new reservation would be required the
7765 ++ * reservation change is prepared, but not committed. Once the page
7766 ++ * has been allocated from the subpool and instantiated the change should
7767 ++ * be committed via vma_commit_reservation. No action is required on
7768 ++ * failure.
7769 + */
7770 + static long vma_needs_reservation(struct hstate *h,
7771 + struct vm_area_struct *vma, unsigned long addr)
7772 +@@ -1019,24 +1097,24 @@ static void vma_commit_reservation(struct hstate *h,
7773 + static struct page *alloc_huge_page(struct vm_area_struct *vma,
7774 + unsigned long addr, int avoid_reserve)
7775 + {
7776 ++ struct hugepage_subpool *spool = subpool_vma(vma);
7777 + struct hstate *h = hstate_vma(vma);
7778 + struct page *page;
7779 +- struct address_space *mapping = vma->vm_file->f_mapping;
7780 +- struct inode *inode = mapping->host;
7781 + long chg;
7782 +
7783 + /*
7784 +- * Processes that did not create the mapping will have no reserves and
7785 +- * will not have accounted against quota. Check that the quota can be
7786 +- * made before satisfying the allocation
7787 +- * MAP_NORESERVE mappings may also need pages and quota allocated
7788 +- * if no reserve mapping overlaps.
7789 ++ * Processes that did not create the mapping will have no
7790 ++ * reserves and will not have accounted against subpool
7791 ++ * limit. Check that the subpool limit can be made before
7792 ++ * satisfying the allocation MAP_NORESERVE mappings may also
7793 ++ * need pages and subpool limit allocated allocated if no reserve
7794 ++ * mapping overlaps.
7795 + */
7796 + chg = vma_needs_reservation(h, vma, addr);
7797 + if (chg < 0)
7798 + return ERR_PTR(-VM_FAULT_OOM);
7799 + if (chg)
7800 +- if (hugetlb_get_quota(inode->i_mapping, chg))
7801 ++ if (hugepage_subpool_get_pages(spool, chg))
7802 + return ERR_PTR(-VM_FAULT_SIGBUS);
7803 +
7804 + spin_lock(&hugetlb_lock);
7805 +@@ -1046,12 +1124,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
7806 + if (!page) {
7807 + page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
7808 + if (!page) {
7809 +- hugetlb_put_quota(inode->i_mapping, chg);
7810 ++ hugepage_subpool_put_pages(spool, chg);
7811 + return ERR_PTR(-VM_FAULT_SIGBUS);
7812 + }
7813 + }
7814 +
7815 +- set_page_private(page, (unsigned long) mapping);
7816 ++ set_page_private(page, (unsigned long)spool);
7817 +
7818 + vma_commit_reservation(h, vma, addr);
7819 +
7820 +@@ -2081,6 +2159,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
7821 + {
7822 + struct hstate *h = hstate_vma(vma);
7823 + struct resv_map *reservations = vma_resv_map(vma);
7824 ++ struct hugepage_subpool *spool = subpool_vma(vma);
7825 + unsigned long reserve;
7826 + unsigned long start;
7827 + unsigned long end;
7828 +@@ -2096,7 +2175,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
7829 +
7830 + if (reserve) {
7831 + hugetlb_acct_memory(h, -reserve);
7832 +- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
7833 ++ hugepage_subpool_put_pages(spool, reserve);
7834 + }
7835 + }
7836 + }
7837 +@@ -2326,7 +2405,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
7838 + address = address & huge_page_mask(h);
7839 + pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
7840 + + (vma->vm_pgoff >> PAGE_SHIFT);
7841 +- mapping = (struct address_space *)page_private(page);
7842 ++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
7843 +
7844 + /*
7845 + * Take the mapping lock for the duration of the table walk. As
7846 +@@ -2865,11 +2944,12 @@ int hugetlb_reserve_pages(struct inode *inode,
7847 + {
7848 + long ret, chg;
7849 + struct hstate *h = hstate_inode(inode);
7850 ++ struct hugepage_subpool *spool = subpool_inode(inode);
7851 +
7852 + /*
7853 + * Only apply hugepage reservation if asked. At fault time, an
7854 + * attempt will be made for VM_NORESERVE to allocate a page
7855 +- * and filesystem quota without using reserves
7856 ++ * without using reserves
7857 + */
7858 + if (vm_flags & VM_NORESERVE)
7859 + return 0;
7860 +@@ -2898,19 +2978,19 @@ int hugetlb_reserve_pages(struct inode *inode,
7861 + goto out_err;
7862 + }
7863 +
7864 +- /* There must be enough filesystem quota for the mapping */
7865 +- if (hugetlb_get_quota(inode->i_mapping, chg)) {
7866 ++ /* There must be enough pages in the subpool for the mapping */
7867 ++ if (hugepage_subpool_get_pages(spool, chg)) {
7868 + ret = -ENOSPC;
7869 + goto out_err;
7870 + }
7871 +
7872 + /*
7873 + * Check enough hugepages are available for the reservation.
7874 +- * Hand back the quota if there are not
7875 ++ * Hand the pages back to the subpool if there are not
7876 + */
7877 + ret = hugetlb_acct_memory(h, chg);
7878 + if (ret < 0) {
7879 +- hugetlb_put_quota(inode->i_mapping, chg);
7880 ++ hugepage_subpool_put_pages(spool, chg);
7881 + goto out_err;
7882 + }
7883 +
7884 +@@ -2938,12 +3018,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
7885 + {
7886 + struct hstate *h = hstate_inode(inode);
7887 + long chg = region_truncate(&inode->i_mapping->private_list, offset);
7888 ++ struct hugepage_subpool *spool = subpool_inode(inode);
7889 +
7890 + spin_lock(&inode->i_lock);
7891 + inode->i_blocks -= (blocks_per_huge_page(h) * freed);
7892 + spin_unlock(&inode->i_lock);
7893 +
7894 +- hugetlb_put_quota(inode->i_mapping, (chg - freed));
7895 ++ hugepage_subpool_put_pages(spool, (chg - freed));
7896 + hugetlb_acct_memory(h, -(chg - freed));
7897 + }
7898 +
7899 +diff --git a/mm/vmscan.c b/mm/vmscan.c
7900 +index fbe2d2c..8342119 100644
7901 +--- a/mm/vmscan.c
7902 ++++ b/mm/vmscan.c
7903 +@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
7904 + * them before going back to sleep.
7905 + */
7906 + set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7907 +- schedule();
7908 ++
7909 ++ if (!kthread_should_stop())
7910 ++ schedule();
7911 ++
7912 + set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7913 + } else {
7914 + if (remaining)
7915 +@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
7916 + }
7917 +
7918 + /*
7919 +- * Called by memory hotplug when all memory in a node is offlined.
7920 ++ * Called by memory hotplug when all memory in a node is offlined. Caller must
7921 ++ * hold lock_memory_hotplug().
7922 + */
7923 + void kswapd_stop(int nid)
7924 + {
7925 + struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
7926 +
7927 +- if (kswapd)
7928 ++ if (kswapd) {
7929 + kthread_stop(kswapd);
7930 ++ NODE_DATA(nid)->kswapd = NULL;
7931 ++ }
7932 + }
7933 +
7934 + static int __init kswapd_init(void)
7935 +diff --git a/net/can/raw.c b/net/can/raw.c
7936 +index cde1b4a..46cca3a 100644
7937 +--- a/net/can/raw.c
7938 ++++ b/net/can/raw.c
7939 +@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
7940 + if (err < 0)
7941 + goto free_skb;
7942 +
7943 +- /* to be able to check the received tx sock reference in raw_rcv() */
7944 +- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
7945 +-
7946 + skb->dev = dev;
7947 + skb->sk = sk;
7948 +
7949 +diff --git a/net/core/dev.c b/net/core/dev.c
7950 +index 1cbddc9..5738654 100644
7951 +--- a/net/core/dev.c
7952 ++++ b/net/core/dev.c
7953 +@@ -2079,25 +2079,6 @@ static int dev_gso_segment(struct sk_buff *skb, int features)
7954 + return 0;
7955 + }
7956 +
7957 +-/*
7958 +- * Try to orphan skb early, right before transmission by the device.
7959 +- * We cannot orphan skb if tx timestamp is requested or the sk-reference
7960 +- * is needed on driver level for other reasons, e.g. see net/can/raw.c
7961 +- */
7962 +-static inline void skb_orphan_try(struct sk_buff *skb)
7963 +-{
7964 +- struct sock *sk = skb->sk;
7965 +-
7966 +- if (sk && !skb_shinfo(skb)->tx_flags) {
7967 +- /* skb_tx_hash() wont be able to get sk.
7968 +- * We copy sk_hash into skb->rxhash
7969 +- */
7970 +- if (!skb->rxhash)
7971 +- skb->rxhash = sk->sk_hash;
7972 +- skb_orphan(skb);
7973 +- }
7974 +-}
7975 +-
7976 + static bool can_checksum_protocol(unsigned long features, __be16 protocol)
7977 + {
7978 + return ((features & NETIF_F_GEN_CSUM) ||
7979 +@@ -2182,8 +2163,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
7980 + if (!list_empty(&ptype_all))
7981 + dev_queue_xmit_nit(skb, dev);
7982 +
7983 +- skb_orphan_try(skb);
7984 +-
7985 + features = netif_skb_features(skb);
7986 +
7987 + if (vlan_tx_tag_present(skb) &&
7988 +@@ -2293,7 +2272,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
7989 + if (skb->sk && skb->sk->sk_hash)
7990 + hash = skb->sk->sk_hash;
7991 + else
7992 +- hash = (__force u16) skb->protocol ^ skb->rxhash;
7993 ++ hash = (__force u16) skb->protocol;
7994 + hash = jhash_1word(hash, hashrnd);
7995 +
7996 + return (u16) (((u64) hash * qcount) >> 32) + qoffset;
7997 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
7998 +index 9726927..32e6ca2 100644
7999 +--- a/net/ipv4/tcp_input.c
8000 ++++ b/net/ipv4/tcp_input.c
8001 +@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
8002 + goto discard;
8003 +
8004 + if (th->syn) {
8005 ++ if (th->fin)
8006 ++ goto discard;
8007 + if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
8008 + return 1;
8009 +
8010 +diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
8011 +index 274d150..cf98d62 100644
8012 +--- a/net/iucv/af_iucv.c
8013 ++++ b/net/iucv/af_iucv.c
8014 +@@ -380,7 +380,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
8015 + skb_trim(skb, skb->dev->mtu);
8016 + }
8017 + skb->protocol = ETH_P_AF_IUCV;
8018 +- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
8019 + nskb = skb_clone(skb, GFP_ATOMIC);
8020 + if (!nskb)
8021 + return -ENOMEM;
8022 +diff --git a/net/wireless/util.c b/net/wireless/util.c
8023 +index d38815d..74d5292 100644
8024 +--- a/net/wireless/util.c
8025 ++++ b/net/wireless/util.c
8026 +@@ -813,7 +813,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
8027 + ntype == NL80211_IFTYPE_P2P_CLIENT))
8028 + return -EBUSY;
8029 +
8030 +- if (ntype != otype) {
8031 ++ if (ntype != otype && netif_running(dev)) {
8032 + err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
8033 + ntype);
8034 + if (err)
8035 +diff --git a/scripts/depmod.sh b/scripts/depmod.sh
8036 +index a272356..2ae4817 100755
8037 +--- a/scripts/depmod.sh
8038 ++++ b/scripts/depmod.sh
8039 +@@ -9,12 +9,6 @@ fi
8040 + DEPMOD=$1
8041 + KERNELRELEASE=$2
8042 +
8043 +-if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
8044 +- echo "Warning: you may need to install module-init-tools" >&2
8045 +- echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
8046 +- sleep 1
8047 +-fi
8048 +-
8049 + if ! test -r System.map -a -x "$DEPMOD"; then
8050 + exit 0
8051 + fi
8052 +diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
8053 +index 9f614b4..272407c 100644
8054 +--- a/virt/kvm/irq_comm.c
8055 ++++ b/virt/kvm/irq_comm.c
8056 +@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
8057 + */
8058 + hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
8059 + if (ei->type == KVM_IRQ_ROUTING_MSI ||
8060 ++ ue->type == KVM_IRQ_ROUTING_MSI ||
8061 + ue->u.irqchip.irqchip == ei->irqchip.irqchip)
8062 + return r;
8063 +
8064
8065 diff --git a/3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch b/3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch
8066 similarity index 98%
8067 rename from 3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch
8068 rename to 3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch
8069 index acc5089..d960312 100644
8070 --- a/3.2.23/4420_grsecurity-2.9.1-3.2.23-201207242236.patch
8071 +++ b/3.2.24/4420_grsecurity-2.9.1-3.2.24-201207281946.patch
8072 @@ -1,5 +1,5 @@
8073 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
8074 -index dfa6fc6..7afd8a1 100644
8075 +index dfa6fc6..65f7dbe 100644
8076 --- a/Documentation/dontdiff
8077 +++ b/Documentation/dontdiff
8078 @@ -2,9 +2,11 @@
8079 @@ -22,7 +22,7 @@ index dfa6fc6..7afd8a1 100644
8080 *.grep
8081 *.grp
8082 *.gz
8083 -@@ -48,9 +51,11 @@
8084 +@@ -48,14 +51,17 @@
8085 *.tab.h
8086 *.tex
8087 *.ver
8088 @@ -34,7 +34,14 @@ index dfa6fc6..7afd8a1 100644
8089 *_vga16.c
8090 *~
8091 \#*#
8092 -@@ -70,6 +75,7 @@ Kerntypes
8093 + *.9
8094 +-.*
8095 ++.[^g]*
8096 ++.gen*
8097 + .*.d
8098 + .mm
8099 + 53c700_d.h
8100 +@@ -70,6 +76,7 @@ Kerntypes
8101 Module.markers
8102 Module.symvers
8103 PENDING
8104 @@ -42,7 +49,7 @@ index dfa6fc6..7afd8a1 100644
8105 SCCS
8106 System.map*
8107 TAGS
8108 -@@ -81,6 +87,7 @@ aic7*seq.h*
8109 +@@ -81,6 +88,7 @@ aic7*seq.h*
8110 aicasm
8111 aicdb.h*
8112 altivec*.c
8113 @@ -50,7 +57,7 @@ index dfa6fc6..7afd8a1 100644
8114 asm-offsets.h
8115 asm_offsets.h
8116 autoconf.h*
8117 -@@ -93,19 +100,24 @@ bounds.h
8118 +@@ -93,19 +101,24 @@ bounds.h
8119 bsetup
8120 btfixupprep
8121 build
8122 @@ -75,7 +82,7 @@ index dfa6fc6..7afd8a1 100644
8123 conmakehash
8124 consolemap_deftbl.c*
8125 cpustr.h
8126 -@@ -116,9 +128,11 @@ devlist.h*
8127 +@@ -116,9 +129,11 @@ devlist.h*
8128 dnotify_test
8129 docproc
8130 dslm
8131 @@ -87,7 +94,7 @@ index dfa6fc6..7afd8a1 100644
8132 fixdep
8133 flask.h
8134 fore200e_mkfirm
8135 -@@ -126,12 +140,15 @@ fore200e_pca_fw.c*
8136 +@@ -126,12 +141,15 @@ fore200e_pca_fw.c*
8137 gconf
8138 gconf.glade.h
8139 gen-devlist
8140 @@ -103,7 +110,7 @@ index dfa6fc6..7afd8a1 100644
8141 hpet_example
8142 hugepage-mmap
8143 hugepage-shm
8144 -@@ -146,7 +163,7 @@ int32.c
8145 +@@ -146,7 +164,7 @@ int32.c
8146 int4.c
8147 int8.c
8148 kallsyms
8149 @@ -112,7 +119,7 @@ index dfa6fc6..7afd8a1 100644
8150 keywords.c
8151 ksym.c*
8152 ksym.h*
8153 -@@ -154,7 +171,7 @@ kxgettext
8154 +@@ -154,7 +172,7 @@ kxgettext
8155 lkc_defs.h
8156 lex.c
8157 lex.*.c
8158 @@ -121,7 +128,7 @@ index dfa6fc6..7afd8a1 100644
8159 logo_*.c
8160 logo_*_clut224.c
8161 logo_*_mono.c
8162 -@@ -166,14 +183,15 @@ machtypes.h
8163 +@@ -166,14 +184,15 @@ machtypes.h
8164 map
8165 map_hugetlb
8166 maui_boot.h
8167 @@ -138,7 +145,7 @@ index dfa6fc6..7afd8a1 100644
8168 mkprep
8169 mkregtable
8170 mktables
8171 -@@ -209,6 +227,7 @@ r300_reg_safe.h
8172 +@@ -209,6 +228,7 @@ r300_reg_safe.h
8173 r420_reg_safe.h
8174 r600_reg_safe.h
8175 recordmcount
8176 @@ -146,7 +153,7 @@ index dfa6fc6..7afd8a1 100644
8177 relocs
8178 rlim_names.h
8179 rn50_reg_safe.h
8180 -@@ -218,7 +237,9 @@ series
8181 +@@ -218,7 +238,9 @@ series
8182 setup
8183 setup.bin
8184 setup.elf
8185 @@ -156,7 +163,7 @@ index dfa6fc6..7afd8a1 100644
8186 sm_tbl*
8187 split-include
8188 syscalltab.h
8189 -@@ -229,6 +250,7 @@ tftpboot.img
8190 +@@ -229,6 +251,7 @@ tftpboot.img
8191 timeconst.h
8192 times.h*
8193 trix_boot.h
8194 @@ -164,7 +171,7 @@ index dfa6fc6..7afd8a1 100644
8195 utsrelease.h*
8196 vdso-syms.lds
8197 vdso.lds
8198 -@@ -246,7 +268,9 @@ vmlinux
8199 +@@ -246,7 +269,9 @@ vmlinux
8200 vmlinux-*
8201 vmlinux.aout
8202 vmlinux.bin.all
8203 @@ -174,7 +181,7 @@ index dfa6fc6..7afd8a1 100644
8204 vmlinuz
8205 voffset.h
8206 vsyscall.lds
8207 -@@ -254,9 +278,11 @@ vsyscall_32.lds
8208 +@@ -254,9 +279,11 @@ vsyscall_32.lds
8209 wanxlfw.inc
8210 uImage
8211 unifdef
8212 @@ -205,7 +212,7 @@ index 81c287f..d456d02 100644
8213
8214 pcd. [PARIDE]
8215 diff --git a/Makefile b/Makefile
8216 -index 40d1e3b..bf02dfb 100644
8217 +index 80bb4fd..964ea28 100644
8218 --- a/Makefile
8219 +++ b/Makefile
8220 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
8221 @@ -231,7 +238,7 @@ index 40d1e3b..bf02dfb 100644
8222 $(Q)$(MAKE) $(build)=scripts/basic
8223 $(Q)rm -f .tmp_quiet_recordmcount
8224
8225 -@@ -564,6 +565,56 @@ else
8226 +@@ -564,6 +565,60 @@ else
8227 KBUILD_CFLAGS += -O2
8228 endif
8229
8230 @@ -264,10 +271,14 @@ index 40d1e3b..bf02dfb 100644
8231 +ifdef CONFIG_PAX_SIZE_OVERFLOW
8232 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
8233 +endif
8234 ++ifdef CONFIG_PAX_LATENT_ENTROPY
8235 ++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
8236 ++endif
8237 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
8238 -+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN_CFLAGS)
8239 ++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
8240 ++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8241 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
8242 -+export PLUGINCC CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
8243 ++export PLUGINCC CONSTIFY_PLUGIN
8244 +ifeq ($(KBUILD_EXTMOD),)
8245 +gcc-plugins:
8246 + $(Q)$(MAKE) $(build)=tools/gcc
8247 @@ -288,7 +299,7 @@ index 40d1e3b..bf02dfb 100644
8248 include $(srctree)/arch/$(SRCARCH)/Makefile
8249
8250 ifneq ($(CONFIG_FRAME_WARN),0)
8251 -@@ -708,7 +759,7 @@ export mod_strip_cmd
8252 +@@ -708,7 +763,7 @@ export mod_strip_cmd
8253
8254
8255 ifeq ($(KBUILD_EXTMOD),)
8256 @@ -297,7 +308,7 @@ index 40d1e3b..bf02dfb 100644
8257
8258 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
8259 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
8260 -@@ -932,6 +983,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
8261 +@@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
8262
8263 # The actual objects are generated when descending,
8264 # make sure no implicit rule kicks in
8265 @@ -306,7 +317,7 @@ index 40d1e3b..bf02dfb 100644
8266 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
8267
8268 # Handle descending into subdirectories listed in $(vmlinux-dirs)
8269 -@@ -941,7 +994,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
8270 +@@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
8271 # Error messages still appears in the original language
8272
8273 PHONY += $(vmlinux-dirs)
8274 @@ -315,7 +326,7 @@ index 40d1e3b..bf02dfb 100644
8275 $(Q)$(MAKE) $(build)=$@
8276
8277 # Store (new) KERNELRELASE string in include/config/kernel.release
8278 -@@ -985,6 +1038,7 @@ prepare0: archprepare FORCE
8279 +@@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
8280 $(Q)$(MAKE) $(build)=.
8281
8282 # All the preparing..
8283 @@ -323,7 +334,7 @@ index 40d1e3b..bf02dfb 100644
8284 prepare: prepare0
8285
8286 # Generate some files
8287 -@@ -1089,6 +1143,8 @@ all: modules
8288 +@@ -1089,6 +1147,8 @@ all: modules
8289 # using awk while concatenating to the final file.
8290
8291 PHONY += modules
8292 @@ -332,7 +343,7 @@ index 40d1e3b..bf02dfb 100644
8293 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
8294 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
8295 @$(kecho) ' Building modules, stage 2.';
8296 -@@ -1104,7 +1160,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
8297 +@@ -1104,7 +1164,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
8298
8299 # Target to prepare building external modules
8300 PHONY += modules_prepare
8301 @@ -341,7 +352,7 @@ index 40d1e3b..bf02dfb 100644
8302
8303 # Target to install modules
8304 PHONY += modules_install
8305 -@@ -1163,7 +1219,7 @@ CLEAN_FILES += vmlinux System.map \
8306 +@@ -1163,7 +1223,7 @@ CLEAN_FILES += vmlinux System.map \
8307 MRPROPER_DIRS += include/config usr/include include/generated \
8308 arch/*/include/generated
8309 MRPROPER_FILES += .config .config.old .version .old_version \
8310 @@ -350,7 +361,7 @@ index 40d1e3b..bf02dfb 100644
8311 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
8312
8313 # clean - Delete most, but leave enough to build external modules
8314 -@@ -1201,6 +1257,7 @@ distclean: mrproper
8315 +@@ -1201,6 +1261,7 @@ distclean: mrproper
8316 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
8317 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
8318 -o -name '.*.rej' \
8319 @@ -358,7 +369,7 @@ index 40d1e3b..bf02dfb 100644
8320 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
8321 -type f -print | xargs rm -f
8322
8323 -@@ -1361,6 +1418,8 @@ PHONY += $(module-dirs) modules
8324 +@@ -1361,6 +1422,8 @@ PHONY += $(module-dirs) modules
8325 $(module-dirs): crmodverdir $(objtree)/Module.symvers
8326 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
8327
8328 @@ -367,7 +378,7 @@ index 40d1e3b..bf02dfb 100644
8329 modules: $(module-dirs)
8330 @$(kecho) ' Building modules, stage 2.';
8331 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
8332 -@@ -1487,17 +1546,21 @@ else
8333 +@@ -1487,17 +1550,21 @@ else
8334 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
8335 endif
8336
8337 @@ -393,7 +404,7 @@ index 40d1e3b..bf02dfb 100644
8338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
8339 %.symtypes: %.c prepare scripts FORCE
8340 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
8341 -@@ -1507,11 +1570,15 @@ endif
8342 +@@ -1507,11 +1574,15 @@ endif
8343 $(cmd_crmodverdir)
8344 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
8345 $(build)=$(build-dir)
8346 @@ -2887,7 +2898,7 @@ index 6018c80..7c37203 100644
8347
8348 #endif /* _ASM_SYSTEM_H */
8349 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
8350 -index 97f8bf6..3986751 100644
8351 +index adda036..e0f33bb 100644
8352 --- a/arch/mips/include/asm/thread_info.h
8353 +++ b/arch/mips/include/asm/thread_info.h
8354 @@ -124,6 +124,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
8355 @@ -6585,7 +6596,7 @@ index 301421c..e2535d1 100644
8356 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8357 obj-y += fault_$(BITS).o
8358 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
8359 -index 8023fd7..c8e89e9 100644
8360 +index 8023fd7..bb71401 100644
8361 --- a/arch/sparc/mm/fault_32.c
8362 +++ b/arch/sparc/mm/fault_32.c
8363 @@ -21,6 +21,9 @@
8364 @@ -6598,7 +6609,7 @@ index 8023fd7..c8e89e9 100644
8365
8366 #include <asm/system.h>
8367 #include <asm/page.h>
8368 -@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
8369 +@@ -208,6 +211,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
8370 return safe_compute_effective_address(regs, insn);
8371 }
8372
8373 @@ -6689,40 +6700,49 @@ index 8023fd7..c8e89e9 100644
8374 + }
8375 + } while (0);
8376 +
8377 -+ { /* PaX: patched PLT emulation #2 */
8378 ++ do { /* PaX: patched PLT emulation #2 */
8379 + unsigned int ba;
8380 +
8381 + err = get_user(ba, (unsigned int *)regs->pc);
8382 +
8383 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
8384 ++ if (err)
8385 ++ break;
8386 ++
8387 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8388 + unsigned int addr;
8389 +
8390 -+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8391 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
8392 ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8393 ++ else
8394 ++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8395 + regs->pc = addr;
8396 + regs->npc = addr+4;
8397 + return 2;
8398 + }
8399 -+ }
8400 ++ } while (0);
8401 +
8402 + do { /* PaX: patched PLT emulation #3 */
8403 -+ unsigned int sethi, jmpl, nop;
8404 ++ unsigned int sethi, bajmpl, nop;
8405 +
8406 + err = get_user(sethi, (unsigned int *)regs->pc);
8407 -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
8408 ++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
8409 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
8410 +
8411 + if (err)
8412 + break;
8413 +
8414 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
8415 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
8416 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8417 + nop == 0x01000000U)
8418 + {
8419 + unsigned int addr;
8420 +
8421 + addr = (sethi & 0x003FFFFFU) << 10;
8422 + regs->u_regs[UREG_G1] = addr;
8423 -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8424 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8425 ++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8426 ++ else
8427 ++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8428 + regs->pc = addr;
8429 + regs->npc = addr+4;
8430 + return 2;
8431 @@ -6867,7 +6887,7 @@ index 8023fd7..c8e89e9 100644
8432 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
8433 int text_fault)
8434 {
8435 -@@ -280,6 +545,24 @@ good_area:
8436 +@@ -280,6 +554,24 @@ good_area:
8437 if(!(vma->vm_flags & VM_WRITE))
8438 goto bad_area;
8439 } else {
8440 @@ -6893,7 +6913,7 @@ index 8023fd7..c8e89e9 100644
8441 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
8442 goto bad_area;
8443 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
8444 -index 504c062..6fcb9c6 100644
8445 +index 504c062..a383267 100644
8446 --- a/arch/sparc/mm/fault_64.c
8447 +++ b/arch/sparc/mm/fault_64.c
8448 @@ -21,6 +21,9 @@
8449 @@ -6915,7 +6935,7 @@ index 504c062..6fcb9c6 100644
8450 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
8451 dump_stack();
8452 unhandled_fault(regs->tpc, current, regs);
8453 -@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
8454 +@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
8455 show_regs(regs);
8456 }
8457
8458 @@ -7010,15 +7030,21 @@ index 504c062..6fcb9c6 100644
8459 + }
8460 + } while (0);
8461 +
8462 -+ { /* PaX: patched PLT emulation #2 */
8463 ++ do { /* PaX: patched PLT emulation #2 */
8464 + unsigned int ba;
8465 +
8466 + err = get_user(ba, (unsigned int *)regs->tpc);
8467 +
8468 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
8469 ++ if (err)
8470 ++ break;
8471 ++
8472 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8473 + unsigned long addr;
8474 +
8475 -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8476 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
8477 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
8478 ++ else
8479 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8480 +
8481 + if (test_thread_flag(TIF_32BIT))
8482 + addr &= 0xFFFFFFFFUL;
8483 @@ -7027,27 +7053,30 @@ index 504c062..6fcb9c6 100644
8484 + regs->tnpc = addr+4;
8485 + return 2;
8486 + }
8487 -+ }
8488 ++ } while (0);
8489 +
8490 + do { /* PaX: patched PLT emulation #3 */
8491 -+ unsigned int sethi, jmpl, nop;
8492 ++ unsigned int sethi, bajmpl, nop;
8493 +
8494 + err = get_user(sethi, (unsigned int *)regs->tpc);
8495 -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
8496 ++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
8497 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
8498 +
8499 + if (err)
8500 + break;
8501 +
8502 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
8503 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
8504 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8505 + nop == 0x01000000U)
8506 + {
8507 + unsigned long addr;
8508 +
8509 + addr = (sethi & 0x003FFFFFU) << 10;
8510 + regs->u_regs[UREG_G1] = addr;
8511 -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8512 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8513 ++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
8514 ++ else
8515 ++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
8516 +
8517 + if (test_thread_flag(TIF_32BIT))
8518 + addr &= 0xFFFFFFFFUL;
8519 @@ -7373,7 +7402,7 @@ index 504c062..6fcb9c6 100644
8520 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8521 {
8522 struct mm_struct *mm = current->mm;
8523 -@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8524 +@@ -340,6 +803,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
8525 if (!vma)
8526 goto bad_area;
8527
8528 @@ -9152,7 +9181,7 @@ index 20370c6..a2eb9b0 100644
8529 "popl %%ebp\n\t"
8530 "popl %%edi\n\t"
8531 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8532 -index 58cb6d4..ca9010d 100644
8533 +index 58cb6d4..a4b806c 100644
8534 --- a/arch/x86/include/asm/atomic.h
8535 +++ b/arch/x86/include/asm/atomic.h
8536 @@ -22,7 +22,18 @@
8537 @@ -9560,6 +9589,51 @@ index 58cb6d4..ca9010d 100644
8538
8539 /*
8540 * atomic_dec_if_positive - decrement by 1 if old value positive
8541 +@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
8542 + #endif
8543 +
8544 + /* These are x86-specific, used by some header files */
8545 +-#define atomic_clear_mask(mask, addr) \
8546 +- asm volatile(LOCK_PREFIX "andl %0,%1" \
8547 +- : : "r" (~(mask)), "m" (*(addr)) : "memory")
8548 ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
8549 ++{
8550 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
8551 ++ : "+m" (v->counter)
8552 ++ : "r" (~(mask))
8553 ++ : "memory");
8554 ++}
8555 +
8556 +-#define atomic_set_mask(mask, addr) \
8557 +- asm volatile(LOCK_PREFIX "orl %0,%1" \
8558 +- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
8559 +- : "memory")
8560 ++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
8561 ++{
8562 ++ asm volatile(LOCK_PREFIX "andl %1,%0"
8563 ++ : "+m" (v->counter)
8564 ++ : "r" (~(mask))
8565 ++ : "memory");
8566 ++}
8567 ++
8568 ++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
8569 ++{
8570 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
8571 ++ : "+m" (v->counter)
8572 ++ : "r" (mask)
8573 ++ : "memory");
8574 ++}
8575 ++
8576 ++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
8577 ++{
8578 ++ asm volatile(LOCK_PREFIX "orl %1,%0"
8579 ++ : "+m" (v->counter)
8580 ++ : "r" (mask)
8581 ++ : "memory");
8582 ++}
8583 +
8584 + /* Atomic operations are already serializing on x86 */
8585 + #define smp_mb__before_atomic_dec() barrier()
8586 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
8587 index 24098aa..1e37723 100644
8588 --- a/arch/x86/include/asm/atomic64_32.h
8589 @@ -18752,7 +18826,7 @@ index 42eb330..139955c 100644
8590
8591 return ret;
8592 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
8593 -index 37a458b..e63d183 100644
8594 +index e61f79c..bbbaa4d 100644
8595 --- a/arch/x86/kernel/reboot.c
8596 +++ b/arch/x86/kernel/reboot.c
8597 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
8598 @@ -18835,7 +18909,7 @@ index 37a458b..e63d183 100644
8599 }
8600 #ifdef CONFIG_APM_MODULE
8601 EXPORT_SYMBOL(machine_real_restart);
8602 -@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
8603 +@@ -548,7 +578,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
8604 * try to force a triple fault and then cycle between hitting the keyboard
8605 * controller and doing that
8606 */
8607 @@ -18844,7 +18918,7 @@ index 37a458b..e63d183 100644
8608 {
8609 int i;
8610 int attempt = 0;
8611 -@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
8612 +@@ -672,13 +702,13 @@ void native_machine_shutdown(void)
8613 #endif
8614 }
8615
8616 @@ -18860,7 +18934,7 @@ index 37a458b..e63d183 100644
8617 {
8618 printk("machine restart\n");
8619
8620 -@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
8621 +@@ -687,7 +717,7 @@ static void native_machine_restart(char *__unused)
8622 __machine_emergency_restart(0);
8623 }
8624
8625 @@ -18869,7 +18943,7 @@ index 37a458b..e63d183 100644
8626 {
8627 /* stop other cpus and apics */
8628 machine_shutdown();
8629 -@@ -690,7 +720,7 @@ static void native_machine_halt(void)
8630 +@@ -698,7 +728,7 @@ static void native_machine_halt(void)
8631 stop_this_cpu(NULL);
8632 }
8633
8634 @@ -18878,7 +18952,7 @@ index 37a458b..e63d183 100644
8635 {
8636 if (pm_power_off) {
8637 if (!reboot_force)
8638 -@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
8639 +@@ -707,6 +737,7 @@ static void native_machine_power_off(void)
8640 }
8641 /* a fallback in case there is no PM info available */
8642 tboot_shutdown(TB_SHUTDOWN_HALT);
8643 @@ -27506,7 +27580,7 @@ index 7b72502..646105c 100644
8644 err = -EFAULT;
8645 goto out;
8646 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
8647 -index 688be8a..8a37d98 100644
8648 +index 9e76a32..48d7145 100644
8649 --- a/block/scsi_ioctl.c
8650 +++ b/block/scsi_ioctl.c
8651 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
8652 @@ -29562,7 +29636,7 @@ index 1aeaaba..e018570 100644
8653 .part_num = MBCS_PART_NUM,
8654 .mfg_num = MBCS_MFG_NUM,
8655 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
8656 -index 1451790..f705c30 100644
8657 +index 1451790..3c7dfbb 100644
8658 --- a/drivers/char/mem.c
8659 +++ b/drivers/char/mem.c
8660 @@ -18,6 +18,7 @@
8661 @@ -29624,7 +29698,7 @@ index 1451790..f705c30 100644
8662
8663 - remaining = copy_to_user(buf, ptr, sz);
8664 +#ifdef CONFIG_PAX_USERCOPY
8665 -+ temp = kmalloc(sz, GFP_KERNEL);
8666 ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
8667 + if (!temp) {
8668 + unxlate_dev_mem_ptr(p, ptr);
8669 + return -ENOMEM;
8670 @@ -29669,7 +29743,7 @@ index 1451790..f705c30 100644
8671
8672 - if (copy_to_user(buf, kbuf, sz))
8673 +#ifdef CONFIG_PAX_USERCOPY
8674 -+ temp = kmalloc(sz, GFP_KERNEL);
8675 ++ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
8676 + if (!temp)
8677 + return -ENOMEM;
8678 + memcpy(temp, kbuf, sz);
8679 @@ -29711,7 +29785,7 @@ index da3cfee..a5a6606 100644
8680
8681 *ppos = i;
8682 diff --git a/drivers/char/random.c b/drivers/char/random.c
8683 -index 6035ab8..d45e4d4 100644
8684 +index 6035ab8..c7e4a44 100644
8685 --- a/drivers/char/random.c
8686 +++ b/drivers/char/random.c
8687 @@ -261,8 +261,13 @@
8688 @@ -29746,7 +29820,25 @@ index 6035ab8..d45e4d4 100644
8689 #if 0
8690 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
8691 { 2048, 1638, 1231, 819, 411, 1 },
8692 -@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
8693 +@@ -722,6 +734,17 @@ void add_disk_randomness(struct gendisk *disk)
8694 + }
8695 + #endif
8696 +
8697 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
8698 ++u64 latent_entropy;
8699 ++
8700 ++__init void transfer_latent_entropy(void)
8701 ++{
8702 ++ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
8703 ++ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
8704 ++// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
8705 ++}
8706 ++#endif
8707 ++
8708 + /*********************************************************************
8709 + *
8710 + * Entropy extraction routines
8711 +@@ -909,7 +932,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
8712
8713 extract_buf(r, tmp);
8714 i = min_t(int, nbytes, EXTRACT_SIZE);
8715 @@ -29755,7 +29847,7 @@ index 6035ab8..d45e4d4 100644
8716 ret = -EFAULT;
8717 break;
8718 }
8719 -@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
8720 +@@ -1228,7 +1251,7 @@ EXPORT_SYMBOL(generate_random_uuid);
8721 #include <linux/sysctl.h>
8722
8723 static int min_read_thresh = 8, min_write_thresh;
8724 @@ -29764,7 +29856,7 @@ index 6035ab8..d45e4d4 100644
8725 static int max_write_thresh = INPUT_POOL_WORDS * 32;
8726 static char sysctl_bootid[16];
8727
8728 -@@ -1250,10 +1262,15 @@ static int proc_do_uuid(ctl_table *table, int write,
8729 +@@ -1250,10 +1273,15 @@ static int proc_do_uuid(ctl_table *table, int write,
8730 uuid = table->data;
8731 if (!uuid) {
8732 uuid = tmp_uuid;
8733 @@ -30880,7 +30972,7 @@ index 578ddfc..86ac0d0 100644
8734 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
8735 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
8736 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
8737 -index 6aa7716..8e5a304 100644
8738 +index cc75c4b..4542065 100644
8739 --- a/drivers/gpu/drm/i915/intel_display.c
8740 +++ b/drivers/gpu/drm/i915/intel_display.c
8741 @@ -2196,7 +2196,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
8742 @@ -30901,16 +30993,19 @@ index 6aa7716..8e5a304 100644
8743 }
8744
8745 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
8746 -@@ -6982,7 +6982,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8747 +@@ -6980,9 +6980,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8748
8749 - atomic_clear_mask(1 << intel_crtc->plane,
8750 - &obj->pending_flip.counter);
8751 + obj = work->old_fb_obj;
8752 +
8753 +- atomic_clear_mask(1 << intel_crtc->plane,
8754 +- &obj->pending_flip.counter);
8755 - if (atomic_read(&obj->pending_flip) == 0)
8756 ++ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
8757 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
8758 wake_up(&dev_priv->pending_flip_queue);
8759
8760 schedule_work(&work->work);
8761 -@@ -7177,7 +7177,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
8762 +@@ -7177,7 +7176,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
8763 OUT_RING(fb->pitch | obj->tiling_mode);
8764 OUT_RING(obj->gtt_offset);
8765
8766 @@ -30925,7 +31020,7 @@ index 6aa7716..8e5a304 100644
8767 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
8768 OUT_RING(pf | pipesrc);
8769 ADVANCE_LP_RING();
8770 -@@ -7309,7 +7315,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8771 +@@ -7309,7 +7314,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8772 /* Block clients from rendering to the new back buffer until
8773 * the flip occurs and the object is no longer visible.
8774 */
8775 @@ -30934,7 +31029,7 @@ index 6aa7716..8e5a304 100644
8776
8777 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
8778 if (ret)
8779 -@@ -7323,7 +7329,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8780 +@@ -7323,7 +7328,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8781 return 0;
8782
8783 cleanup_pending:
8784 @@ -31617,10 +31712,10 @@ index 8a8725c..afed796 100644
8785 marker = list_first_entry(&queue->head,
8786 struct vmw_marker, head);
8787 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
8788 -index c27b402..353115a 100644
8789 +index 95430a0..1a65ca9 100644
8790 --- a/drivers/hid/hid-core.c
8791 +++ b/drivers/hid/hid-core.c
8792 -@@ -2013,7 +2013,7 @@ static bool hid_ignore(struct hid_device *hdev)
8793 +@@ -2020,7 +2020,7 @@ static bool hid_ignore(struct hid_device *hdev)
8794
8795 int hid_add_device(struct hid_device *hdev)
8796 {
8797 @@ -31629,7 +31724,7 @@ index c27b402..353115a 100644
8798 int ret;
8799
8800 if (WARN_ON(hdev->status & HID_STAT_ADDED))
8801 -@@ -2028,7 +2028,7 @@ int hid_add_device(struct hid_device *hdev)
8802 +@@ -2035,7 +2035,7 @@ int hid_add_device(struct hid_device *hdev)
8803 /* XXX hack, any other cleaner solution after the driver core
8804 * is converted to allow more than 20 bytes as the device name? */
8805 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
8806 @@ -33125,10 +33220,10 @@ index b8d8611..7a4a04b 100644
8807 #include <linux/input.h>
8808 #include <linux/gameport.h>
8809 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
8810 -index d728875..844c89b 100644
8811 +index 2189cbf..05ad609 100644
8812 --- a/drivers/input/joystick/xpad.c
8813 +++ b/drivers/input/joystick/xpad.c
8814 -@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
8815 +@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
8816
8817 static int xpad_led_probe(struct usb_xpad *xpad)
8818 {
8819 @@ -33137,7 +33232,7 @@ index d728875..844c89b 100644
8820 long led_no;
8821 struct xpad_led *led;
8822 struct led_classdev *led_cdev;
8823 -@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
8824 +@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
8825 if (!led)
8826 return -ENOMEM;
8827
8828 @@ -33612,7 +33707,7 @@ index 1f23e04..08d9a20 100644
8829
8830 spin_lock(&receiving_list_lock);
8831 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
8832 -index 9bfd057..01180bc 100644
8833 +index dae2b7a..5c50c7e 100644
8834 --- a/drivers/md/dm-raid1.c
8835 +++ b/drivers/md/dm-raid1.c
8836 @@ -40,7 +40,7 @@ enum dm_raid1_error {
8837 @@ -33678,7 +33773,7 @@ index 9bfd057..01180bc 100644
8838 ms->mirror[mirror].error_type = 0;
8839 ms->mirror[mirror].offset = offset;
8840
8841 -@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
8842 +@@ -1348,7 +1348,7 @@ static void mirror_resume(struct dm_target *ti)
8843 */
8844 static char device_status_char(struct mirror *m)
8845 {
8846 @@ -33823,7 +33918,7 @@ index 4720f68..78d1df7 100644
8847
8848 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
8849 diff --git a/drivers/md/md.c b/drivers/md/md.c
8850 -index 700ecae..8122a9c 100644
8851 +index d8646d7..8122a9c 100644
8852 --- a/drivers/md/md.c
8853 +++ b/drivers/md/md.c
8854 @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
8855 @@ -33895,125 +33990,7 @@ index 700ecae..8122a9c 100644
8856
8857 INIT_LIST_HEAD(&rdev->same_set);
8858 init_waitqueue_head(&rdev->blocked_wait);
8859 -@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
8860 - return sprintf(page, "%s\n", array_states[st]);
8861 - }
8862 -
8863 --static int do_md_stop(struct mddev * mddev, int ro, int is_open);
8864 --static int md_set_readonly(struct mddev * mddev, int is_open);
8865 -+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
8866 -+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
8867 - static int do_md_run(struct mddev * mddev);
8868 - static int restart_array(struct mddev *mddev);
8869 -
8870 -@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
8871 - /* stopping an active array */
8872 - if (atomic_read(&mddev->openers) > 0)
8873 - return -EBUSY;
8874 -- err = do_md_stop(mddev, 0, 0);
8875 -+ err = do_md_stop(mddev, 0, NULL);
8876 - break;
8877 - case inactive:
8878 - /* stopping an active array */
8879 - if (mddev->pers) {
8880 - if (atomic_read(&mddev->openers) > 0)
8881 - return -EBUSY;
8882 -- err = do_md_stop(mddev, 2, 0);
8883 -+ err = do_md_stop(mddev, 2, NULL);
8884 - } else
8885 - err = 0; /* already inactive */
8886 - break;
8887 -@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
8888 - break; /* not supported yet */
8889 - case readonly:
8890 - if (mddev->pers)
8891 -- err = md_set_readonly(mddev, 0);
8892 -+ err = md_set_readonly(mddev, NULL);
8893 - else {
8894 - mddev->ro = 1;
8895 - set_disk_ro(mddev->gendisk, 1);
8896 -@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
8897 - case read_auto:
8898 - if (mddev->pers) {
8899 - if (mddev->ro == 0)
8900 -- err = md_set_readonly(mddev, 0);
8901 -+ err = md_set_readonly(mddev, NULL);
8902 - else if (mddev->ro == 1)
8903 - err = restart_array(mddev);
8904 - if (err == 0) {
8905 -@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
8906 - }
8907 - EXPORT_SYMBOL_GPL(md_stop);
8908 -
8909 --static int md_set_readonly(struct mddev *mddev, int is_open)
8910 -+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
8911 - {
8912 - int err = 0;
8913 - mutex_lock(&mddev->open_mutex);
8914 -- if (atomic_read(&mddev->openers) > is_open) {
8915 -+ if (atomic_read(&mddev->openers) > !!bdev) {
8916 - printk("md: %s still in use.\n",mdname(mddev));
8917 - err = -EBUSY;
8918 - goto out;
8919 - }
8920 -+ if (bdev)
8921 -+ sync_blockdev(bdev);
8922 - if (mddev->pers) {
8923 - __md_stop_writes(mddev);
8924 -
8925 -@@ -5108,18 +5110,26 @@ out:
8926 - * 0 - completely stop and dis-assemble array
8927 - * 2 - stop but do not disassemble array
8928 - */
8929 --static int do_md_stop(struct mddev * mddev, int mode, int is_open)
8930 -+static int do_md_stop(struct mddev * mddev, int mode,
8931 -+ struct block_device *bdev)
8932 - {
8933 - struct gendisk *disk = mddev->gendisk;
8934 - struct md_rdev *rdev;
8935 -
8936 - mutex_lock(&mddev->open_mutex);
8937 -- if (atomic_read(&mddev->openers) > is_open ||
8938 -+ if (atomic_read(&mddev->openers) > !!bdev ||
8939 - mddev->sysfs_active) {
8940 - printk("md: %s still in use.\n",mdname(mddev));
8941 - mutex_unlock(&mddev->open_mutex);
8942 - return -EBUSY;
8943 - }
8944 -+ if (bdev)
8945 -+ /* It is possible IO was issued on some other
8946 -+ * open file which was closed before we took ->open_mutex.
8947 -+ * As that was not the last close __blkdev_put will not
8948 -+ * have called sync_blockdev, so we must.
8949 -+ */
8950 -+ sync_blockdev(bdev);
8951 -
8952 - if (mddev->pers) {
8953 - if (mddev->ro)
8954 -@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
8955 - err = do_md_run(mddev);
8956 - if (err) {
8957 - printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
8958 -- do_md_stop(mddev, 0, 0);
8959 -+ do_md_stop(mddev, 0, NULL);
8960 - }
8961 - }
8962 -
8963 -@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
8964 - goto done_unlock;
8965 -
8966 - case STOP_ARRAY:
8967 -- err = do_md_stop(mddev, 0, 1);
8968 -+ err = do_md_stop(mddev, 0, bdev);
8969 - goto done_unlock;
8970 -
8971 - case STOP_ARRAY_RO:
8972 -- err = md_set_readonly(mddev, 1);
8973 -+ err = md_set_readonly(mddev, bdev);
8974 - goto done_unlock;
8975 -
8976 - case BLKROSET:
8977 -@@ -6686,7 +6696,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8978 +@@ -6696,7 +6696,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8979
8980 spin_unlock(&pers_lock);
8981 seq_printf(seq, "\n");
8982 @@ -34022,7 +33999,7 @@ index 700ecae..8122a9c 100644
8983 return 0;
8984 }
8985 if (v == (void*)2) {
8986 -@@ -6775,7 +6785,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8987 +@@ -6785,7 +6785,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8988 chunk_kb ? "KB" : "B");
8989 if (bitmap->file) {
8990 seq_printf(seq, ", file: ");
8991 @@ -34031,7 +34008,7 @@ index 700ecae..8122a9c 100644
8992 }
8993
8994 seq_printf(seq, "\n");
8995 -@@ -6806,7 +6816,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
8996 +@@ -6816,7 +6816,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
8997 return error;
8998
8999 seq = file->private_data;
9000 @@ -34040,7 +34017,7 @@ index 700ecae..8122a9c 100644
9001 return error;
9002 }
9003
9004 -@@ -6820,7 +6830,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
9005 +@@ -6830,7 +6830,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
9006 /* always allow read */
9007 mask = POLLIN | POLLRDNORM;
9008
9009 @@ -34049,7 +34026,7 @@ index 700ecae..8122a9c 100644
9010 mask |= POLLERR | POLLPRI;
9011 return mask;
9012 }
9013 -@@ -6864,7 +6874,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
9014 +@@ -6874,7 +6874,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
9015 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
9016 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
9017 (int)part_stat_read(&disk->part0, sectors[1]) -
9018 @@ -34139,7 +34116,7 @@ index 1cbfc6b..56e1dbb 100644
9019 /*----------------------------------------------------------------*/
9020
9021 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
9022 -index 7af60ec..10a4a5d 100644
9023 +index 2d97bf0..5caa9cf 100644
9024 --- a/drivers/md/raid1.c
9025 +++ b/drivers/md/raid1.c
9026 @@ -1581,7 +1581,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
9027 @@ -34151,7 +34128,7 @@ index 7af60ec..10a4a5d 100644
9028 }
9029 sectors -= s;
9030 sect += s;
9031 -@@ -1794,7 +1794,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
9032 +@@ -1800,7 +1800,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
9033 test_bit(In_sync, &rdev->flags)) {
9034 if (r1_sync_page_io(rdev, sect, s,
9035 conf->tmppage, READ)) {
9036 @@ -34224,10 +34201,10 @@ index 7a9eef6..707cb03 100644
9037
9038 rdev_dec_pending(rdev, mddev);
9039 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
9040 -index 6ba4954..5ce122a 100644
9041 +index 26ef63a..bd587cd 100644
9042 --- a/drivers/md/raid5.c
9043 +++ b/drivers/md/raid5.c
9044 -@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
9045 +@@ -1618,19 +1618,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
9046 (unsigned long long)(sh->sector
9047 + rdev->data_offset),
9048 bdevname(rdev->bdev, b));
9049 @@ -34251,7 +34228,7 @@ index 6ba4954..5ce122a 100644
9050 if (conf->mddev->degraded >= conf->max_degraded)
9051 printk_ratelimited(
9052 KERN_WARNING
9053 -@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
9054 +@@ -1650,7 +1650,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
9055 (unsigned long long)(sh->sector
9056 + rdev->data_offset),
9057 bdn);
9058 @@ -34287,7 +34264,7 @@ index a7d876f..8c21b61 100644
9059 struct dvb_demux *demux;
9060 void *priv;
9061 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
9062 -index f732877..d38c35a 100644
9063 +index d5cda35..017af46 100644
9064 --- a/drivers/media/dvb/dvb-core/dvbdev.c
9065 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
9066 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
9067 @@ -35354,7 +35331,7 @@ index e1159e5..e18684d 100644
9068 /* Set media type */
9069 switch (adapter->pdev->device) {
9070 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
9071 -index e556fc3..fa9199d 100644
9072 +index 3072d35..a0f4827 100644
9073 --- a/drivers/net/ethernet/intel/e1000e/82571.c
9074 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
9075 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
9076 @@ -35708,7 +35685,7 @@ index 4a518a3..936b334 100644
9077 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
9078 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
9079 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
9080 -index cc2565c..7325c3c 100644
9081 +index 9e61d6b..852f305 100644
9082 --- a/drivers/net/ethernet/realtek/r8169.c
9083 +++ b/drivers/net/ethernet/realtek/r8169.c
9084 @@ -702,17 +702,17 @@ struct rtl8169_private {
9085 @@ -35761,10 +35738,10 @@ index c07cfe9..81cbf7e 100644
9086
9087 /* To mask all all interrupts.*/
9088 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9089 -index 72cd190..fcf7fb3 100644
9090 +index d4d2bc1..14b8672 100644
9091 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9092 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9093 -@@ -1599,7 +1599,7 @@ static const struct file_operations stmmac_rings_status_fops = {
9094 +@@ -1602,7 +1602,7 @@ static const struct file_operations stmmac_rings_status_fops = {
9095 .open = stmmac_sysfs_ring_open,
9096 .read = seq_read,
9097 .llseek = seq_lseek,
9098 @@ -35773,7 +35750,7 @@ index 72cd190..fcf7fb3 100644
9099 };
9100
9101 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
9102 -@@ -1671,7 +1671,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
9103 +@@ -1674,7 +1674,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
9104 .open = stmmac_sysfs_dma_cap_open,
9105 .read = seq_read,
9106 .llseek = seq_lseek,
9107 @@ -35782,19 +35759,6 @@ index 72cd190..fcf7fb3 100644
9108 };
9109
9110 static int stmmac_init_fs(struct net_device *dev)
9111 -diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
9112 -index 1b7082d..c786773 100644
9113 ---- a/drivers/net/macvtap.c
9114 -+++ b/drivers/net/macvtap.c
9115 -@@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
9116 - }
9117 - base = (unsigned long)from->iov_base + offset1;
9118 - size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
9119 -+ if (i + size >= MAX_SKB_FRAGS)
9120 -+ return -EFAULT;
9121 - num_pages = get_user_pages_fast(base, size, 0, &page[i]);
9122 - if ((num_pages != size) ||
9123 - (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
9124 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
9125 index 3ed983c..a1bb418 100644
9126 --- a/drivers/net/ppp/ppp_generic.c
9127 @@ -37337,7 +37301,7 @@ index 9de9db2..1e09660 100644
9128 fc_frame_free(fp);
9129 }
9130 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
9131 -index db9238f..4378ed2 100644
9132 +index 4868fc9..7f2e028 100644
9133 --- a/drivers/scsi/libsas/sas_ata.c
9134 +++ b/drivers/scsi/libsas/sas_ata.c
9135 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
9136 @@ -38323,51 +38287,6 @@ index 0842cc7..61d886d 100644
9137 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
9138 if (--cmd->outstanding_r2ts < 1) {
9139 iscsit_stop_dataout_timer(cmd);
9140 -diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
9141 -index 65ea65a..93b9406 100644
9142 ---- a/drivers/target/target_core_cdb.c
9143 -+++ b/drivers/target/target_core_cdb.c
9144 -@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
9145 - if (num_blocks != 0)
9146 - range = num_blocks;
9147 - else
9148 -- range = (dev->transport->get_blocks(dev) - lba);
9149 -+ range = (dev->transport->get_blocks(dev) - lba) + 1;
9150 -
9151 - pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
9152 - (unsigned long long)lba, (unsigned long long)range);
9153 -diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
9154 -index b75bc92..9145141 100644
9155 ---- a/drivers/target/target_core_pr.c
9156 -+++ b/drivers/target/target_core_pr.c
9157 -@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
9158 - if (IS_ERR(file) || !file || !file->f_dentry) {
9159 - pr_err("filp_open(%s) for APTPL metadata"
9160 - " failed\n", path);
9161 -- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
9162 -+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
9163 - }
9164 -
9165 - iov[0].iov_base = &buf[0];
9166 -@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
9167 - " SPC-2 reservation is held, returning"
9168 - " RESERVATION_CONFLICT\n");
9169 - cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
9170 -- ret = EINVAL;
9171 -+ ret = -EINVAL;
9172 - goto out;
9173 - }
9174 -
9175 -@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
9176 - */
9177 - if (!cmd->se_sess) {
9178 - cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
9179 -- return -EINVAL;
9180 -+ ret = -EINVAL;
9181 -+ goto out;
9182 - }
9183 -
9184 - if (cmd->data_length < 24) {
9185 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
9186 index 6845228..df77141 100644
9187 --- a/drivers/target/target_core_tmr.c
9188 @@ -38469,19 +38388,6 @@ index 5660916..f6dab21 100644
9189 smp_mb__after_atomic_inc();
9190 }
9191 }
9192 -diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
9193 -index d95cfe2..278819c 100644
9194 ---- a/drivers/target/tcm_fc/tfc_cmd.c
9195 -+++ b/drivers/target/tcm_fc/tfc_cmd.c
9196 -@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
9197 - {
9198 - struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
9199 -
9200 -+ if (cmd->aborted)
9201 -+ return ~0;
9202 - return fc_seq_exch(cmd->seq)->rxid;
9203 - }
9204 -
9205 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
9206 index b9040be..e3f5aab 100644
9207 --- a/drivers/tty/hvc/hvcs.c
9208 @@ -39297,10 +39203,10 @@ index 57c01ab..8a05959 100644
9209
9210 /*
9211 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
9212 -index c14c42b..f955cc2 100644
9213 +index ae66278..579de88b 100644
9214 --- a/drivers/vhost/vhost.c
9215 +++ b/drivers/vhost/vhost.c
9216 -@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
9217 +@@ -631,7 +631,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
9218 return 0;
9219 }
9220
9221 @@ -42731,7 +42637,7 @@ index a6395bd..f1e376a 100644
9222 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
9223 #ifdef __alpha__
9224 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
9225 -index 6ff96c6..3020df9 100644
9226 +index 6ff96c6..566204e 100644
9227 --- a/fs/binfmt_elf.c
9228 +++ b/fs/binfmt_elf.c
9229 @@ -32,6 +32,7 @@
9230 @@ -42862,7 +42768,7 @@ index 6ff96c6..3020df9 100644
9231 error = -ENOMEM;
9232 goto out_close;
9233 }
9234 -@@ -528,6 +552,349 @@ out:
9235 +@@ -528,6 +552,311 @@ out:
9236 return error;
9237 }
9238
9239 @@ -42882,15 +42788,6 @@ index 6ff96c6..3020df9 100644
9240 + pax_flags |= MF_PAX_SEGMEXEC;
9241 +#endif
9242 +
9243 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9244 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9245 -+ if ((__supported_pte_mask & _PAGE_NX))
9246 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
9247 -+ else
9248 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9249 -+ }
9250 -+#endif
9251 -+
9252 +#ifdef CONFIG_PAX_EMUTRAMP
9253 + if (elf_phdata->p_flags & PF_EMUTRAMP)
9254 + pax_flags |= MF_PAX_EMUTRAMP;
9255 @@ -42924,15 +42821,6 @@ index 6ff96c6..3020df9 100644
9256 + pax_flags |= MF_PAX_SEGMEXEC;
9257 +#endif
9258 +
9259 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9260 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9261 -+ if ((__supported_pte_mask & _PAGE_NX))
9262 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
9263 -+ else
9264 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9265 -+ }
9266 -+#endif
9267 -+
9268 +#ifdef CONFIG_PAX_EMUTRAMP
9269 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
9270 + pax_flags |= MF_PAX_EMUTRAMP;
9271 @@ -42968,15 +42856,6 @@ index 6ff96c6..3020df9 100644
9272 + pax_flags |= MF_PAX_SEGMEXEC;
9273 +#endif
9274 +
9275 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9276 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9277 -+ if ((__supported_pte_mask & _PAGE_NX))
9278 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
9279 -+ else
9280 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9281 -+ }
9282 -+#endif
9283 -+
9284 +#ifdef CONFIG_PAX_EMUTRAMP
9285 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
9286 + pax_flags |= MF_PAX_EMUTRAMP;
9287 @@ -43010,15 +42889,6 @@ index 6ff96c6..3020df9 100644
9288 + pax_flags |= MF_PAX_SEGMEXEC;
9289 +#endif
9290 +
9291 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9292 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9293 -+ if ((__supported_pte_mask & _PAGE_NX))
9294 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
9295 -+ else
9296 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9297 -+ }
9298 -+#endif
9299 -+
9300 +#ifdef CONFIG_PAX_EMUTRAMP
9301 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
9302 + pax_flags |= MF_PAX_EMUTRAMP;
9303 @@ -43038,7 +42908,7 @@ index 6ff96c6..3020df9 100644
9304 +}
9305 +#endif
9306 +
9307 -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
9308 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
9309 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
9310 +{
9311 + unsigned long pax_flags = 0UL;
9312 @@ -43055,15 +42925,6 @@ index 6ff96c6..3020df9 100644
9313 + pax_flags |= MF_PAX_SEGMEXEC;
9314 +#endif
9315 +
9316 -+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9317 -+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9318 -+ if ((__supported_pte_mask & _PAGE_NX))
9319 -+ pax_flags &= ~MF_PAX_SEGMEXEC;
9320 -+ else
9321 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9322 -+ }
9323 -+#endif
9324 -+
9325 +#ifdef CONFIG_PAX_EMUTRAMP
9326 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
9327 + pax_flags |= MF_PAX_EMUTRAMP;
9328 @@ -43085,19 +42946,17 @@ index 6ff96c6..3020df9 100644
9329 + pax_flags |= MF_PAX_PAGEEXEC;
9330 +#endif
9331 +
9332 ++#ifdef CONFIG_PAX_SEGMEXEC
9333 ++ pax_flags |= MF_PAX_SEGMEXEC;
9334 ++#endif
9335 ++
9336 +#ifdef CONFIG_PAX_MPROTECT
9337 + pax_flags |= MF_PAX_MPROTECT;
9338 +#endif
9339 +
9340 +#ifdef CONFIG_PAX_RANDMMAP
9341 -+ pax_flags |= MF_PAX_RANDMMAP;
9342 -+#endif
9343 -+
9344 -+#ifdef CONFIG_PAX_SEGMEXEC
9345 -+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
9346 -+ pax_flags &= ~MF_PAX_PAGEEXEC;
9347 -+ pax_flags |= MF_PAX_SEGMEXEC;
9348 -+ }
9349 ++ if (randomize_va_space)
9350 ++ pax_flags |= MF_PAX_RANDMMAP;
9351 +#endif
9352 +
9353 +#endif
9354 @@ -43201,6 +43060,15 @@ index 6ff96c6..3020df9 100644
9355 + if (pt_pax_flags != ~0UL)
9356 + pax_flags = pt_pax_flags;
9357 +
9358 ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
9359 ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
9360 ++ if ((__supported_pte_mask & _PAGE_NX))
9361 ++ pax_flags &= ~MF_PAX_SEGMEXEC;
9362 ++ else
9363 ++ pax_flags &= ~MF_PAX_PAGEEXEC;
9364 ++ }
9365 ++#endif
9366 ++
9367 + if (0 > pax_check_flags(&pax_flags))
9368 + return -EINVAL;
9369 +
9370 @@ -43212,7 +43080,7 @@ index 6ff96c6..3020df9 100644
9371 /*
9372 * These are the functions used to load ELF style executables and shared
9373 * libraries. There is no binary dependent code anywhere else.
9374 -@@ -544,6 +911,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
9375 +@@ -544,6 +873,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
9376 {
9377 unsigned int random_variable = 0;
9378
9379 @@ -43224,7 +43092,7 @@ index 6ff96c6..3020df9 100644
9380 if ((current->flags & PF_RANDOMIZE) &&
9381 !(current->personality & ADDR_NO_RANDOMIZE)) {
9382 random_variable = get_random_int() & STACK_RND_MASK;
9383 -@@ -562,7 +934,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9384 +@@ -562,7 +896,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9385 unsigned long load_addr = 0, load_bias = 0;
9386 int load_addr_set = 0;
9387 char * elf_interpreter = NULL;
9388 @@ -43233,7 +43101,7 @@ index 6ff96c6..3020df9 100644
9389 struct elf_phdr *elf_ppnt, *elf_phdata;
9390 unsigned long elf_bss, elf_brk;
9391 int retval, i;
9392 -@@ -572,11 +944,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9393 +@@ -572,11 +906,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9394 unsigned long start_code, end_code, start_data, end_data;
9395 unsigned long reloc_func_desc __maybe_unused = 0;
9396 int executable_stack = EXSTACK_DEFAULT;
9397 @@ -43246,7 +43114,7 @@ index 6ff96c6..3020df9 100644
9398
9399 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
9400 if (!loc) {
9401 -@@ -713,11 +1085,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9402 +@@ -713,11 +1047,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9403
9404 /* OK, This is the point of no return */
9405 current->flags &= ~PF_FORKNOEXEC;
9406 @@ -43271,7 +43139,7 @@ index 6ff96c6..3020df9 100644
9407 +
9408 + current->mm->def_flags = 0;
9409 +
9410 -+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
9411 ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
9412 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
9413 + send_sig(SIGKILL, current, 0);
9414 + goto out_free_dentry;
9415 @@ -43329,7 +43197,7 @@ index 6ff96c6..3020df9 100644
9416 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
9417 current->personality |= READ_IMPLIES_EXEC;
9418
9419 -@@ -808,6 +1250,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9420 +@@ -808,6 +1212,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9421 #else
9422 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
9423 #endif
9424 @@ -43350,7 +43218,7 @@ index 6ff96c6..3020df9 100644
9425 }
9426
9427 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
9428 -@@ -840,9 +1296,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9429 +@@ -840,9 +1258,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9430 * allowed task size. Note that p_filesz must always be
9431 * <= p_memsz so it is only necessary to check p_memsz.
9432 */
9433 @@ -43363,7 +43231,7 @@ index 6ff96c6..3020df9 100644
9434 /* set_brk can never work. Avoid overflows. */
9435 send_sig(SIGKILL, current, 0);
9436 retval = -EINVAL;
9437 -@@ -881,11 +1337,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9438 +@@ -881,11 +1299,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
9439 goto out_free_dentry;
9440 }
9441 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
9442 @@ -43407,7 +43275,7 @@ index 6ff96c6..3020df9 100644
9443 if (elf_interpreter) {
9444 unsigned long uninitialized_var(interp_map_addr);
9445
9446 -@@ -1098,7 +1583,7 @@ out:
9447 +@@ -1098,7 +1545,7 @@ out:
9448 * Decide what to dump of a segment, part, all or none.
9449 */
9450 static unsigned long vma_dump_size(struct vm_area_struct *vma,
9451 @@ -43416,7 +43284,7 @@ index 6ff96c6..3020df9 100644
9452 {
9453 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
9454
9455 -@@ -1132,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
9456 +@@ -1132,7 +1579,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
9457 if (vma->vm_file == NULL)
9458 return 0;
9459
9460 @@ -43425,7 +43293,7 @@ index 6ff96c6..3020df9 100644
9461 goto whole;
9462
9463 /*
9464 -@@ -1354,9 +1839,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
9465 +@@ -1354,9 +1801,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
9466 {
9467 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
9468 int i = 0;
9469 @@ -43437,7 +43305,7 @@ index 6ff96c6..3020df9 100644
9470 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
9471 }
9472
9473 -@@ -1862,14 +2347,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
9474 +@@ -1862,14 +2309,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
9475 }
9476
9477 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
9478 @@ -43454,7 +43322,7 @@ index 6ff96c6..3020df9 100644
9479 return size;
9480 }
9481
9482 -@@ -1963,7 +2448,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9483 +@@ -1963,7 +2410,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9484
9485 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
9486
9487 @@ -43463,7 +43331,7 @@ index 6ff96c6..3020df9 100644
9488 offset += elf_core_extra_data_size();
9489 e_shoff = offset;
9490
9491 -@@ -1977,10 +2462,12 @@ static int elf_core_dump(struct coredump_params *cprm)
9492 +@@ -1977,10 +2424,12 @@ static int elf_core_dump(struct coredump_params *cprm)
9493 offset = dataoff;
9494
9495 size += sizeof(*elf);
9496 @@ -43476,7 +43344,7 @@ index 6ff96c6..3020df9 100644
9497 if (size > cprm->limit
9498 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
9499 goto end_coredump;
9500 -@@ -1994,7 +2481,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9501 +@@ -1994,7 +2443,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9502 phdr.p_offset = offset;
9503 phdr.p_vaddr = vma->vm_start;
9504 phdr.p_paddr = 0;
9505 @@ -43485,7 +43353,7 @@ index 6ff96c6..3020df9 100644
9506 phdr.p_memsz = vma->vm_end - vma->vm_start;
9507 offset += phdr.p_filesz;
9508 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
9509 -@@ -2005,6 +2492,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9510 +@@ -2005,6 +2454,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9511 phdr.p_align = ELF_EXEC_PAGESIZE;
9512
9513 size += sizeof(phdr);
9514 @@ -43493,7 +43361,7 @@ index 6ff96c6..3020df9 100644
9515 if (size > cprm->limit
9516 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
9517 goto end_coredump;
9518 -@@ -2029,7 +2517,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9519 +@@ -2029,7 +2479,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9520 unsigned long addr;
9521 unsigned long end;
9522
9523 @@ -43502,7 +43370,7 @@ index 6ff96c6..3020df9 100644
9524
9525 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
9526 struct page *page;
9527 -@@ -2038,6 +2526,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9528 +@@ -2038,6 +2488,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9529 page = get_dump_page(addr);
9530 if (page) {
9531 void *kaddr = kmap(page);
9532 @@ -43510,7 +43378,7 @@ index 6ff96c6..3020df9 100644
9533 stop = ((size += PAGE_SIZE) > cprm->limit) ||
9534 !dump_write(cprm->file, kaddr,
9535 PAGE_SIZE);
9536 -@@ -2055,6 +2544,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9537 +@@ -2055,6 +2506,7 @@ static int elf_core_dump(struct coredump_params *cprm)
9538
9539 if (e_phnum == PN_XNUM) {
9540 size += sizeof(*shdr4extnum);
9541 @@ -43518,7 +43386,7 @@ index 6ff96c6..3020df9 100644
9542 if (size > cprm->limit
9543 || !dump_write(cprm->file, shdr4extnum,
9544 sizeof(*shdr4extnum)))
9545 -@@ -2075,6 +2565,97 @@ out:
9546 +@@ -2075,6 +2527,97 @@ out:
9547
9548 #endif /* CONFIG_ELF_CORE */
9549
9550 @@ -43791,51 +43659,6 @@ index cfb5543..1ae7347 100644
9551
9552 if (!del) {
9553 spin_lock(&rc->reloc_root_tree.lock);
9554 -diff --git a/fs/buffer.c b/fs/buffer.c
9555 -index c807931..4115eca 100644
9556 ---- a/fs/buffer.c
9557 -+++ b/fs/buffer.c
9558 -@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
9559 - static struct buffer_head *
9560 - __getblk_slow(struct block_device *bdev, sector_t block, int size)
9561 - {
9562 -+ int ret;
9563 -+ struct buffer_head *bh;
9564 -+
9565 - /* Size must be multiple of hard sectorsize */
9566 - if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
9567 - (size < 512 || size > PAGE_SIZE))) {
9568 -@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
9569 - return NULL;
9570 - }
9571 -
9572 -- for (;;) {
9573 -- struct buffer_head * bh;
9574 -- int ret;
9575 -+retry:
9576 -+ bh = __find_get_block(bdev, block, size);
9577 -+ if (bh)
9578 -+ return bh;
9579 -
9580 -+ ret = grow_buffers(bdev, block, size);
9581 -+ if (ret == 0) {
9582 -+ free_more_memory();
9583 -+ goto retry;
9584 -+ } else if (ret > 0) {
9585 - bh = __find_get_block(bdev, block, size);
9586 - if (bh)
9587 - return bh;
9588 --
9589 -- ret = grow_buffers(bdev, block, size);
9590 -- if (ret < 0)
9591 -- return NULL;
9592 -- if (ret == 0)
9593 -- free_more_memory();
9594 - }
9595 -+ return NULL;
9596 - }
9597 -
9598 - /*
9599 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
9600 index 622f469..e8d2d55 100644
9601 --- a/fs/cachefiles/bind.c
9602 @@ -44623,10 +44446,10 @@ index af11098..81e3bbe 100644
9603 /* Free the char* */
9604 kfree(buf);
9605 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
9606 -index 0dc5a3d..d3cdeea 100644
9607 +index de42310..867dddd 100644
9608 --- a/fs/ecryptfs/miscdev.c
9609 +++ b/fs/ecryptfs/miscdev.c
9610 -@@ -328,7 +328,7 @@ check_list:
9611 +@@ -338,7 +338,7 @@ check_list:
9612 goto out_unlock_msg_ctx;
9613 i = 5;
9614 if (msg_ctx->msg) {
9615 @@ -44657,24 +44480,8 @@ index 608c1c3..7d040a8 100644
9616 set_fs(fs_save);
9617 return rc;
9618 }
9619 -diff --git a/fs/eventpoll.c b/fs/eventpoll.c
9620 -index 4d9d3a4..a6f3763 100644
9621 ---- a/fs/eventpoll.c
9622 -+++ b/fs/eventpoll.c
9623 -@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
9624 - if (op == EPOLL_CTL_ADD) {
9625 - if (is_file_epoll(tfile)) {
9626 - error = -ELOOP;
9627 -- if (ep_loop_check(ep, tfile) != 0)
9628 -+ if (ep_loop_check(ep, tfile) != 0) {
9629 -+ clear_tfile_check_list();
9630 - goto error_tgt_fput;
9631 -+ }
9632 - } else
9633 - list_add(&tfile->f_tfile_llink, &tfile_check_list);
9634 - }
9635 diff --git a/fs/exec.c b/fs/exec.c
9636 -index 160cd2f..52c1678 100644
9637 +index 160cd2f..5cc2091 100644
9638 --- a/fs/exec.c
9639 +++ b/fs/exec.c
9640 @@ -55,12 +55,33 @@
9641 @@ -45194,7 +45001,7 @@ index 160cd2f..52c1678 100644
9642 cn->corename = kmalloc(cn->size, GFP_KERNEL);
9643 cn->used = 0;
9644
9645 -@@ -1815,6 +1948,228 @@ out:
9646 +@@ -1815,6 +1948,250 @@ out:
9647 return ispipe;
9648 }
9649
9650 @@ -45339,7 +45146,7 @@ index 160cd2f..52c1678 100644
9651 +
9652 +#ifdef CONFIG_PAX_USERCOPY
9653 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
9654 -+int object_is_on_stack(const void *obj, unsigned long len)
9655 ++static noinline int check_stack_object(const void *obj, unsigned long len)
9656 +{
9657 + const void * const stack = task_stack_page(current);
9658 + const void * const stackend = stack + THREAD_SIZE;
9659 @@ -45385,7 +45192,7 @@ index 160cd2f..52c1678 100644
9660 +#endif
9661 +}
9662 +
9663 -+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
9664 ++static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
9665 +{
9666 + if (current->signal->curr_ip)
9667 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
9668 @@ -45399,6 +45206,28 @@ index 160cd2f..52c1678 100644
9669 +}
9670 +#endif
9671 +
9672 ++void check_object_size(const void *ptr, unsigned long n, bool to)
9673 ++{
9674 ++
9675 ++#ifdef CONFIG_PAX_USERCOPY
9676 ++ const char *type;
9677 ++
9678 ++ if (!n)
9679 ++ return;
9680 ++
9681 ++ type = check_heap_object(ptr, n, to);
9682 ++ if (!type) {
9683 ++ if (check_stack_object(ptr, n) != -1)
9684 ++ return;
9685 ++ type = "<process stack>";
9686 ++ }
9687 ++
9688 ++ pax_report_usercopy(ptr, n, to, type);
9689 ++#endif
9690 ++
9691 ++}
9692 ++EXPORT_SYMBOL(check_object_size);
9693 ++
9694 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
9695 +void pax_track_stack(void)
9696 +{
9697 @@ -45423,7 +45252,7 @@ index 160cd2f..52c1678 100644
9698 static int zap_process(struct task_struct *start, int exit_code)
9699 {
9700 struct task_struct *t;
9701 -@@ -2026,17 +2381,17 @@ static void wait_for_dump_helpers(struct file *file)
9702 +@@ -2026,17 +2403,17 @@ static void wait_for_dump_helpers(struct file *file)
9703 pipe = file->f_path.dentry->d_inode->i_pipe;
9704
9705 pipe_lock(pipe);
9706 @@ -45446,7 +45275,7 @@ index 160cd2f..52c1678 100644
9707 pipe_unlock(pipe);
9708
9709 }
9710 -@@ -2097,7 +2452,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9711 +@@ -2097,7 +2474,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9712 int retval = 0;
9713 int flag = 0;
9714 int ispipe;
9715 @@ -45455,7 +45284,7 @@ index 160cd2f..52c1678 100644
9716 struct coredump_params cprm = {
9717 .signr = signr,
9718 .regs = regs,
9719 -@@ -2112,6 +2467,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9720 +@@ -2112,6 +2489,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9721
9722 audit_core_dumps(signr);
9723
9724 @@ -45465,7 +45294,7 @@ index 160cd2f..52c1678 100644
9725 binfmt = mm->binfmt;
9726 if (!binfmt || !binfmt->core_dump)
9727 goto fail;
9728 -@@ -2179,7 +2537,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9729 +@@ -2179,7 +2559,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9730 }
9731 cprm.limit = RLIM_INFINITY;
9732
9733 @@ -45474,7 +45303,7 @@ index 160cd2f..52c1678 100644
9734 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
9735 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
9736 task_tgid_vnr(current), current->comm);
9737 -@@ -2206,6 +2564,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9738 +@@ -2206,6 +2586,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
9739 } else {
9740 struct inode *inode;
9741
9742 @@ -45483,7 +45312,7 @@ index 160cd2f..52c1678 100644
9743 if (cprm.limit < binfmt->min_coredump)
9744 goto fail_unlock;
9745
9746 -@@ -2249,7 +2609,7 @@ close_fail:
9747 +@@ -2249,7 +2631,7 @@ close_fail:
9748 filp_close(cprm.file, NULL);
9749 fail_dropcount:
9750 if (ispipe)
9751 @@ -45492,7 +45321,7 @@ index 160cd2f..52c1678 100644
9752 fail_unlock:
9753 kfree(cn.corename);
9754 fail_corename:
9755 -@@ -2268,7 +2628,7 @@ fail:
9756 +@@ -2268,7 +2650,7 @@ fail:
9757 */
9758 int dump_write(struct file *file, const void *addr, int nr)
9759 {
9760 @@ -45740,27 +45569,10 @@ index 22764c7..86372c9 100644
9761 break;
9762 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
9763 diff --git a/fs/fifo.c b/fs/fifo.c
9764 -index b1a524d..3d7942c 100644
9765 +index cf6f434..3d7942c 100644
9766 --- a/fs/fifo.c
9767 +++ b/fs/fifo.c
9768 -@@ -14,7 +14,7 @@
9769 - #include <linux/sched.h>
9770 - #include <linux/pipe_fs_i.h>
9771 -
9772 --static void wait_for_partner(struct inode* inode, unsigned int *cnt)
9773 -+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
9774 - {
9775 - int cur = *cnt;
9776 -
9777 -@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
9778 - if (signal_pending(current))
9779 - break;
9780 - }
9781 -+ return cur == *cnt ? -ERESTARTSYS : 0;
9782 - }
9783 -
9784 - static void wake_up_partner(struct inode* inode)
9785 -@@ -58,17 +59,16 @@ static int fifo_open(struct inode *inode, struct file *filp)
9786 +@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
9787 */
9788 filp->f_op = &read_pipefifo_fops;
9789 pipe->r_counter++;
9790 @@ -45773,15 +45585,7 @@ index b1a524d..3d7942c 100644
9791 if ((filp->f_flags & O_NONBLOCK)) {
9792 /* suppress POLLHUP until we have
9793 * seen a writer */
9794 - filp->f_version = pipe->w_counter;
9795 - } else {
9796 -- wait_for_partner(inode, &pipe->w_counter);
9797 -- if(signal_pending(current))
9798 -+ if (wait_for_partner(inode, &pipe->w_counter))
9799 - goto err_rd;
9800 - }
9801 - }
9802 -@@ -81,17 +81,16 @@ static int fifo_open(struct inode *inode, struct file *filp)
9803 +@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
9804 * errno=ENXIO when there is no process reading the FIFO.
9805 */
9806 ret = -ENXIO;
9807 @@ -45796,14 +45600,11 @@ index b1a524d..3d7942c 100644
9808 wake_up_partner(inode);
9809
9810 - if (!pipe->readers) {
9811 -- wait_for_partner(inode, &pipe->r_counter);
9812 -- if (signal_pending(current))
9813 + if (!atomic_read(&pipe->readers)) {
9814 -+ if (wait_for_partner(inode, &pipe->r_counter))
9815 + if (wait_for_partner(inode, &pipe->r_counter))
9816 goto err_wr;
9817 }
9818 - break;
9819 -@@ -105,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
9820 +@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
9821 */
9822 filp->f_op = &rdwr_pipefifo_fops;
9823
9824 @@ -45818,7 +45619,7 @@ index b1a524d..3d7942c 100644
9825 wake_up_partner(inode);
9826 break;
9827
9828 -@@ -123,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
9829 +@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
9830 return 0;
9831
9832 err_rd:
9833 @@ -47344,10 +47145,10 @@ index cfd4959..a780959 100644
9834 kfree(s);
9835 }
9836 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
9837 -index 2d0ca24..c4b8676511 100644
9838 +index ebc2f4d..eb1c5cd 100644
9839 --- a/fs/hugetlbfs/inode.c
9840 +++ b/fs/hugetlbfs/inode.c
9841 -@@ -908,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
9842 +@@ -896,7 +896,7 @@ static struct file_system_type hugetlbfs_fs_type = {
9843 .kill_sb = kill_litter_super,
9844 };
9845
9846 @@ -47418,7 +47219,7 @@ index b09e51d..e482afa 100644
9847
9848 /*
9849 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
9850 -index a44eff0..462e07d 100644
9851 +index a44eff076..462e07d 100644
9852 --- a/fs/jfs/super.c
9853 +++ b/fs/jfs/super.c
9854 @@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
9855 @@ -47477,7 +47278,7 @@ index 8392cb8..80d6193 100644
9856 memcpy(c->data, &cookie, 4);
9857 c->len=4;
9858 diff --git a/fs/locks.c b/fs/locks.c
9859 -index 0d68f1f..c3dacf2 100644
9860 +index 6a64f15..c3dacf2 100644
9861 --- a/fs/locks.c
9862 +++ b/fs/locks.c
9863 @@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
9864 @@ -47507,15 +47308,6 @@ index 0d68f1f..c3dacf2 100644
9865 {
9866 struct file_lock *fl = locks_alloc_lock();
9867 int error = -ENOMEM;
9868 -@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
9869 - case F_WRLCK:
9870 - return generic_add_lease(filp, arg, flp);
9871 - default:
9872 -- BUG();
9873 -+ return -EINVAL;
9874 - }
9875 - }
9876 - EXPORT_SYMBOL(generic_setlease);
9877 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
9878 return;
9879
9880 @@ -48280,19 +48072,6 @@ index c587e2d..3641eaa 100644
9881
9882 -const struct inode_operations ntfs_empty_inode_ops = {};
9883 +const struct inode_operations ntfs_empty_inode_ops __read_only;
9884 -diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
9885 -index 07ee5b4..1c7d45e 100644
9886 ---- a/fs/ocfs2/file.c
9887 -+++ b/fs/ocfs2/file.c
9888 -@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
9889 - if (ret < 0)
9890 - mlog_errno(ret);
9891 -
9892 -- if (file->f_flags & O_SYNC)
9893 -+ if (file && (file->f_flags & O_SYNC))
9894 - handle->h_sync = 1;
9895 -
9896 - ocfs2_commit_trans(osb, handle);
9897 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
9898 index 210c352..a174f83 100644
9899 --- a/fs/ocfs2/localalloc.c
9900 @@ -60572,7 +60351,7 @@ index 6cd5b64..f620d2d 100644
9901 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
9902
9903 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
9904 -index b7babf0..71e4e74 100644
9905 +index b7babf0..3ba8aee 100644
9906 --- a/include/asm-generic/atomic-long.h
9907 +++ b/include/asm-generic/atomic-long.h
9908 @@ -22,6 +22,12 @@
9909 @@ -60825,7 +60604,7 @@ index b7babf0..71e4e74 100644
9910 static inline long atomic_long_dec_return(atomic_long_t *l)
9911 {
9912 atomic_t *v = (atomic_t *)l;
9913 -@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
9914 +@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
9915
9916 #endif /* BITS_PER_LONG == 64 */
9917
9918 @@ -60843,6 +60622,10 @@ index b7babf0..71e4e74 100644
9919 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
9920 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
9921 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
9922 ++#ifdef CONFIG_X86
9923 ++ atomic_clear_mask_unchecked(0, NULL);
9924 ++ atomic_set_mask_unchecked(0, NULL);
9925 ++#endif
9926 +
9927 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
9928 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
9929 @@ -60864,6 +60647,8 @@ index b7babf0..71e4e74 100644
9930 +#define atomic_dec_unchecked(v) atomic_dec(v)
9931 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
9932 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
9933 ++#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
9934 ++#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
9935 +
9936 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
9937 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
9938 @@ -60875,6 +60660,19 @@ index b7babf0..71e4e74 100644
9939 +#endif
9940 +
9941 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
9942 +diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
9943 +index e37963c..6f5b60b 100644
9944 +--- a/include/asm-generic/atomic.h
9945 ++++ b/include/asm-generic/atomic.h
9946 +@@ -158,7 +158,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9947 + * Atomically clears the bits set in @mask from @v
9948 + */
9949 + #ifndef atomic_clear_mask
9950 +-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
9951 ++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9952 + {
9953 + unsigned long flags;
9954 +
9955 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
9956 index b18ce4f..2ee2843 100644
9957 --- a/include/asm-generic/atomic64.h
9958 @@ -61358,10 +61156,10 @@ index 04ffb2e..6799180 100644
9959 extern struct cleancache_ops
9960 cleancache_register_ops(struct cleancache_ops *ops);
9961 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
9962 -index dfadc96..d90deca 100644
9963 +index dfadc96..441a641 100644
9964 --- a/include/linux/compiler-gcc4.h
9965 +++ b/include/linux/compiler-gcc4.h
9966 -@@ -31,6 +31,15 @@
9967 +@@ -31,6 +31,20 @@
9968
9969
9970 #if __GNUC_MINOR__ >= 5
9971 @@ -61374,10 +61172,15 @@ index dfadc96..d90deca 100644
9972 +#ifdef SIZE_OVERFLOW_PLUGIN
9973 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
9974 +#endif
9975 ++
9976 ++#ifdef LATENT_ENTROPY_PLUGIN
9977 ++#define __latent_entropy __attribute__((latent_entropy))
9978 ++#endif
9979 ++
9980 /*
9981 * Mark a position in code as unreachable. This can be used to
9982 * suppress control flow warnings after asm blocks that transfer
9983 -@@ -46,6 +55,11 @@
9984 +@@ -46,6 +60,11 @@
9985 #define __noclone __attribute__((__noclone__))
9986
9987 #endif
9988 @@ -61390,7 +61193,7 @@ index dfadc96..d90deca 100644
9989
9990 #if __GNUC_MINOR__ > 0
9991 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
9992 -index 320d6c9..1221a6b 100644
9993 +index 320d6c9..066b6d5 100644
9994 --- a/include/linux/compiler.h
9995 +++ b/include/linux/compiler.h
9996 @@ -5,31 +5,62 @@
9997 @@ -61466,7 +61269,7 @@ index 320d6c9..1221a6b 100644
9998 #endif
9999
10000 #ifdef __KERNEL__
10001 -@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10002 +@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10003 # define __attribute_const__ /* unimplemented */
10004 #endif
10005
10006 @@ -61481,10 +61284,15 @@ index 320d6c9..1221a6b 100644
10007 +#ifndef __size_overflow
10008 +# define __size_overflow(...)
10009 +#endif
10010 ++
10011 ++#ifndef __latent_entropy
10012 ++# define __latent_entropy
10013 ++#endif
10014 ++
10015 /*
10016 * Tell gcc if a function is cold. The compiler will assume any path
10017 * directly leading to the call is unlikely.
10018 -@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10019 +@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10020 #define __cold
10021 #endif
10022
10023 @@ -61507,7 +61315,7 @@ index 320d6c9..1221a6b 100644
10024 /* Simple shorthand for a section definition */
10025 #ifndef __section
10026 # define __section(S) __attribute__ ((__section__(#S)))
10027 -@@ -306,6 +366,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10028 +@@ -306,6 +371,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
10029 * use is to mediate communication between process-level code and irq/NMI
10030 * handlers, all running on the same CPU.
10031 */
10032 @@ -61844,6 +61652,49 @@ index 4eec461..84c73cf 100644
10033 struct disk_events *ev;
10034 #ifdef CONFIG_BLK_DEV_INTEGRITY
10035 struct blk_integrity *integrity;
10036 +diff --git a/include/linux/gfp.h b/include/linux/gfp.h
10037 +index 3a76faf..c0592c7 100644
10038 +--- a/include/linux/gfp.h
10039 ++++ b/include/linux/gfp.h
10040 +@@ -37,6 +37,12 @@ struct vm_area_struct;
10041 + #define ___GFP_NO_KSWAPD 0x400000u
10042 + #define ___GFP_OTHER_NODE 0x800000u
10043 +
10044 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10045 ++#define ___GFP_USERCOPY 0x1000000u
10046 ++#else
10047 ++#define ___GFP_USERCOPY 0
10048 ++#endif
10049 ++
10050 + /*
10051 + * GFP bitmasks..
10052 + *
10053 +@@ -85,6 +91,7 @@ struct vm_area_struct;
10054 +
10055 + #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
10056 + #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
10057 ++#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
10058 +
10059 + /*
10060 + * This may seem redundant, but it's a way of annotating false positives vs.
10061 +@@ -92,7 +99,7 @@ struct vm_area_struct;
10062 + */
10063 + #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
10064 +
10065 +-#define __GFP_BITS_SHIFT 24 /* Room for N __GFP_FOO bits */
10066 ++#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
10067 + #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
10068 +
10069 + /* This equals 0, but use constants in case they ever change */
10070 +@@ -146,6 +153,8 @@ struct vm_area_struct;
10071 + /* 4GB DMA on some platforms */
10072 + #define GFP_DMA32 __GFP_DMA32
10073 +
10074 ++#define GFP_USERCOPY __GFP_USERCOPY
10075 ++
10076 + /* Convert GFP flags to their corresponding migrate type */
10077 + static inline int allocflags_to_migratetype(gfp_t gfp_flags)
10078 + {
10079 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
10080 new file mode 100644
10081 index 0000000..c938b1f
10082 @@ -62998,10 +62849,54 @@ index a6deef4..c56a7f2 100644
10083 and pointers */
10084 #endif
10085 diff --git a/include/linux/init.h b/include/linux/init.h
10086 -index 9146f39..885354d 100644
10087 +index 9146f39..e19693b 100644
10088 --- a/include/linux/init.h
10089 +++ b/include/linux/init.h
10090 -@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
10091 +@@ -38,9 +38,15 @@
10092 + * Also note, that this data cannot be "const".
10093 + */
10094 +
10095 ++#ifdef MODULE
10096 ++#define add_latent_entropy
10097 ++#else
10098 ++#define add_latent_entropy __latent_entropy
10099 ++#endif
10100 ++
10101 + /* These are for everybody (although not all archs will actually
10102 + discard it in modules) */
10103 +-#define __init __section(.init.text) __cold notrace
10104 ++#define __init __section(.init.text) __cold notrace add_latent_entropy
10105 + #define __initdata __section(.init.data)
10106 + #define __initconst __section(.init.rodata)
10107 + #define __exitdata __section(.exit.data)
10108 +@@ -82,7 +88,7 @@
10109 + #define __exit __section(.exit.text) __exitused __cold notrace
10110 +
10111 + /* Used for HOTPLUG */
10112 +-#define __devinit __section(.devinit.text) __cold notrace
10113 ++#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
10114 + #define __devinitdata __section(.devinit.data)
10115 + #define __devinitconst __section(.devinit.rodata)
10116 + #define __devexit __section(.devexit.text) __exitused __cold notrace
10117 +@@ -90,7 +96,7 @@
10118 + #define __devexitconst __section(.devexit.rodata)
10119 +
10120 + /* Used for HOTPLUG_CPU */
10121 +-#define __cpuinit __section(.cpuinit.text) __cold notrace
10122 ++#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
10123 + #define __cpuinitdata __section(.cpuinit.data)
10124 + #define __cpuinitconst __section(.cpuinit.rodata)
10125 + #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
10126 +@@ -98,7 +104,7 @@
10127 + #define __cpuexitconst __section(.cpuexit.rodata)
10128 +
10129 + /* Used for MEMORY_HOTPLUG */
10130 +-#define __meminit __section(.meminit.text) __cold notrace
10131 ++#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
10132 + #define __meminitdata __section(.meminit.data)
10133 + #define __meminitconst __section(.meminit.rodata)
10134 + #define __memexit __section(.memexit.text) __exitused __cold notrace
10135 +@@ -293,13 +299,13 @@ void __init parse_early_options(char *cmdline);
10136
10137 /* Each module must use one module_init(). */
10138 #define module_init(initfn) \
10139 @@ -63517,7 +63412,7 @@ index 1d1b1e1..2a13c78 100644
10140
10141 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
10142 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
10143 -index 188cb2f..d401c76 100644
10144 +index 905b1e1..d401c76 100644
10145 --- a/include/linux/mmzone.h
10146 +++ b/include/linux/mmzone.h
10147 @@ -369,7 +369,7 @@ struct zone {
10148 @@ -63529,15 +63424,6 @@ index 188cb2f..d401c76 100644
10149
10150 /*
10151 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
10152 -@@ -652,7 +652,7 @@ typedef struct pglist_data {
10153 - range, including holes */
10154 - int node_id;
10155 - wait_queue_head_t kswapd_wait;
10156 -- struct task_struct *kswapd;
10157 -+ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
10158 - int kswapd_max_order;
10159 - enum zone_type classzone_idx;
10160 - } pg_data_t;
10161 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
10162 index 468819c..17b9db3 100644
10163 --- a/include/linux/mod_devicetable.h
10164 @@ -64007,10 +63893,21 @@ index 800f113..12c82ec 100644
10165 }
10166
10167 diff --git a/include/linux/random.h b/include/linux/random.h
10168 -index 8f74538..02a1012 100644
10169 +index 8f74538..de61694 100644
10170 --- a/include/linux/random.h
10171 +++ b/include/linux/random.h
10172 -@@ -69,12 +69,17 @@ void srandom32(u32 seed);
10173 +@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
10174 + unsigned int value);
10175 + extern void add_interrupt_randomness(int irq);
10176 +
10177 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
10178 ++extern void transfer_latent_entropy(void);
10179 ++#endif
10180 ++
10181 + extern void get_random_bytes(void *buf, int nbytes);
10182 + void generate_random_uuid(unsigned char uuid_out[16]);
10183 +
10184 +@@ -69,12 +73,17 @@ void srandom32(u32 seed);
10185
10186 u32 prandom32(struct rnd_state *);
10187
10188 @@ -64148,7 +64045,7 @@ index 2148b12..519b820 100644
10189
10190 static inline void anon_vma_merge(struct vm_area_struct *vma,
10191 diff --git a/include/linux/sched.h b/include/linux/sched.h
10192 -index 1c4f3e9..342eb1f 100644
10193 +index 5afa2a3..98df553 100644
10194 --- a/include/linux/sched.h
10195 +++ b/include/linux/sched.h
10196 @@ -101,6 +101,7 @@ struct bio_list;
10197 @@ -64332,12 +64229,12 @@ index 1c4f3e9..342eb1f 100644
10198 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
10199 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
10200 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
10201 -+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
10202 ++extern void check_object_size(const void *ptr, unsigned long n, bool to);
10203 +
10204 /* Future-safe accessor for struct task_struct's cpus_allowed. */
10205 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
10206
10207 -@@ -2081,7 +2173,9 @@ void yield(void);
10208 +@@ -2089,7 +2181,9 @@ void yield(void);
10209 extern struct exec_domain default_exec_domain;
10210
10211 union thread_union {
10212 @@ -64347,7 +64244,7 @@ index 1c4f3e9..342eb1f 100644
10213 unsigned long stack[THREAD_SIZE/sizeof(long)];
10214 };
10215
10216 -@@ -2114,6 +2208,7 @@ extern struct pid_namespace init_pid_ns;
10217 +@@ -2122,6 +2216,7 @@ extern struct pid_namespace init_pid_ns;
10218 */
10219
10220 extern struct task_struct *find_task_by_vpid(pid_t nr);
10221 @@ -64355,7 +64252,7 @@ index 1c4f3e9..342eb1f 100644
10222 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
10223 struct pid_namespace *ns);
10224
10225 -@@ -2235,6 +2330,12 @@ static inline void mmdrop(struct mm_struct * mm)
10226 +@@ -2243,6 +2338,12 @@ static inline void mmdrop(struct mm_struct * mm)
10227 extern void mmput(struct mm_struct *);
10228 /* Grab a reference to a task's mm, if it is not already going away */
10229 extern struct mm_struct *get_task_mm(struct task_struct *task);
10230 @@ -64368,7 +64265,7 @@ index 1c4f3e9..342eb1f 100644
10231 /* Remove the current tasks stale references to the old mm_struct */
10232 extern void mm_release(struct task_struct *, struct mm_struct *);
10233 /* Allocate a new mm structure and copy contents from tsk->mm */
10234 -@@ -2251,7 +2352,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
10235 +@@ -2259,7 +2360,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
10236 extern void exit_itimers(struct signal_struct *);
10237 extern void flush_itimer_signals(void);
10238
10239 @@ -64377,7 +64274,7 @@ index 1c4f3e9..342eb1f 100644
10240
10241 extern void daemonize(const char *, ...);
10242 extern int allow_signal(int);
10243 -@@ -2416,13 +2517,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
10244 +@@ -2424,9 +2525,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
10245
10246 #endif
10247
10248 @@ -64389,14 +64286,6 @@ index 1c4f3e9..342eb1f 100644
10249
10250 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
10251 }
10252 -
10253 -+#ifdef CONFIG_PAX_USERCOPY
10254 -+extern int object_is_on_stack(const void *obj, unsigned long len);
10255 -+#endif
10256 -+
10257 - extern void thread_info_cache_init(void);
10258 -
10259 - #ifdef CONFIG_DEBUG_STACK_USAGE
10260 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
10261 index 899fbb4..1cb4138 100644
10262 --- a/include/linux/screen_info.h
10263 @@ -64461,10 +64350,10 @@ index 92808b8..c28cac4 100644
10264
10265 /* shm_mode upper byte flags */
10266 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
10267 -index bdb4590..961638c 100644
10268 +index 53dc7e7..bb5915f 100644
10269 --- a/include/linux/skbuff.h
10270 +++ b/include/linux/skbuff.h
10271 -@@ -643,7 +643,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
10272 +@@ -640,7 +640,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
10273 */
10274 static inline int skb_queue_empty(const struct sk_buff_head *list)
10275 {
10276 @@ -64473,7 +64362,7 @@ index bdb4590..961638c 100644
10277 }
10278
10279 /**
10280 -@@ -656,7 +656,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
10281 +@@ -653,7 +653,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
10282 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
10283 const struct sk_buff *skb)
10284 {
10285 @@ -64482,7 +64371,7 @@ index bdb4590..961638c 100644
10286 }
10287
10288 /**
10289 -@@ -669,7 +669,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
10290 +@@ -666,7 +666,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
10291 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
10292 const struct sk_buff *skb)
10293 {
10294 @@ -64491,7 +64380,7 @@ index bdb4590..961638c 100644
10295 }
10296
10297 /**
10298 -@@ -1546,7 +1546,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
10299 +@@ -1543,7 +1543,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
10300 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
10301 */
10302 #ifndef NET_SKB_PAD
10303 @@ -64501,7 +64390,7 @@ index bdb4590..961638c 100644
10304
10305 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
10306 diff --git a/include/linux/slab.h b/include/linux/slab.h
10307 -index 573c809..07e1f43 100644
10308 +index 573c809..a6e62c9 100644
10309 --- a/include/linux/slab.h
10310 +++ b/include/linux/slab.h
10311 @@ -11,12 +11,20 @@
10312 @@ -64516,7 +64405,7 @@ index 573c809..07e1f43 100644
10313 */
10314 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
10315 +
10316 -+#ifdef CONFIG_PAX_USERCOPY
10317 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10318 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
10319 +#else
10320 +#define SLAB_USERCOPY 0x00000000UL
10321 @@ -64542,7 +64431,7 @@ index 573c809..07e1f43 100644
10322
10323 /*
10324 * struct kmem_cache related prototypes
10325 -@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
10326 +@@ -156,11 +167,13 @@ unsigned int kmem_cache_size(struct kmem_cache *);
10327 /*
10328 * Common kmalloc functions provided by all allocators
10329 */
10330 @@ -64553,11 +64442,12 @@ index 573c809..07e1f43 100644
10331 void kfree(const void *);
10332 void kzfree(const void *);
10333 size_t ksize(const void *);
10334 -+void check_object_size(const void *ptr, unsigned long n, bool to);
10335 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to);
10336 ++bool is_usercopy_object(const void *ptr);
10337
10338 /*
10339 * Allocator specific definitions. These are mainly used to establish optimized
10340 -@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
10341 +@@ -287,7 +300,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
10342 */
10343 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
10344 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
10345 @@ -64566,7 +64456,7 @@ index 573c809..07e1f43 100644
10346 #define kmalloc_track_caller(size, flags) \
10347 __kmalloc_track_caller(size, flags, _RET_IP_)
10348 #else
10349 -@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
10350 +@@ -306,7 +319,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
10351 */
10352 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
10353 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
10354 @@ -64576,7 +64466,7 @@ index 573c809..07e1f43 100644
10355 __kmalloc_node_track_caller(size, flags, node, \
10356 _RET_IP_)
10357 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
10358 -index d00e0ba..d61fb1f 100644
10359 +index d00e0ba..f75c968 100644
10360 --- a/include/linux/slab_def.h
10361 +++ b/include/linux/slab_def.h
10362 @@ -68,10 +68,10 @@ struct kmem_cache {
10363 @@ -64594,7 +64484,16 @@ index d00e0ba..d61fb1f 100644
10364
10365 /*
10366 * If debugging is enabled, then the allocator can add additional
10367 -@@ -109,7 +109,7 @@ struct cache_sizes {
10368 +@@ -105,11 +105,16 @@ struct cache_sizes {
10369 + #ifdef CONFIG_ZONE_DMA
10370 + struct kmem_cache *cs_dmacachep;
10371 + #endif
10372 ++
10373 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10374 ++ struct kmem_cache *cs_usercopycachep;
10375 ++#endif
10376 ++
10377 + };
10378 extern struct cache_sizes malloc_sizes[];
10379
10380 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
10381 @@ -64603,7 +64502,7 @@ index d00e0ba..d61fb1f 100644
10382
10383 #ifdef CONFIG_TRACING
10384 extern void *kmem_cache_alloc_trace(size_t size,
10385 -@@ -127,6 +127,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
10386 +@@ -127,6 +132,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
10387 }
10388 #endif
10389
10390 @@ -64611,7 +64510,21 @@ index d00e0ba..d61fb1f 100644
10391 static __always_inline void *kmalloc(size_t size, gfp_t flags)
10392 {
10393 struct kmem_cache *cachep;
10394 -@@ -162,7 +163,7 @@ found:
10395 +@@ -152,6 +158,13 @@ found:
10396 + cachep = malloc_sizes[i].cs_dmacachep;
10397 + else
10398 + #endif
10399 ++
10400 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10401 ++ if (flags & GFP_USERCOPY)
10402 ++ cachep = malloc_sizes[i].cs_usercopycachep;
10403 ++ else
10404 ++#endif
10405 ++
10406 + cachep = malloc_sizes[i].cs_cachep;
10407 +
10408 + ret = kmem_cache_alloc_trace(size, cachep, flags);
10409 +@@ -162,7 +175,7 @@ found:
10410 }
10411
10412 #ifdef CONFIG_NUMA
10413 @@ -64620,7 +64533,7 @@ index d00e0ba..d61fb1f 100644
10414 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
10415
10416 #ifdef CONFIG_TRACING
10417 -@@ -181,6 +182,7 @@ kmem_cache_alloc_node_trace(size_t size,
10418 +@@ -181,6 +194,7 @@ kmem_cache_alloc_node_trace(size_t size,
10419 }
10420 #endif
10421
10422 @@ -64628,6 +64541,20 @@ index d00e0ba..d61fb1f 100644
10423 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
10424 {
10425 struct kmem_cache *cachep;
10426 +@@ -205,6 +219,13 @@ found:
10427 + cachep = malloc_sizes[i].cs_dmacachep;
10428 + else
10429 + #endif
10430 ++
10431 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10432 ++ if (flags & GFP_USERCOPY)
10433 ++ cachep = malloc_sizes[i].cs_usercopycachep;
10434 ++ else
10435 ++#endif
10436 ++
10437 + cachep = malloc_sizes[i].cs_cachep;
10438 +
10439 + return kmem_cache_alloc_node_trace(size, cachep, flags, node);
10440 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
10441 index 0ec00b3..65e7e0e 100644
10442 --- a/include/linux/slob_def.h
10443 @@ -66182,7 +66109,7 @@ index 2531811..040d4d4 100644
10444 next_state = Reset;
10445 return 0;
10446 diff --git a/init/main.c b/init/main.c
10447 -index cb08fea2..f5b850d 100644
10448 +index cb08fea2..e9a9598 100644
10449 --- a/init/main.c
10450 +++ b/init/main.c
10451 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
10452 @@ -66272,7 +66199,39 @@ index cb08fea2..f5b850d 100644
10453 }
10454
10455 return ret;
10456 -@@ -821,7 +867,7 @@ static int __init kernel_init(void * unused)
10457 +@@ -711,8 +757,14 @@ static void __init do_initcalls(void)
10458 + {
10459 + initcall_t *fn;
10460 +
10461 +- for (fn = __early_initcall_end; fn < __initcall_end; fn++)
10462 ++ for (fn = __early_initcall_end; fn < __initcall_end; fn++) {
10463 + do_one_initcall(*fn);
10464 ++
10465 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
10466 ++ transfer_latent_entropy();
10467 ++#endif
10468 ++
10469 ++ }
10470 + }
10471 +
10472 + /*
10473 +@@ -738,8 +790,14 @@ static void __init do_pre_smp_initcalls(void)
10474 + {
10475 + initcall_t *fn;
10476 +
10477 +- for (fn = __initcall_start; fn < __early_initcall_end; fn++)
10478 ++ for (fn = __initcall_start; fn < __early_initcall_end; fn++) {
10479 + do_one_initcall(*fn);
10480 ++
10481 ++#ifdef CONFIG_PAX_LATENT_ENTROPY
10482 ++ transfer_latent_entropy();
10483 ++#endif
10484 ++
10485 ++ }
10486 + }
10487 +
10488 + static void run_init_process(const char *init_filename)
10489 +@@ -821,7 +879,7 @@ static int __init kernel_init(void * unused)
10490 do_basic_setup();
10491
10492 /* Open the /dev/console on the rootfs, this should never fail */
10493 @@ -66281,7 +66240,7 @@ index cb08fea2..f5b850d 100644
10494 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
10495
10496 (void) sys_dup(0);
10497 -@@ -834,11 +880,13 @@ static int __init kernel_init(void * unused)
10498 +@@ -834,11 +892,13 @@ static int __init kernel_init(void * unused)
10499 if (!ramdisk_execute_command)
10500 ramdisk_execute_command = "/init";
10501
10502 @@ -67635,18 +67594,18 @@ index 9b22d03..6295b62 100644
10503 prev->next = info->next;
10504 else
10505 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
10506 -index ae34bf5..4e2f3d0 100644
10507 +index 6db7a5e..25b6648 100644
10508 --- a/kernel/hrtimer.c
10509 +++ b/kernel/hrtimer.c
10510 -@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
10511 +@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
10512 local_irq_restore(flags);
10513 }
10514
10515 -static void run_hrtimer_softirq(struct softirq_action *h)
10516 +static void run_hrtimer_softirq(void)
10517 {
10518 - hrtimer_peek_ahead_timers();
10519 - }
10520 + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
10521 +
10522 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
10523 index 66ff710..794bc5a 100644
10524 --- a/kernel/jump_label.c
10525 @@ -69889,10 +69848,10 @@ index 3d9f31c..7fefc9e 100644
10526
10527 default:
10528 diff --git a/kernel/sched.c b/kernel/sched.c
10529 -index 576a27f..b8f518c 100644
10530 +index 52ac69b..b102f7f 100644
10531 --- a/kernel/sched.c
10532 +++ b/kernel/sched.c
10533 -@@ -5097,6 +5097,8 @@ int can_nice(const struct task_struct *p, const int nice)
10534 +@@ -5227,6 +5227,8 @@ int can_nice(const struct task_struct *p, const int nice)
10535 /* convert nice value [19,-20] to rlimit style value [1,40] */
10536 int nice_rlim = 20 - nice;
10537
10538 @@ -69901,7 +69860,7 @@ index 576a27f..b8f518c 100644
10539 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
10540 capable(CAP_SYS_NICE));
10541 }
10542 -@@ -5130,7 +5132,8 @@ SYSCALL_DEFINE1(nice, int, increment)
10543 +@@ -5260,7 +5262,8 @@ SYSCALL_DEFINE1(nice, int, increment)
10544 if (nice > 19)
10545 nice = 19;
10546
10547 @@ -69911,7 +69870,7 @@ index 576a27f..b8f518c 100644
10548 return -EPERM;
10549
10550 retval = security_task_setnice(current, nice);
10551 -@@ -5287,6 +5290,7 @@ recheck:
10552 +@@ -5417,6 +5420,7 @@ recheck:
10553 unsigned long rlim_rtprio =
10554 task_rlimit(p, RLIMIT_RTPRIO);
10555
10556 @@ -70706,7 +70665,7 @@ index fd4a7b1..fae5c2a 100644
10557 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
10558 tick_broadcast_clear_oneshot(cpu);
10559 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
10560 -index 2378413..be455fd 100644
10561 +index 03e67d4..21ae77b 100644
10562 --- a/kernel/time/timekeeping.c
10563 +++ b/kernel/time/timekeeping.c
10564 @@ -14,6 +14,7 @@
10565 @@ -70717,7 +70676,7 @@ index 2378413..be455fd 100644
10566 #include <linux/syscore_ops.h>
10567 #include <linux/clocksource.h>
10568 #include <linux/jiffies.h>
10569 -@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
10570 +@@ -385,6 +386,8 @@ int do_settimeofday(const struct timespec *tv)
10571 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
10572 return -EINVAL;
10573
10574 @@ -71576,10 +71535,10 @@ index 8f005e9..1cb1036 100644
10575 /* if an huge pmd materialized from under us just retry later */
10576 if (unlikely(pmd_trans_huge(*pmd)))
10577 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
10578 -index 5f5c545..c8312c8 100644
10579 +index 7c535b0..1a2d14f 100644
10580 --- a/mm/hugetlb.c
10581 +++ b/mm/hugetlb.c
10582 -@@ -2356,6 +2356,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
10583 +@@ -2435,6 +2435,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
10584 return 1;
10585 }
10586
10587 @@ -71607,7 +71566,7 @@ index 5f5c545..c8312c8 100644
10588 /*
10589 * Hugetlb_cow() should be called with page lock of the original hugepage held.
10590 */
10591 -@@ -2458,6 +2479,11 @@ retry_avoidcopy:
10592 +@@ -2537,6 +2558,11 @@ retry_avoidcopy:
10593 make_huge_pte(vma, new_page, 1));
10594 page_remove_rmap(old_page);
10595 hugepage_add_new_anon_rmap(new_page, vma, address);
10596 @@ -71619,7 +71578,7 @@ index 5f5c545..c8312c8 100644
10597 /* Make the old page be freed below */
10598 new_page = old_page;
10599 mmu_notifier_invalidate_range_end(mm,
10600 -@@ -2609,6 +2635,10 @@ retry:
10601 +@@ -2688,6 +2714,10 @@ retry:
10602 && (vma->vm_flags & VM_SHARED)));
10603 set_huge_pte_at(mm, address, ptep, new_pte);
10604
10605 @@ -71630,7 +71589,7 @@ index 5f5c545..c8312c8 100644
10606 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
10607 /* Optimization, do the COW without a second fault */
10608 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
10609 -@@ -2638,6 +2668,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
10610 +@@ -2717,6 +2747,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
10611 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
10612 struct hstate *h = hstate_vma(vma);
10613
10614 @@ -71641,7 +71600,7 @@ index 5f5c545..c8312c8 100644
10615 ptep = huge_pte_offset(mm, address);
10616 if (ptep) {
10617 entry = huge_ptep_get(ptep);
10618 -@@ -2649,6 +2683,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
10619 +@@ -2728,6 +2762,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
10620 VM_FAULT_SET_HINDEX(h - hstates);
10621 }
10622
10623 @@ -72629,7 +72588,7 @@ index 177aca4..ab3a744 100644
10624 err = -EPERM;
10625 goto out;
10626 diff --git a/mm/mlock.c b/mm/mlock.c
10627 -index 4f4f53b..9511904 100644
10628 +index 4f4f53b..de8e432 100644
10629 --- a/mm/mlock.c
10630 +++ b/mm/mlock.c
10631 @@ -13,6 +13,7 @@
10632 @@ -72640,6 +72599,15 @@ index 4f4f53b..9511904 100644
10633 #include <linux/sched.h>
10634 #include <linux/export.h>
10635 #include <linux/rmap.h>
10636 +@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
10637 + {
10638 + unsigned long nstart, end, tmp;
10639 + struct vm_area_struct * vma, * prev;
10640 +- int error;
10641 ++ int error = 0;
10642 +
10643 + VM_BUG_ON(start & ~PAGE_MASK);
10644 + VM_BUG_ON(len != PAGE_ALIGN(len));
10645 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
10646 return -EINVAL;
10647 if (end == start)
10648 @@ -74723,7 +74691,7 @@ index 7a82174..75d1c8b 100644
10649 return -ENOMEM;
10650
10651 diff --git a/mm/slab.c b/mm/slab.c
10652 -index 83311c9a..fcf8f86 100644
10653 +index 83311c9a..2449631 100644
10654 --- a/mm/slab.c
10655 +++ b/mm/slab.c
10656 @@ -151,7 +151,7 @@
10657 @@ -74777,16 +74745,36 @@ index 83311c9a..fcf8f86 100644
10658 {
10659 u32 offset = (obj - slab->s_mem);
10660 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
10661 -@@ -564,7 +564,7 @@ struct cache_names {
10662 +@@ -559,12 +559,13 @@ EXPORT_SYMBOL(malloc_sizes);
10663 + struct cache_names {
10664 + char *name;
10665 + char *name_dma;
10666 ++ char *name_usercopy;
10667 + };
10668 +
10669 static struct cache_names __initdata cache_names[] = {
10670 - #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
10671 +-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
10672 ++#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
10673 #include <linux/kmalloc_sizes.h>
10674 - {NULL,}
10675 + {NULL}
10676 #undef CACHE
10677 };
10678
10679 -@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
10680 +@@ -752,6 +753,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
10681 + if (unlikely(gfpflags & GFP_DMA))
10682 + return csizep->cs_dmacachep;
10683 + #endif
10684 ++
10685 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10686 ++ if (unlikely(gfpflags & GFP_USERCOPY))
10687 ++ return csizep->cs_usercopycachep;
10688 ++#endif
10689 ++
10690 + return csizep->cs_cachep;
10691 + }
10692 +
10693 +@@ -1572,7 +1579,7 @@ void __init kmem_cache_init(void)
10694 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
10695 sizes[INDEX_AC].cs_size,
10696 ARCH_KMALLOC_MINALIGN,
10697 @@ -74795,7 +74783,7 @@ index 83311c9a..fcf8f86 100644
10698 NULL);
10699
10700 if (INDEX_AC != INDEX_L3) {
10701 -@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
10702 +@@ -1580,7 +1587,7 @@ void __init kmem_cache_init(void)
10703 kmem_cache_create(names[INDEX_L3].name,
10704 sizes[INDEX_L3].cs_size,
10705 ARCH_KMALLOC_MINALIGN,
10706 @@ -74804,7 +74792,7 @@ index 83311c9a..fcf8f86 100644
10707 NULL);
10708 }
10709
10710 -@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
10711 +@@ -1598,7 +1605,7 @@ void __init kmem_cache_init(void)
10712 sizes->cs_cachep = kmem_cache_create(names->name,
10713 sizes->cs_size,
10714 ARCH_KMALLOC_MINALIGN,
10715 @@ -74813,7 +74801,24 @@ index 83311c9a..fcf8f86 100644
10716 NULL);
10717 }
10718 #ifdef CONFIG_ZONE_DMA
10719 -@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
10720 +@@ -1610,6 +1617,16 @@ void __init kmem_cache_init(void)
10721 + SLAB_PANIC,
10722 + NULL);
10723 + #endif
10724 ++
10725 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10726 ++ sizes->cs_usercopycachep = kmem_cache_create(
10727 ++ names->name_usercopy,
10728 ++ sizes->cs_size,
10729 ++ ARCH_KMALLOC_MINALIGN,
10730 ++ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
10731 ++ NULL);
10732 ++#endif
10733 ++
10734 + sizes++;
10735 + names++;
10736 + }
10737 +@@ -4322,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
10738 }
10739 /* cpu stats */
10740 {
10741 @@ -74828,7 +74833,7 @@ index 83311c9a..fcf8f86 100644
10742
10743 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
10744 allochit, allocmiss, freehit, freemiss);
10745 -@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
10746 +@@ -4584,13 +4601,68 @@ static int __init slab_proc_init(void)
10747 {
10748 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
10749 #ifdef CONFIG_DEBUG_SLAB_LEAK
10750 @@ -74840,60 +74845,66 @@ index 83311c9a..fcf8f86 100644
10751 module_init(slab_proc_init);
10752 #endif
10753
10754 -+void check_object_size(const void *ptr, unsigned long n, bool to)
10755 ++bool is_usercopy_object(const void *ptr)
10756 +{
10757 ++ struct page *page;
10758 ++ struct kmem_cache *cachep;
10759 ++
10760 ++ if (ZERO_OR_NULL_PTR(ptr))
10761 ++ return false;
10762 ++
10763 ++ if (!virt_addr_valid(ptr))
10764 ++ return false;
10765 ++
10766 ++ page = virt_to_head_page(ptr);
10767 ++
10768 ++ if (!PageSlab(page))
10769 ++ return false;
10770 ++
10771 ++ cachep = page_get_cache(page);
10772 ++ return cachep->flags & SLAB_USERCOPY;
10773 ++}
10774 +
10775 +#ifdef CONFIG_PAX_USERCOPY
10776 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
10777 ++{
10778 + struct page *page;
10779 -+ struct kmem_cache *cachep = NULL;
10780 ++ struct kmem_cache *cachep;
10781 + struct slab *slabp;
10782 + unsigned int objnr;
10783 + unsigned long offset;
10784 -+ const char *type;
10785 -+
10786 -+ if (!n)
10787 -+ return;
10788 +
10789 -+ type = "<null>";
10790 + if (ZERO_OR_NULL_PTR(ptr))
10791 -+ goto report;
10792 ++ return "<null>";
10793 +
10794 + if (!virt_addr_valid(ptr))
10795 -+ return;
10796 ++ return NULL;
10797 +
10798 + page = virt_to_head_page(ptr);
10799 +
10800 -+ type = "<process stack>";
10801 -+ if (!PageSlab(page)) {
10802 -+ if (object_is_on_stack(ptr, n) == -1)
10803 -+ goto report;
10804 -+ return;
10805 -+ }
10806 ++ if (!PageSlab(page))
10807 ++ return NULL;
10808 +
10809 + cachep = page_get_cache(page);
10810 -+ type = cachep->name;
10811 + if (!(cachep->flags & SLAB_USERCOPY))
10812 -+ goto report;
10813 ++ return cachep->name;
10814 +
10815 + slabp = page_get_slab(page);
10816 + objnr = obj_to_index(cachep, slabp, ptr);
10817 + BUG_ON(objnr >= cachep->num);
10818 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
10819 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
10820 -+ return;
10821 -+
10822 -+report:
10823 -+ pax_report_usercopy(ptr, n, to, type);
10824 -+#endif
10825 ++ return NULL;
10826 +
10827 ++ return cachep->name;
10828 +}
10829 -+EXPORT_SYMBOL(check_object_size);
10830 ++#endif
10831 +
10832 /**
10833 * ksize - get the actual amount of memory allocated for a given object
10834 * @objp: Pointer to the object
10835 diff --git a/mm/slob.c b/mm/slob.c
10836 -index 8105be4..e045f96 100644
10837 +index 8105be4..3c15e57 100644
10838 --- a/mm/slob.c
10839 +++ b/mm/slob.c
10840 @@ -29,7 +29,7 @@
10841 @@ -75044,7 +75055,7 @@ index 8105be4..e045f96 100644
10842 return ret;
10843 }
10844 EXPORT_SYMBOL(__kmalloc_node);
10845 -@@ -533,13 +547,92 @@ void kfree(const void *block)
10846 +@@ -533,13 +547,83 @@ void kfree(const void *block)
10847 sp = slob_page(block);
10848 if (is_slob_page(sp)) {
10849 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
10850 @@ -75062,40 +75073,34 @@ index 8105be4..e045f96 100644
10851 }
10852 EXPORT_SYMBOL(kfree);
10853
10854 -+void check_object_size(const void *ptr, unsigned long n, bool to)
10855 ++bool is_usercopy_object(const void *ptr)
10856 +{
10857 ++ return false;
10858 ++}
10859 +
10860 +#ifdef CONFIG_PAX_USERCOPY
10861 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
10862 ++{
10863 + struct slob_page *sp;
10864 + const slob_t *free;
10865 + const void *base;
10866 + unsigned long flags;
10867 -+ const char *type;
10868 -+
10869 -+ if (!n)
10870 -+ return;
10871 +
10872 -+ type = "<null>";
10873 + if (ZERO_OR_NULL_PTR(ptr))
10874 -+ goto report;
10875 ++ return "<null>";
10876 +
10877 + if (!virt_addr_valid(ptr))
10878 -+ return;
10879 ++ return NULL;
10880 +
10881 -+ type = "<process stack>";
10882 + sp = slob_page(ptr);
10883 -+ if (!PageSlab((struct page *)sp)) {
10884 -+ if (object_is_on_stack(ptr, n) == -1)
10885 -+ goto report;
10886 -+ return;
10887 -+ }
10888 ++ if (!PageSlab((struct page *)sp))
10889 ++ return NULL;
10890 +
10891 -+ type = "<slob>";
10892 + if (sp->size) {
10893 + base = page_address(&sp->page);
10894 + if (base <= ptr && n <= sp->size - (ptr - base))
10895 -+ return;
10896 -+ goto report;
10897 ++ return NULL;
10898 ++ return "<slob>";
10899 + }
10900 +
10901 + /* some tricky double walking to find the chunk */
10902 @@ -75126,21 +75131,18 @@ index 8105be4..e045f96 100644
10903 + break;
10904 +
10905 + spin_unlock_irqrestore(&slob_lock, flags);
10906 -+ return;
10907 ++ return NULL;
10908 + }
10909 +
10910 + spin_unlock_irqrestore(&slob_lock, flags);
10911 -+report:
10912 -+ pax_report_usercopy(ptr, n, to, type);
10913 -+#endif
10914 -+
10915 ++ return "<slob>";
10916 +}
10917 -+EXPORT_SYMBOL(check_object_size);
10918 ++#endif
10919 +
10920 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
10921 size_t ksize(const void *block)
10922 {
10923 -@@ -552,10 +645,10 @@ size_t ksize(const void *block)
10924 +@@ -552,10 +636,10 @@ size_t ksize(const void *block)
10925 sp = slob_page(block);
10926 if (is_slob_page(sp)) {
10927 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
10928 @@ -75154,11 +75156,11 @@ index 8105be4..e045f96 100644
10929 }
10930 EXPORT_SYMBOL(ksize);
10931
10932 -@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
10933 +@@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
10934 {
10935 struct kmem_cache *c;
10936
10937 -+#ifdef CONFIG_PAX_USERCOPY
10938 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10939 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
10940 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
10941 +#else
10942 @@ -75168,11 +75170,11 @@ index 8105be4..e045f96 100644
10943
10944 if (c) {
10945 c->name = name;
10946 -@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
10947 +@@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
10948
10949 lockdep_trace_alloc(flags);
10950
10951 -+#ifdef CONFIG_PAX_USERCOPY
10952 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10953 + b = __kmalloc_node_align(c->size, flags, node, c->align);
10954 +#else
10955 if (c->size < PAGE_SIZE) {
10956 @@ -75194,7 +75196,7 @@ index 8105be4..e045f96 100644
10957
10958 if (c->ctor)
10959 c->ctor(b);
10960 -@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
10961 +@@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
10962
10963 static void __kmem_cache_free(void *b, int size)
10964 {
10965 @@ -75213,13 +75215,13 @@ index 8105be4..e045f96 100644
10966 }
10967
10968 static void kmem_rcu_free(struct rcu_head *head)
10969 -@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
10970 +@@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
10971
10972 void kmem_cache_free(struct kmem_cache *c, void *b)
10973 {
10974 + int size = c->size;
10975 +
10976 -+#ifdef CONFIG_PAX_USERCOPY
10977 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10978 + if (size + c->align < PAGE_SIZE) {
10979 + size += c->align;
10980 + b -= c->align;
10981 @@ -75239,7 +75241,7 @@ index 8105be4..e045f96 100644
10982 + __kmem_cache_free(b, size);
10983 }
10984
10985 -+#ifdef CONFIG_PAX_USERCOPY
10986 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
10987 + trace_kfree(_RET_IP_, b);
10988 +#else
10989 trace_kmem_cache_free(_RET_IP_, b);
10990 @@ -75249,7 +75251,7 @@ index 8105be4..e045f96 100644
10991 EXPORT_SYMBOL(kmem_cache_free);
10992
10993 diff --git a/mm/slub.c b/mm/slub.c
10994 -index af47188..ff84aee 100644
10995 +index af47188..9c2d9c0 100644
10996 --- a/mm/slub.c
10997 +++ b/mm/slub.c
10998 @@ -208,7 +208,7 @@ struct track {
10999 @@ -75307,58 +75309,89 @@ index af47188..ff84aee 100644
11000 list_del(&s->list);
11001 up_write(&slub_lock);
11002 if (kmem_cache_close(s)) {
11003 -@@ -3361,6 +3362,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
11004 +@@ -3179,6 +3180,10 @@ static struct kmem_cache *kmem_cache;
11005 + static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
11006 + #endif
11007 +
11008 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
11009 ++static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
11010 ++#endif
11011 ++
11012 + static int __init setup_slub_min_order(char *str)
11013 + {
11014 + get_option(&str, &slub_min_order);
11015 +@@ -3293,6 +3298,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
11016 + return kmalloc_dma_caches[index];
11017 +
11018 + #endif
11019 ++
11020 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
11021 ++ if (flags & SLAB_USERCOPY)
11022 ++ return kmalloc_usercopy_caches[index];
11023 ++
11024 ++#endif
11025 ++
11026 + return kmalloc_caches[index];
11027 + }
11028 +
11029 +@@ -3361,6 +3373,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
11030 EXPORT_SYMBOL(__kmalloc_node);
11031 #endif
11032
11033 -+void check_object_size(const void *ptr, unsigned long n, bool to)
11034 ++bool is_usercopy_object(const void *ptr)
11035 +{
11036 ++ struct page *page;
11037 ++ struct kmem_cache *s;
11038 ++
11039 ++ if (ZERO_OR_NULL_PTR(ptr))
11040 ++ return false;
11041 ++
11042 ++ if (!virt_addr_valid(ptr))
11043 ++ return false;
11044 ++
11045 ++ page = virt_to_head_page(ptr);
11046 ++
11047 ++ if (!PageSlab(page))
11048 ++ return false;
11049 ++
11050 ++ s = page->slab;
11051 ++ return s->flags & SLAB_USERCOPY;
11052 ++}
11053 +
11054 +#ifdef CONFIG_PAX_USERCOPY
11055 ++const char *check_heap_object(const void *ptr, unsigned long n, bool to)
11056 ++{
11057 + struct page *page;
11058 -+ struct kmem_cache *s = NULL;
11059 ++ struct kmem_cache *s;
11060 + unsigned long offset;
11061 -+ const char *type;
11062 +
11063 -+ if (!n)
11064 -+ return;
11065 -+
11066 -+ type = "<null>";
11067 + if (ZERO_OR_NULL_PTR(ptr))
11068 -+ goto report;
11069 ++ return "<null>";
11070 +
11071 + if (!virt_addr_valid(ptr))
11072 -+ return;
11073 ++ return NULL;
11074 +
11075 + page = virt_to_head_page(ptr);
11076 +
11077 -+ type = "<process stack>";
11078 -+ if (!PageSlab(page)) {
11079 -+ if (object_is_on_stack(ptr, n) == -1)
11080 -+ goto report;
11081 -+ return;
11082 -+ }
11083 ++ if (!PageSlab(page))
11084 ++ return NULL;
11085 +
11086 + s = page->slab;
11087 -+ type = s->name;
11088 + if (!(s->flags & SLAB_USERCOPY))
11089 -+ goto report;
11090 ++ return s->name;
11091 +
11092 + offset = (ptr - page_address(page)) % s->size;
11093 + if (offset <= s->objsize && n <= s->objsize - offset)
11094 -+ return;
11095 -+
11096 -+report:
11097 -+ pax_report_usercopy(ptr, n, to, type);
11098 -+#endif
11099 ++ return NULL;
11100 +
11101 ++ return s->name;
11102 +}
11103 -+EXPORT_SYMBOL(check_object_size);
11104 ++#endif
11105 +
11106 size_t ksize(const void *object)
11107 {
11108 struct page *page;
11109 -@@ -3635,7 +3680,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
11110 +@@ -3635,7 +3697,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
11111 int node;
11112
11113 list_add(&s->list, &slab_caches);
11114 @@ -75367,7 +75400,7 @@ index af47188..ff84aee 100644
11115
11116 for_each_node_state(node, N_NORMAL_MEMORY) {
11117 struct kmem_cache_node *n = get_node(s, node);
11118 -@@ -3752,17 +3797,17 @@ void __init kmem_cache_init(void)
11119 +@@ -3752,17 +3814,17 @@ void __init kmem_cache_init(void)
11120
11121 /* Caches that are not of the two-to-the-power-of size */
11122 if (KMALLOC_MIN_SIZE <= 32) {
11123 @@ -75388,7 +75421,30 @@ index af47188..ff84aee 100644
11124 caches++;
11125 }
11126
11127 -@@ -3830,7 +3875,7 @@ static int slab_unmergeable(struct kmem_cache *s)
11128 +@@ -3804,6 +3866,22 @@ void __init kmem_cache_init(void)
11129 + }
11130 + }
11131 + #endif
11132 ++
11133 ++#ifdef CONFIG_PAX_USERCOPY_SLABS
11134 ++ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
11135 ++ struct kmem_cache *s = kmalloc_caches[i];
11136 ++
11137 ++ if (s && s->size) {
11138 ++ char *name = kasprintf(GFP_NOWAIT,
11139 ++ "usercopy-kmalloc-%d", s->objsize);
11140 ++
11141 ++ BUG_ON(!name);
11142 ++ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
11143 ++ s->objsize, SLAB_USERCOPY);
11144 ++ }
11145 ++ }
11146 ++#endif
11147 ++
11148 + printk(KERN_INFO
11149 + "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
11150 + " CPUs=%d, Nodes=%d\n",
11151 +@@ -3830,7 +3908,7 @@ static int slab_unmergeable(struct kmem_cache *s)
11152 /*
11153 * We may have set a slab to be unmergeable during bootstrap.
11154 */
11155 @@ -75397,7 +75453,7 @@ index af47188..ff84aee 100644
11156 return 1;
11157
11158 return 0;
11159 -@@ -3889,7 +3934,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
11160 +@@ -3889,7 +3967,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
11161 down_write(&slub_lock);
11162 s = find_mergeable(size, align, flags, name, ctor);
11163 if (s) {
11164 @@ -75406,7 +75462,7 @@ index af47188..ff84aee 100644
11165 /*
11166 * Adjust the object sizes so that we clear
11167 * the complete object on kzalloc.
11168 -@@ -3898,7 +3943,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
11169 +@@ -3898,7 +3976,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
11170 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
11171
11172 if (sysfs_slab_alias(s, name)) {
11173 @@ -75415,7 +75471,7 @@ index af47188..ff84aee 100644
11174 goto err;
11175 }
11176 up_write(&slub_lock);
11177 -@@ -4027,7 +4072,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
11178 +@@ -4027,7 +4105,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
11179 }
11180 #endif
11181
11182 @@ -75424,7 +75480,7 @@ index af47188..ff84aee 100644
11183 static int count_inuse(struct page *page)
11184 {
11185 return page->inuse;
11186 -@@ -4414,12 +4459,12 @@ static void resiliency_test(void)
11187 +@@ -4414,12 +4492,12 @@ static void resiliency_test(void)
11188 validate_slab_cache(kmalloc_caches[9]);
11189 }
11190 #else
11191 @@ -75439,7 +75495,7 @@ index af47188..ff84aee 100644
11192 enum slab_stat_type {
11193 SL_ALL, /* All slabs */
11194 SL_PARTIAL, /* Only partially allocated slabs */
11195 -@@ -4660,7 +4705,7 @@ SLAB_ATTR_RO(ctor);
11196 +@@ -4660,7 +4738,7 @@ SLAB_ATTR_RO(ctor);
11197
11198 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
11199 {
11200 @@ -75448,7 +75504,7 @@ index af47188..ff84aee 100644
11201 }
11202 SLAB_ATTR_RO(aliases);
11203
11204 -@@ -5227,6 +5272,7 @@ static char *create_unique_id(struct kmem_cache *s)
11205 +@@ -5227,6 +5305,7 @@ static char *create_unique_id(struct kmem_cache *s)
11206 return name;
11207 }
11208
11209 @@ -75456,7 +75512,7 @@ index af47188..ff84aee 100644
11210 static int sysfs_slab_add(struct kmem_cache *s)
11211 {
11212 int err;
11213 -@@ -5289,6 +5335,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
11214 +@@ -5289,6 +5368,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
11215 kobject_del(&s->kobj);
11216 kobject_put(&s->kobj);
11217 }
11218 @@ -75464,7 +75520,7 @@ index af47188..ff84aee 100644
11219
11220 /*
11221 * Need to buffer aliases during bootup until sysfs becomes
11222 -@@ -5302,6 +5349,7 @@ struct saved_alias {
11223 +@@ -5302,6 +5382,7 @@ struct saved_alias {
11224
11225 static struct saved_alias *alias_list;
11226
11227 @@ -75472,7 +75528,7 @@ index af47188..ff84aee 100644
11228 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
11229 {
11230 struct saved_alias *al;
11231 -@@ -5324,6 +5372,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
11232 +@@ -5324,6 +5405,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
11233 alias_list = al;
11234 return 0;
11235 }
11236 @@ -75791,42 +75847,6 @@ index eeba3bb..820e22e 100644
11237 if (!vas || !vms)
11238 goto err_free;
11239
11240 -diff --git a/mm/vmscan.c b/mm/vmscan.c
11241 -index fbe2d2c..8342119 100644
11242 ---- a/mm/vmscan.c
11243 -+++ b/mm/vmscan.c
11244 -@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
11245 - * them before going back to sleep.
11246 - */
11247 - set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
11248 -- schedule();
11249 -+
11250 -+ if (!kthread_should_stop())
11251 -+ schedule();
11252 -+
11253 - set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
11254 - } else {
11255 - if (remaining)
11256 -@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
11257 - }
11258 -
11259 - /*
11260 -- * Called by memory hotplug when all memory in a node is offlined.
11261 -+ * Called by memory hotplug when all memory in a node is offlined. Caller must
11262 -+ * hold lock_memory_hotplug().
11263 - */
11264 - void kswapd_stop(int nid)
11265 - {
11266 - struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
11267 -
11268 -- if (kswapd)
11269 -+ if (kswapd) {
11270 - kthread_stop(kswapd);
11271 -+ NODE_DATA(nid)->kswapd = NULL;
11272 -+ }
11273 - }
11274 -
11275 - static int __init kswapd_init(void)
11276 diff --git a/mm/vmstat.c b/mm/vmstat.c
11277 index 8fd603b..cf0d930 100644
11278 --- a/mm/vmstat.c
11279 @@ -76538,7 +76558,7 @@ index 68bbf9f..5ef0d12 100644
11280
11281 return err;
11282 diff --git a/net/core/dev.c b/net/core/dev.c
11283 -index 1cbddc9..e52e698 100644
11284 +index 5738654..2078746 100644
11285 --- a/net/core/dev.c
11286 +++ b/net/core/dev.c
11287 @@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
11288 @@ -76583,7 +76603,7 @@ index 1cbddc9..e52e698 100644
11289
11290 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
11291
11292 -@@ -2964,7 +2968,7 @@ enqueue:
11293 +@@ -2943,7 +2947,7 @@ enqueue:
11294
11295 local_irq_restore(flags);
11296
11297 @@ -76592,7 +76612,7 @@ index 1cbddc9..e52e698 100644
11298 kfree_skb(skb);
11299 return NET_RX_DROP;
11300 }
11301 -@@ -3038,7 +3042,7 @@ int netif_rx_ni(struct sk_buff *skb)
11302 +@@ -3017,7 +3021,7 @@ int netif_rx_ni(struct sk_buff *skb)
11303 }
11304 EXPORT_SYMBOL(netif_rx_ni);
11305
11306 @@ -76601,7 +76621,7 @@ index 1cbddc9..e52e698 100644
11307 {
11308 struct softnet_data *sd = &__get_cpu_var(softnet_data);
11309
11310 -@@ -3327,7 +3331,7 @@ ncls:
11311 +@@ -3306,7 +3310,7 @@ ncls:
11312 if (pt_prev) {
11313 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
11314 } else {
11315 @@ -76610,7 +76630,7 @@ index 1cbddc9..e52e698 100644
11316 kfree_skb(skb);
11317 /* Jamal, now you will not able to escape explaining
11318 * me how you were going to use this. :-)
11319 -@@ -3892,7 +3896,7 @@ void netif_napi_del(struct napi_struct *napi)
11320 +@@ -3871,7 +3875,7 @@ void netif_napi_del(struct napi_struct *napi)
11321 }
11322 EXPORT_SYMBOL(netif_napi_del);
11323
11324 @@ -76619,7 +76639,7 @@ index 1cbddc9..e52e698 100644
11325 {
11326 struct softnet_data *sd = &__get_cpu_var(softnet_data);
11327 unsigned long time_limit = jiffies + 2;
11328 -@@ -5918,7 +5922,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
11329 +@@ -5897,7 +5901,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
11330 } else {
11331 netdev_stats_to_stats64(storage, &dev->stats);
11332 }
11333 @@ -77203,18 +77223,18 @@ index 94cdbc5..0cb0063 100644
11334 ts = peer->tcp_ts;
11335 tsage = get_seconds() - peer->tcp_ts_stamp;
11336 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
11337 -index 9726927..436489e 100644
11338 +index 32e6ca2..436489e 100644
11339 --- a/net/ipv4/tcp_input.c
11340 +++ b/net/ipv4/tcp_input.c
11341 -@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
11342 +@@ -5836,7 +5836,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
11343 goto discard;
11344
11345 if (th->syn) {
11346 +- if (th->fin)
11347 + if (th->fin || th->urg || th->psh)
11348 -+ goto discard;
11349 + goto discard;
11350 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
11351 return 1;
11352 -
11353 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
11354 index de69cec..74908e1 100644
11355 --- a/net/ipv4/tcp_ipv4.c
11356 @@ -77934,10 +77954,10 @@ index 253695d..9481ce8 100644
11357 seq_printf(m, "Max header size: %d\n", self->max_header_size);
11358
11359 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
11360 -index 274d150..656a144 100644
11361 +index cf98d62..7bf2972 100644
11362 --- a/net/iucv/af_iucv.c
11363 +++ b/net/iucv/af_iucv.c
11364 -@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
11365 +@@ -786,10 +786,10 @@ static int iucv_sock_autobind(struct sock *sk)
11366
11367 write_lock_bh(&iucv_sk_list.lock);
11368
11369 @@ -80097,7 +80117,7 @@ index d1d0ae8..6b73b2a 100644
11370 sprintf(alias, "dmi*");
11371
11372 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
11373 -index 619228d..274ce0e 100644
11374 +index 619228d..bf61bbb 100644
11375 --- a/scripts/mod/modpost.c
11376 +++ b/scripts/mod/modpost.c
11377 @@ -922,6 +922,7 @@ enum mismatch {
11378 @@ -80139,12 +80159,12 @@ index 619228d..274ce0e 100644
11379 free(prl_to);
11380 break;
11381 + case DATA_TO_TEXT:
11382 -+/*
11383 ++#if 0
11384 + fprintf(stderr,
11385 -+ "The variable %s references\n"
11386 -+ "the %s %s%s%s\n",
11387 -+ fromsym, to, sec2annotation(tosec), tosym, to_p);
11388 -+*/
11389 ++ "The %s %s:%s references\n"
11390 ++ "the %s %s:%s%s\n",
11391 ++ from, fromsec, fromsym, to, tosec, tosym, to_p);
11392 ++#endif
11393 + break;
11394 }
11395 fprintf(stderr, "\n");
11396 @@ -80256,10 +80276,10 @@ index 38f6617..e70b72b 100755
11397
11398 exuberant()
11399 diff --git a/security/Kconfig b/security/Kconfig
11400 -index 51bd5a0..d660068 100644
11401 +index 51bd5a0..f94ba7f 100644
11402 --- a/security/Kconfig
11403 +++ b/security/Kconfig
11404 -@@ -4,6 +4,861 @@
11405 +@@ -4,6 +4,875 @@
11406
11407 menu "Security options"
11408
11409 @@ -80284,6 +80304,9 @@ index 51bd5a0..d660068 100644
11410 + bool
11411 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
11412 +
11413 ++ config PAX_USERCOPY_SLABS
11414 ++ bool
11415 ++
11416 +config GRKERNSEC
11417 + bool "Grsecurity"
11418 + select CRYPTO
11419 @@ -80518,13 +80541,12 @@ index 51bd5a0..d660068 100644
11420 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
11421 + support.
11422 +
11423 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
11424 -+ header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
11425 -+ option otherwise they will not get any protection.
11426 -+
11427 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
11428 + support as well, they will override the legacy EI_PAX marks.
11429 +
11430 ++ If you enable none of the marking options then all applications
11431 ++ will run with PaX enabled on them by default.
11432 ++
11433 +config PAX_PT_PAX_FLAGS
11434 + bool 'Use ELF program header marking'
11435 + default y if GRKERNSEC_CONFIG_AUTO
11436 @@ -80537,15 +80559,14 @@ index 51bd5a0..d660068 100644
11437 + integrated into the toolchain (the binutils patch is available
11438 + from http://pax.grsecurity.net).
11439 +
11440 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
11441 -+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
11442 -+ support otherwise they will not get any protection.
11443 ++ Note that if you enable the legacy EI_PAX marking support as well,
11444 ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
11445 +
11446 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
11447 + must make sure that the marks are the same if a binary has both marks.
11448 +
11449 -+ Note that if you enable the legacy EI_PAX marking support as well,
11450 -+ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
11451 ++ If you enable none of the marking options then all applications
11452 ++ will run with PaX enabled on them by default.
11453 +
11454 +config PAX_XATTR_PAX_FLAGS
11455 + bool 'Use filesystem extended attributes marking'
11456 @@ -80570,15 +80591,14 @@ index 51bd5a0..d660068 100644
11457 + isofs, udf, vfat) so copying files through such filesystems will
11458 + lose the extended attributes and these PaX markings.
11459 +
11460 -+ If you have applications not marked by the PT_PAX_FLAGS ELF program
11461 -+ header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
11462 -+ support otherwise they will not get any protection.
11463 ++ Note that if you enable the legacy EI_PAX marking support as well,
11464 ++ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
11465 +
11466 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
11467 + must make sure that the marks are the same if a binary has both marks.
11468 +
11469 -+ Note that if you enable the legacy EI_PAX marking support as well,
11470 -+ the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
11471 ++ If you enable none of the marking options then all applications
11472 ++ will run with PaX enabled on them by default.
11473 +
11474 +choice
11475 + prompt 'MAC system integration'
11476 @@ -81068,6 +81088,7 @@ index 51bd5a0..d660068 100644
11477 + default y if GRKERNSEC_CONFIG_AUTO
11478 + depends on X86 || PPC || SPARC || ARM
11479 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
11480 ++ select PAX_USERCOPY_SLABS
11481 + help
11482 + By saying Y here the kernel will enforce the size of heap objects
11483 + when they are copied in either direction between the kernel and
11484 @@ -81108,6 +81129,19 @@ index 51bd5a0..d660068 100644
11485 + Homepage:
11486 + http://www.grsecurity.net/~ephox/overflow_plugin/
11487 +
11488 ++config PAX_LATENT_ENTROPY
11489 ++ bool "Generate some entropy during boot"
11490 ++ default y if GRKERNSEC_CONFIG_AUTO
11491 ++ help
11492 ++ By saying Y here the kernel will instrument early boot code to
11493 ++ extract some entropy from both original and artificially created
11494 ++ program state. This will help especially embedded systems where
11495 ++ there is little 'natural' source of entropy normally. The cost
11496 ++ is some slowdown of the boot process.
11497 ++
11498 ++ Note that entropy extracted this way is not cryptographically
11499 ++ secure!
11500 ++
11501 +endmenu
11502 +
11503 +endmenu
11504 @@ -81121,7 +81155,7 @@ index 51bd5a0..d660068 100644
11505 config KEYS
11506 bool "Enable access key retention support"
11507 help
11508 -@@ -169,7 +1024,7 @@ config INTEL_TXT
11509 +@@ -169,7 +1038,7 @@ config INTEL_TXT
11510 config LSM_MMAP_MIN_ADDR
11511 int "Low address space for LSM to protect from user allocation"
11512 depends on SECURITY && SECURITY_SELINUX
11513 @@ -82049,12 +82083,19 @@ index a39edcc..1014050 100644
11514 int last_frame_number; /* stored frame number */
11515 int last_delay; /* stored delay */
11516 };
11517 +diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
11518 +new file mode 100644
11519 +index 0000000..50f2f2f
11520 +--- /dev/null
11521 ++++ b/tools/gcc/.gitignore
11522 +@@ -0,0 +1 @@
11523 ++size_overflow_hash.h
11524 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
11525 new file mode 100644
11526 -index 0000000..f4f9986
11527 +index 0000000..1d09b7e
11528 --- /dev/null
11529 +++ b/tools/gcc/Makefile
11530 -@@ -0,0 +1,41 @@
11531 +@@ -0,0 +1,43 @@
11532 +#CC := gcc
11533 +#PLUGIN_SOURCE_FILES := pax_plugin.c
11534 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
11535 @@ -82076,6 +82117,7 @@ index 0000000..f4f9986
11536 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
11537 +$(HOSTLIBS)-y += colorize_plugin.so
11538 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
11539 ++$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
11540 +
11541 +always := $($(HOSTLIBS)-y)
11542 +
11543 @@ -82086,6 +82128,7 @@ index 0000000..f4f9986
11544 +checker_plugin-objs := checker_plugin.o
11545 +colorize_plugin-objs := colorize_plugin.o
11546 +size_overflow_plugin-objs := size_overflow_plugin.o
11547 ++latent_entropy_plugin-objs := latent_entropy_plugin.o
11548 +
11549 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
11550 +
11551 @@ -82275,7 +82318,7 @@ index 0000000..d41b5af
11552 +}
11553 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
11554 new file mode 100644
11555 -index 0000000..7a5e311
11556 +index 0000000..846aeb0
11557 --- /dev/null
11558 +++ b/tools/gcc/colorize_plugin.c
11559 @@ -0,0 +1,148 @@
11560 @@ -82413,7 +82456,7 @@ index 0000000..7a5e311
11561 + struct register_pass_info colorize_rearm_pass_info = {
11562 + .pass = &pass_ipa_colorize_rearm.pass,
11563 + .reference_pass_name = "*free_lang_data",
11564 -+ .ref_pass_instance_number = 0,
11565 ++ .ref_pass_instance_number = 1,
11566 + .pos_op = PASS_POS_INSERT_AFTER
11567 + };
11568 +
11569 @@ -82429,7 +82472,7 @@ index 0000000..7a5e311
11570 +}
11571 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
11572 new file mode 100644
11573 -index 0000000..89b7f56
11574 +index 0000000..048d4ff
11575 --- /dev/null
11576 +++ b/tools/gcc/constify_plugin.c
11577 @@ -0,0 +1,328 @@
11578 @@ -82735,7 +82778,7 @@ index 0000000..89b7f56
11579 + struct register_pass_info local_variable_pass_info = {
11580 + .pass = &pass_local_variable.pass,
11581 + .reference_pass_name = "*referenced_vars",
11582 -+ .ref_pass_instance_number = 0,
11583 ++ .ref_pass_instance_number = 1,
11584 + .pos_op = PASS_POS_INSERT_AFTER
11585 + };
11586 +
11587 @@ -82863,7 +82906,7 @@ index 0000000..a0fe8b2
11588 +exit 0
11589 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
11590 new file mode 100644
11591 -index 0000000..a5eabce
11592 +index 0000000..a86e422
11593 --- /dev/null
11594 +++ b/tools/gcc/kallocstat_plugin.c
11595 @@ -0,0 +1,167 @@
11596 @@ -83020,7 +83063,7 @@ index 0000000..a5eabce
11597 + struct register_pass_info kallocstat_pass_info = {
11598 + .pass = &kallocstat_pass.pass,
11599 + .reference_pass_name = "ssa",
11600 -+ .ref_pass_instance_number = 0,
11601 ++ .ref_pass_instance_number = 1,
11602 + .pos_op = PASS_POS_INSERT_AFTER
11603 + };
11604 +
11605 @@ -83036,7 +83079,7 @@ index 0000000..a5eabce
11606 +}
11607 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
11608 new file mode 100644
11609 -index 0000000..d8a8da2
11610 +index 0000000..98011fa
11611 --- /dev/null
11612 +++ b/tools/gcc/kernexec_plugin.c
11613 @@ -0,0 +1,427 @@
11614 @@ -83412,19 +83455,19 @@ index 0000000..d8a8da2
11615 + struct register_pass_info kernexec_reload_pass_info = {
11616 + .pass = &kernexec_reload_pass.pass,
11617 + .reference_pass_name = "ssa",
11618 -+ .ref_pass_instance_number = 0,
11619 ++ .ref_pass_instance_number = 1,
11620 + .pos_op = PASS_POS_INSERT_AFTER
11621 + };
11622 + struct register_pass_info kernexec_fptr_pass_info = {
11623 + .pass = &kernexec_fptr_pass.pass,
11624 + .reference_pass_name = "ssa",
11625 -+ .ref_pass_instance_number = 0,
11626 ++ .ref_pass_instance_number = 1,
11627 + .pos_op = PASS_POS_INSERT_AFTER
11628 + };
11629 + struct register_pass_info kernexec_retaddr_pass_info = {
11630 + .pass = &kernexec_retaddr_pass.pass,
11631 + .reference_pass_name = "pro_and_epilogue",
11632 -+ .ref_pass_instance_number = 0,
11633 ++ .ref_pass_instance_number = 1,
11634 + .pos_op = PASS_POS_INSERT_AFTER
11635 + };
11636 +
11637 @@ -83467,6 +83510,307 @@ index 0000000..d8a8da2
11638 +
11639 + return 0;
11640 +}
11641 +diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
11642 +new file mode 100644
11643 +index 0000000..b8008f7
11644 +--- /dev/null
11645 ++++ b/tools/gcc/latent_entropy_plugin.c
11646 +@@ -0,0 +1,295 @@
11647 ++/*
11648 ++ * Copyright 2012 by the PaX Team <pageexec@××××××××.hu>
11649 ++ * Licensed under the GPL v2
11650 ++ *
11651 ++ * Note: the choice of the license means that the compilation process is
11652 ++ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
11653 ++ * but for the kernel it doesn't matter since it doesn't link against
11654 ++ * any of the gcc libraries
11655 ++ *
11656 ++ * gcc plugin to help generate a little bit of entropy from program state,
11657 ++ * used during boot in the kernel
11658 ++ *
11659 ++ * TODO:
11660 ++ * - add ipa pass to identify not explicitly marked candidate functions
11661 ++ * - mix in more program state (function arguments/return values, loop variables, etc)
11662 ++ * - more instrumentation control via attribute parameters
11663 ++ *
11664 ++ * BUGS:
11665 ++ * - LTO needs -flto-partition=none for now
11666 ++ */
11667 ++#include "gcc-plugin.h"
11668 ++#include "config.h"
11669 ++#include "system.h"
11670 ++#include "coretypes.h"
11671 ++#include "tree.h"
11672 ++#include "tree-pass.h"
11673 ++#include "flags.h"
11674 ++#include "intl.h"
11675 ++#include "toplev.h"
11676 ++#include "plugin.h"
11677 ++//#include "expr.h" where are you...
11678 ++#include "diagnostic.h"
11679 ++#include "plugin-version.h"
11680 ++#include "tm.h"
11681 ++#include "function.h"
11682 ++#include "basic-block.h"
11683 ++#include "gimple.h"
11684 ++#include "rtl.h"
11685 ++#include "emit-rtl.h"
11686 ++#include "tree-flow.h"
11687 ++
11688 ++int plugin_is_GPL_compatible;
11689 ++
11690 ++static tree latent_entropy_decl;
11691 ++
11692 ++static struct plugin_info latent_entropy_plugin_info = {
11693 ++ .version = "201207271820",
11694 ++ .help = NULL
11695 ++};
11696 ++
11697 ++static unsigned int execute_latent_entropy(void);
11698 ++static bool gate_latent_entropy(void);
11699 ++
11700 ++static struct gimple_opt_pass latent_entropy_pass = {
11701 ++ .pass = {
11702 ++ .type = GIMPLE_PASS,
11703 ++ .name = "latent_entropy",
11704 ++ .gate = gate_latent_entropy,
11705 ++ .execute = execute_latent_entropy,
11706 ++ .sub = NULL,
11707 ++ .next = NULL,
11708 ++ .static_pass_number = 0,
11709 ++ .tv_id = TV_NONE,
11710 ++ .properties_required = PROP_gimple_leh | PROP_cfg,
11711 ++ .properties_provided = 0,
11712 ++ .properties_destroyed = 0,
11713 ++ .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
11714 ++ .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
11715 ++ }
11716 ++};
11717 ++
11718 ++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
11719 ++{
11720 ++ if (TREE_CODE(*node) != FUNCTION_DECL) {
11721 ++ *no_add_attrs = true;
11722 ++ error("%qE attribute only applies to functions", name);
11723 ++ }
11724 ++ return NULL_TREE;
11725 ++}
11726 ++
11727 ++static struct attribute_spec latent_entropy_attr = {
11728 ++ .name = "latent_entropy",
11729 ++ .min_length = 0,
11730 ++ .max_length = 0,
11731 ++ .decl_required = true,
11732 ++ .type_required = false,
11733 ++ .function_type_required = false,
11734 ++ .handler = handle_latent_entropy_attribute,
11735 ++#if BUILDING_GCC_VERSION >= 4007
11736 ++ .affects_type_identity = false
11737 ++#endif
11738 ++};
11739 ++
11740 ++static void register_attributes(void *event_data, void *data)
11741 ++{
11742 ++ register_attribute(&latent_entropy_attr);
11743 ++}
11744 ++
11745 ++static bool gate_latent_entropy(void)
11746 ++{
11747 ++ tree latent_entropy_attr;
11748 ++
11749 ++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
11750 ++ return latent_entropy_attr != NULL_TREE;
11751 ++}
11752 ++
11753 ++static unsigned HOST_WIDE_INT seed;
11754 ++static unsigned HOST_WIDE_INT get_random_const(void)
11755 ++{
11756 ++ seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
11757 ++ return seed;
11758 ++}
11759 ++
11760 ++static enum tree_code get_op(tree *rhs)
11761 ++{
11762 ++ static enum tree_code op;
11763 ++ unsigned HOST_WIDE_INT random_const;
11764 ++
11765 ++ random_const = get_random_const();
11766 ++
11767 ++ switch (op) {
11768 ++ case BIT_XOR_EXPR:
11769 ++ op = PLUS_EXPR;
11770 ++ break;
11771 ++
11772 ++ case PLUS_EXPR:
11773 ++ if (rhs) {
11774 ++ op = LROTATE_EXPR;
11775 ++ random_const &= HOST_BITS_PER_WIDE_INT - 1;
11776 ++ break;
11777 ++ }
11778 ++
11779 ++ case LROTATE_EXPR:
11780 ++ default:
11781 ++ op = BIT_XOR_EXPR;
11782 ++ break;
11783 ++ }
11784 ++ if (rhs)
11785 ++ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
11786 ++ return op;
11787 ++}
11788 ++
11789 ++static void perturb_local_entropy(basic_block bb, tree local_entropy)
11790 ++{
11791 ++ gimple_stmt_iterator gsi;
11792 ++ gimple assign;
11793 ++ tree addxorrol, rhs;
11794 ++ enum tree_code op;
11795 ++
11796 ++ op = get_op(&rhs);
11797 ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
11798 ++ assign = gimple_build_assign(local_entropy, addxorrol);
11799 ++ find_referenced_vars_in(assign);
11800 ++//debug_bb(bb);
11801 ++ gsi = gsi_after_labels(bb);
11802 ++ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
11803 ++ update_stmt(assign);
11804 ++}
11805 ++
11806 ++static void perturb_latent_entropy(basic_block bb, tree rhs)
11807 ++{
11808 ++ gimple_stmt_iterator gsi;
11809 ++ gimple assign;
11810 ++ tree addxorrol, temp;
11811 ++
11812 ++ // 1. create temporary copy of latent_entropy
11813 ++ temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
11814 ++ add_referenced_var(temp);
11815 ++ mark_sym_for_renaming(temp);
11816 ++
11817 ++ // 2. read...
11818 ++ assign = gimple_build_assign(temp, latent_entropy_decl);
11819 ++ find_referenced_vars_in(assign);
11820 ++ gsi = gsi_after_labels(bb);
11821 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
11822 ++ update_stmt(assign);
11823 ++
11824 ++ // 3. ...modify...
11825 ++ addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
11826 ++ assign = gimple_build_assign(temp, addxorrol);
11827 ++ find_referenced_vars_in(assign);
11828 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
11829 ++ update_stmt(assign);
11830 ++
11831 ++ // 4. ...write latent_entropy
11832 ++ assign = gimple_build_assign(latent_entropy_decl, temp);
11833 ++ find_referenced_vars_in(assign);
11834 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
11835 ++ update_stmt(assign);
11836 ++}
11837 ++
11838 ++static unsigned int execute_latent_entropy(void)
11839 ++{
11840 ++ basic_block bb;
11841 ++ gimple assign;
11842 ++ gimple_stmt_iterator gsi;
11843 ++ tree local_entropy;
11844 ++
11845 ++ if (!latent_entropy_decl) {
11846 ++ struct varpool_node *node;
11847 ++
11848 ++ for (node = varpool_nodes; node; node = node->next) {
11849 ++ tree var = node->decl;
11850 ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
11851 ++ continue;
11852 ++ latent_entropy_decl = var;
11853 ++// debug_tree(var);
11854 ++ break;
11855 ++ }
11856 ++ if (!latent_entropy_decl) {
11857 ++// debug_tree(current_function_decl);
11858 ++ return 0;
11859 ++ }
11860 ++ }
11861 ++
11862 ++//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
11863 ++
11864 ++ // 1. create local entropy variable
11865 ++ local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
11866 ++ add_referenced_var(local_entropy);
11867 ++ mark_sym_for_renaming(local_entropy);
11868 ++
11869 ++ // 2. initialize local entropy variable
11870 ++ bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
11871 ++ if (dom_info_available_p(CDI_DOMINATORS))
11872 ++ set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
11873 ++ gsi = gsi_start_bb(bb);
11874 ++
11875 ++ assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
11876 ++// gimple_set_location(assign, loc);
11877 ++ find_referenced_vars_in(assign);
11878 ++ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
11879 ++ update_stmt(assign);
11880 ++ bb = bb->next_bb;
11881 ++
11882 ++ // 3. instrument each BB with an operation on the local entropy variable
11883 ++ while (bb != EXIT_BLOCK_PTR) {
11884 ++ perturb_local_entropy(bb, local_entropy);
11885 ++ bb = bb->next_bb;
11886 ++ };
11887 ++
11888 ++ // 4. mix local entropy into the global entropy variable
11889 ++ perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
11890 ++ return 0;
11891 ++}
11892 ++
11893 ++static void start_unit_callback(void *gcc_data, void *user_data)
11894 ++{
11895 ++#if BUILDING_GCC_VERSION >= 4007
11896 ++ seed = get_random_seed(false);
11897 ++#else
11898 ++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
11899 ++ seed *= seed;
11900 ++#endif
11901 ++
11902 ++ if (in_lto_p)
11903 ++ return;
11904 ++
11905 ++ // extern u64 latent_entropy
11906 ++ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
11907 ++
11908 ++ TREE_STATIC(latent_entropy_decl) = 1;
11909 ++ TREE_PUBLIC(latent_entropy_decl) = 1;
11910 ++ TREE_USED(latent_entropy_decl) = 1;
11911 ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
11912 ++ DECL_EXTERNAL(latent_entropy_decl) = 1;
11913 ++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
11914 ++ DECL_INITIAL(latent_entropy_decl) = NULL;
11915 ++// DECL_ASSEMBLER_NAME(latent_entropy_decl);
11916 ++// varpool_finalize_decl(latent_entropy_decl);
11917 ++// varpool_mark_needed_node(latent_entropy_decl);
11918 ++}
11919 ++
11920 ++int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
11921 ++{
11922 ++ const char * const plugin_name = plugin_info->base_name;
11923 ++ struct register_pass_info latent_entropy_pass_info = {
11924 ++ .pass = &latent_entropy_pass.pass,
11925 ++ .reference_pass_name = "optimized",
11926 ++ .ref_pass_instance_number = 1,
11927 ++ .pos_op = PASS_POS_INSERT_BEFORE
11928 ++ };
11929 ++
11930 ++ if (!plugin_default_version_check(version, &gcc_version)) {
11931 ++ error(G_("incompatible gcc/plugin versions"));
11932 ++ return 1;
11933 ++ }
11934 ++
11935 ++ register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
11936 ++ register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
11937 ++ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
11938 ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
11939 ++
11940 ++ return 0;
11941 ++}
11942 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
11943 new file mode 100644
11944 index 0000000..54a12fe
11945 @@ -87077,7 +87421,7 @@ index 0000000..cc96254
11946 +}
11947 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
11948 new file mode 100644
11949 -index 0000000..b87ec9d
11950 +index 0000000..38d2014
11951 --- /dev/null
11952 +++ b/tools/gcc/stackleak_plugin.c
11953 @@ -0,0 +1,313 @@
11954 @@ -87350,13 +87694,13 @@ index 0000000..b87ec9d
11955 + .pass = &stackleak_tree_instrument_pass.pass,
11956 +// .reference_pass_name = "tree_profile",
11957 + .reference_pass_name = "optimized",
11958 -+ .ref_pass_instance_number = 0,
11959 ++ .ref_pass_instance_number = 1,
11960 + .pos_op = PASS_POS_INSERT_BEFORE
11961 + };
11962 + struct register_pass_info stackleak_final_pass_info = {
11963 + .pass = &stackleak_final_rtl_opt_pass.pass,
11964 + .reference_pass_name = "final",
11965 -+ .ref_pass_instance_number = 0,
11966 ++ .ref_pass_instance_number = 1,
11967 + .pos_op = PASS_POS_INSERT_BEFORE
11968 + };
11969 +
11970
11971 diff --git a/3.2.23/4430_grsec-remove-localversion-grsec.patch b/3.2.24/4430_grsec-remove-localversion-grsec.patch
11972 similarity index 100%
11973 rename from 3.2.23/4430_grsec-remove-localversion-grsec.patch
11974 rename to 3.2.24/4430_grsec-remove-localversion-grsec.patch
11975
11976 diff --git a/3.2.23/4435_grsec-mute-warnings.patch b/3.2.24/4435_grsec-mute-warnings.patch
11977 similarity index 100%
11978 rename from 3.2.23/4435_grsec-mute-warnings.patch
11979 rename to 3.2.24/4435_grsec-mute-warnings.patch
11980
11981 diff --git a/3.2.23/4440_grsec-remove-protected-paths.patch b/3.2.24/4440_grsec-remove-protected-paths.patch
11982 similarity index 100%
11983 rename from 3.2.23/4440_grsec-remove-protected-paths.patch
11984 rename to 3.2.24/4440_grsec-remove-protected-paths.patch
11985
11986 diff --git a/3.2.23/4450_grsec-kconfig-default-gids.patch b/3.2.24/4450_grsec-kconfig-default-gids.patch
11987 similarity index 100%
11988 rename from 3.2.23/4450_grsec-kconfig-default-gids.patch
11989 rename to 3.2.24/4450_grsec-kconfig-default-gids.patch
11990
11991 diff --git a/3.2.23/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.24/4465_selinux-avc_audit-log-curr_ip.patch
11992 similarity index 100%
11993 rename from 3.2.23/4465_selinux-avc_audit-log-curr_ip.patch
11994 rename to 3.2.24/4465_selinux-avc_audit-log-curr_ip.patch
11995
11996 diff --git a/3.2.23/4470_disable-compat_vdso.patch b/3.2.24/4470_disable-compat_vdso.patch
11997 similarity index 100%
11998 rename from 3.2.23/4470_disable-compat_vdso.patch
11999 rename to 3.2.24/4470_disable-compat_vdso.patch
12000
12001 diff --git a/3.4.6/0000_README b/3.4.6/0000_README
12002 index 9b230b9..0a9e8d9 100644
12003 --- a/3.4.6/0000_README
12004 +++ b/3.4.6/0000_README
12005 @@ -6,7 +6,7 @@ Patch: 1005_linux-3.4.6.patch
12006 From: http://www.kernel.org
12007 Desc: Linux 3.4.6
12008
12009 -Patch: 4420_grsecurity-2.9.1-3.4.6-201207242237.patch
12010 +Patch: 4420_grsecurity-2.9.1-3.4.6-201207281946.patch
12011 From: http://www.grsecurity.net
12012 Desc: hardened-sources base patch from upstream grsecurity
12013
12014
12015 diff --git a/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch b/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch
12016 similarity index 99%
12017 rename from 3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch
12018 rename to 3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch
12019 index 0f5d8af..357f472 100644
12020 --- a/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207242237.patch
12021 +++ b/3.4.6/4420_grsecurity-2.9.1-3.4.6-201207281946.patch
12022 @@ -236,7 +236,7 @@ index c1601e5..08557ce 100644
12023
12024 pcd. [PARIDE]
12025 diff --git a/Makefile b/Makefile
12026 -index 5d0edcb..121c424 100644
12027 +index 5d0edcb..f69ee4c 100644
12028 --- a/Makefile
12029 +++ b/Makefile
12030 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
12031 @@ -296,11 +296,11 @@ index 5d0edcb..121c424 100644
12032 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
12033 +endif
12034 +ifdef CONFIG_PAX_LATENT_ENTROPY
12035 -+LATENT_ENTROPY := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so
12036 ++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
12037 +endif
12038 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
12039 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
12040 -+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY)
12041 ++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
12042 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
12043 +export PLUGINCC CONSTIFY_PLUGIN
12044 +ifeq ($(KBUILD_EXTMOD),)
12045 @@ -6527,7 +6527,7 @@ index 301421c..e2535d1 100644
12046 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
12047 obj-y += fault_$(BITS).o
12048 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
12049 -index df3155a..eb708b8 100644
12050 +index df3155a..b6e32fa 100644
12051 --- a/arch/sparc/mm/fault_32.c
12052 +++ b/arch/sparc/mm/fault_32.c
12053 @@ -21,6 +21,9 @@
12054 @@ -6540,7 +6540,7 @@ index df3155a..eb708b8 100644
12055
12056 #include <asm/page.h>
12057 #include <asm/pgtable.h>
12058 -@@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12059 +@@ -207,6 +210,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12060 return safe_compute_effective_address(regs, insn);
12061 }
12062
12063 @@ -6631,40 +6631,49 @@ index df3155a..eb708b8 100644
12064 + }
12065 + } while (0);
12066 +
12067 -+ { /* PaX: patched PLT emulation #2 */
12068 ++ do { /* PaX: patched PLT emulation #2 */
12069 + unsigned int ba;
12070 +
12071 + err = get_user(ba, (unsigned int *)regs->pc);
12072 +
12073 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
12074 ++ if (err)
12075 ++ break;
12076 ++
12077 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12078 + unsigned int addr;
12079 +
12080 -+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12081 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
12082 ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12083 ++ else
12084 ++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12085 + regs->pc = addr;
12086 + regs->npc = addr+4;
12087 + return 2;
12088 + }
12089 -+ }
12090 ++ } while (0);
12091 +
12092 + do { /* PaX: patched PLT emulation #3 */
12093 -+ unsigned int sethi, jmpl, nop;
12094 ++ unsigned int sethi, bajmpl, nop;
12095 +
12096 + err = get_user(sethi, (unsigned int *)regs->pc);
12097 -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
12098 ++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
12099 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
12100 +
12101 + if (err)
12102 + break;
12103 +
12104 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
12105 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
12106 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12107 + nop == 0x01000000U)
12108 + {
12109 + unsigned int addr;
12110 +
12111 + addr = (sethi & 0x003FFFFFU) << 10;
12112 + regs->u_regs[UREG_G1] = addr;
12113 -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12114 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12115 ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12116 ++ else
12117 ++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12118 + regs->pc = addr;
12119 + regs->npc = addr+4;
12120 + return 2;
12121 @@ -6809,7 +6818,7 @@ index df3155a..eb708b8 100644
12122 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
12123 int text_fault)
12124 {
12125 -@@ -282,6 +547,24 @@ good_area:
12126 +@@ -282,6 +556,24 @@ good_area:
12127 if(!(vma->vm_flags & VM_WRITE))
12128 goto bad_area;
12129 } else {
12130 @@ -6835,7 +6844,7 @@ index df3155a..eb708b8 100644
12131 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
12132 goto bad_area;
12133 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
12134 -index 1fe0429..aee2e87 100644
12135 +index 1fe0429..8dd5dd5 100644
12136 --- a/arch/sparc/mm/fault_64.c
12137 +++ b/arch/sparc/mm/fault_64.c
12138 @@ -21,6 +21,9 @@
12139 @@ -6857,7 +6866,7 @@ index 1fe0429..aee2e87 100644
12140 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
12141 dump_stack();
12142 unhandled_fault(regs->tpc, current, regs);
12143 -@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
12144 +@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
12145 show_regs(regs);
12146 }
12147
12148 @@ -6952,15 +6961,21 @@ index 1fe0429..aee2e87 100644
12149 + }
12150 + } while (0);
12151 +
12152 -+ { /* PaX: patched PLT emulation #2 */
12153 ++ do { /* PaX: patched PLT emulation #2 */
12154 + unsigned int ba;
12155 +
12156 + err = get_user(ba, (unsigned int *)regs->tpc);
12157 +
12158 -+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
12159 ++ if (err)
12160 ++ break;
12161 ++
12162 ++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12163 + unsigned long addr;
12164 +
12165 -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12166 ++ if ((ba & 0xFFC00000U) == 0x30800000U)
12167 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12168 ++ else
12169 ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12170 +
12171 + if (test_thread_flag(TIF_32BIT))
12172 + addr &= 0xFFFFFFFFUL;
12173 @@ -6969,27 +6984,30 @@ index 1fe0429..aee2e87 100644
12174 + regs->tnpc = addr+4;
12175 + return 2;
12176 + }
12177 -+ }
12178 ++ } while (0);
12179 +
12180 + do { /* PaX: patched PLT emulation #3 */
12181 -+ unsigned int sethi, jmpl, nop;
12182 ++ unsigned int sethi, bajmpl, nop;
12183 +
12184 + err = get_user(sethi, (unsigned int *)regs->tpc);
12185 -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
12186 ++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12187 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12188 +
12189 + if (err)
12190 + break;
12191 +
12192 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
12193 -+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
12194 ++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12195 + nop == 0x01000000U)
12196 + {
12197 + unsigned long addr;
12198 +
12199 + addr = (sethi & 0x003FFFFFU) << 10;
12200 + regs->u_regs[UREG_G1] = addr;
12201 -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12202 ++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12203 ++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12204 ++ else
12205 ++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12206 +
12207 + if (test_thread_flag(TIF_32BIT))
12208 + addr &= 0xFFFFFFFFUL;
12209 @@ -7315,7 +7333,7 @@ index 1fe0429..aee2e87 100644
12210 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12211 {
12212 struct mm_struct *mm = current->mm;
12213 -@@ -343,6 +797,29 @@ retry:
12214 +@@ -343,6 +806,29 @@ retry:
12215 if (!vma)
12216 goto bad_area;
12217
12218 @@ -59419,7 +59437,7 @@ index f1c8ca6..b5c1cc7 100644
12219 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
12220
12221 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
12222 -index b7babf0..c1e2d45 100644
12223 +index b7babf0..3ba8aee 100644
12224 --- a/include/asm-generic/atomic-long.h
12225 +++ b/include/asm-generic/atomic-long.h
12226 @@ -22,6 +22,12 @@
12227 @@ -59672,7 +59690,7 @@ index b7babf0..c1e2d45 100644
12228 static inline long atomic_long_dec_return(atomic_long_t *l)
12229 {
12230 atomic_t *v = (atomic_t *)l;
12231 -@@ -255,4 +393,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
12232 +@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
12233
12234 #endif /* BITS_PER_LONG == 64 */
12235
12236 @@ -59690,8 +59708,10 @@ index b7babf0..c1e2d45 100644
12237 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
12238 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
12239 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
12240 ++#ifdef CONFIG_X86
12241 + atomic_clear_mask_unchecked(0, NULL);
12242 + atomic_set_mask_unchecked(0, NULL);
12243 ++#endif
12244 +
12245 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
12246 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
12247 @@ -60204,10 +60224,10 @@ index 42e55de..1cd0e66 100644
12248 extern struct cleancache_ops
12249 cleancache_register_ops(struct cleancache_ops *ops);
12250 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
12251 -index 2f40791..a62d196 100644
12252 +index 2f40791..9c9e13c 100644
12253 --- a/include/linux/compiler-gcc4.h
12254 +++ b/include/linux/compiler-gcc4.h
12255 -@@ -32,6 +32,16 @@
12256 +@@ -32,6 +32,20 @@
12257 #define __linktime_error(message) __attribute__((__error__(message)))
12258
12259 #if __GNUC_MINOR__ >= 5
12260 @@ -60221,10 +60241,14 @@ index 2f40791..a62d196 100644
12261 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
12262 +#endif
12263 +
12264 ++#ifdef LATENT_ENTROPY_PLUGIN
12265 ++#define __latent_entropy __attribute__((latent_entropy))
12266 ++#endif
12267 ++
12268 /*
12269 * Mark a position in code as unreachable. This can be used to
12270 * suppress control flow warnings after asm blocks that transfer
12271 -@@ -47,6 +57,11 @@
12272 +@@ -47,6 +61,11 @@
12273 #define __noclone __attribute__((__noclone__))
12274
12275 #endif
12276 @@ -60237,7 +60261,7 @@ index 2f40791..a62d196 100644
12277
12278 #if __GNUC_MINOR__ > 0
12279 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
12280 -index 923d093..726c17f 100644
12281 +index 923d093..1fef491 100644
12282 --- a/include/linux/compiler.h
12283 +++ b/include/linux/compiler.h
12284 @@ -5,31 +5,62 @@
12285 @@ -60313,7 +60337,7 @@ index 923d093..726c17f 100644
12286 #endif
12287
12288 #ifdef __KERNEL__
12289 -@@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12290 +@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12291 # define __attribute_const__ /* unimplemented */
12292 #endif
12293
12294 @@ -60329,10 +60353,14 @@ index 923d093..726c17f 100644
12295 +# define __size_overflow(...)
12296 +#endif
12297 +
12298 ++#ifndef __latent_entropy
12299 ++# define __latent_entropy
12300 ++#endif
12301 ++
12302 /*
12303 * Tell gcc if a function is cold. The compiler will assume any path
12304 * directly leading to the call is unlikely.
12305 -@@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12306 +@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12307 #define __cold
12308 #endif
12309
12310 @@ -60355,7 +60383,7 @@ index 923d093..726c17f 100644
12311 /* Simple shorthand for a section definition */
12312 #ifndef __section
12313 # define __section(S) __attribute__ ((__section__(#S)))
12314 -@@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12315 +@@ -308,6 +373,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
12316 * use is to mediate communication between process-level code and irq/NMI
12317 * handlers, all running on the same CPU.
12318 */
12319 @@ -61887,10 +61915,54 @@ index 58404b0..439ed95 100644
12320 };
12321
12322 diff --git a/include/linux/init.h b/include/linux/init.h
12323 -index 6b95109..4aca62c 100644
12324 +index 6b95109..bcbdd68 100644
12325 --- a/include/linux/init.h
12326 +++ b/include/linux/init.h
12327 -@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
12328 +@@ -39,9 +39,15 @@
12329 + * Also note, that this data cannot be "const".
12330 + */
12331 +
12332 ++#ifdef MODULE
12333 ++#define add_latent_entropy
12334 ++#else
12335 ++#define add_latent_entropy __latent_entropy
12336 ++#endif
12337 ++
12338 + /* These are for everybody (although not all archs will actually
12339 + discard it in modules) */
12340 +-#define __init __section(.init.text) __cold notrace
12341 ++#define __init __section(.init.text) __cold notrace add_latent_entropy
12342 + #define __initdata __section(.init.data)
12343 + #define __initconst __section(.init.rodata)
12344 + #define __exitdata __section(.exit.data)
12345 +@@ -83,7 +89,7 @@
12346 + #define __exit __section(.exit.text) __exitused __cold notrace
12347 +
12348 + /* Used for HOTPLUG */
12349 +-#define __devinit __section(.devinit.text) __cold notrace
12350 ++#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
12351 + #define __devinitdata __section(.devinit.data)
12352 + #define __devinitconst __section(.devinit.rodata)
12353 + #define __devexit __section(.devexit.text) __exitused __cold notrace
12354 +@@ -91,7 +97,7 @@
12355 + #define __devexitconst __section(.devexit.rodata)
12356 +
12357 + /* Used for HOTPLUG_CPU */
12358 +-#define __cpuinit __section(.cpuinit.text) __cold notrace
12359 ++#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
12360 + #define __cpuinitdata __section(.cpuinit.data)
12361 + #define __cpuinitconst __section(.cpuinit.rodata)
12362 + #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
12363 +@@ -99,7 +105,7 @@
12364 + #define __cpuexitconst __section(.cpuexit.rodata)
12365 +
12366 + /* Used for MEMORY_HOTPLUG */
12367 +-#define __meminit __section(.meminit.text) __cold notrace
12368 ++#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
12369 + #define __meminitdata __section(.meminit.data)
12370 + #define __meminitconst __section(.meminit.rodata)
12371 + #define __memexit __section(.memexit.text) __exitused __cold notrace
12372 +@@ -294,13 +300,13 @@ void __init parse_early_options(char *cmdline);
12373
12374 /* Each module must use one module_init(). */
12375 #define module_init(initfn) \
12376 @@ -71262,7 +71334,7 @@ index bf5b485..e44c2cb 100644
12377 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
12378
12379 diff --git a/mm/mlock.c b/mm/mlock.c
12380 -index ef726e8..13e0901 100644
12381 +index ef726e8..cd7f1ec 100644
12382 --- a/mm/mlock.c
12383 +++ b/mm/mlock.c
12384 @@ -13,6 +13,7 @@
12385 @@ -71273,6 +71345,15 @@ index ef726e8..13e0901 100644
12386 #include <linux/sched.h>
12387 #include <linux/export.h>
12388 #include <linux/rmap.h>
12389 +@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
12390 + {
12391 + unsigned long nstart, end, tmp;
12392 + struct vm_area_struct * vma, * prev;
12393 +- int error;
12394 ++ int error = 0;
12395 +
12396 + VM_BUG_ON(start & ~PAGE_MASK);
12397 + VM_BUG_ON(len != PAGE_ALIGN(len));
12398 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
12399 return -EINVAL;
12400 if (end == start)
12401 @@ -78607,10 +78688,10 @@ index 5c11312..72742b5 100644
12402 write_hex_cnt = 0;
12403 for (i = 0; i < logo_clutsize; i++) {
12404 diff --git a/security/Kconfig b/security/Kconfig
12405 -index ccc61f8..a2bd35c 100644
12406 +index ccc61f8..5e68d73 100644
12407 --- a/security/Kconfig
12408 +++ b/security/Kconfig
12409 -@@ -4,6 +4,874 @@
12410 +@@ -4,6 +4,875 @@
12411
12412 menu "Security options"
12413
12414 @@ -79462,6 +79543,7 @@ index ccc61f8..a2bd35c 100644
12415 +
12416 +config PAX_LATENT_ENTROPY
12417 + bool "Generate some entropy during boot"
12418 ++ default y if GRKERNSEC_CONFIG_AUTO
12419 + help
12420 + By saying Y here the kernel will instrument early boot code to
12421 + extract some entropy from both original and artificially created
12422 @@ -79485,7 +79567,7 @@ index ccc61f8..a2bd35c 100644
12423 config KEYS
12424 bool "Enable access key retention support"
12425 help
12426 -@@ -169,7 +1037,7 @@ config INTEL_TXT
12427 +@@ -169,7 +1038,7 @@ config INTEL_TXT
12428 config LSM_MMAP_MIN_ADDR
12429 int "Low address space for LSM to protect from user allocation"
12430 depends on SECURITY && SECURITY_SELINUX
12431 @@ -80358,7 +80440,7 @@ index 0000000..50f2f2f
12432 +size_overflow_hash.h
12433 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
12434 new file mode 100644
12435 -index 0000000..e9d4079
12436 +index 0000000..1d09b7e
12437 --- /dev/null
12438 +++ b/tools/gcc/Makefile
12439 @@ -0,0 +1,43 @@
12440 @@ -80370,10 +80452,10 @@ index 0000000..e9d4079
12441 +
12442 +ifeq ($(PLUGINCC),$(HOSTCC))
12443 +HOSTLIBS := hostlibs
12444 -+HOST_EXTRACFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
12445 ++HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
12446 +else
12447 +HOSTLIBS := hostcxxlibs
12448 -+HOST_EXTRACXXFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
12449 ++HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
12450 +endif
12451 +
12452 +$(HOSTLIBS)-y := constify_plugin.so
12453 @@ -81778,10 +81860,10 @@ index 0000000..98011fa
12454 +}
12455 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
12456 new file mode 100644
12457 -index 0000000..9788bfe
12458 +index 0000000..b8008f7
12459 --- /dev/null
12460 +++ b/tools/gcc/latent_entropy_plugin.c
12461 -@@ -0,0 +1,291 @@
12462 +@@ -0,0 +1,295 @@
12463 +/*
12464 + * Copyright 2012 by the PaX Team <pageexec@××××××××.hu>
12465 + * Licensed under the GPL v2
12466 @@ -81795,10 +81877,12 @@ index 0000000..9788bfe
12467 + * used during boot in the kernel
12468 + *
12469 + * TODO:
12470 -+ * - quite a few, see the comments :)
12471 ++ * - add ipa pass to identify not explicitly marked candidate functions
12472 ++ * - mix in more program state (function arguments/return values, loop variables, etc)
12473 ++ * - more instrumentation control via attribute parameters
12474 + *
12475 + * BUGS:
12476 -+ * - none known
12477 ++ * - LTO needs -flto-partition=none for now
12478 + */
12479 +#include "gcc-plugin.h"
12480 +#include "config.h"
12481 @@ -81820,17 +81904,13 @@ index 0000000..9788bfe
12482 +#include "rtl.h"
12483 +#include "emit-rtl.h"
12484 +#include "tree-flow.h"
12485 -+#include "cpplib.h"
12486 -+#include "c-pragma.h"
12487 -+
12488 -+#include "linux/kconfig.h"
12489 +
12490 +int plugin_is_GPL_compatible;
12491 +
12492 +static tree latent_entropy_decl;
12493 +
12494 +static struct plugin_info latent_entropy_plugin_info = {
12495 -+ .version = "201207202140",
12496 ++ .version = "201207271820",
12497 + .help = NULL
12498 +};
12499 +
12500 @@ -81855,54 +81935,39 @@ index 0000000..9788bfe
12501 + }
12502 +};
12503 +
12504 -+// for kernel use we just want to instrument some of the boot code
12505 -+// for userland use this would need changes
12506 -+static bool gate_latent_entropy(void)
12507 ++static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
12508 +{
12509 -+ tree section_attr;
12510 -+ const char *section_name;
12511 -+
12512 -+ // don't instrument modules
12513 -+ if (cpp_defined(parse_in, (const unsigned char *)"MODULE", 6))
12514 -+ return false;
12515 -+
12516 -+ // don't instrument normal code
12517 -+ section_attr = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
12518 -+ if (!section_attr || !TREE_VALUE(section_attr))
12519 -+ return false;
12520 -+
12521 -+ section_name = TREE_STRING_POINTER(TREE_VALUE(TREE_VALUE(section_attr)));
12522 -+
12523 -+ // instrument code in boot related sections
12524 -+ if (!strncmp(section_name, ".init.text", 10))
12525 -+ return true;
12526 -+
12527 -+ if (!strncmp(section_name, ".initcall", 9))
12528 -+ return true;
12529 -+
12530 -+ if (!strncmp(section_name, ".con_initcall", 13))
12531 -+ return true;
12532 -+
12533 -+ if (!strncmp(section_name, ".security_initcall", 18))
12534 -+ return true;
12535 ++ if (TREE_CODE(*node) != FUNCTION_DECL) {
12536 ++ *no_add_attrs = true;
12537 ++ error("%qE attribute only applies to functions", name);
12538 ++ }
12539 ++ return NULL_TREE;
12540 ++}
12541 +
12542 -+#ifndef CONFIG_HOTPLUG
12543 -+ if (!strncmp(section_name, ".devinit.text", 13))
12544 -+ return true;
12545 ++static struct attribute_spec latent_entropy_attr = {
12546 ++ .name = "latent_entropy",
12547 ++ .min_length = 0,
12548 ++ .max_length = 0,
12549 ++ .decl_required = true,
12550 ++ .type_required = false,
12551 ++ .function_type_required = false,
12552 ++ .handler = handle_latent_entropy_attribute,
12553 ++#if BUILDING_GCC_VERSION >= 4007
12554 ++ .affects_type_identity = false
12555 +#endif
12556 ++};
12557 +
12558 -+#ifndef CONFIG_HOTPLUG_CPU
12559 -+ if (!strncmp(section_name, ".cpuinit.text", 13))
12560 -+ return true;
12561 -+#endif
12562 ++static void register_attributes(void *event_data, void *data)
12563 ++{
12564 ++ register_attribute(&latent_entropy_attr);
12565 ++}
12566 +
12567 -+#ifndef CONFIG_HOTPLUG_MEMORY
12568 -+ if (!strncmp(section_name, ".meminit.text", 13))
12569 -+ return true;
12570 -+#endif
12571 ++static bool gate_latent_entropy(void)
12572 ++{
12573 ++ tree latent_entropy_attr;
12574 +
12575 -+ // TODO check whether cfun is static and all its callers meet the above criteria
12576 -+ return false;
12577 ++ latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
12578 ++ return latent_entropy_attr != NULL_TREE;
12579 +}
12580 +
12581 +static unsigned HOST_WIDE_INT seed;
12582 @@ -81988,8 +82053,6 @@ index 0000000..9788bfe
12583 + find_referenced_vars_in(assign);
12584 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
12585 + update_stmt(assign);
12586 -+
12587 -+ // TODO we could mix in more local state such as function return values, etc
12588 +}
12589 +
12590 +static unsigned int execute_latent_entropy(void)
12591 @@ -81999,6 +82062,23 @@ index 0000000..9788bfe
12592 + gimple_stmt_iterator gsi;
12593 + tree local_entropy;
12594 +
12595 ++ if (!latent_entropy_decl) {
12596 ++ struct varpool_node *node;
12597 ++
12598 ++ for (node = varpool_nodes; node; node = node->next) {
12599 ++ tree var = node->decl;
12600 ++ if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
12601 ++ continue;
12602 ++ latent_entropy_decl = var;
12603 ++// debug_tree(var);
12604 ++ break;
12605 ++ }
12606 ++ if (!latent_entropy_decl) {
12607 ++// debug_tree(current_function_decl);
12608 ++ return 0;
12609 ++ }
12610 ++ }
12611 ++
12612 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
12613 +
12614 + // 1. create local entropy variable
12615 @@ -82032,24 +82112,29 @@ index 0000000..9788bfe
12616 +
12617 +static void start_unit_callback(void *gcc_data, void *user_data)
12618 +{
12619 ++#if BUILDING_GCC_VERSION >= 4007
12620 ++ seed = get_random_seed(false);
12621 ++#else
12622 ++ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
12623 ++ seed *= seed;
12624 ++#endif
12625 ++
12626 ++ if (in_lto_p)
12627 ++ return;
12628 ++
12629 + // extern u64 latent_entropy
12630 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
12631 +
12632 + TREE_STATIC(latent_entropy_decl) = 1;
12633 + TREE_PUBLIC(latent_entropy_decl) = 1;
12634 ++ TREE_USED(latent_entropy_decl) = 1;
12635 ++ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
12636 + DECL_EXTERNAL(latent_entropy_decl) = 1;
12637 -+ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
12638 ++ DECL_ARTIFICIAL(latent_entropy_decl) = 0;
12639 + DECL_INITIAL(latent_entropy_decl) = NULL;
12640 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
12641 +// varpool_finalize_decl(latent_entropy_decl);
12642 +// varpool_mark_needed_node(latent_entropy_decl);
12643 -+
12644 -+#if BUILDING_GCC_VERSION >= 4007
12645 -+ seed = get_random_seed(false);
12646 -+#else
12647 -+ sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
12648 -+ seed *= seed;
12649 -+#endif
12650 +}
12651 +
12652 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
12653 @@ -82070,6 +82155,7 @@ index 0000000..9788bfe
12654 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
12655 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
12656 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
12657 ++ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
12658 +
12659 + return 0;
12660 +}