
From: "Anthony G. Basile" <blueness@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.2.51/, 3.11.2/, 2.6.32/, 3.11.1/
Date: Sun, 29 Sep 2013 19:12:53
Message-Id: 1380482003.290728f2970dde95a2499c72844cff0e09f97bae.blueness@gentoo
1 commit: 290728f2970dde95a2499c72844cff0e09f97bae
2 Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
3 AuthorDate: Sun Sep 29 19:13:23 2013 +0000
4 Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
5 CommitDate: Sun Sep 29 19:13:23 2013 +0000
6 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=290728f2
7
8 Grsec/PaX: 2.9.1-{2.6.32.61,3.2.51,3.11.2}-201309281102
9
10 ---
11 2.6.32/0000_README | 2 +-
12 ..._grsecurity-2.9.1-2.6.32.61-201309281101.patch} | 88 +-
13 2.6.32/4440_grsec-remove-protected-paths.patch | 2 +-
14 2.6.32/4450_grsec-kconfig-default-gids.patch | 12 +-
15 2.6.32/4465_selinux-avc_audit-log-curr_ip.patch | 2 +-
16 {3.11.1 => 3.11.2}/0000_README | 6 +-
17 3.11.2/1001_linux-3.11.2.patch | 4419 ++++++++++++++++++++
18 ...4420_grsecurity-2.9.1-3.11.2-201309281103.patch | 757 ++--
19 {3.11.1 => 3.11.2}/4425_grsec_remove_EI_PAX.patch | 0
20 .../4427_force_XATTR_PAX_tmpfs.patch | 0
21 .../4430_grsec-remove-localversion-grsec.patch | 0
22 {3.11.1 => 3.11.2}/4435_grsec-mute-warnings.patch | 0
23 .../4440_grsec-remove-protected-paths.patch | 0
24 .../4450_grsec-kconfig-default-gids.patch | 0
25 .../4465_selinux-avc_audit-log-curr_ip.patch | 0
26 {3.11.1 => 3.11.2}/4470_disable-compat_vdso.patch | 0
27 {3.11.1 => 3.11.2}/4475_emutramp_default_on.patch | 0
28 3.2.51/0000_README | 2 +-
29 ...420_grsecurity-2.9.1-3.2.51-201309281102.patch} | 347 +-
30 19 files changed, 5079 insertions(+), 558 deletions(-)
31
32 diff --git a/2.6.32/0000_README b/2.6.32/0000_README
33 index c481225..381f8d3 100644
34 --- a/2.6.32/0000_README
35 +++ b/2.6.32/0000_README
36 @@ -38,7 +38,7 @@ Patch: 1060_linux-2.6.32.61.patch
37 From: http://www.kernel.org
38 Desc: Linux 2.6.32.61
39
40 -Patch: 4420_grsecurity-2.9.1-2.6.32.61-201309052115.patch
41 +Patch: 4420_grsecurity-2.9.1-2.6.32.61-201309281101.patch
42 From: http://www.grsecurity.net
43 Desc: hardened-sources base patch from upstream grsecurity
44
45
46 diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309052115.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309281101.patch
47 similarity index 99%
48 rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309052115.patch
49 rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309281101.patch
50 index 41ba8b2..80f4104 100644
51 --- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309052115.patch
52 +++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201309281101.patch
53 @@ -45625,7 +45625,7 @@ index 3beb26d..6ce9c4a 100644
54 INIT_LIST_HEAD(&rdev->fence_drv.emited);
55 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
56 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
57 -index a1bf11d..4a123c0 100644
58 +index a1bf11de..4a123c0 100644
59 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
60 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
61 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
62 @@ -91904,10 +91904,10 @@ index 0000000..5a3ac97
63 +}
64 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
65 new file mode 100644
66 -index 0000000..b6b5239
67 +index 0000000..462a28e
68 --- /dev/null
69 +++ b/grsecurity/gracl_ip.c
70 -@@ -0,0 +1,388 @@
71 +@@ -0,0 +1,387 @@
72 +#include <linux/kernel.h>
73 +#include <asm/uaccess.h>
74 +#include <asm/errno.h>
75 @@ -92000,6 +92000,8 @@ index 0000000..b6b5239
76 + return gr_sockfamilies[family];
77 +}
78 +
79 ++extern const struct net_proto_family *net_families[NPROTO] __read_mostly;
80 ++
81 +int
82 +gr_search_socket(const int domain, const int type, const int protocol)
83 +{
84 @@ -92079,10 +92081,7 @@ index 0000000..b6b5239
85 + if (domain == PF_INET)
86 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
87 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
88 -+ else
89 -+#ifndef CONFIG_IPV6
90 -+ if (domain != PF_INET6)
91 -+#endif
92 ++ else if (net_families[domain] != NULL)
93 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
94 + gr_socktype_to_name(type), protocol);
95 +
96 @@ -95482,7 +95481,7 @@ index 0000000..7512ea9
97 +}
98 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
99 new file mode 100644
100 -index 0000000..5a6d4bc
101 +index 0000000..5a6d4bc1
102 --- /dev/null
103 +++ b/grsecurity/grsec_sysctl.c
104 @@ -0,0 +1,527 @@
105 @@ -111522,7 +111521,7 @@ index aaca868..2ebecdc 100644
106 err = -EPERM;
107 goto out;
108 diff --git a/mm/mlock.c b/mm/mlock.c
109 -index 2d846cf..ca1e492 100644
110 +index 2d846cf..1183f13 100644
111 --- a/mm/mlock.c
112 +++ b/mm/mlock.c
113 @@ -13,6 +13,7 @@
114 @@ -111625,7 +111624,15 @@ index 2d846cf..ca1e492 100644
115
116 newflags = vma->vm_flags | VM_LOCKED;
117 if (!(flags & MCL_CURRENT))
118 -@@ -570,6 +572,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
119 +@@ -545,6 +547,7 @@ static int do_mlockall(int flags)
120 +
121 + /* Ignore errors */
122 + mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
123 ++ cond_resched();
124 + }
125 + out:
126 + return 0;
127 +@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
128 lock_limit >>= PAGE_SHIFT;
129
130 ret = -ENOMEM;
131 @@ -118962,7 +118969,7 @@ index e04c9f8..51bc18e 100644
132 + (rtt >> sctp_rto_alpha);
133 } else {
134 diff --git a/net/socket.c b/net/socket.c
135 -index bf9fc68..0ea7e39 100644
136 +index bf9fc68..27b436e 100644
137 --- a/net/socket.c
138 +++ b/net/socket.c
139 @@ -87,6 +87,7 @@
140 @@ -118995,6 +119002,15 @@ index bf9fc68..0ea7e39 100644
141 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
142 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
143 unsigned long nr_segs, loff_t pos);
144 +@@ -148,7 +164,7 @@ static const struct file_operations socket_file_ops = {
145 + */
146 +
147 + static DEFINE_SPINLOCK(net_family_lock);
148 +-static const struct net_proto_family *net_families[NPROTO] __read_mostly;
149 ++const struct net_proto_family *net_families[NPROTO] __read_mostly;
150 +
151 + /*
152 + * Statistics counters of the socket lists
153 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_system_type *fs_type,
154 mnt);
155 }
156 @@ -119013,24 +119029,28 @@ index bf9fc68..0ea7e39 100644
157
158 /* Compatibility.
159
160 -@@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
161 - if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
162 - flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
163 +@@ -1174,6 +1192,20 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
164 + if (err)
165 + return err;
166
167 -+ if(!gr_search_socket(family, type, protocol)) {
168 -+ retval = -EACCES;
169 -+ goto out;
170 ++ if(!kern && !gr_search_socket(family, type, protocol)) {
171 ++ if (net_families[family] == NULL)
172 ++ return -EAFNOSUPPORT;
173 ++ else
174 ++ return -EACCES;
175 + }
176 +
177 -+ if (gr_handle_sock_all(family, type, protocol)) {
178 -+ retval = -EACCES;
179 -+ goto out;
180 ++ if (!kern && gr_handle_sock_all(family, type, protocol)) {
181 ++ if (net_families[family] == NULL)
182 ++ return -EAFNOSUPPORT;
183 ++ else
184 ++ return -EACCES;
185 + }
186 +
187 - retval = sock_create(family, type, protocol, &sock);
188 - if (retval < 0)
189 - goto out;
190 -@@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
191 + /*
192 + * Allocate the socket and allow the family to set things up. if
193 + * the protocol is 0, the family is instructed to select an appropriate
194 +@@ -1415,6 +1447,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
195 if (sock) {
196 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
197 if (err >= 0) {
198 @@ -119045,7 +119065,7 @@ index bf9fc68..0ea7e39 100644
199 err = security_socket_bind(sock,
200 (struct sockaddr *)&address,
201 addrlen);
202 -@@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
203 +@@ -1423,6 +1463,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
204 (struct sockaddr *)
205 &address, addrlen);
206 }
207 @@ -119053,7 +119073,7 @@ index bf9fc68..0ea7e39 100644
208 fput_light(sock->file, fput_needed);
209 }
210 return err;
211 -@@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
212 +@@ -1446,10 +1487,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
213 if ((unsigned)backlog > somaxconn)
214 backlog = somaxconn;
215
216 @@ -119074,7 +119094,7 @@ index bf9fc68..0ea7e39 100644
217 fput_light(sock->file, fput_needed);
218 }
219 return err;
220 -@@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
221 +@@ -1492,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
222 newsock->type = sock->type;
223 newsock->ops = sock->ops;
224
225 @@ -119093,7 +119113,7 @@ index bf9fc68..0ea7e39 100644
226 /*
227 * We don't need try_module_get here, as the listening socket (sock)
228 * has the protocol module (sock->ops->owner) held.
229 -@@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
230 +@@ -1534,6 +1597,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
231 fd_install(newfd, newfile);
232 err = newfd;
233
234 @@ -119102,7 +119122,7 @@ index bf9fc68..0ea7e39 100644
235 out_put:
236 fput_light(sock->file, fput_needed);
237 out:
238 -@@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
239 +@@ -1571,6 +1636,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
240 int, addrlen)
241 {
242 struct socket *sock;
243 @@ -119110,7 +119130,7 @@ index bf9fc68..0ea7e39 100644
244 struct sockaddr_storage address;
245 int err, fput_needed;
246
247 -@@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
248 +@@ -1581,6 +1647,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
249 if (err < 0)
250 goto out_put;
251
252 @@ -119128,7 +119148,7 @@ index bf9fc68..0ea7e39 100644
253 err =
254 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
255 if (err)
256 -@@ -1728,7 +1801,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
257 +@@ -1728,7 +1805,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
258 struct socket *sock;
259 struct iovec iov;
260 struct msghdr msg;
261 @@ -119137,7 +119157,7 @@ index bf9fc68..0ea7e39 100644
262 int err, err2;
263 int fput_needed;
264
265 -@@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
266 +@@ -1882,6 +1959,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
267 int err, ctl_len, iov_size, total_len;
268 int fput_needed;
269
270 @@ -119146,7 +119166,7 @@ index bf9fc68..0ea7e39 100644
271 err = -EFAULT;
272 if (MSG_CMSG_COMPAT & flags) {
273 if (get_compat_msghdr(&msg_sys, msg_compat))
274 -@@ -1987,7 +2062,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
275 +@@ -1987,7 +2066,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
276 int fput_needed;
277
278 /* kernel mode address */
279 @@ -119155,7 +119175,7 @@ index bf9fc68..0ea7e39 100644
280
281 /* user mode address pointers */
282 struct sockaddr __user *uaddr;
283 -@@ -2022,7 +2097,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
284 +@@ -2022,7 +2101,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
285 * kernel msghdr to use the kernel address space)
286 */
287
288
289 diff --git a/2.6.32/4440_grsec-remove-protected-paths.patch b/2.6.32/4440_grsec-remove-protected-paths.patch
290 index 339cc6e..38d465e 100644
291 --- a/2.6.32/4440_grsec-remove-protected-paths.patch
292 +++ b/2.6.32/4440_grsec-remove-protected-paths.patch
293 @@ -6,7 +6,7 @@ the filesystem.
294 diff -Naur a/grsecurity/Makefile b/grsecurity/Makefile
295 --- a/grsecurity/Makefile 2011-10-19 19:48:21.000000000 -0400
296 +++ b/grsecurity/Makefile 2011-10-19 19:50:44.000000000 -0400
297 -@@ -29,10 +29,4 @@
298 +@@ -34,10 +34,4 @@
299 ifdef CONFIG_GRKERNSEC_HIDESYM
300 extra-y := grsec_hidesym.o
301 $(obj)/grsec_hidesym.o:
302
303 diff --git a/2.6.32/4450_grsec-kconfig-default-gids.patch b/2.6.32/4450_grsec-kconfig-default-gids.patch
304 index 87aa8e4..3dfdc8f 100644
305 --- a/2.6.32/4450_grsec-kconfig-default-gids.patch
306 +++ b/2.6.32/4450_grsec-kconfig-default-gids.patch
307 @@ -16,7 +16,7 @@ from shooting themselves in the foot.
308 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
309 --- a/grsecurity/Kconfig 2012-10-13 09:51:35.000000000 -0400
310 +++ b/grsecurity/Kconfig 2012-10-13 09:52:32.000000000 -0400
311 -@@ -570,7 +570,7 @@
312 +@@ -572,7 +572,7 @@
313 config GRKERNSEC_AUDIT_GID
314 int "GID for auditing"
315 depends on GRKERNSEC_AUDIT_GROUP
316 @@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
317
318 config GRKERNSEC_EXECLOG
319 bool "Exec logging"
320 -@@ -790,7 +790,7 @@
321 +@@ -792,7 +792,7 @@
322 config GRKERNSEC_TPE_UNTRUSTED_GID
323 int "GID for TPE-untrusted users"
324 depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
325 @@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
326 help
327 Setting this GID determines what group TPE restrictions will be
328 *enabled* for. If the sysctl option is enabled, a sysctl option
329 -@@ -799,7 +799,7 @@
330 +@@ -801,7 +801,7 @@
331 config GRKERNSEC_TPE_TRUSTED_GID
332 int "GID for TPE-trusted users"
333 depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
334 @@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
335 help
336 Setting this GID determines what group TPE restrictions will be
337 *disabled* for. If the sysctl option is enabled, a sysctl option
338 -@@ -892,7 +892,7 @@
339 +@@ -894,7 +894,7 @@
340 config GRKERNSEC_SOCKET_ALL_GID
341 int "GID to deny all sockets for"
342 depends on GRKERNSEC_SOCKET_ALL
343 @@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
344 help
345 Here you can choose the GID to disable socket access for. Remember to
346 add the users you want socket access disabled for to the GID
347 -@@ -913,7 +913,7 @@
348 +@@ -915,7 +915,7 @@
349 config GRKERNSEC_SOCKET_CLIENT_GID
350 int "GID to deny client sockets for"
351 depends on GRKERNSEC_SOCKET_CLIENT
352 @@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
353 help
354 Here you can choose the GID to disable client socket access for.
355 Remember to add the users you want client socket access disabled for to
356 -@@ -931,7 +931,7 @@
357 +@@ -933,7 +933,7 @@
358 config GRKERNSEC_SOCKET_SERVER_GID
359 int "GID to deny server sockets for"
360 depends on GRKERNSEC_SOCKET_SERVER
361
362 diff --git a/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch b/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
363 index 19027c3..418ae16 100644
364 --- a/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
365 +++ b/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
366 @@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@×××.org>
367 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
368 --- a/grsecurity/Kconfig 2011-04-17 18:47:02.000000000 -0400
369 +++ b/grsecurity/Kconfig 2011-04-17 18:51:15.000000000 -0400
370 -@@ -990,6 +990,27 @@
371 +@@ -1027,6 +1027,27 @@
372 menu "Logging Options"
373 depends on GRKERNSEC
374
375
376 diff --git a/3.11.1/0000_README b/3.11.2/0000_README
377 similarity index 92%
378 rename from 3.11.1/0000_README
379 rename to 3.11.2/0000_README
380 index da0f1cd..b666b59 100644
381 --- a/3.11.1/0000_README
382 +++ b/3.11.2/0000_README
383 @@ -2,7 +2,11 @@ README
384 -----------------------------------------------------------------------------
385 Individual Patch Descriptions:
386 -----------------------------------------------------------------------------
387 -Patch: 4420_grsecurity-2.9.1-3.11.1-201309221838.patch
388 +Patch: 1001_linux-3.11.2.patch
389 +From: http://www.kernel.org
390 +Desc: Linux 3.11.2
391 +
392 +Patch: 4420_grsecurity-2.9.1-3.11.2-201309281103.patch
393 From: http://www.grsecurity.net
394 Desc: hardened-sources base patch from upstream grsecurity
395
396
397 diff --git a/3.11.2/1001_linux-3.11.2.patch b/3.11.2/1001_linux-3.11.2.patch
398 new file mode 100644
399 index 0000000..5d8bdf1
400 --- /dev/null
401 +++ b/3.11.2/1001_linux-3.11.2.patch
402 @@ -0,0 +1,4419 @@
403 +diff --git a/Makefile b/Makefile
404 +index efd2396..aede319 100644
405 +--- a/Makefile
406 ++++ b/Makefile
407 +@@ -1,6 +1,6 @@
408 + VERSION = 3
409 + PATCHLEVEL = 11
410 +-SUBLEVEL = 1
411 ++SUBLEVEL = 2
412 + EXTRAVERSION =
413 + NAME = Linux for Workgroups
414 +
415 +diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
416 +index 6fc1159..764f1e3 100644
417 +--- a/arch/arc/include/asm/sections.h
418 ++++ b/arch/arc/include/asm/sections.h
419 +@@ -11,7 +11,6 @@
420 +
421 + #include <asm-generic/sections.h>
422 +
423 +-extern char _int_vec_base_lds[];
424 + extern char __arc_dccm_base[];
425 + extern char __dtb_start[];
426 +
427 +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
428 +index 2a913f8..0f944f0 100644
429 +--- a/arch/arc/kernel/head.S
430 ++++ b/arch/arc/kernel/head.S
431 +@@ -34,6 +34,9 @@ stext:
432 + ; IDENTITY Reg [ 3 2 1 0 ]
433 + ; (cpu-id) ^^^ => Zero for UP ARC700
434 + ; => #Core-ID if SMP (Master 0)
435 ++ ; Note that non-boot CPUs might not land here if halt-on-reset and
436 ++ ; instead breath life from @first_lines_of_secondary, but we still
437 ++ ; need to make sure only boot cpu takes this path.
438 + GET_CPU_ID r5
439 + cmp r5, 0
440 + jnz arc_platform_smp_wait_to_boot
441 +@@ -98,6 +101,8 @@ stext:
442 +
443 + first_lines_of_secondary:
444 +
445 ++ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
446 ++
447 + ; setup per-cpu idle task as "current" on this CPU
448 + ld r0, [@secondary_idle_tsk]
449 + SET_CURR_TASK_ON_CPU r0, r1
450 +diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
451 +index 305b3f8..5fc9245 100644
452 +--- a/arch/arc/kernel/irq.c
453 ++++ b/arch/arc/kernel/irq.c
454 +@@ -24,7 +24,6 @@
455 + * -Needed for each CPU (hence not foldable into init_IRQ)
456 + *
457 + * what it does ?
458 +- * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
459 + * -Disable all IRQs (on CPU side)
460 + * -Optionally, setup the High priority Interrupts as Level 2 IRQs
461 + */
462 +diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
463 +index 6b08345..e818563 100644
464 +--- a/arch/arc/kernel/setup.c
465 ++++ b/arch/arc/kernel/setup.c
466 +@@ -47,10 +47,7 @@ void read_arc_build_cfg_regs(void)
467 + READ_BCR(AUX_IDENTITY, cpu->core);
468 +
469 + cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
470 +-
471 + cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
472 +- if (cpu->vec_base == 0)
473 +- cpu->vec_base = (unsigned int)_int_vec_base_lds;
474 +
475 + READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
476 + cpu->uncached_base = uncached_space.start << 24;
477 +diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
478 +index ec08740..6f938cc 100644
479 +--- a/arch/arm/mach-versatile/include/mach/platform.h
480 ++++ b/arch/arm/mach-versatile/include/mach/platform.h
481 +@@ -231,12 +231,14 @@
482 + /* PCI space */
483 + #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
484 + #define VERSATILE_PCI_CFG_BASE 0x42000000
485 ++#define VERSATILE_PCI_IO_BASE 0x43000000
486 + #define VERSATILE_PCI_MEM_BASE0 0x44000000
487 + #define VERSATILE_PCI_MEM_BASE1 0x50000000
488 + #define VERSATILE_PCI_MEM_BASE2 0x60000000
489 + /* Sizes of above maps */
490 + #define VERSATILE_PCI_BASE_SIZE 0x01000000
491 + #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
492 ++#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
493 + #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
494 + #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
495 + #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
496 +diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
497 +index e92e5e0..c97be4e 100644
498 +--- a/arch/arm/mach-versatile/pci.c
499 ++++ b/arch/arm/mach-versatile/pci.c
500 +@@ -43,9 +43,9 @@
501 + #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
502 + #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
503 + #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
504 +-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
505 +-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
506 +-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
507 ++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
508 ++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
509 ++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
510 + #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
511 +
512 + #define DEVICE_ID_OFFSET 0x00
513 +@@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = {
514 + .write = versatile_write_config,
515 + };
516 +
517 +-static struct resource io_mem = {
518 +- .name = "PCI I/O space",
519 ++static struct resource unused_mem = {
520 ++ .name = "PCI unused",
521 + .start = VERSATILE_PCI_MEM_BASE0,
522 + .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
523 + .flags = IORESOURCE_MEM,
524 +@@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
525 + {
526 + int ret = 0;
527 +
528 +- ret = request_resource(&iomem_resource, &io_mem);
529 ++ ret = request_resource(&iomem_resource, &unused_mem);
530 + if (ret) {
531 +- printk(KERN_ERR "PCI: unable to allocate I/O "
532 ++ printk(KERN_ERR "PCI: unable to allocate unused "
533 + "memory region (%d)\n", ret);
534 + goto out;
535 + }
536 +@@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
537 + if (ret) {
538 + printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
539 + "memory region (%d)\n", ret);
540 +- goto release_io_mem;
541 ++ goto release_unused_mem;
542 + }
543 + ret = request_resource(&iomem_resource, &pre_mem);
544 + if (ret) {
545 +@@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
546 +
547 + release_non_mem:
548 + release_resource(&non_mem);
549 +- release_io_mem:
550 +- release_resource(&io_mem);
551 ++ release_unused_mem:
552 ++ release_resource(&unused_mem);
553 + out:
554 + return ret;
555 + }
556 +@@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
557 + goto out;
558 + }
559 +
560 +- ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
561 ++ ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
562 + if (ret)
563 + goto out;
564 +
565 +@@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
566 + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
567 +
568 + /*
569 ++ * For many years the kernel and QEMU were symbiotically buggy
570 ++ * in that they both assumed the same broken IRQ mapping.
571 ++ * QEMU therefore attempts to auto-detect old broken kernels
572 ++ * so that they still work on newer QEMU as they did on old
573 ++ * QEMU. Since we now use the correct (ie matching-hardware)
574 ++ * IRQ mapping we write a definitely different value to a
575 ++ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
576 ++ * real hardware behaviour and it need not be backwards
577 ++ * compatible for us. This write is harmless on real hardware.
578 ++ */
579 ++ __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
580 ++
581 ++ /*
582 + * Do not to map Versatile FPGA PCI device into memory space
583 + */
584 + pci_slot_ignore |= (1 << myslot);
585 +@@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
586 + {
587 + int irq;
588 +
589 +- /* slot, pin, irq
590 +- * 24 1 IRQ_SIC_PCI0
591 +- * 25 1 IRQ_SIC_PCI1
592 +- * 26 1 IRQ_SIC_PCI2
593 +- * 27 1 IRQ_SIC_PCI3
594 ++ /*
595 ++ * Slot INTA INTB INTC INTD
596 ++ * 31 PCI1 PCI2 PCI3 PCI0
597 ++ * 30 PCI0 PCI1 PCI2 PCI3
598 ++ * 29 PCI3 PCI0 PCI1 PCI2
599 + */
600 +- irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
601 ++ irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
602 +
603 + return irq;
604 + }
605 +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
606 +index 8a6295c..7071fca 100644
607 +--- a/arch/arm/xen/enlighten.c
608 ++++ b/arch/arm/xen/enlighten.c
609 +@@ -273,12 +273,15 @@ core_initcall(xen_guest_init);
610 +
611 + static int __init xen_pm_init(void)
612 + {
613 ++ if (!xen_domain())
614 ++ return -ENODEV;
615 ++
616 + pm_power_off = xen_power_off;
617 + arm_pm_restart = xen_restart;
618 +
619 + return 0;
620 + }
621 +-subsys_initcall(xen_pm_init);
622 ++late_initcall(xen_pm_init);
623 +
624 + static irqreturn_t xen_arm_callback(int irq, void *arg)
625 + {
626 +diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
627 +index 12e6ccb..cea1594 100644
628 +--- a/arch/arm64/kernel/perf_event.c
629 ++++ b/arch/arm64/kernel/perf_event.c
630 +@@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events,
631 + if (is_software_event(event))
632 + return 1;
633 +
634 +- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
635 ++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
636 ++ return 1;
637 ++
638 ++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
639 + return 1;
640 +
641 + return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
642 +@@ -781,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
643 + /*
644 + * PMXEVTYPER: Event selection reg
645 + */
646 +-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
647 ++#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
648 + #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
649 +
650 + /*
651 +diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
652 +index 765ef30..733017b 100644
653 +--- a/arch/mips/ath79/clock.c
654 ++++ b/arch/mips/ath79/clock.c
655 +@@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void)
656 + ath79_ahb_clk.rate = freq / t;
657 + }
658 +
659 +- ath79_wdt_clk.rate = ath79_ref_clk.rate;
660 ++ ath79_wdt_clk.rate = ath79_ahb_clk.rate;
661 + ath79_uart_clk.rate = ath79_ref_clk.rate;
662 + }
663 +
664 +diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
665 +index ee5b690..52e5758 100644
666 +--- a/arch/powerpc/kernel/align.c
667 ++++ b/arch/powerpc/kernel/align.c
668 +@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
669 + nb = aligninfo[instr].len;
670 + flags = aligninfo[instr].flags;
671 +
672 ++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
673 ++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
674 ++ nb = 8;
675 ++ flags = LD+SW;
676 ++ } else if (IS_XFORM(instruction) &&
677 ++ ((instruction >> 1) & 0x3ff) == 660) {
678 ++ nb = 8;
679 ++ flags = ST+SW;
680 ++ }
681 ++
682 + /* Byteswap little endian loads and stores */
683 + swiz = 0;
684 + if (regs->msr & MSR_LE) {
685 +diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
686 +index 94c1dd4..a3a5cb8 100644
687 +--- a/arch/powerpc/kvm/book3s_xics.c
688 ++++ b/arch/powerpc/kvm/book3s_xics.c
689 +@@ -19,6 +19,7 @@
690 + #include <asm/hvcall.h>
691 + #include <asm/xics.h>
692 + #include <asm/debug.h>
693 ++#include <asm/time.h>
694 +
695 + #include <linux/debugfs.h>
696 + #include <linux/seq_file.h>
697 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
698 +index c11c823..54b998f 100644
699 +--- a/arch/powerpc/platforms/pseries/setup.c
700 ++++ b/arch/powerpc/platforms/pseries/setup.c
701 +@@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
702 + }
703 + early_initcall(alloc_dispatch_log_kmem_cache);
704 +
705 +-static void pSeries_idle(void)
706 ++static void pseries_lpar_idle(void)
707 + {
708 + /* This would call on the cpuidle framework, and the back-end pseries
709 + * driver to go to idle states
710 +@@ -362,10 +362,22 @@ static void pSeries_idle(void)
711 + if (cpuidle_idle_call()) {
712 + /* On error, execute default handler
713 + * to go into low thread priority and possibly
714 +- * low power mode.
715 ++ * low power mode by cedeing processor to hypervisor
716 + */
717 +- HMT_low();
718 +- HMT_very_low();
719 ++
720 ++ /* Indicate to hypervisor that we are idle. */
721 ++ get_lppaca()->idle = 1;
722 ++
723 ++ /*
724 ++ * Yield the processor to the hypervisor. We return if
725 ++ * an external interrupt occurs (which are driven prior
726 ++ * to returning here) or if a prod occurs from another
727 ++ * processor. When returning here, external interrupts
728 ++ * are enabled.
729 ++ */
730 ++ cede_processor();
731 ++
732 ++ get_lppaca()->idle = 0;
733 + }
734 + }
735 +
736 +@@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
737 +
738 + pSeries_nvram_init();
739 +
740 +- if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
741 ++ if (firmware_has_feature(FW_FEATURE_LPAR)) {
742 + vpa_init(boot_cpuid);
743 +- ppc_md.power_save = pSeries_idle;
744 +- }
745 +-
746 +- if (firmware_has_feature(FW_FEATURE_LPAR))
747 ++ ppc_md.power_save = pseries_lpar_idle;
748 + ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
749 +- else
750 ++ } else {
751 ++ /* No special idle routine */
752 + ppc_md.enable_pmcs = power4_enable_pmcs;
753 ++ }
754 +
755 + ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
756 +
757 +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
758 +index d5f10a4..7092392 100644
759 +--- a/arch/s390/net/bpf_jit_comp.c
760 ++++ b/arch/s390/net/bpf_jit_comp.c
761 +@@ -805,7 +805,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
762 + return NULL;
763 + memset(header, 0, sz);
764 + header->pages = sz / PAGE_SIZE;
765 +- hole = sz - bpfsize + sizeof(*header);
766 ++ hole = sz - (bpfsize + sizeof(*header));
767 + /* Insert random number of illegal instructions before BPF code
768 + * and make sure the first instruction starts at an even address.
769 + */
770 +diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
771 +index 95feaa4..c70a234 100644
772 +--- a/arch/um/include/shared/os.h
773 ++++ b/arch/um/include/shared/os.h
774 +@@ -200,6 +200,7 @@ extern int os_unmap_memory(void *addr, int len);
775 + extern int os_drop_memory(void *addr, int length);
776 + extern int can_drop_memory(void);
777 + extern void os_flush_stdout(void);
778 ++extern int os_mincore(void *addr, unsigned long len);
779 +
780 + /* execvp.c */
781 + extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
782 +diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
783 +index babe218..d8b78a0 100644
784 +--- a/arch/um/kernel/Makefile
785 ++++ b/arch/um/kernel/Makefile
786 +@@ -13,7 +13,7 @@ clean-files :=
787 + obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
788 + physmem.o process.o ptrace.o reboot.o sigio.o \
789 + signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
790 +- um_arch.o umid.o skas/
791 ++ um_arch.o umid.o maccess.o skas/
792 +
793 + obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
794 + obj-$(CONFIG_GPROF) += gprof_syms.o
795 +diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
796 +new file mode 100644
797 +index 0000000..1f3d5c4
798 +--- /dev/null
799 ++++ b/arch/um/kernel/maccess.c
800 +@@ -0,0 +1,24 @@
801 ++/*
802 ++ * Copyright (C) 2013 Richard Weinberger <richrd@×××.at>
803 ++ *
804 ++ * This program is free software; you can redistribute it and/or modify
805 ++ * it under the terms of the GNU General Public License version 2 as
806 ++ * published by the Free Software Foundation.
807 ++ */
808 ++
809 ++#include <linux/uaccess.h>
810 ++#include <linux/kernel.h>
811 ++#include <os.h>
812 ++
813 ++long probe_kernel_read(void *dst, const void *src, size_t size)
814 ++{
815 ++ void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
816 ++
817 ++ if ((unsigned long)src < PAGE_SIZE || size <= 0)
818 ++ return -EFAULT;
819 ++
820 ++ if (os_mincore(psrc, size + src - psrc) <= 0)
821 ++ return -EFAULT;
822 ++
823 ++ return __probe_kernel_read(dst, src, size);
824 ++}
825 +diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
826 +index b8f34c9..67b9c8f 100644
827 +--- a/arch/um/os-Linux/process.c
828 ++++ b/arch/um/os-Linux/process.c
829 +@@ -4,6 +4,7 @@
830 + */
831 +
832 + #include <stdio.h>
833 ++#include <stdlib.h>
834 + #include <unistd.h>
835 + #include <errno.h>
836 + #include <signal.h>
837 +@@ -232,6 +233,57 @@ out:
838 + return ok;
839 + }
840 +
841 ++static int os_page_mincore(void *addr)
842 ++{
843 ++ char vec[2];
844 ++ int ret;
845 ++
846 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
847 ++ if (ret < 0) {
848 ++ if (errno == ENOMEM || errno == EINVAL)
849 ++ return 0;
850 ++ else
851 ++ return -errno;
852 ++ }
853 ++
854 ++ return vec[0] & 1;
855 ++}
856 ++
857 ++int os_mincore(void *addr, unsigned long len)
858 ++{
859 ++ char *vec;
860 ++ int ret, i;
861 ++
862 ++ if (len <= UM_KERN_PAGE_SIZE)
863 ++ return os_page_mincore(addr);
864 ++
865 ++ vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
866 ++ if (!vec)
867 ++ return -ENOMEM;
868 ++
869 ++ ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
870 ++ if (ret < 0) {
871 ++ if (errno == ENOMEM || errno == EINVAL)
872 ++ ret = 0;
873 ++ else
874 ++ ret = -errno;
875 ++
876 ++ goto out;
877 ++ }
878 ++
879 ++ for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
880 ++ if (!(vec[i] & 1)) {
881 ++ ret = 0;
882 ++ goto out;
883 ++ }
884 ++ }
885 ++
886 ++ ret = 1;
887 ++out:
888 ++ free(vec);
889 ++ return ret;
890 ++}
891 ++
892 + void init_new_thread_signals(void)
893 + {
894 + set_handler(SIGSEGV);
895 +diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
896 +index bccfca6..665a730 100644
897 +--- a/arch/x86/ia32/ia32_signal.c
898 ++++ b/arch/x86/ia32/ia32_signal.c
899 +@@ -457,7 +457,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
900 + else
901 + put_user_ex(0, &frame->uc.uc_flags);
902 + put_user_ex(0, &frame->uc.uc_link);
903 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
904 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
905 +
906 + if (ksig->ka.sa.sa_flags & SA_RESTORER)
907 + restorer = ksig->ka.sa.sa_restorer;
908 +diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
909 +index 46fc474..f50de69 100644
910 +--- a/arch/x86/include/asm/checksum_32.h
911 ++++ b/arch/x86/include/asm/checksum_32.h
912 +@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
913 + int len, __wsum sum,
914 + int *err_ptr)
915 + {
916 ++ __wsum ret;
917 ++
918 + might_sleep();
919 +- return csum_partial_copy_generic((__force void *)src, dst,
920 +- len, sum, err_ptr, NULL);
921 ++ stac();
922 ++ ret = csum_partial_copy_generic((__force void *)src, dst,
923 ++ len, sum, err_ptr, NULL);
924 ++ clac();
925 ++
926 ++ return ret;
927 + }
928 +
929 + /*
930 +@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
931 + int len, __wsum sum,
932 + int *err_ptr)
933 + {
934 ++ __wsum ret;
935 ++
936 + might_sleep();
937 +- if (access_ok(VERIFY_WRITE, dst, len))
938 +- return csum_partial_copy_generic(src, (__force void *)dst,
939 +- len, sum, NULL, err_ptr);
940 ++ if (access_ok(VERIFY_WRITE, dst, len)) {
941 ++ stac();
942 ++ ret = csum_partial_copy_generic(src, (__force void *)dst,
943 ++ len, sum, NULL, err_ptr);
944 ++ clac();
945 ++ return ret;
946 ++ }
947 +
948 + if (len)
949 + *err_ptr = -EFAULT;
950 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
951 +index 29e3093..aa97342 100644
952 +--- a/arch/x86/include/asm/mce.h
953 ++++ b/arch/x86/include/asm/mce.h
954 +@@ -32,11 +32,20 @@
955 + #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
956 + #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
957 + #define MCI_STATUS_AR (1ULL<<55) /* Action required */
958 +-#define MCACOD 0xffff /* MCA Error Code */
959 ++
960 ++/*
961 ++ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
962 ++ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
963 ++ * errors to indicate that errors are being filtered by hardware.
964 ++ * We should mask out bit 12 when looking for specific signatures
965 ++ * of uncorrected errors - so the F bit is deliberately skipped
966 ++ * in this #define.
967 ++ */
968 ++#define MCACOD 0xefff /* MCA Error Code */
969 +
970 + /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
971 + #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
972 +-#define MCACOD_SCRUBMSK 0xfff0
973 ++#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
974 + #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
975 + #define MCACOD_DATA 0x0134 /* Data Load */
976 + #define MCACOD_INSTR 0x0150 /* Instruction Fetch */
977 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
978 +index cdbf367..be12c53 100644
979 +--- a/arch/x86/include/asm/mmu_context.h
980 ++++ b/arch/x86/include/asm/mmu_context.h
981 +@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
982 + /* Re-load page tables */
983 + load_cr3(next->pgd);
984 +
985 +- /* stop flush ipis for the previous mm */
986 ++ /* Stop flush ipis for the previous mm */
987 + cpumask_clear_cpu(cpu, mm_cpumask(prev));
988 +
989 +- /*
990 +- * load the LDT, if the LDT is different:
991 +- */
992 ++ /* Load the LDT, if the LDT is different: */
993 + if (unlikely(prev->context.ldt != next->context.ldt))
994 + load_LDT_nolock(&next->context);
995 + }
996 + #ifdef CONFIG_SMP
997 +- else {
998 ++ else {
999 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
1000 + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
1001 +
1002 +- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
1003 +- /* We were in lazy tlb mode and leave_mm disabled
1004 ++ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
1005 ++ /*
1006 ++ * On established mms, the mm_cpumask is only changed
1007 ++ * from irq context, from ptep_clear_flush() while in
1008 ++ * lazy tlb mode, and here. Irqs are blocked during
1009 ++ * schedule, protecting us from simultaneous changes.
1010 ++ */
1011 ++ cpumask_set_cpu(cpu, mm_cpumask(next));
1012 ++ /*
1013 ++ * We were in lazy tlb mode and leave_mm disabled
1014 + * tlb flush IPI delivery. We must reload CR3
1015 + * to make sure to use no freed page tables.
1016 + */
1017 +diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
1018 +index 3048ded..59554dc 100644
1019 +--- a/arch/x86/kernel/amd_nb.c
1020 ++++ b/arch/x86/kernel/amd_nb.c
1021 +@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
1022 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
1023 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
1024 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
1025 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
1026 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
1027 + {}
1028 + };
1029 +@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
1030 +
1031 + static const struct pci_device_id amd_nb_link_ids[] = {
1032 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
1033 ++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
1034 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
1035 + {}
1036 + };
1037 +@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
1038 + next_northbridge(misc, amd_nb_misc_ids);
1039 + node_to_amd_nb(i)->link = link =
1040 + next_northbridge(link, amd_nb_link_ids);
1041 +- }
1042 ++ }
1043 +
1044 ++ /* GART present only on Fam15h upto model 0fh */
1045 + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
1046 +- boot_cpu_data.x86 == 0x15)
1047 ++ (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
1048 + amd_northbridges.flags |= AMD_NB_GART;
1049 +
1050 + /*
1051 ++ * Check for L3 cache presence.
1052 ++ */
1053 ++ if (!cpuid_edx(0x80000006))
1054 ++ return 0;
1055 ++
1056 ++ /*
1057 + * Some CPU families support L3 Cache Index Disable. There are some
1058 + * limitations because of E382 and E388 on family 0x10.
1059 + */
1060 +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
1061 +index cf91358..d859eea 100644
1062 +--- a/arch/x86/kernel/signal.c
1063 ++++ b/arch/x86/kernel/signal.c
1064 +@@ -358,7 +358,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1065 + else
1066 + put_user_ex(0, &frame->uc.uc_flags);
1067 + put_user_ex(0, &frame->uc.uc_link);
1068 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1069 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1070 +
1071 + /* Set up to return from userspace. */
1072 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
1073 +@@ -423,7 +423,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1074 + else
1075 + put_user_ex(0, &frame->uc.uc_flags);
1076 + put_user_ex(0, &frame->uc.uc_link);
1077 +- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1078 ++ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1079 +
1080 + /* Set up to return from userspace. If provided, use a stub
1081 + already in userspace. */
1082 +@@ -490,7 +490,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1083 + else
1084 + put_user_ex(0, &frame->uc.uc_flags);
1085 + put_user_ex(0, &frame->uc.uc_link);
1086 +- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
1087 ++ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1088 + put_user_ex(0, &frame->uc.uc__pad0);
1089 +
1090 + if (ksig->ka.sa.sa_flags & SA_RESTORER) {
1091 +diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
1092 +index 25b7ae8..7609e0e 100644
1093 +--- a/arch/x86/lib/csum-wrappers_64.c
1094 ++++ b/arch/x86/lib/csum-wrappers_64.c
1095 +@@ -6,6 +6,7 @@
1096 + */
1097 + #include <asm/checksum.h>
1098 + #include <linux/module.h>
1099 ++#include <asm/smap.h>
1100 +
1101 + /**
1102 + * csum_partial_copy_from_user - Copy and checksum from user space.
1103 +@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
1104 + len -= 2;
1105 + }
1106 + }
1107 ++ stac();
1108 + isum = csum_partial_copy_generic((__force const void *)src,
1109 + dst, len, isum, errp, NULL);
1110 ++ clac();
1111 + if (unlikely(*errp))
1112 + goto out_err;
1113 +
1114 +@@ -82,6 +85,8 @@ __wsum
1115 + csum_partial_copy_to_user(const void *src, void __user *dst,
1116 + int len, __wsum isum, int *errp)
1117 + {
1118 ++ __wsum ret;
1119 ++
1120 + might_sleep();
1121 +
1122 + if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
1123 +@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
1124 + }
1125 +
1126 + *errp = 0;
1127 +- return csum_partial_copy_generic(src, (void __force *)dst,
1128 +- len, isum, NULL, errp);
1129 ++ stac();
1130 ++ ret = csum_partial_copy_generic(src, (void __force *)dst,
1131 ++ len, isum, NULL, errp);
1132 ++ clac();
1133 ++ return ret;
1134 + }
1135 + EXPORT_SYMBOL(csum_partial_copy_to_user);
1136 +
1137 +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
1138 +index d8507f8..74a60c7 100644
1139 +--- a/arch/xtensa/kernel/xtensa_ksyms.c
1140 ++++ b/arch/xtensa/kernel/xtensa_ksyms.c
1141 +@@ -25,6 +25,7 @@
1142 + #include <asm/io.h>
1143 + #include <asm/page.h>
1144 + #include <asm/pgalloc.h>
1145 ++#include <asm/ftrace.h>
1146 + #ifdef CONFIG_BLK_DEV_FD
1147 + #include <asm/floppy.h>
1148 + #endif
1149 +diff --git a/crypto/api.c b/crypto/api.c
1150 +index 3b61803..37c4c72 100644
1151 +--- a/crypto/api.c
1152 ++++ b/crypto/api.c
1153 +@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
1154 + BLOCKING_NOTIFIER_HEAD(crypto_chain);
1155 + EXPORT_SYMBOL_GPL(crypto_chain);
1156 +
1157 ++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
1158 ++
1159 + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
1160 + {
1161 + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
1162 +@@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
1163 + }
1164 + up_write(&crypto_alg_sem);
1165 +
1166 +- if (alg != &larval->alg)
1167 ++ if (alg != &larval->alg) {
1168 + kfree(larval);
1169 ++ if (crypto_is_larval(alg))
1170 ++ alg = crypto_larval_wait(alg);
1171 ++ }
1172 +
1173 + return alg;
1174 + }
1175 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1176 +index 6a38218..fb78bb9 100644
1177 +--- a/drivers/acpi/acpi_lpss.c
1178 ++++ b/drivers/acpi/acpi_lpss.c
1179 +@@ -257,12 +257,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
1180 + pdata->mmio_size = resource_size(&rentry->res);
1181 + pdata->mmio_base = ioremap(rentry->res.start,
1182 + pdata->mmio_size);
1183 +- pdata->dev_desc = dev_desc;
1184 + break;
1185 + }
1186 +
1187 + acpi_dev_free_resource_list(&resource_list);
1188 +
1189 ++ pdata->dev_desc = dev_desc;
1190 ++
1191 + if (dev_desc->clk_required) {
1192 + ret = register_device_clock(adev, pdata);
1193 + if (ret) {
1194 +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
1195 +index 5917839..a67853e 100644
1196 +--- a/drivers/acpi/pci_root.c
1197 ++++ b/drivers/acpi/pci_root.c
1198 +@@ -378,6 +378,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
1199 + struct acpi_pci_root *root;
1200 + u32 flags, base_flags;
1201 + acpi_handle handle = device->handle;
1202 ++ bool no_aspm = false, clear_aspm = false;
1203 +
1204 + root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
1205 + if (!root)
1206 +@@ -437,27 +438,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
1207 + flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
1208 + acpi_pci_osc_support(root, flags);
1209 +
1210 +- /*
1211 +- * TBD: Need PCI interface for enumeration/configuration of roots.
1212 +- */
1213 +-
1214 +- /*
1215 +- * Scan the Root Bridge
1216 +- * --------------------
1217 +- * Must do this prior to any attempt to bind the root device, as the
1218 +- * PCI namespace does not get created until this call is made (and
1219 +- * thus the root bridge's pci_dev does not exist).
1220 +- */
1221 +- root->bus = pci_acpi_scan_root(root);
1222 +- if (!root->bus) {
1223 +- dev_err(&device->dev,
1224 +- "Bus %04x:%02x not present in PCI namespace\n",
1225 +- root->segment, (unsigned int)root->secondary.start);
1226 +- result = -ENODEV;
1227 +- goto end;
1228 +- }
1229 +-
1230 +- /* Indicate support for various _OSC capabilities. */
1231 + if (pci_ext_cfg_avail())
1232 + flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
1233 + if (pcie_aspm_support_enabled()) {
1234 +@@ -471,7 +451,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
1235 + if (ACPI_FAILURE(status)) {
1236 + dev_info(&device->dev, "ACPI _OSC support "
1237 + "notification failed, disabling PCIe ASPM\n");
1238 +- pcie_no_aspm();
1239 ++ no_aspm = true;
1240 + flags = base_flags;
1241 + }
1242 + }
1243 +@@ -503,7 +483,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
1244 + * We have ASPM control, but the FADT indicates
1245 + * that it's unsupported. Clear it.
1246 + */
1247 +- pcie_clear_aspm(root->bus);
1248 ++ clear_aspm = true;
1249 + }
1250 + } else {
1251 + dev_info(&device->dev,
1252 +@@ -512,7 +492,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
1253 + acpi_format_exception(status), flags);
1254 + dev_info(&device->dev,
1255 + "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
1256 +- pcie_no_aspm();
1257 ++ /*
1258 ++ * We want to disable ASPM here, but aspm_disabled
1259 ++ * needs to remain in its state from boot so that we
1260 ++ * properly handle PCIe 1.1 devices. So we set this
1261 ++ * flag here, to defer the action until after the ACPI
1262 ++ * root scan.
1263 ++ */
1264 ++ no_aspm = true;
1265 + }
1266 + } else {
1267 + dev_info(&device->dev,
1268 +@@ -520,6 +507,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
1269 + "(_OSC support mask: 0x%02x)\n", flags);
1270 + }
1271 +
1272 ++ /*
1273 ++ * TBD: Need PCI interface for enumeration/configuration of roots.
1274 ++ */
1275 ++
1276 ++ /*
1277 ++ * Scan the Root Bridge
1278 ++ * --------------------
1279 ++ * Must do this prior to any attempt to bind the root device, as the
1280 ++ * PCI namespace does not get created until this call is made (and
1281 ++ * thus the root bridge's pci_dev does not exist).
1282 ++ */
1283 ++ root->bus = pci_acpi_scan_root(root);
1284 ++ if (!root->bus) {
1285 ++ dev_err(&device->dev,
1286 ++ "Bus %04x:%02x not present in PCI namespace\n",
1287 ++ root->segment, (unsigned int)root->secondary.start);
1288 ++ result = -ENODEV;
1289 ++ goto end;
1290 ++ }
1291 ++
1292 ++ if (clear_aspm) {
1293 ++ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
1294 ++ pcie_clear_aspm(root->bus);
1295 ++ }
1296 ++ if (no_aspm)
1297 ++ pcie_no_aspm();
1298 ++
1299 + pci_acpi_add_bus_pm_notifier(device, root->bus);
1300 + if (device->wakeup.flags.run_wake)
1301 + device_set_run_wake(root->bus->bridge, true);
1302 +diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
1303 +index a439602..c8dac74 100644
1304 +--- a/drivers/base/firmware_class.c
1305 ++++ b/drivers/base/firmware_class.c
1306 +@@ -868,8 +868,15 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
1307 + goto err_del_dev;
1308 + }
1309 +
1310 ++ mutex_lock(&fw_lock);
1311 ++ list_add(&buf->pending_list, &pending_fw_head);
1312 ++ mutex_unlock(&fw_lock);
1313 ++
1314 + retval = device_create_file(f_dev, &dev_attr_loading);
1315 + if (retval) {
1316 ++ mutex_lock(&fw_lock);
1317 ++ list_del_init(&buf->pending_list);
1318 ++ mutex_unlock(&fw_lock);
1319 + dev_err(f_dev, "%s: device_create_file failed\n", __func__);
1320 + goto err_del_bin_attr;
1321 + }
1322 +@@ -884,10 +891,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
1323 + kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
1324 + }
1325 +
1326 +- mutex_lock(&fw_lock);
1327 +- list_add(&buf->pending_list, &pending_fw_head);
1328 +- mutex_unlock(&fw_lock);
1329 +-
1330 + wait_for_completion(&buf->completion);
1331 +
1332 + cancel_delayed_work_sync(&fw_priv->timeout_work);
1333 +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
1334 +index 5349575..6c2652a 100644
1335 +--- a/drivers/base/regmap/regmap-debugfs.c
1336 ++++ b/drivers/base/regmap/regmap-debugfs.c
1337 +@@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
1338 + unsigned int reg_offset;
1339 +
1340 + /* Suppress the cache if we're using a subrange */
1341 +- if (from)
1342 +- return from;
1343 ++ if (base)
1344 ++ return base;
1345 +
1346 + /*
1347 + * If we don't have a cache build one so we don't have to do a
1348 +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
1349 +index 4ad2ad9..45aa20a 100644
1350 +--- a/drivers/block/rbd.c
1351 ++++ b/drivers/block/rbd.c
1352 +@@ -1557,11 +1557,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1353 + obj_request, obj_request->img_request, obj_request->result,
1354 + xferred, length);
1355 + /*
1356 +- * ENOENT means a hole in the image. We zero-fill the
1357 +- * entire length of the request. A short read also implies
1358 +- * zero-fill to the end of the request. Either way we
1359 +- * update the xferred count to indicate the whole request
1360 +- * was satisfied.
1361 ++ * ENOENT means a hole in the image. We zero-fill the entire
1362 ++ * length of the request. A short read also implies zero-fill
1363 ++ * to the end of the request. An error requires the whole
1364 ++ * length of the request to be reported finished with an error
1365 ++ * to the block layer. In each case we update the xferred
1366 ++ * count to indicate the whole request was satisfied.
1367 + */
1368 + rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1369 + if (obj_request->result == -ENOENT) {
1370 +@@ -1570,14 +1571,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1371 + else
1372 + zero_pages(obj_request->pages, 0, length);
1373 + obj_request->result = 0;
1374 +- obj_request->xferred = length;
1375 + } else if (xferred < length && !obj_request->result) {
1376 + if (obj_request->type == OBJ_REQUEST_BIO)
1377 + zero_bio_chain(obj_request->bio_list, xferred);
1378 + else
1379 + zero_pages(obj_request->pages, xferred, length);
1380 +- obj_request->xferred = length;
1381 + }
1382 ++ obj_request->xferred = length;
1383 + obj_request_done_set(obj_request);
1384 + }
1385 +
1386 +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
1387 +index 1b3f8c9..1d5af3f 100644
1388 +--- a/drivers/clk/clk-wm831x.c
1389 ++++ b/drivers/clk/clk-wm831x.c
1390 +@@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
1391 + if (!clkdata)
1392 + return -ENOMEM;
1393 +
1394 ++ clkdata->wm831x = wm831x;
1395 ++
1396 + /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
1397 + ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
1398 + if (ret < 0) {
1399 +diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
1400 +index 2a297f8..fe853903 100644
1401 +--- a/drivers/cpuidle/coupled.c
1402 ++++ b/drivers/cpuidle/coupled.c
1403 +@@ -106,6 +106,7 @@ struct cpuidle_coupled {
1404 + cpumask_t coupled_cpus;
1405 + int requested_state[NR_CPUS];
1406 + atomic_t ready_waiting_counts;
1407 ++ atomic_t abort_barrier;
1408 + int online_count;
1409 + int refcnt;
1410 + int prevent;
1411 +@@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock);
1412 + static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
1413 +
1414 + /*
1415 +- * The cpuidle_coupled_poked_mask mask is used to avoid calling
1416 ++ * The cpuidle_coupled_poke_pending mask is used to avoid calling
1417 + * __smp_call_function_single with the per cpu call_single_data struct already
1418 + * in use. This prevents a deadlock where two cpus are waiting for each others
1419 + * call_single_data struct to be available
1420 + */
1421 +-static cpumask_t cpuidle_coupled_poked_mask;
1422 ++static cpumask_t cpuidle_coupled_poke_pending;
1423 ++
1424 ++/*
1425 ++ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
1426 ++ * once to minimize entering the ready loop with a poke pending, which would
1427 ++ * require aborting and retrying.
1428 ++ */
1429 ++static cpumask_t cpuidle_coupled_poked;
1430 +
1431 + /**
1432 + * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
1433 +@@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
1434 + return state;
1435 + }
1436 +
1437 +-static void cpuidle_coupled_poked(void *info)
1438 ++static void cpuidle_coupled_handle_poke(void *info)
1439 + {
1440 + int cpu = (unsigned long)info;
1441 +- cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
1442 ++ cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
1443 ++ cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
1444 + }
1445 +
1446 + /**
1447 +@@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu)
1448 + {
1449 + struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
1450 +
1451 +- if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
1452 ++ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
1453 + __smp_call_function_single(cpu, csd, 0);
1454 + }
1455 +
1456 +@@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu,
1457 + * @coupled: the struct coupled that contains the current cpu
1458 + * @next_state: the index in drv->states of the requested state for this cpu
1459 + *
1460 +- * Updates the requested idle state for the specified cpuidle device,
1461 +- * poking all coupled cpus out of idle if necessary to let them see the new
1462 +- * state.
1463 ++ * Updates the requested idle state for the specified cpuidle device.
1464 ++ * Returns the number of waiting cpus.
1465 + */
1466 +-static void cpuidle_coupled_set_waiting(int cpu,
1467 ++static int cpuidle_coupled_set_waiting(int cpu,
1468 + struct cpuidle_coupled *coupled, int next_state)
1469 + {
1470 +- int w;
1471 +-
1472 + coupled->requested_state[cpu] = next_state;
1473 +
1474 + /*
1475 +- * If this is the last cpu to enter the waiting state, poke
1476 +- * all the other cpus out of their waiting state so they can
1477 +- * enter a deeper state. This can race with one of the cpus
1478 +- * exiting the waiting state due to an interrupt and
1479 +- * decrementing waiting_count, see comment below.
1480 +- *
1481 + * The atomic_inc_return provides a write barrier to order the write
1482 + * to requested_state with the later write that increments ready_count.
1483 + */
1484 +- w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1485 +- if (w == coupled->online_count)
1486 +- cpuidle_coupled_poke_others(cpu, coupled);
1487 ++ return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1488 + }
1489 +
1490 + /**
1491 +@@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
1492 + * been processed and the poke bit has been cleared.
1493 + *
1494 + * Other interrupts may also be processed while interrupts are enabled, so
1495 +- * need_resched() must be tested after turning interrupts off again to make sure
1496 ++ * need_resched() must be tested after this function returns to make sure
1497 + * the interrupt didn't schedule work that should take the cpu out of idle.
1498 + *
1499 +- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
1500 ++ * Returns 0 if no poke was pending, 1 if a poke was cleared.
1501 + */
1502 + static int cpuidle_coupled_clear_pokes(int cpu)
1503 + {
1504 ++ if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1505 ++ return 0;
1506 ++
1507 + local_irq_enable();
1508 +- while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
1509 ++ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1510 + cpu_relax();
1511 + local_irq_disable();
1512 +
1513 +- return need_resched() ? -EINTR : 0;
1514 ++ return 1;
1515 ++}
1516 ++
1517 ++static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
1518 ++{
1519 ++ cpumask_t cpus;
1520 ++ int ret;
1521 ++
1522 ++ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
1523 ++ ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
1524 ++
1525 ++ return ret;
1526 + }
1527 +
1528 + /**
1529 +@@ -449,12 +461,14 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1530 + {
1531 + int entered_state = -1;
1532 + struct cpuidle_coupled *coupled = dev->coupled;
1533 ++ int w;
1534 +
1535 + if (!coupled)
1536 + return -EINVAL;
1537 +
1538 + while (coupled->prevent) {
1539 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1540 ++ cpuidle_coupled_clear_pokes(dev->cpu);
1541 ++ if (need_resched()) {
1542 + local_irq_enable();
1543 + return entered_state;
1544 + }
1545 +@@ -465,15 +479,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1546 + /* Read barrier ensures online_count is read after prevent is cleared */
1547 + smp_rmb();
1548 +
1549 +- cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1550 ++reset:
1551 ++ cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
1552 ++
1553 ++ w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1554 ++ /*
1555 ++ * If this is the last cpu to enter the waiting state, poke
1556 ++ * all the other cpus out of their waiting state so they can
1557 ++ * enter a deeper state. This can race with one of the cpus
1558 ++ * exiting the waiting state due to an interrupt and
1559 ++ * decrementing waiting_count, see comment below.
1560 ++ */
1561 ++ if (w == coupled->online_count) {
1562 ++ cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
1563 ++ cpuidle_coupled_poke_others(dev->cpu, coupled);
1564 ++ }
1565 +
1566 + retry:
1567 + /*
1568 + * Wait for all coupled cpus to be idle, using the deepest state
1569 +- * allowed for a single cpu.
1570 ++ * allowed for a single cpu. If this was not the poking cpu, wait
1571 ++ * for at least one poke before leaving to avoid a race where
1572 ++ * two cpus could arrive at the waiting loop at the same time,
1573 ++ * but the first of the two to arrive could skip the loop without
1574 ++ * processing the pokes from the last to arrive.
1575 + */
1576 +- while (!cpuidle_coupled_cpus_waiting(coupled)) {
1577 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1578 ++ while (!cpuidle_coupled_cpus_waiting(coupled) ||
1579 ++ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
1580 ++ if (cpuidle_coupled_clear_pokes(dev->cpu))
1581 ++ continue;
1582 ++
1583 ++ if (need_resched()) {
1584 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1585 + goto out;
1586 + }
1587 +@@ -487,12 +523,19 @@ retry:
1588 + dev->safe_state_index);
1589 + }
1590 +
1591 +- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1592 ++ cpuidle_coupled_clear_pokes(dev->cpu);
1593 ++ if (need_resched()) {
1594 + cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1595 + goto out;
1596 + }
1597 +
1598 + /*
1599 ++ * Make sure final poke status for this cpu is visible before setting
1600 ++ * cpu as ready.
1601 ++ */
1602 ++ smp_wmb();
1603 ++
1604 ++ /*
1605 + * All coupled cpus are probably idle. There is a small chance that
1606 + * one of the other cpus just became active. Increment the ready count,
1607 + * and spin until all coupled cpus have incremented the counter. Once a
1608 +@@ -511,6 +554,28 @@ retry:
1609 + cpu_relax();
1610 + }
1611 +
1612 ++ /*
1613 ++ * Make sure read of all cpus ready is done before reading pending pokes
1614 ++ */
1615 ++ smp_rmb();
1616 ++
1617 ++ /*
1618 ++ * There is a small chance that a cpu left and reentered idle after this
1619 ++ * cpu saw that all cpus were waiting. The cpu that reentered idle will
1620 ++ * have sent this cpu a poke, which will still be pending after the
1621 ++ * ready loop. The pending interrupt may be lost by the interrupt
1622 ++ * controller when entering the deep idle state. It's not possible to
1623 ++ * clear a pending interrupt without turning interrupts on and handling
1624 ++ * it, and it's too late to turn on interrupts here, so reset the
1625 ++ * coupled idle state of all cpus and retry.
1626 ++ */
1627 ++ if (cpuidle_coupled_any_pokes_pending(coupled)) {
1628 ++ cpuidle_coupled_set_done(dev->cpu, coupled);
1629 ++ /* Wait for all cpus to see the pending pokes */
1630 ++ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
1631 ++ goto reset;
1632 ++ }
1633 ++
1634 + /* all cpus have acked the coupled state */
1635 + next_state = cpuidle_coupled_get_state(dev, coupled);
1636 +
1637 +@@ -596,7 +661,7 @@ have_coupled:
1638 + coupled->refcnt++;
1639 +
1640 + csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
1641 +- csd->func = cpuidle_coupled_poked;
1642 ++ csd->func = cpuidle_coupled_handle_poke;
1643 + csd->info = (void *)(unsigned long)dev->cpu;
1644 +
1645 + return 0;
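
The coupled-cpuidle hunk above makes cpuidle_coupled_clear_pokes() report whether a poke was pending and adds an abort path: if any coupled cpu still has a poke pending after all cpus have become ready, every cpu backs out through coupled->abort_barrier and the sequence restarts from the reset label. A minimal userspace sketch of that back-out-and-retry idea, using pthreads (the thread count, names, and the single pending flag are illustrative only, not taken from the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t abort_barrier;
static atomic_int poke_pending = 1;    /* pretend one poke is still queued */

static void *enter_coupled_idle(void *arg)
{
	long cpu = (long)arg;

	for (;;) {
		/* ...every thread has reached the "all ready" point here... */
		if (atomic_load(&poke_pending)) {
			/* Too late to handle the poke here: back out together
			 * and retry from the top. */
			pthread_barrier_wait(&abort_barrier);
			if (cpu == 0)
				atomic_store(&poke_pending, 0);  /* handled on retry */
			pthread_barrier_wait(&abort_barrier);
			continue;
		}
		printf("cpu %ld: entering deep idle\n", cpu);
		return NULL;
	}
}

int main(void)
{
	pthread_t threads[NTHREADS];

	pthread_barrier_init(&abort_barrier, NULL, NTHREADS);
	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, enter_coupled_idle, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);
	pthread_barrier_destroy(&abort_barrier);
	return 0;
}

The two barrier waits stand in for cpuidle_coupled_parallel_barrier(): no thread re-enters the loop until all of them have seen the abort, and none proceeds until the pending state has been cleared.
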
1646 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1647 +index 8b6a034..8b3d901 100644
1648 +--- a/drivers/edac/amd64_edac.c
1649 ++++ b/drivers/edac/amd64_edac.c
1650 +@@ -2470,8 +2470,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
1651 + layers[0].size = pvt->csels[0].b_cnt;
1652 + layers[0].is_virt_csrow = true;
1653 + layers[1].type = EDAC_MC_LAYER_CHANNEL;
1654 +- layers[1].size = pvt->channel_count;
1655 ++
1656 ++ /*
1657 ++ * Always allocate two channels since we can have setups with DIMMs on
1658 ++ * only one channel. Also, this simplifies handling later for the price
1659 ++ * of a couple of KBs tops.
1660 ++ */
1661 ++ layers[1].size = 2;
1662 + layers[1].is_virt_csrow = false;
1663 ++
1664 + mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
1665 + if (!mci)
1666 + goto err_siblings;
1667 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1668 +index 95d6f4b..70fc133 100644
1669 +--- a/drivers/gpu/drm/drm_edid.c
1670 ++++ b/drivers/gpu/drm/drm_edid.c
1671 +@@ -125,6 +125,9 @@ static struct edid_quirk {
1672 +
1673 + /* ViewSonic VA2026w */
1674 + { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
1675 ++
1676 ++ /* Medion MD 30217 PG */
1677 ++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
1678 + };
1679 +
1680 + /*
1681 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1682 +index be79f47..ca40d1b 100644
1683 +--- a/drivers/gpu/drm/i915/intel_display.c
1684 ++++ b/drivers/gpu/drm/i915/intel_display.c
1685 +@@ -7809,6 +7809,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
1686 + pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
1687 + pipe_config->shared_dpll = DPLL_ID_PRIVATE;
1688 +
1689 ++ /*
1690 ++ * Sanitize sync polarity flags based on requested ones. If neither
1691 ++ * positive or negative polarity is requested, treat this as meaning
1692 ++ * negative polarity.
1693 ++ */
1694 ++ if (!(pipe_config->adjusted_mode.flags &
1695 ++ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
1696 ++ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
1697 ++
1698 ++ if (!(pipe_config->adjusted_mode.flags &
1699 ++ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
1700 ++ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
1701 ++
1702 + /* Compute a starting value for pipe_config->pipe_bpp taking the source
1703 + * plane pixel format and any sink constraints into account. Returns the
1704 + * source plane bpp so that dithering can be selected on mismatches
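
The i915 hunk above fills in a default sync polarity when a mode requests neither positive nor negative hsync/vsync. The pattern is simply "if neither bit of a pair is set, set the default bit"; a tiny standalone illustration (the flag values are made up, not the DRM ones):

#include <stdio.h>

#define FLAG_PHSYNC (1u << 0)
#define FLAG_NHSYNC (1u << 1)

static unsigned int sanitize_hsync(unsigned int flags)
{
	if (!(flags & (FLAG_PHSYNC | FLAG_NHSYNC)))
		flags |= FLAG_NHSYNC;   /* unspecified polarity: treat as negative */
	return flags;
}

int main(void)
{
	printf("0x%x\n", sanitize_hsync(0));            /* -> 0x2 */
	printf("0x%x\n", sanitize_hsync(FLAG_PHSYNC));  /* -> 0x1 (unchanged) */
	return 0;
}
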
1705 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1706 +index 36668d1..5956445 100644
1707 +--- a/drivers/hid/hid-core.c
1708 ++++ b/drivers/hid/hid-core.c
1709 +@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
1710 + struct hid_report_enum *report_enum = device->report_enum + type;
1711 + struct hid_report *report;
1712 +
1713 ++ if (id >= HID_MAX_IDS)
1714 ++ return NULL;
1715 + if (report_enum->report_id_hash[id])
1716 + return report_enum->report_id_hash[id];
1717 +
1718 +@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1719 +
1720 + case HID_GLOBAL_ITEM_TAG_REPORT_ID:
1721 + parser->global.report_id = item_udata(item);
1722 +- if (parser->global.report_id == 0) {
1723 +- hid_err(parser->device, "report_id 0 is invalid\n");
1724 ++ if (parser->global.report_id == 0 ||
1725 ++ parser->global.report_id >= HID_MAX_IDS) {
1726 ++ hid_err(parser->device, "report_id %u is invalid\n",
1727 ++ parser->global.report_id);
1728 + return -1;
1729 + }
1730 + return 0;
1731 +@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
1732 + for (i = 0; i < HID_REPORT_TYPES; i++) {
1733 + struct hid_report_enum *report_enum = device->report_enum + i;
1734 +
1735 +- for (j = 0; j < 256; j++) {
1736 ++ for (j = 0; j < HID_MAX_IDS; j++) {
1737 + struct hid_report *report = report_enum->report_id_hash[j];
1738 + if (report)
1739 + hid_free_report(report);
1740 +@@ -1152,7 +1156,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
1741 +
1742 + int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1743 + {
1744 +- unsigned size = field->report_size;
1745 ++ unsigned size;
1746 ++
1747 ++ if (!field)
1748 ++ return -1;
1749 ++
1750 ++ size = field->report_size;
1751 +
1752 + hid_dump_input(field->report->device, field->usage + offset, value);
1753 +
1754 +@@ -1597,6 +1606,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1755 + { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1756 + { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1757 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1758 ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1759 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1760 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
1761 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
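
The hid-core hunk above is a bounds-and-validity pass: report IDs coming from the device descriptor are checked against HID_MAX_IDS before they index report_id_hash, the bare 256 loop bound is replaced by the same constant, and hid_set_field() rejects a NULL field. The underlying pattern is "validate an externally supplied index before using it as an array subscript"; a small standalone illustration (the table and MAX_IDS value are placeholders, not the HID structures):

#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 256

static void *id_table[MAX_IDS];

static void *register_id(unsigned int id)
{
	if (id >= MAX_IDS)          /* reject out-of-range IDs up front */
		return NULL;
	if (id_table[id])           /* already registered: reuse it */
		return id_table[id];
	id_table[id] = malloc(16);  /* stand-in for the real allocation */
	return id_table[id];
}

int main(void)
{
	printf("%s\n", register_id(3)   ? "ok" : "rejected");
	printf("%s\n", register_id(512) ? "ok" : "rejected");
	return 0;
}
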
1762 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1763 +index ffe4c7a..22134d4 100644
1764 +--- a/drivers/hid/hid-ids.h
1765 ++++ b/drivers/hid/hid-ids.h
1766 +@@ -135,9 +135,9 @@
1767 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
1768 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
1769 + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
1770 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
1771 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
1772 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
1773 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
1774 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
1775 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
1776 + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
1777 + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
1778 + #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
1779 +@@ -482,6 +482,7 @@
1780 + #define USB_VENDOR_ID_KYE 0x0458
1781 + #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
1782 + #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
1783 ++#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
1784 + #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
1785 + #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
1786 + #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
1787 +@@ -658,6 +659,7 @@
1788 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
1789 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
1790 + #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
1791 ++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
1792 +
1793 + #define USB_VENDOR_ID_ONTRAK 0x0a07
1794 + #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
1795 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1796 +index 7480799..3fc4034 100644
1797 +--- a/drivers/hid/hid-input.c
1798 ++++ b/drivers/hid/hid-input.c
1799 +@@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1800 + {
1801 + struct hid_device *dev = container_of(psy, struct hid_device, battery);
1802 + int ret = 0;
1803 +- __u8 buf[2] = {};
1804 ++ __u8 *buf;
1805 +
1806 + switch (prop) {
1807 + case POWER_SUPPLY_PROP_PRESENT:
1808 +@@ -349,12 +349,19 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1809 + break;
1810 +
1811 + case POWER_SUPPLY_PROP_CAPACITY:
1812 ++
1813 ++ buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
1814 ++ if (!buf) {
1815 ++ ret = -ENOMEM;
1816 ++ break;
1817 ++ }
1818 + ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
1819 +- buf, sizeof(buf),
1820 ++ buf, 2,
1821 + dev->battery_report_type);
1822 +
1823 + if (ret != 2) {
1824 + ret = -ENODATA;
1825 ++ kfree(buf);
1826 + break;
1827 + }
1828 + ret = 0;
1829 +@@ -364,6 +371,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1830 + buf[1] <= dev->battery_max)
1831 + val->intval = (100 * (buf[1] - dev->battery_min)) /
1832 + (dev->battery_max - dev->battery_min);
1833 ++ kfree(buf);
1834 + break;
1835 +
1836 + case POWER_SUPPLY_PROP_MODEL_NAME:
1837 +diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
1838 +index 1e2ee2aa..7384512 100644
1839 +--- a/drivers/hid/hid-kye.c
1840 ++++ b/drivers/hid/hid-kye.c
1841 +@@ -268,6 +268,26 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
1842 + 0xC0 /* End Collection */
1843 + };
1844 +
1845 ++static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
1846 ++ unsigned int *rsize, int offset, const char *device_name) {
1847 ++ /*
1848 ++ * the fixup that need to be done:
1849 ++ * - change Usage Maximum in the Comsumer Control
1850 ++ * (report ID 3) to a reasonable value
1851 ++ */
1852 ++ if (*rsize >= offset + 31 &&
1853 ++ /* Usage Page (Consumer Devices) */
1854 ++ rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c &&
1855 ++ /* Usage (Consumer Control) */
1856 ++ rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 &&
1857 ++ /* Usage Maximum > 12287 */
1858 ++ rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) {
1859 ++ hid_info(hdev, "fixing up %s report descriptor\n", device_name);
1860 ++ rdesc[offset + 12] = 0x2f;
1861 ++ }
1862 ++ return rdesc;
1863 ++}
1864 ++
1865 + static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1866 + unsigned int *rsize)
1867 + {
1868 +@@ -315,23 +335,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1869 + }
1870 + break;
1871 + case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
1872 +- /*
1873 +- * the fixup that need to be done:
1874 +- * - change Usage Maximum in the Comsumer Control
1875 +- * (report ID 3) to a reasonable value
1876 +- */
1877 +- if (*rsize >= 135 &&
1878 +- /* Usage Page (Consumer Devices) */
1879 +- rdesc[104] == 0x05 && rdesc[105] == 0x0c &&
1880 +- /* Usage (Consumer Control) */
1881 +- rdesc[106] == 0x09 && rdesc[107] == 0x01 &&
1882 +- /* Usage Maximum > 12287 */
1883 +- rdesc[114] == 0x2a && rdesc[116] > 0x2f) {
1884 +- hid_info(hdev,
1885 +- "fixing up Genius Gila Gaming Mouse "
1886 +- "report descriptor\n");
1887 +- rdesc[116] = 0x2f;
1888 +- }
1889 ++ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
1890 ++ "Genius Gila Gaming Mouse");
1891 ++ break;
1892 ++ case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
1893 ++ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
1894 ++ "Genius Gx Imperator Keyboard");
1895 + break;
1896 + }
1897 + return rdesc;
1898 +@@ -428,6 +437,8 @@ static const struct hid_device_id kye_devices[] = {
1899 + USB_DEVICE_ID_KYE_EASYPEN_M610X) },
1900 + { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1901 + USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1902 ++ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1903 ++ USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1904 + { }
1905 + };
1906 + MODULE_DEVICE_TABLE(hid, kye_devices);
1907 +diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
1908 +index ef95102..5482156 100644
1909 +--- a/drivers/hid/hid-ntrig.c
1910 ++++ b/drivers/hid/hid-ntrig.c
1911 +@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
1912 + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
1913 + report_id_hash[0x0d];
1914 +
1915 +- if (!report)
1916 ++ if (!report || report->maxfield < 1 ||
1917 ++ report->field[0]->report_count < 1)
1918 + return -EINVAL;
1919 +
1920 + hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
1921 +diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
1922 +index e346038..59d5eb1 100644
1923 +--- a/drivers/hid/hid-picolcd_cir.c
1924 ++++ b/drivers/hid/hid-picolcd_cir.c
1925 +@@ -145,6 +145,7 @@ void picolcd_exit_cir(struct picolcd_data *data)
1926 + struct rc_dev *rdev = data->rc_dev;
1927 +
1928 + data->rc_dev = NULL;
1929 +- rc_unregister_device(rdev);
1930 ++ if (rdev)
1931 ++ rc_unregister_device(rdev);
1932 + }
1933 +
1934 +diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
1935 +index b48092d..acbb0210 100644
1936 +--- a/drivers/hid/hid-picolcd_core.c
1937 ++++ b/drivers/hid/hid-picolcd_core.c
1938 +@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
1939 + buf += 10;
1940 + cnt -= 10;
1941 + }
1942 +- if (!report)
1943 ++ if (!report || report->maxfield != 1)
1944 + return -EINVAL;
1945 +
1946 + while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
1947 +diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
1948 +index 591f6b2..c930ab8 100644
1949 +--- a/drivers/hid/hid-picolcd_fb.c
1950 ++++ b/drivers/hid/hid-picolcd_fb.c
1951 +@@ -593,10 +593,14 @@ err_nomem:
1952 + void picolcd_exit_framebuffer(struct picolcd_data *data)
1953 + {
1954 + struct fb_info *info = data->fb_info;
1955 +- struct picolcd_fb_data *fbdata = info->par;
1956 ++ struct picolcd_fb_data *fbdata;
1957 + unsigned long flags;
1958 +
1959 ++ if (!info)
1960 ++ return;
1961 ++
1962 + device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
1963 ++ fbdata = info->par;
1964 +
1965 + /* disconnect framebuffer from HID dev */
1966 + spin_lock_irqsave(&fbdata->lock, flags);
1967 +diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
1968 +index d29112f..2dcd7d9 100644
1969 +--- a/drivers/hid/hid-pl.c
1970 ++++ b/drivers/hid/hid-pl.c
1971 +@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
1972 + strong = &report->field[0]->value[2];
1973 + weak = &report->field[0]->value[3];
1974 + debug("detected single-field device");
1975 +- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
1976 +- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
1977 ++ } else if (report->field[0]->maxusage == 1 &&
1978 ++ report->field[0]->usage[0].hid ==
1979 ++ (HID_UP_LED | 0x43) &&
1980 ++ report->maxfield >= 4 &&
1981 ++ report->field[0]->report_count >= 1 &&
1982 ++ report->field[1]->report_count >= 1 &&
1983 ++ report->field[2]->report_count >= 1 &&
1984 ++ report->field[3]->report_count >= 1) {
1985 + report->field[0]->value[0] = 0x00;
1986 + report->field[1]->value[0] = 0x00;
1987 + strong = &report->field[2]->value[0];
1988 +diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
1989 +index ca749810..aa34755 100644
1990 +--- a/drivers/hid/hid-sensor-hub.c
1991 ++++ b/drivers/hid/hid-sensor-hub.c
1992 +@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
1993 +
1994 + mutex_lock(&data->mutex);
1995 + report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
1996 +- if (!report || (field_index >= report->maxfield)) {
1997 ++ if (!report || (field_index >= report->maxfield) ||
1998 ++ report->field[field_index]->report_count < 1) {
1999 + ret = -EINVAL;
2000 + goto done_proc;
2001 + }
2002 +diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
2003 +index a2f587d..7112f3e 100644
2004 +--- a/drivers/hid/hid-speedlink.c
2005 ++++ b/drivers/hid/hid-speedlink.c
2006 +@@ -3,7 +3,7 @@
2007 + * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
2008 + * the HID descriptor.
2009 + *
2010 +- * Copyright (c) 2011 Stefan Kriwanek <mail@××××××××××××××.de>
2011 ++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@××××××××××××××.de>
2012 + */
2013 +
2014 + /*
2015 +@@ -46,8 +46,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
2016 + struct hid_usage *usage, __s32 value)
2017 + {
2018 + /* No other conditions due to usage_table. */
2019 +- /* Fix "jumpy" cursor (invalid events sent by device). */
2020 +- if (value == 256)
2021 ++
2022 ++ /* This fixes the "jumpy" cursor occuring due to invalid events sent
2023 ++ * by the device. Some devices only send them with value==+256, others
2024 ++ * don't. However, catching abs(value)>=256 is restrictive enough not
2025 ++ * to interfere with devices that were bug-free (has been tested).
2026 ++ */
2027 ++ if (abs(value) >= 256)
2028 + return 1;
2029 + /* Drop useless distance 0 events (on button clicks etc.) as well */
2030 + if (value == 0)
2031 +diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
2032 +index 0c06054..6602098 100644
2033 +--- a/drivers/hid/hid-wiimote-core.c
2034 ++++ b/drivers/hid/hid-wiimote-core.c
2035 +@@ -212,10 +212,12 @@ static __u8 select_drm(struct wiimote_data *wdata)
2036 +
2037 + if (ir == WIIPROTO_FLAG_IR_BASIC) {
2038 + if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
2039 +- if (ext)
2040 +- return WIIPROTO_REQ_DRM_KAIE;
2041 +- else
2042 +- return WIIPROTO_REQ_DRM_KAI;
2043 ++ /* GEN10 and ealier devices bind IR formats to DRMs.
2044 ++ * Hence, we cannot use DRM_KAI here as it might be
2045 ++ * bound to IR_EXT. Use DRM_KAIE unconditionally so we
2046 ++ * work with all devices and our parsers can use the
2047 ++ * fixed formats, too. */
2048 ++ return WIIPROTO_REQ_DRM_KAIE;
2049 + } else {
2050 + return WIIPROTO_REQ_DRM_KIE;
2051 + }
2052 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
2053 +index 6f1feb2..dbfe300 100644
2054 +--- a/drivers/hid/hidraw.c
2055 ++++ b/drivers/hid/hidraw.c
2056 +@@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
2057 + __u8 *buf;
2058 + int ret = 0;
2059 +
2060 +- if (!hidraw_table[minor]) {
2061 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
2062 + ret = -ENODEV;
2063 + goto out;
2064 + }
2065 +@@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
2066 + }
2067 +
2068 + mutex_lock(&minors_lock);
2069 +- if (!hidraw_table[minor]) {
2070 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
2071 + err = -ENODEV;
2072 + goto out_unlock;
2073 + }
2074 +@@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on)
2075 + return fasync_helper(fd, file, on, &list->fasync);
2076 + }
2077 +
2078 ++static void drop_ref(struct hidraw *hidraw, int exists_bit)
2079 ++{
2080 ++ if (exists_bit) {
2081 ++ hid_hw_close(hidraw->hid);
2082 ++ hidraw->exist = 0;
2083 ++ if (hidraw->open)
2084 ++ wake_up_interruptible(&hidraw->wait);
2085 ++ } else {
2086 ++ --hidraw->open;
2087 ++ }
2088 ++
2089 ++ if (!hidraw->open && !hidraw->exist) {
2090 ++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
2091 ++ hidraw_table[hidraw->minor] = NULL;
2092 ++ kfree(hidraw);
2093 ++ }
2094 ++}
2095 ++
2096 + static int hidraw_release(struct inode * inode, struct file * file)
2097 + {
2098 + unsigned int minor = iminor(inode);
2099 +- struct hidraw *dev;
2100 + struct hidraw_list *list = file->private_data;
2101 +- int ret;
2102 +- int i;
2103 +
2104 + mutex_lock(&minors_lock);
2105 +- if (!hidraw_table[minor]) {
2106 +- ret = -ENODEV;
2107 +- goto unlock;
2108 +- }
2109 +
2110 + list_del(&list->node);
2111 +- dev = hidraw_table[minor];
2112 +- if (!--dev->open) {
2113 +- if (list->hidraw->exist) {
2114 +- hid_hw_power(dev->hid, PM_HINT_NORMAL);
2115 +- hid_hw_close(dev->hid);
2116 +- } else {
2117 +- kfree(list->hidraw);
2118 +- }
2119 +- }
2120 +-
2121 +- for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
2122 +- kfree(list->buffer[i].value);
2123 + kfree(list);
2124 +- ret = 0;
2125 +-unlock:
2126 +- mutex_unlock(&minors_lock);
2127 +
2128 +- return ret;
2129 ++ drop_ref(hidraw_table[minor], 0);
2130 ++
2131 ++ mutex_unlock(&minors_lock);
2132 ++ return 0;
2133 + }
2134 +
2135 + static long hidraw_ioctl(struct file *file, unsigned int cmd,
2136 +@@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid)
2137 + struct hidraw *hidraw = hid->hidraw;
2138 +
2139 + mutex_lock(&minors_lock);
2140 +- hidraw->exist = 0;
2141 +-
2142 +- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
2143 +
2144 +- hidraw_table[hidraw->minor] = NULL;
2145 ++ drop_ref(hidraw, 1);
2146 +
2147 +- if (hidraw->open) {
2148 +- hid_hw_close(hid);
2149 +- wake_up_interruptible(&hidraw->wait);
2150 +- } else {
2151 +- kfree(hidraw);
2152 +- }
2153 + mutex_unlock(&minors_lock);
2154 + }
2155 + EXPORT_SYMBOL_GPL(hidraw_disconnect);
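
The hidraw hunk above consolidates teardown into drop_ref(): both the disconnect path and the final release go through one helper, and the hidraw object is destroyed only once the device is gone and the open count has reached zero, which closes the window the old split logic left between disconnect and the last close. A userspace analogue of that rule (the locking and names are illustrative, not the hidraw API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	pthread_mutex_t lock;
	int open;    /* number of userspace opens */
	int exist;   /* backing device still present */
};

static void drop_ref(struct dev *d, int device_went_away)
{
	int free_it;

	pthread_mutex_lock(&d->lock);
	if (device_went_away)
		d->exist = 0;        /* disconnect path */
	else
		d->open--;           /* close path */
	free_it = (d->open == 0 && d->exist == 0);
	pthread_mutex_unlock(&d->lock);

	if (free_it) {
		printf("freeing device\n");
		free(d);
	}
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	pthread_mutex_init(&d->lock, NULL);
	d->open = 1;
	d->exist = 1;

	drop_ref(d, 1);   /* device unplugged while still open: not freed yet */
	drop_ref(d, 0);   /* last close: now freed */
	return 0;
}
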
2156 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
2157 +index 19b8360..0734552 100644
2158 +--- a/drivers/hid/usbhid/hid-quirks.c
2159 ++++ b/drivers/hid/usbhid/hid-quirks.c
2160 +@@ -109,6 +109,8 @@ static const struct hid_blacklist {
2161 + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
2162 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
2163 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
2164 ++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
2165 ++
2166 + { 0, 0 }
2167 + };
2168 +
2169 +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
2170 +index 4ef4d5e..a73f961 100644
2171 +--- a/drivers/input/mouse/bcm5974.c
2172 ++++ b/drivers/input/mouse/bcm5974.c
2173 +@@ -89,9 +89,9 @@
2174 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
2175 + #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
2176 + /* MacbookAir6,2 (unibody, June 2013) */
2177 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
2178 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
2179 +-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
2180 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
2181 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
2182 ++#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
2183 +
2184 + #define BCM5974_DEVICE(prod) { \
2185 + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
2186 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2187 +index eec0d3e..15e9b57 100644
2188 +--- a/drivers/iommu/intel-iommu.c
2189 ++++ b/drivers/iommu/intel-iommu.c
2190 +@@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
2191 + return order;
2192 + }
2193 +
2194 ++static void dma_pte_free_level(struct dmar_domain *domain, int level,
2195 ++ struct dma_pte *pte, unsigned long pfn,
2196 ++ unsigned long start_pfn, unsigned long last_pfn)
2197 ++{
2198 ++ pfn = max(start_pfn, pfn);
2199 ++ pte = &pte[pfn_level_offset(pfn, level)];
2200 ++
2201 ++ do {
2202 ++ unsigned long level_pfn;
2203 ++ struct dma_pte *level_pte;
2204 ++
2205 ++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
2206 ++ goto next;
2207 ++
2208 ++ level_pfn = pfn & level_mask(level - 1);
2209 ++ level_pte = phys_to_virt(dma_pte_addr(pte));
2210 ++
2211 ++ if (level > 2)
2212 ++ dma_pte_free_level(domain, level - 1, level_pte,
2213 ++ level_pfn, start_pfn, last_pfn);
2214 ++
2215 ++ /* If range covers entire pagetable, free it */
2216 ++ if (!(start_pfn > level_pfn ||
2217 ++ last_pfn < level_pfn + level_size(level))) {
2218 ++ dma_clear_pte(pte);
2219 ++ domain_flush_cache(domain, pte, sizeof(*pte));
2220 ++ free_pgtable_page(level_pte);
2221 ++ }
2222 ++next:
2223 ++ pfn += level_size(level);
2224 ++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
2225 ++}
2226 ++
2227 + /* free page table pages. last level pte should already be cleared */
2228 + static void dma_pte_free_pagetable(struct dmar_domain *domain,
2229 + unsigned long start_pfn,
2230 + unsigned long last_pfn)
2231 + {
2232 + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
2233 +- struct dma_pte *first_pte, *pte;
2234 +- int total = agaw_to_level(domain->agaw);
2235 +- int level;
2236 +- unsigned long tmp;
2237 +- int large_page = 2;
2238 +
2239 + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
2240 + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
2241 + BUG_ON(start_pfn > last_pfn);
2242 +
2243 + /* We don't need lock here; nobody else touches the iova range */
2244 +- level = 2;
2245 +- while (level <= total) {
2246 +- tmp = align_to_level(start_pfn, level);
2247 +-
2248 +- /* If we can't even clear one PTE at this level, we're done */
2249 +- if (tmp + level_size(level) - 1 > last_pfn)
2250 +- return;
2251 +-
2252 +- do {
2253 +- large_page = level;
2254 +- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
2255 +- if (large_page > level)
2256 +- level = large_page + 1;
2257 +- if (!pte) {
2258 +- tmp = align_to_level(tmp + 1, level + 1);
2259 +- continue;
2260 +- }
2261 +- do {
2262 +- if (dma_pte_present(pte)) {
2263 +- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
2264 +- dma_clear_pte(pte);
2265 +- }
2266 +- pte++;
2267 +- tmp += level_size(level);
2268 +- } while (!first_pte_in_page(pte) &&
2269 +- tmp + level_size(level) - 1 <= last_pfn);
2270 ++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
2271 ++ domain->pgd, 0, start_pfn, last_pfn);
2272 +
2273 +- domain_flush_cache(domain, first_pte,
2274 +- (void *)pte - (void *)first_pte);
2275 +-
2276 +- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
2277 +- level++;
2278 +- }
2279 + /* free pgd */
2280 + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
2281 + free_pgtable_page(domain->pgd);
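
The intel-iommu hunk above replaces the iterative page-table walk with dma_pte_free_level(), which recurses down the levels and frees a child table only when the requested pfn range covers that child completely. A flattened two-level stand-in for the covers-the-whole-child test (the sizes and layout are illustrative only, not the VT-d page-table format):

#include <stdio.h>
#include <stdlib.h>

#define TOP_ENTRIES   8
#define LEAF_ENTRIES 16   /* each leaf covers LEAF_ENTRIES "pfns" */

static int *top[TOP_ENTRIES];   /* top[i] -> array of LEAF_ENTRIES ints */

static void free_range(unsigned long start, unsigned long last)
{
	for (unsigned long i = 0; i < TOP_ENTRIES; i++) {
		unsigned long level_first = i * LEAF_ENTRIES;
		unsigned long level_last  = level_first + LEAF_ENTRIES - 1;

		if (!top[i])
			continue;
		/* Free the whole child table only if [start, last] covers it. */
		if (start <= level_first && last >= level_last) {
			free(top[i]);
			top[i] = NULL;
			printf("freed leaf %lu (pfns %lu-%lu)\n",
			       i, level_first, level_last);
		}
	}
}

int main(void)
{
	for (int i = 0; i < TOP_ENTRIES; i++)
		top[i] = calloc(LEAF_ENTRIES, sizeof(int));

	free_range(16, 63);   /* covers leaves 1..3 completely: freed */
	free_range(0, 7);     /* only half of leaf 0: kept */

	for (int i = 0; i < TOP_ENTRIES; i++)
		free(top[i]);
	return 0;
}
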
2282 +diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
2283 +index 120815a..5a19abd 100644
2284 +--- a/drivers/leds/leds-wm831x-status.c
2285 ++++ b/drivers/leds/leds-wm831x-status.c
2286 +@@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev)
2287 + int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
2288 + int ret;
2289 +
2290 +- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
2291 ++ res = platform_get_resource(pdev, IORESOURCE_REG, 0);
2292 + if (res == NULL) {
2293 +- dev_err(&pdev->dev, "No I/O resource\n");
2294 ++ dev_err(&pdev->dev, "No register resource\n");
2295 + ret = -EINVAL;
2296 + goto err;
2297 + }
2298 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
2299 +index 0862622..63676a8 100644
2300 +--- a/drivers/media/common/siano/smsdvb-main.c
2301 ++++ b/drivers/media/common/siano/smsdvb-main.c
2302 +@@ -276,7 +276,8 @@ static void smsdvb_update_per_slices(struct smsdvb_client_t *client,
2303 +
2304 + /* Legacy PER/BER */
2305 + tmp = p->ets_packets * 65535;
2306 +- do_div(tmp, p->ts_packets + p->ets_packets);
2307 ++ if (p->ts_packets + p->ets_packets)
2308 ++ do_div(tmp, p->ts_packets + p->ets_packets);
2309 + client->legacy_per = tmp;
2310 + }
2311 +
2312 +diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
2313 +index 856374b..2c7217f 100644
2314 +--- a/drivers/media/dvb-frontends/mb86a20s.c
2315 ++++ b/drivers/media/dvb-frontends/mb86a20s.c
2316 +@@ -157,7 +157,6 @@ static struct regdata mb86a20s_init2[] = {
2317 + { 0x45, 0x04 }, /* CN symbol 4 */
2318 + { 0x48, 0x04 }, /* CN manual mode */
2319 +
2320 +- { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
2321 + { 0x50, 0xd6 }, { 0x51, 0x1f },
2322 + { 0x50, 0xd2 }, { 0x51, 0x03 },
2323 + { 0x50, 0xd7 }, { 0x51, 0xbf },
2324 +@@ -1860,16 +1859,15 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
2325 + dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
2326 + __func__, state->if_freq, (long long)pll);
2327 +
2328 +- if (!state->config->is_serial) {
2329 ++ if (!state->config->is_serial)
2330 + regD5 &= ~1;
2331 +
2332 +- rc = mb86a20s_writereg(state, 0x50, 0xd5);
2333 +- if (rc < 0)
2334 +- goto err;
2335 +- rc = mb86a20s_writereg(state, 0x51, regD5);
2336 +- if (rc < 0)
2337 +- goto err;
2338 +- }
2339 ++ rc = mb86a20s_writereg(state, 0x50, 0xd5);
2340 ++ if (rc < 0)
2341 ++ goto err;
2342 ++ rc = mb86a20s_writereg(state, 0x51, regD5);
2343 ++ if (rc < 0)
2344 ++ goto err;
2345 +
2346 + rc = mb86a20s_writeregdata(state, mb86a20s_init2);
2347 + if (rc < 0)
2348 +diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
2349 +index afe0eae..28893a6 100644
2350 +--- a/drivers/media/pci/cx88/cx88.h
2351 ++++ b/drivers/media/pci/cx88/cx88.h
2352 +@@ -259,7 +259,7 @@ struct cx88_input {
2353 + };
2354 +
2355 + enum cx88_audio_chip {
2356 +- CX88_AUDIO_WM8775,
2357 ++ CX88_AUDIO_WM8775 = 1,
2358 + CX88_AUDIO_TVAUDIO,
2359 + };
2360 +
2361 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
2362 +index 559fab2..1ec60264 100644
2363 +--- a/drivers/media/platform/exynos-gsc/gsc-core.c
2364 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.c
2365 +@@ -1122,10 +1122,14 @@ static int gsc_probe(struct platform_device *pdev)
2366 + goto err_clk;
2367 + }
2368 +
2369 +- ret = gsc_register_m2m_device(gsc);
2370 ++ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
2371 + if (ret)
2372 + goto err_clk;
2373 +
2374 ++ ret = gsc_register_m2m_device(gsc);
2375 ++ if (ret)
2376 ++ goto err_v4l2;
2377 ++
2378 + platform_set_drvdata(pdev, gsc);
2379 + pm_runtime_enable(dev);
2380 + ret = pm_runtime_get_sync(&pdev->dev);
2381 +@@ -1147,6 +1151,8 @@ err_pm:
2382 + pm_runtime_put(dev);
2383 + err_m2m:
2384 + gsc_unregister_m2m_device(gsc);
2385 ++err_v4l2:
2386 ++ v4l2_device_unregister(&gsc->v4l2_dev);
2387 + err_clk:
2388 + gsc_clk_put(gsc);
2389 + return ret;
2390 +@@ -1157,6 +1163,7 @@ static int gsc_remove(struct platform_device *pdev)
2391 + struct gsc_dev *gsc = platform_get_drvdata(pdev);
2392 +
2393 + gsc_unregister_m2m_device(gsc);
2394 ++ v4l2_device_unregister(&gsc->v4l2_dev);
2395 +
2396 + vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
2397 + pm_runtime_disable(&pdev->dev);
2398 +diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
2399 +index cc19bba..76435d3 100644
2400 +--- a/drivers/media/platform/exynos-gsc/gsc-core.h
2401 ++++ b/drivers/media/platform/exynos-gsc/gsc-core.h
2402 +@@ -343,6 +343,7 @@ struct gsc_dev {
2403 + unsigned long state;
2404 + struct vb2_alloc_ctx *alloc_ctx;
2405 + struct video_device vdev;
2406 ++ struct v4l2_device v4l2_dev;
2407 + };
2408 +
2409 + /**
2410 +diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2411 +index 40a73f7..e576ff2 100644
2412 +--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
2413 ++++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2414 +@@ -751,6 +751,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
2415 + gsc->vdev.release = video_device_release_empty;
2416 + gsc->vdev.lock = &gsc->lock;
2417 + gsc->vdev.vfl_dir = VFL_DIR_M2M;
2418 ++ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
2419 + snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
2420 + GSC_MODULE_NAME, gsc->id);
2421 +
2422 +diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
2423 +index 08fbfed..e85dc4f 100644
2424 +--- a/drivers/media/platform/exynos4-is/fimc-lite.c
2425 ++++ b/drivers/media/platform/exynos4-is/fimc-lite.c
2426 +@@ -90,7 +90,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
2427 + .name = "RAW10 (GRBG)",
2428 + .fourcc = V4L2_PIX_FMT_SGRBG10,
2429 + .colorspace = V4L2_COLORSPACE_SRGB,
2430 +- .depth = { 10 },
2431 ++ .depth = { 16 },
2432 + .color = FIMC_FMT_RAW10,
2433 + .memplanes = 1,
2434 + .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
2435 +@@ -99,7 +99,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
2436 + .name = "RAW12 (GRBG)",
2437 + .fourcc = V4L2_PIX_FMT_SGRBG12,
2438 + .colorspace = V4L2_COLORSPACE_SRGB,
2439 +- .depth = { 12 },
2440 ++ .depth = { 16 },
2441 + .color = FIMC_FMT_RAW12,
2442 + .memplanes = 1,
2443 + .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
2444 +diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
2445 +index 19f556c..91f21e2 100644
2446 +--- a/drivers/media/platform/exynos4-is/media-dev.c
2447 ++++ b/drivers/media/platform/exynos4-is/media-dev.c
2448 +@@ -1530,9 +1530,9 @@ static int fimc_md_probe(struct platform_device *pdev)
2449 + err_unlock:
2450 + mutex_unlock(&fmd->media_dev.graph_mutex);
2451 + err_clk:
2452 +- media_device_unregister(&fmd->media_dev);
2453 + fimc_md_put_clocks(fmd);
2454 + fimc_md_unregister_entities(fmd);
2455 ++ media_device_unregister(&fmd->media_dev);
2456 + err_md:
2457 + v4l2_device_unregister(&fmd->v4l2_dev);
2458 + return ret;
2459 +diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
2460 +index 47bdb8f..65edb4a 100644
2461 +--- a/drivers/mmc/host/tmio_mmc_dma.c
2462 ++++ b/drivers/mmc/host/tmio_mmc_dma.c
2463 +@@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
2464 + pio:
2465 + if (!desc) {
2466 + /* DMA failed, fall back to PIO */
2467 ++ tmio_mmc_enable_dma(host, false);
2468 + if (ret >= 0)
2469 + ret = -EIO;
2470 + host->chan_rx = NULL;
2471 +@@ -116,7 +117,6 @@ pio:
2472 + }
2473 + dev_warn(&host->pdev->dev,
2474 + "DMA failed: %d, falling back to PIO\n", ret);
2475 +- tmio_mmc_enable_dma(host, false);
2476 + }
2477 +
2478 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
2479 +@@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
2480 + pio:
2481 + if (!desc) {
2482 + /* DMA failed, fall back to PIO */
2483 ++ tmio_mmc_enable_dma(host, false);
2484 + if (ret >= 0)
2485 + ret = -EIO;
2486 + host->chan_tx = NULL;
2487 +@@ -197,7 +198,6 @@ pio:
2488 + }
2489 + dev_warn(&host->pdev->dev,
2490 + "DMA failed: %d, falling back to PIO\n", ret);
2491 +- tmio_mmc_enable_dma(host, false);
2492 + }
2493 +
2494 + dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
2495 +diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
2496 +index dfcd0a5..fb8c4de 100644
2497 +--- a/drivers/mtd/nand/nand_base.c
2498 ++++ b/drivers/mtd/nand/nand_base.c
2499 +@@ -2793,7 +2793,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2500 +
2501 + if (!chip->select_chip)
2502 + chip->select_chip = nand_select_chip;
2503 +- if (!chip->read_byte)
2504 ++
2505 ++ /* If called twice, pointers that depend on busw may need to be reset */
2506 ++ if (!chip->read_byte || chip->read_byte == nand_read_byte)
2507 + chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
2508 + if (!chip->read_word)
2509 + chip->read_word = nand_read_word;
2510 +@@ -2801,9 +2803,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2511 + chip->block_bad = nand_block_bad;
2512 + if (!chip->block_markbad)
2513 + chip->block_markbad = nand_default_block_markbad;
2514 +- if (!chip->write_buf)
2515 ++ if (!chip->write_buf || chip->write_buf == nand_write_buf)
2516 + chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2517 +- if (!chip->read_buf)
2518 ++ if (!chip->read_buf || chip->read_buf == nand_read_buf)
2519 + chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2520 + if (!chip->scan_bbt)
2521 + chip->scan_bbt = nand_default_bbt;
2522 +diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2523 +index 5df49d3..c95bfb1 100644
2524 +--- a/drivers/mtd/ubi/wl.c
2525 ++++ b/drivers/mtd/ubi/wl.c
2526 +@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2527 + if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
2528 + dbg_wl("no WL needed: min used EC %d, max free EC %d",
2529 + e1->ec, e2->ec);
2530 ++
2531 ++ /* Give the unused PEB back */
2532 ++ wl_tree_add(e2, &ubi->free);
2533 + goto out_cancel;
2534 + }
2535 + self_check_in_wl_tree(ubi, e1, &ubi->used);
2536 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2537 +index b017818..90ab292 100644
2538 +--- a/drivers/net/ethernet/marvell/mvneta.c
2539 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2540 +@@ -138,7 +138,9 @@
2541 + #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
2542 + #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
2543 + #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
2544 ++#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
2545 + #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
2546 ++#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
2547 + #define MVNETA_MIB_COUNTERS_BASE 0x3080
2548 + #define MVNETA_MIB_LATE_COLLISION 0x7c
2549 + #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
2550 +@@ -915,6 +917,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
2551 + /* Assign port SDMA configuration */
2552 + mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
2553 +
2554 ++ /* Disable PHY polling in hardware, since we're using the
2555 ++ * kernel phylib to do this.
2556 ++ */
2557 ++ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
2558 ++ val &= ~MVNETA_PHY_POLLING_ENABLE;
2559 ++ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
2560 ++
2561 + mvneta_set_ucast_table(pp, -1);
2562 + mvneta_set_special_mcast_table(pp, -1);
2563 + mvneta_set_other_mcast_table(pp, -1);
2564 +@@ -2307,7 +2316,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
2565 + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2566 + val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2567 + MVNETA_GMAC_CONFIG_GMII_SPEED |
2568 +- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2569 ++ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2570 ++ MVNETA_GMAC_AN_SPEED_EN |
2571 ++ MVNETA_GMAC_AN_DUPLEX_EN);
2572 +
2573 + if (phydev->duplex)
2574 + val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2575 +diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2576 +index 1f694ab..77d3a70 100644
2577 +--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2578 ++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2579 +@@ -1173,6 +1173,10 @@ skip_ws_det:
2580 + * is_on == 0 means MRC CCK is OFF (more noise imm)
2581 + */
2582 + bool is_on = param ? 1 : 0;
2583 ++
2584 ++ if (ah->caps.rx_chainmask == 1)
2585 ++ break;
2586 ++
2587 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2588 + AR_PHY_MRC_CCK_ENABLE, is_on);
2589 + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2590 +diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
2591 +index c1224b5..020b9b3 100644
2592 +--- a/drivers/net/wireless/ath/ath9k/ath9k.h
2593 ++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
2594 +@@ -79,10 +79,6 @@ struct ath_config {
2595 + sizeof(struct ath_buf_state)); \
2596 + } while (0)
2597 +
2598 +-#define ATH_RXBUF_RESET(_bf) do { \
2599 +- (_bf)->bf_stale = false; \
2600 +- } while (0)
2601 +-
2602 + /**
2603 + * enum buffer_type - Buffer type flags
2604 + *
2605 +@@ -317,6 +313,7 @@ struct ath_rx {
2606 + struct ath_descdma rxdma;
2607 + struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
2608 +
2609 ++ struct ath_buf *buf_hold;
2610 + struct sk_buff *frag;
2611 +
2612 + u32 ampdu_ref;
2613 +diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
2614 +index 865e043..b4902b3 100644
2615 +--- a/drivers/net/wireless/ath/ath9k/recv.c
2616 ++++ b/drivers/net/wireless/ath/ath9k/recv.c
2617 +@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2618 + struct ath_desc *ds;
2619 + struct sk_buff *skb;
2620 +
2621 +- ATH_RXBUF_RESET(bf);
2622 +-
2623 + ds = bf->bf_desc;
2624 + ds->ds_link = 0; /* link to null */
2625 + ds->ds_data = bf->bf_buf_addr;
2626 +@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2627 + sc->rx.rxlink = &ds->ds_link;
2628 + }
2629 +
2630 ++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
2631 ++{
2632 ++ if (sc->rx.buf_hold)
2633 ++ ath_rx_buf_link(sc, sc->rx.buf_hold);
2634 ++
2635 ++ sc->rx.buf_hold = bf;
2636 ++}
2637 ++
2638 + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
2639 + {
2640 + /* XXX block beacon interrupts */
2641 +@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
2642 +
2643 + skb = bf->bf_mpdu;
2644 +
2645 +- ATH_RXBUF_RESET(bf);
2646 + memset(skb->data, 0, ah->caps.rx_status_len);
2647 + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
2648 + ah->caps.rx_status_len, DMA_TO_DEVICE);
2649 +@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
2650 + if (list_empty(&sc->rx.rxbuf))
2651 + goto start_recv;
2652 +
2653 ++ sc->rx.buf_hold = NULL;
2654 + sc->rx.rxlink = NULL;
2655 + list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
2656 + ath_rx_buf_link(sc, bf);
2657 +@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
2658 + }
2659 +
2660 + bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
2661 ++ if (bf == sc->rx.buf_hold)
2662 ++ return NULL;
2663 ++
2664 + ds = bf->bf_desc;
2665 +
2666 + /*
2667 +@@ -1375,7 +1384,7 @@ requeue:
2668 + if (edma) {
2669 + ath_rx_edma_buf_link(sc, qtype);
2670 + } else {
2671 +- ath_rx_buf_link(sc, bf);
2672 ++ ath_rx_buf_relink(sc, bf);
2673 + ath9k_hw_rxena(ah);
2674 + }
2675 + } while (1);
2676 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2677 +index 9279927..ab64683 100644
2678 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2679 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2680 +@@ -2602,6 +2602,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2681 + for (acno = 0, ac = &an->ac[acno];
2682 + acno < IEEE80211_NUM_ACS; acno++, ac++) {
2683 + ac->sched = false;
2684 ++ ac->clear_ps_filter = true;
2685 + ac->txq = sc->tx.txq_map[acno];
2686 + INIT_LIST_HEAD(&ac->tid_q);
2687 + }
2688 +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2689 +index 1860c57..4fb9635 100644
2690 +--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2691 ++++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2692 +@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
2693 +
2694 + /*
2695 + * post receive buffers
2696 +- * return false is refill failed completely and ring is empty this will stall
2697 +- * the rx dma and user might want to call rxfill again asap. This unlikely
2698 +- * happens on memory-rich NIC, but often on memory-constrained dongle
2699 ++ * Return false if refill failed completely or dma mapping failed. The ring
2700 ++ * is empty, which will stall the rx dma and user might want to call rxfill
2701 ++ * again asap. This is unlikely to happen on a memory-rich NIC, but often on
2702 ++ * memory-constrained dongle.
2703 + */
2704 + bool dma_rxfill(struct dma_pub *pub)
2705 + {
2706 +@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
2707 +
2708 + pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
2709 + DMA_FROM_DEVICE);
2710 ++ if (dma_mapping_error(di->dmadev, pa))
2711 ++ return false;
2712 +
2713 + /* save the free packet pointer */
2714 + di->rxp[rxout] = p;
2715 +@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
2716 +
2717 + /* get physical address of buffer start */
2718 + pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
2719 +-
2720 ++ /* if mapping failed, free skb */
2721 ++ if (dma_mapping_error(di->dmadev, pa)) {
2722 ++ brcmu_pkt_buf_free_skb(p);
2723 ++ return;
2724 ++ }
2725 + /* With a DMA segment list, Descriptor table is filled
2726 + * using the segment list instead of looping over
2727 + * buffers in multi-chain DMA. Therefore, EOF for SGLIST
2728 +diff --git a/drivers/of/base.c b/drivers/of/base.c
2729 +index 5c54279..bf8432f 100644
2730 +--- a/drivers/of/base.c
2731 ++++ b/drivers/of/base.c
2732 +@@ -1629,6 +1629,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2733 + ap = dt_alloc(sizeof(*ap) + len + 1, 4);
2734 + if (!ap)
2735 + continue;
2736 ++ memset(ap, 0, sizeof(*ap) + len + 1);
2737 + ap->alias = start;
2738 + of_alias_add(ap, np, id, start, len);
2739 + }
2740 +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2741 +index b90a3a0..19afb9a 100644
2742 +--- a/drivers/pinctrl/pinctrl-at91.c
2743 ++++ b/drivers/pinctrl/pinctrl-at91.c
2744 +@@ -325,7 +325,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
2745 +
2746 + static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
2747 + {
2748 +- return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
2749 ++ return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
2750 + }
2751 +
2752 + static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
2753 +@@ -445,7 +445,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
2754 +
2755 + static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
2756 + {
2757 +- return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
2758 ++ return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
2759 + }
2760 +
2761 + static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
2762 +diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
2763 +index 4c1d2e7..efb0c4c 100644
2764 +--- a/drivers/scsi/mpt3sas/Makefile
2765 ++++ b/drivers/scsi/mpt3sas/Makefile
2766 +@@ -1,5 +1,5 @@
2767 + # mpt3sas makefile
2768 +-obj-m += mpt3sas.o
2769 ++obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
2770 + mpt3sas-y += mpt3sas_base.o \
2771 + mpt3sas_config.o \
2772 + mpt3sas_scsih.o \
2773 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2774 +index 86fcf2c..2783dd7 100644
2775 +--- a/drivers/scsi/sd.c
2776 ++++ b/drivers/scsi/sd.c
2777 +@@ -2419,14 +2419,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2778 + }
2779 + }
2780 +
2781 +- if (modepage == 0x3F) {
2782 +- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2783 +- "present\n");
2784 +- goto defaults;
2785 +- } else if ((buffer[offset] & 0x3f) != modepage) {
2786 +- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2787 +- goto defaults;
2788 +- }
2789 ++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2790 ++ goto defaults;
2791 ++
2792 + Page_found:
2793 + if (modepage == 8) {
2794 + sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2795 +diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
2796 +index c1950e3..674b236 100644
2797 +--- a/drivers/staging/comedi/drivers/dt282x.c
2798 ++++ b/drivers/staging/comedi/drivers/dt282x.c
2799 +@@ -264,8 +264,9 @@ struct dt282x_private {
2800 + } \
2801 + udelay(5); \
2802 + } \
2803 +- if (_i) \
2804 ++ if (_i) { \
2805 + b \
2806 ++ } \
2807 + } while (0)
2808 +
2809 + static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
2810 +diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
2811 +index e77fb6e..8f54c50 100644
2812 +--- a/drivers/staging/zram/zram_drv.c
2813 ++++ b/drivers/staging/zram/zram_drv.c
2814 +@@ -445,6 +445,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
2815 + goto out;
2816 + }
2817 +
2818 ++ /*
2819 ++ * zram_slot_free_notify could miss free so that let's
2820 ++ * double check.
2821 ++ */
2822 ++ if (unlikely(meta->table[index].handle ||
2823 ++ zram_test_flag(meta, index, ZRAM_ZERO)))
2824 ++ zram_free_page(zram, index);
2825 ++
2826 + ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
2827 + meta->compress_workmem);
2828 +
2829 +@@ -504,6 +512,20 @@ out:
2830 + return ret;
2831 + }
2832 +
2833 ++static void handle_pending_slot_free(struct zram *zram)
2834 ++{
2835 ++ struct zram_slot_free *free_rq;
2836 ++
2837 ++ spin_lock(&zram->slot_free_lock);
2838 ++ while (zram->slot_free_rq) {
2839 ++ free_rq = zram->slot_free_rq;
2840 ++ zram->slot_free_rq = free_rq->next;
2841 ++ zram_free_page(zram, free_rq->index);
2842 ++ kfree(free_rq);
2843 ++ }
2844 ++ spin_unlock(&zram->slot_free_lock);
2845 ++}
2846 ++
2847 + static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2848 + int offset, struct bio *bio, int rw)
2849 + {
2850 +@@ -511,10 +533,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2851 +
2852 + if (rw == READ) {
2853 + down_read(&zram->lock);
2854 ++ handle_pending_slot_free(zram);
2855 + ret = zram_bvec_read(zram, bvec, index, offset, bio);
2856 + up_read(&zram->lock);
2857 + } else {
2858 + down_write(&zram->lock);
2859 ++ handle_pending_slot_free(zram);
2860 + ret = zram_bvec_write(zram, bvec, index, offset);
2861 + up_write(&zram->lock);
2862 + }
2863 +@@ -522,11 +546,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2864 + return ret;
2865 + }
2866 +
2867 +-static void zram_reset_device(struct zram *zram)
2868 ++static void zram_reset_device(struct zram *zram, bool reset_capacity)
2869 + {
2870 + size_t index;
2871 + struct zram_meta *meta;
2872 +
2873 ++ flush_work(&zram->free_work);
2874 ++
2875 + down_write(&zram->init_lock);
2876 + if (!zram->init_done) {
2877 + up_write(&zram->init_lock);
2878 +@@ -551,7 +577,8 @@ static void zram_reset_device(struct zram *zram)
2879 + memset(&zram->stats, 0, sizeof(zram->stats));
2880 +
2881 + zram->disksize = 0;
2882 +- set_capacity(zram->disk, 0);
2883 ++ if (reset_capacity)
2884 ++ set_capacity(zram->disk, 0);
2885 + up_write(&zram->init_lock);
2886 + }
2887 +
2888 +@@ -635,7 +662,7 @@ static ssize_t reset_store(struct device *dev,
2889 + if (bdev)
2890 + fsync_bdev(bdev);
2891 +
2892 +- zram_reset_device(zram);
2893 ++ zram_reset_device(zram, true);
2894 + return len;
2895 + }
2896 +
2897 +@@ -720,16 +747,40 @@ error:
2898 + bio_io_error(bio);
2899 + }
2900 +
2901 ++static void zram_slot_free(struct work_struct *work)
2902 ++{
2903 ++ struct zram *zram;
2904 ++
2905 ++ zram = container_of(work, struct zram, free_work);
2906 ++ down_write(&zram->lock);
2907 ++ handle_pending_slot_free(zram);
2908 ++ up_write(&zram->lock);
2909 ++}
2910 ++
2911 ++static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
2912 ++{
2913 ++ spin_lock(&zram->slot_free_lock);
2914 ++ free_rq->next = zram->slot_free_rq;
2915 ++ zram->slot_free_rq = free_rq;
2916 ++ spin_unlock(&zram->slot_free_lock);
2917 ++}
2918 ++
2919 + static void zram_slot_free_notify(struct block_device *bdev,
2920 + unsigned long index)
2921 + {
2922 + struct zram *zram;
2923 ++ struct zram_slot_free *free_rq;
2924 +
2925 + zram = bdev->bd_disk->private_data;
2926 +- down_write(&zram->lock);
2927 +- zram_free_page(zram, index);
2928 +- up_write(&zram->lock);
2929 + atomic64_inc(&zram->stats.notify_free);
2930 ++
2931 ++ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
2932 ++ if (!free_rq)
2933 ++ return;
2934 ++
2935 ++ free_rq->index = index;
2936 ++ add_slot_free(zram, free_rq);
2937 ++ schedule_work(&zram->free_work);
2938 + }
2939 +
2940 + static const struct block_device_operations zram_devops = {
2941 +@@ -776,6 +827,10 @@ static int create_device(struct zram *zram, int device_id)
2942 + init_rwsem(&zram->lock);
2943 + init_rwsem(&zram->init_lock);
2944 +
2945 ++ INIT_WORK(&zram->free_work, zram_slot_free);
2946 ++ spin_lock_init(&zram->slot_free_lock);
2947 ++ zram->slot_free_rq = NULL;
2948 ++
2949 + zram->queue = blk_alloc_queue(GFP_KERNEL);
2950 + if (!zram->queue) {
2951 + pr_err("Error allocating disk queue for device %d\n",
2952 +@@ -902,10 +957,12 @@ static void __exit zram_exit(void)
2953 + for (i = 0; i < num_devices; i++) {
2954 + zram = &zram_devices[i];
2955 +
2956 +- get_disk(zram->disk);
2957 + destroy_device(zram);
2958 +- zram_reset_device(zram);
2959 +- put_disk(zram->disk);
2960 ++ /*
2961 ++ * Shouldn't access zram->disk after destroy_device
2962 ++ * because destroy_device already released zram->disk.
2963 ++ */
2964 ++ zram_reset_device(zram, false);
2965 + }
2966 +
2967 + unregister_blkdev(zram_major, "zram");
2968 +diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
2969 +index 9e57bfb..97a3acf 100644
2970 +--- a/drivers/staging/zram/zram_drv.h
2971 ++++ b/drivers/staging/zram/zram_drv.h
2972 +@@ -94,11 +94,20 @@ struct zram_meta {
2973 + struct zs_pool *mem_pool;
2974 + };
2975 +
2976 ++struct zram_slot_free {
2977 ++ unsigned long index;
2978 ++ struct zram_slot_free *next;
2979 ++};
2980 ++
2981 + struct zram {
2982 + struct zram_meta *meta;
2983 + struct rw_semaphore lock; /* protect compression buffers, table,
2984 + * 32bit stat counters against concurrent
2985 + * notifications, reads and writes */
2986 ++
2987 ++ struct work_struct free_work; /* handle pending free request */
2988 ++ struct zram_slot_free *slot_free_rq; /* list head of free request */
2989 ++
2990 + struct request_queue *queue;
2991 + struct gendisk *disk;
2992 + int init_done;
2993 +@@ -109,6 +118,7 @@ struct zram {
2994 + * we can store in a disk.
2995 + */
2996 + u64 disksize; /* bytes */
2997 ++ spinlock_t slot_free_lock;
2998 +
2999 + struct zram_stats stats;
3000 + };
3001 +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
3002 +index cbe48ab..f608fbc 100644
3003 +--- a/drivers/target/target_core_alua.c
3004 ++++ b/drivers/target/target_core_alua.c
3005 +@@ -730,7 +730,7 @@ static int core_alua_write_tpg_metadata(
3006 + if (ret < 0)
3007 + pr_err("Error writing ALUA metadata file: %s\n", path);
3008 + fput(file);
3009 +- return ret ? -EIO : 0;
3010 ++ return (ret < 0) ? -EIO : 0;
3011 + }
3012 +
3013 + /*
3014 +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
3015 +index bd78faf..adec5a8 100644
3016 +--- a/drivers/target/target_core_pr.c
3017 ++++ b/drivers/target/target_core_pr.c
3018 +@@ -1949,7 +1949,7 @@ static int __core_scsi3_write_aptpl_to_file(
3019 + pr_debug("Error writing APTPL metadata file: %s\n", path);
3020 + fput(file);
3021 +
3022 +- return ret ? -EIO : 0;
3023 ++ return (ret < 0) ? -EIO : 0;
3024 + }
3025 +
3026 + /*
3027 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3028 +index 366af83..20689b9 100644
3029 +--- a/drivers/tty/tty_io.c
3030 ++++ b/drivers/tty/tty_io.c
3031 +@@ -850,7 +850,8 @@ void disassociate_ctty(int on_exit)
3032 + struct pid *tty_pgrp = tty_get_pgrp(tty);
3033 + if (tty_pgrp) {
3034 + kill_pgrp(tty_pgrp, SIGHUP, on_exit);
3035 +- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
3036 ++ if (!on_exit)
3037 ++ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
3038 + put_pid(tty_pgrp);
3039 + }
3040 + }
3041 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
3042 +index 8a230f0..d3318a0 100644
3043 +--- a/drivers/usb/class/cdc-wdm.c
3044 ++++ b/drivers/usb/class/cdc-wdm.c
3045 +@@ -209,6 +209,7 @@ skip_error:
3046 + static void wdm_int_callback(struct urb *urb)
3047 + {
3048 + int rv = 0;
3049 ++ int responding;
3050 + int status = urb->status;
3051 + struct wdm_device *desc;
3052 + struct usb_cdc_notification *dr;
3053 +@@ -262,8 +263,8 @@ static void wdm_int_callback(struct urb *urb)
3054 +
3055 + spin_lock(&desc->iuspin);
3056 + clear_bit(WDM_READ, &desc->flags);
3057 +- set_bit(WDM_RESPONDING, &desc->flags);
3058 +- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
3059 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
3060 ++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
3061 + && !test_bit(WDM_SUSPENDING, &desc->flags)) {
3062 + rv = usb_submit_urb(desc->response, GFP_ATOMIC);
3063 + dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
3064 +@@ -685,16 +686,20 @@ static void wdm_rxwork(struct work_struct *work)
3065 + {
3066 + struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
3067 + unsigned long flags;
3068 +- int rv;
3069 ++ int rv = 0;
3070 ++ int responding;
3071 +
3072 + spin_lock_irqsave(&desc->iuspin, flags);
3073 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
3074 + spin_unlock_irqrestore(&desc->iuspin, flags);
3075 + } else {
3076 ++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
3077 + spin_unlock_irqrestore(&desc->iuspin, flags);
3078 +- rv = usb_submit_urb(desc->response, GFP_KERNEL);
3079 ++ if (!responding)
3080 ++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
3081 + if (rv < 0 && rv != -EPERM) {
3082 + spin_lock_irqsave(&desc->iuspin, flags);
3083 ++ clear_bit(WDM_RESPONDING, &desc->flags);
3084 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
3085 + schedule_work(&desc->rxwork);
3086 + spin_unlock_irqrestore(&desc->iuspin, flags);
3087 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
3088 +index 7199adc..a6b2cab 100644
3089 +--- a/drivers/usb/core/config.c
3090 ++++ b/drivers/usb/core/config.c
3091 +@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
3092 +
3093 + memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
3094 + if (config->desc.bDescriptorType != USB_DT_CONFIG ||
3095 +- config->desc.bLength < USB_DT_CONFIG_SIZE) {
3096 ++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
3097 ++ config->desc.bLength > size) {
3098 + dev_err(ddev, "invalid descriptor for config index %d: "
3099 + "type = 0x%X, length = %d\n", cfgidx,
3100 + config->desc.bDescriptorType, config->desc.bLength);
3101 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3102 +index 558313d..17c3785 100644
3103 +--- a/drivers/usb/core/hub.c
3104 ++++ b/drivers/usb/core/hub.c
3105 +@@ -2918,7 +2918,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3106 + {
3107 + struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
3108 + struct usb_port *port_dev = hub->ports[udev->portnum - 1];
3109 +- enum pm_qos_flags_status pm_qos_stat;
3110 + int port1 = udev->portnum;
3111 + int status;
3112 + bool really_suspend = true;
3113 +@@ -2956,7 +2955,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3114 + status);
3115 + /* bail if autosuspend is requested */
3116 + if (PMSG_IS_AUTO(msg))
3117 +- return status;
3118 ++ goto err_wakeup;
3119 + }
3120 + }
3121 +
3122 +@@ -2965,14 +2964,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3123 + usb_set_usb2_hardware_lpm(udev, 0);
3124 +
3125 + if (usb_disable_ltm(udev)) {
3126 +- dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
3127 +- __func__);
3128 +- return -ENOMEM;
3129 ++ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
3130 ++ status = -ENOMEM;
3131 ++ if (PMSG_IS_AUTO(msg))
3132 ++ goto err_ltm;
3133 + }
3134 + if (usb_unlocked_disable_lpm(udev)) {
3135 +- dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
3136 +- __func__);
3137 +- return -ENOMEM;
3138 ++ dev_err(&udev->dev, "Failed to disable LPM before suspend\n.");
3139 ++ status = -ENOMEM;
3140 ++ if (PMSG_IS_AUTO(msg))
3141 ++ goto err_lpm3;
3142 + }
3143 +
3144 + /* see 7.1.7.6 */
3145 +@@ -3000,28 +3001,31 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3146 + if (status) {
3147 + dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
3148 + port1, status);
3149 +- /* paranoia: "should not happen" */
3150 +- if (udev->do_remote_wakeup) {
3151 +- if (!hub_is_superspeed(hub->hdev)) {
3152 +- (void) usb_control_msg(udev,
3153 +- usb_sndctrlpipe(udev, 0),
3154 +- USB_REQ_CLEAR_FEATURE,
3155 +- USB_RECIP_DEVICE,
3156 +- USB_DEVICE_REMOTE_WAKEUP, 0,
3157 +- NULL, 0,
3158 +- USB_CTRL_SET_TIMEOUT);
3159 +- } else
3160 +- (void) usb_disable_function_remotewakeup(udev);
3161 +-
3162 +- }
3163 +
3164 ++ /* Try to enable USB3 LPM and LTM again */
3165 ++ usb_unlocked_enable_lpm(udev);
3166 ++ err_lpm3:
3167 ++ usb_enable_ltm(udev);
3168 ++ err_ltm:
3169 + /* Try to enable USB2 hardware LPM again */
3170 + if (udev->usb2_hw_lpm_capable == 1)
3171 + usb_set_usb2_hardware_lpm(udev, 1);
3172 +
3173 +- /* Try to enable USB3 LTM and LPM again */
3174 +- usb_enable_ltm(udev);
3175 +- usb_unlocked_enable_lpm(udev);
3176 ++ if (udev->do_remote_wakeup) {
3177 ++ if (udev->speed < USB_SPEED_SUPER)
3178 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
3179 ++ USB_REQ_CLEAR_FEATURE,
3180 ++ USB_RECIP_DEVICE,
3181 ++ USB_DEVICE_REMOTE_WAKEUP, 0,
3182 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
3183 ++ else
3184 ++ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
3185 ++ USB_REQ_CLEAR_FEATURE,
3186 ++ USB_RECIP_INTERFACE,
3187 ++ USB_INTRF_FUNC_SUSPEND, 0,
3188 ++ NULL, 0, USB_CTRL_SET_TIMEOUT);
3189 ++ }
3190 ++ err_wakeup:
3191 +
3192 + /* System sleep transitions should never fail */
3193 + if (!PMSG_IS_AUTO(msg))
3194 +@@ -3039,16 +3043,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
3195 + usb_set_device_state(udev, USB_STATE_SUSPENDED);
3196 + }
3197 +
3198 +- /*
3199 +- * Check whether current status meets the requirement of
3200 +- * usb port power off mechanism
3201 +- */
3202 +- pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
3203 +- PM_QOS_FLAG_NO_POWER_OFF);
3204 +- if (!udev->do_remote_wakeup
3205 +- && pm_qos_stat != PM_QOS_FLAGS_ALL
3206 +- && udev->persist_enabled
3207 +- && !status) {
3208 ++ if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
3209 + pm_runtime_put_sync(&port_dev->dev);
3210 + port_dev->did_runtime_put = true;
3211 + }
3212 +diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
3213 +index d6b0fad..9909911 100644
3214 +--- a/drivers/usb/core/port.c
3215 ++++ b/drivers/usb/core/port.c
3216 +@@ -89,22 +89,19 @@ static int usb_port_runtime_resume(struct device *dev)
3217 + retval = usb_hub_set_port_power(hdev, hub, port1, true);
3218 + if (port_dev->child && !retval) {
3219 + /*
3220 +- * Wait for usb hub port to be reconnected in order to make
3221 +- * the resume procedure successful.
3222 ++ * Attempt to wait for usb hub port to be reconnected in order
3223 ++ * to make the resume procedure successful. The device may have
3224 ++ * disconnected while the port was powered off, so ignore the
3225 ++ * return status.
3226 + */
3227 + retval = hub_port_debounce_be_connected(hub, port1);
3228 +- if (retval < 0) {
3229 ++ if (retval < 0)
3230 + dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
3231 + retval);
3232 +- goto out;
3233 +- }
3234 + usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
3235 +-
3236 +- /* Set return value to 0 if debounce successful */
3237 + retval = 0;
3238 + }
3239 +
3240 +-out:
3241 + clear_bit(port1, hub->busy_bits);
3242 + usb_autopm_put_interface(intf);
3243 + return retval;
3244 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3245 +index f77083f..14d28d6 100644
3246 +--- a/drivers/usb/dwc3/gadget.c
3247 ++++ b/drivers/usb/dwc3/gadget.c
3248 +@@ -1508,6 +1508,15 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3249 + int irq;
3250 + u32 reg;
3251 +
3252 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3253 ++ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3254 ++ IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
3255 ++ if (ret) {
3256 ++ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3257 ++ irq, ret);
3258 ++ goto err0;
3259 ++ }
3260 ++
3261 + spin_lock_irqsave(&dwc->lock, flags);
3262 +
3263 + if (dwc->gadget_driver) {
3264 +@@ -1515,7 +1524,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3265 + dwc->gadget.name,
3266 + dwc->gadget_driver->driver.name);
3267 + ret = -EBUSY;
3268 +- goto err0;
3269 ++ goto err1;
3270 + }
3271 +
3272 + dwc->gadget_driver = driver;
3273 +@@ -1551,42 +1560,38 @@ static int dwc3_gadget_start(struct usb_gadget *g,
3274 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
3275 + if (ret) {
3276 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3277 +- goto err0;
3278 ++ goto err2;
3279 + }
3280 +
3281 + dep = dwc->eps[1];
3282 + ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
3283 + if (ret) {
3284 + dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3285 +- goto err1;
3286 ++ goto err3;
3287 + }
3288 +
3289 + /* begin to receive SETUP packets */
3290 + dwc->ep0state = EP0_SETUP_PHASE;
3291 + dwc3_ep0_out_start(dwc);
3292 +
3293 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3294 +- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
3295 +- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
3296 +- if (ret) {
3297 +- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
3298 +- irq, ret);
3299 +- goto err1;
3300 +- }
3301 +-
3302 + dwc3_gadget_enable_irq(dwc);
3303 +
3304 + spin_unlock_irqrestore(&dwc->lock, flags);
3305 +
3306 + return 0;
3307 +
3308 +-err1:
3309 ++err3:
3310 + __dwc3_gadget_ep_disable(dwc->eps[0]);
3311 +
3312 +-err0:
3313 ++err2:
3314 + dwc->gadget_driver = NULL;
3315 ++
3316 ++err1:
3317 + spin_unlock_irqrestore(&dwc->lock, flags);
3318 +
3319 ++ free_irq(irq, dwc);
3320 ++
3321 ++err0:
3322 + return ret;
3323 + }
3324 +
3325 +@@ -1600,9 +1605,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
3326 + spin_lock_irqsave(&dwc->lock, flags);
3327 +
3328 + dwc3_gadget_disable_irq(dwc);
3329 +- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3330 +- free_irq(irq, dwc);
3331 +-
3332 + __dwc3_gadget_ep_disable(dwc->eps[0]);
3333 + __dwc3_gadget_ep_disable(dwc->eps[1]);
3334 +
3335 +@@ -1610,6 +1612,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
3336 +
3337 + spin_unlock_irqrestore(&dwc->lock, flags);
3338 +
3339 ++ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
3340 ++ free_irq(irq, dwc);
3341 ++
3342 + return 0;
3343 + }
3344 +
3345 +diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
3346 +index e617047..0bb5d50 100644
3347 +--- a/drivers/usb/gadget/uvc_queue.c
3348 ++++ b/drivers/usb/gadget/uvc_queue.c
3349 +@@ -193,12 +193,16 @@ static int uvc_queue_buffer(struct uvc_video_queue *queue,
3350 +
3351 + mutex_lock(&queue->mutex);
3352 + ret = vb2_qbuf(&queue->queue, buf);
3353 ++ if (ret < 0)
3354 ++ goto done;
3355 ++
3356 + spin_lock_irqsave(&queue->irqlock, flags);
3357 + ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
3358 + queue->flags &= ~UVC_QUEUE_PAUSED;
3359 + spin_unlock_irqrestore(&queue->irqlock, flags);
3360 +- mutex_unlock(&queue->mutex);
3361 +
3362 ++done:
3363 ++ mutex_unlock(&queue->mutex);
3364 + return ret;
3365 + }
3366 +
3367 +diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
3368 +index e4c34ac..4c166e1 100644
3369 +--- a/drivers/usb/host/ehci-mxc.c
3370 ++++ b/drivers/usb/host/ehci-mxc.c
3371 +@@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
3372 + if (pdata && pdata->exit)
3373 + pdata->exit(pdev);
3374 +
3375 +- if (pdata->otg)
3376 ++ if (pdata && pdata->otg)
3377 + usb_phy_shutdown(pdata->otg);
3378 +
3379 + clk_disable_unprepare(priv->usbclk);
3380 +diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
3381 +index 279b049..ec337c2 100644
3382 +--- a/drivers/usb/host/ohci-pci.c
3383 ++++ b/drivers/usb/host/ohci-pci.c
3384 +@@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver = {
3385 + .remove = usb_hcd_pci_remove,
3386 + .shutdown = usb_hcd_pci_shutdown,
3387 +
3388 +-#ifdef CONFIG_PM_SLEEP
3389 ++#ifdef CONFIG_PM
3390 + .driver = {
3391 + .pm = &usb_hcd_pci_pm_ops
3392 + },
3393 +diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
3394 +index 8d7a132..9fe3225 100644
3395 +--- a/drivers/usb/host/xhci-ext-caps.h
3396 ++++ b/drivers/usb/host/xhci-ext-caps.h
3397 +@@ -71,7 +71,7 @@
3398 +
3399 + /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
3400 + #define XHCI_HLC (1 << 19)
3401 +-#define XHCI_BLC (1 << 19)
3402 ++#define XHCI_BLC (1 << 20)
3403 +
3404 + /* command register values to disable interrupts and halt the HC */
3405 + /* start/stop HC execution - do not write unless HC is halted*/
3406 +diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3407 +index 51e22bf..6eca5a5 100644
3408 +--- a/drivers/usb/host/xhci-plat.c
3409 ++++ b/drivers/usb/host/xhci-plat.c
3410 +@@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
3411 + * here that the generic code does not try to make a pci_dev from our
3412 + * dev struct in order to setup MSI
3413 + */
3414 +- xhci->quirks |= XHCI_BROKEN_MSI;
3415 ++ xhci->quirks |= XHCI_PLAT;
3416 + }
3417 +
3418 + /* called during probe() after chip reset completes */
3419 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3420 +index 9478caa..b3c4162 100644
3421 +--- a/drivers/usb/host/xhci.c
3422 ++++ b/drivers/usb/host/xhci.c
3423 +@@ -343,9 +343,14 @@ static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
3424 + static int xhci_try_enable_msi(struct usb_hcd *hcd)
3425 + {
3426 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3427 +- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3428 ++ struct pci_dev *pdev;
3429 + int ret;
3430 +
3431 ++ /* The xhci platform device has set up IRQs through usb_add_hcd. */
3432 ++ if (xhci->quirks & XHCI_PLAT)
3433 ++ return 0;
3434 ++
3435 ++ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3436 + /*
3437 + * Some Fresco Logic host controllers advertise MSI, but fail to
3438 + * generate interrupts. Don't even try to enable MSI.
3439 +@@ -3581,10 +3586,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3440 + {
3441 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3442 + struct xhci_virt_device *virt_dev;
3443 ++ struct device *dev = hcd->self.controller;
3444 + unsigned long flags;
3445 + u32 state;
3446 + int i, ret;
3447 +
3448 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
3449 ++ /*
3450 ++ * We called pm_runtime_get_noresume when the device was attached.
3451 ++ * Decrement the counter here to allow controller to runtime suspend
3452 ++ * if no devices remain.
3453 ++ */
3454 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
3455 ++ pm_runtime_put_noidle(dev);
3456 ++#endif
3457 ++
3458 + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3459 + /* If the host is halted due to driver unload, we still need to free the
3460 + * device.
3461 +@@ -3656,6 +3672,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3462 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3463 + {
3464 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3465 ++ struct device *dev = hcd->self.controller;
3466 + unsigned long flags;
3467 + int timeleft;
3468 + int ret;
3469 +@@ -3708,6 +3725,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3470 + goto disable_slot;
3471 + }
3472 + udev->slot_id = xhci->slot_id;
3473 ++
3474 ++#ifndef CONFIG_USB_DEFAULT_PERSIST
3475 ++ /*
3476 ++ * If resetting upon resume, we can't put the controller into runtime
3477 ++ * suspend if there is a device attached.
3478 ++ */
3479 ++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
3480 ++ pm_runtime_get_noresume(dev);
3481 ++#endif
3482 ++
3483 + /* Is this a LS or FS device under a HS hub? */
3484 + /* Hub or peripherial? */
3485 + return 1;
3486 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3487 +index c338741..6ab1e60 100644
3488 +--- a/drivers/usb/host/xhci.h
3489 ++++ b/drivers/usb/host/xhci.h
3490 +@@ -1542,6 +1542,7 @@ struct xhci_hcd {
3491 + #define XHCI_SPURIOUS_REBOOT (1 << 13)
3492 + #define XHCI_COMP_MODE_QUIRK (1 << 14)
3493 + #define XHCI_AVOID_BEI (1 << 15)
3494 ++#define XHCI_PLAT (1 << 16)
3495 + unsigned int num_active_eps;
3496 + unsigned int limit_active_eps;
3497 + /* There are two roothubs to keep track of bus suspend info for */
3498 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
3499 +index b013001..84657e0 100644
3500 +--- a/drivers/usb/serial/mos7720.c
3501 ++++ b/drivers/usb/serial/mos7720.c
3502 +@@ -374,7 +374,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3503 + kfree(urbtrack);
3504 + return -ENOMEM;
3505 + }
3506 +- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
3507 ++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
3508 + if (!urbtrack->setup) {
3509 + usb_free_urb(urbtrack->urb);
3510 + kfree(urbtrack);
3511 +@@ -382,8 +382,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3512 + }
3513 + urbtrack->setup->bRequestType = (__u8)0x40;
3514 + urbtrack->setup->bRequest = (__u8)0x0e;
3515 +- urbtrack->setup->wValue = get_reg_value(reg, dummy);
3516 +- urbtrack->setup->wIndex = get_reg_index(reg);
3517 ++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
3518 ++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
3519 + urbtrack->setup->wLength = 0;
3520 + usb_fill_control_urb(urbtrack->urb, usbdev,
3521 + usb_sndctrlpipe(usbdev, 0),
3522 +diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
3523 +index 04cdeb8..c4d2298 100644
3524 +--- a/drivers/xen/grant-table.c
3525 ++++ b/drivers/xen/grant-table.c
3526 +@@ -730,9 +730,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
3527 + void (*fn)(void *), void *arg, u16 count)
3528 + {
3529 + unsigned long flags;
3530 ++ struct gnttab_free_callback *cb;
3531 ++
3532 + spin_lock_irqsave(&gnttab_list_lock, flags);
3533 +- if (callback->next)
3534 +- goto out;
3535 ++
3536 ++ /* Check if the callback is already on the list */
3537 ++ cb = gnttab_free_callback_list;
3538 ++ while (cb) {
3539 ++ if (cb == callback)
3540 ++ goto out;
3541 ++ cb = cb->next;
3542 ++ }
3543 ++
3544 + callback->fn = fn;
3545 + callback->arg = arg;
3546 + callback->count = count;
3547 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3548 +index 238a055..9877a2a 100644
3549 +--- a/fs/btrfs/ioctl.c
3550 ++++ b/fs/btrfs/ioctl.c
3551 +@@ -3312,6 +3312,9 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
3552 +
3553 + switch (p->cmd) {
3554 + case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
3555 ++ if (root->fs_info->sb->s_flags & MS_RDONLY)
3556 ++ return -EROFS;
3557 ++
3558 + if (atomic_xchg(
3559 + &root->fs_info->mutually_exclusive_operation_running,
3560 + 1)) {
3561 +diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
3562 +index e0b4ef3..a5ce62e 100644
3563 +--- a/fs/ceph/ioctl.c
3564 ++++ b/fs/ceph/ioctl.c
3565 +@@ -196,8 +196,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
3566 + r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
3567 + &dl.object_no, &dl.object_offset,
3568 + &olen);
3569 +- if (r < 0)
3570 ++ if (r < 0) {
3571 ++ up_read(&osdc->map_sem);
3572 + return -EIO;
3573 ++ }
3574 + dl.file_offset -= dl.object_offset;
3575 + dl.object_size = ceph_file_layout_object_size(ci->i_layout);
3576 + dl.block_size = ceph_file_layout_su(ci->i_layout);
3577 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3578 +index d67c550..37950c6 100644
3579 +--- a/fs/cifs/connect.c
3580 ++++ b/fs/cifs/connect.c
3581 +@@ -379,6 +379,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3582 + try_to_freeze();
3583 +
3584 + /* we should try only the port we connected to before */
3585 ++ mutex_lock(&server->srv_mutex);
3586 + rc = generic_ip_connect(server);
3587 + if (rc) {
3588 + cifs_dbg(FYI, "reconnect error %d\n", rc);
3589 +@@ -390,6 +391,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3590 + server->tcpStatus = CifsNeedNegotiate;
3591 + spin_unlock(&GlobalMid_Lock);
3592 + }
3593 ++ mutex_unlock(&server->srv_mutex);
3594 + } while (server->tcpStatus == CifsNeedReconnect);
3595 +
3596 + return rc;
3597 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3598 +index b0c4334..f851d03 100644
3599 +--- a/fs/cifs/smb2misc.c
3600 ++++ b/fs/cifs/smb2misc.c
3601 +@@ -417,96 +417,108 @@ cifs_ses_oplock_break(struct work_struct *work)
3602 + }
3603 +
3604 + static bool
3605 +-smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
3606 ++smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3607 ++ struct smb2_lease_break_work *lw)
3608 + {
3609 +- struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3610 +- struct list_head *tmp, *tmp1, *tmp2;
3611 +- struct cifs_ses *ses;
3612 +- struct cifs_tcon *tcon;
3613 +- struct cifsInodeInfo *cinode;
3614 ++ bool found;
3615 ++ __u8 lease_state;
3616 ++ struct list_head *tmp;
3617 + struct cifsFileInfo *cfile;
3618 + struct cifs_pending_open *open;
3619 +- struct smb2_lease_break_work *lw;
3620 +- bool found;
3621 ++ struct cifsInodeInfo *cinode;
3622 + int ack_req = le32_to_cpu(rsp->Flags &
3623 + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3624 +
3625 +- lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3626 +- if (!lw)
3627 +- return false;
3628 ++ lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
3629 +
3630 +- INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3631 +- lw->lease_state = rsp->NewLeaseState;
3632 ++ list_for_each(tmp, &tcon->openFileList) {
3633 ++ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
3634 ++ cinode = CIFS_I(cfile->dentry->d_inode);
3635 +
3636 +- cifs_dbg(FYI, "Checking for lease break\n");
3637 ++ if (memcmp(cinode->lease_key, rsp->LeaseKey,
3638 ++ SMB2_LEASE_KEY_SIZE))
3639 ++ continue;
3640 +
3641 +- /* look up tcon based on tid & uid */
3642 +- spin_lock(&cifs_tcp_ses_lock);
3643 +- list_for_each(tmp, &server->smb_ses_list) {
3644 +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
3645 ++ cifs_dbg(FYI, "found in the open list\n");
3646 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3647 ++ le32_to_cpu(rsp->NewLeaseState));
3648 +
3649 +- spin_lock(&cifs_file_list_lock);
3650 +- list_for_each(tmp1, &ses->tcon_list) {
3651 +- tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
3652 ++ smb2_set_oplock_level(cinode, lease_state);
3653 +
3654 +- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3655 +- list_for_each(tmp2, &tcon->openFileList) {
3656 +- cfile = list_entry(tmp2, struct cifsFileInfo,
3657 +- tlist);
3658 +- cinode = CIFS_I(cfile->dentry->d_inode);
3659 ++ if (ack_req)
3660 ++ cfile->oplock_break_cancelled = false;
3661 ++ else
3662 ++ cfile->oplock_break_cancelled = true;
3663 +
3664 +- if (memcmp(cinode->lease_key, rsp->LeaseKey,
3665 +- SMB2_LEASE_KEY_SIZE))
3666 +- continue;
3667 ++ queue_work(cifsiod_wq, &cfile->oplock_break);
3668 ++ kfree(lw);
3669 ++ return true;
3670 ++ }
3671 +
3672 +- cifs_dbg(FYI, "found in the open list\n");
3673 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3674 +- le32_to_cpu(rsp->NewLeaseState));
3675 ++ found = false;
3676 ++ list_for_each_entry(open, &tcon->pending_opens, olist) {
3677 ++ if (memcmp(open->lease_key, rsp->LeaseKey,
3678 ++ SMB2_LEASE_KEY_SIZE))
3679 ++ continue;
3680 ++
3681 ++ if (!found && ack_req) {
3682 ++ found = true;
3683 ++ memcpy(lw->lease_key, open->lease_key,
3684 ++ SMB2_LEASE_KEY_SIZE);
3685 ++ lw->tlink = cifs_get_tlink(open->tlink);
3686 ++ queue_work(cifsiod_wq, &lw->lease_break);
3687 ++ }
3688 +
3689 +- smb2_set_oplock_level(cinode,
3690 +- smb2_map_lease_to_oplock(rsp->NewLeaseState));
3691 ++ cifs_dbg(FYI, "found in the pending open list\n");
3692 ++ cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3693 ++ le32_to_cpu(rsp->NewLeaseState));
3694 +
3695 +- if (ack_req)
3696 +- cfile->oplock_break_cancelled = false;
3697 +- else
3698 +- cfile->oplock_break_cancelled = true;
3699 ++ open->oplock = lease_state;
3700 ++ }
3701 ++ return found;
3702 ++}
3703 +
3704 +- queue_work(cifsiod_wq, &cfile->oplock_break);
3705 ++static bool
3706 ++smb2_is_valid_lease_break(char *buffer)
3707 ++{
3708 ++ struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3709 ++ struct list_head *tmp, *tmp1, *tmp2;
3710 ++ struct TCP_Server_Info *server;
3711 ++ struct cifs_ses *ses;
3712 ++ struct cifs_tcon *tcon;
3713 ++ struct smb2_lease_break_work *lw;
3714 +
3715 +- spin_unlock(&cifs_file_list_lock);
3716 +- spin_unlock(&cifs_tcp_ses_lock);
3717 +- return true;
3718 +- }
3719 ++ lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3720 ++ if (!lw)
3721 ++ return false;
3722 +
3723 +- found = false;
3724 +- list_for_each_entry(open, &tcon->pending_opens, olist) {
3725 +- if (memcmp(open->lease_key, rsp->LeaseKey,
3726 +- SMB2_LEASE_KEY_SIZE))
3727 +- continue;
3728 ++ INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3729 ++ lw->lease_state = rsp->NewLeaseState;
3730 +
3731 +- if (!found && ack_req) {
3732 +- found = true;
3733 +- memcpy(lw->lease_key, open->lease_key,
3734 +- SMB2_LEASE_KEY_SIZE);
3735 +- lw->tlink = cifs_get_tlink(open->tlink);
3736 +- queue_work(cifsiod_wq,
3737 +- &lw->lease_break);
3738 +- }
3739 ++ cifs_dbg(FYI, "Checking for lease break\n");
3740 ++
3741 ++ /* look up tcon based on tid & uid */
3742 ++ spin_lock(&cifs_tcp_ses_lock);
3743 ++ list_for_each(tmp, &cifs_tcp_ses_list) {
3744 ++ server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
3745 +
3746 +- cifs_dbg(FYI, "found in the pending open list\n");
3747 +- cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3748 +- le32_to_cpu(rsp->NewLeaseState));
3749 ++ list_for_each(tmp1, &server->smb_ses_list) {
3750 ++ ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
3751 +
3752 +- open->oplock =
3753 +- smb2_map_lease_to_oplock(rsp->NewLeaseState);
3754 +- }
3755 +- if (found) {
3756 +- spin_unlock(&cifs_file_list_lock);
3757 +- spin_unlock(&cifs_tcp_ses_lock);
3758 +- return true;
3759 ++ spin_lock(&cifs_file_list_lock);
3760 ++ list_for_each(tmp2, &ses->tcon_list) {
3761 ++ tcon = list_entry(tmp2, struct cifs_tcon,
3762 ++ tcon_list);
3763 ++ cifs_stats_inc(
3764 ++ &tcon->stats.cifs_stats.num_oplock_brks);
3765 ++ if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3766 ++ spin_unlock(&cifs_file_list_lock);
3767 ++ spin_unlock(&cifs_tcp_ses_lock);
3768 ++ return true;
3769 ++ }
3770 + }
3771 ++ spin_unlock(&cifs_file_list_lock);
3772 + }
3773 +- spin_unlock(&cifs_file_list_lock);
3774 + }
3775 + spin_unlock(&cifs_tcp_ses_lock);
3776 + kfree(lw);
3777 +@@ -532,7 +544,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3778 + if (rsp->StructureSize !=
3779 + smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
3780 + if (le16_to_cpu(rsp->StructureSize) == 44)
3781 +- return smb2_is_valid_lease_break(buffer, server);
3782 ++ return smb2_is_valid_lease_break(buffer);
3783 + else
3784 + return false;
3785 + }
3786 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3787 +index c2ca04e..ea4d188 100644
3788 +--- a/fs/ext4/inode.c
3789 ++++ b/fs/ext4/inode.c
3790 +@@ -1890,6 +1890,26 @@ static int ext4_writepage(struct page *page,
3791 + return ret;
3792 + }
3793 +
3794 ++static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
3795 ++{
3796 ++ int len;
3797 ++ loff_t size = i_size_read(mpd->inode);
3798 ++ int err;
3799 ++
3800 ++ BUG_ON(page->index != mpd->first_page);
3801 ++ if (page->index == size >> PAGE_CACHE_SHIFT)
3802 ++ len = size & ~PAGE_CACHE_MASK;
3803 ++ else
3804 ++ len = PAGE_CACHE_SIZE;
3805 ++ clear_page_dirty_for_io(page);
3806 ++ err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
3807 ++ if (!err)
3808 ++ mpd->wbc->nr_to_write--;
3809 ++ mpd->first_page++;
3810 ++
3811 ++ return err;
3812 ++}
3813 ++
3814 + #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
3815 +
3816 + /*
3817 +@@ -1904,82 +1924,94 @@ static int ext4_writepage(struct page *page,
3818 + *
3819 + * @mpd - extent of blocks
3820 + * @lblk - logical number of the block in the file
3821 +- * @b_state - b_state of the buffer head added
3822 ++ * @bh - buffer head we want to add to the extent
3823 + *
3824 +- * the function is used to collect contig. blocks in same state
3825 ++ * The function is used to collect contig. blocks in the same state. If the
3826 ++ * buffer doesn't require mapping for writeback and we haven't started the
3827 ++ * extent of buffers to map yet, the function returns 'true' immediately - the
3828 ++ * caller can write the buffer right away. Otherwise the function returns true
3829 ++ * if the block has been added to the extent, false if the block couldn't be
3830 ++ * added.
3831 + */
3832 +-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
3833 +- unsigned long b_state)
3834 ++static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
3835 ++ struct buffer_head *bh)
3836 + {
3837 + struct ext4_map_blocks *map = &mpd->map;
3838 +
3839 +- /* Don't go larger than mballoc is willing to allocate */
3840 +- if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
3841 +- return 0;
3842 ++ /* Buffer that doesn't need mapping for writeback? */
3843 ++ if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
3844 ++ (!buffer_delay(bh) && !buffer_unwritten(bh))) {
3845 ++ /* So far no extent to map => we write the buffer right away */
3846 ++ if (map->m_len == 0)
3847 ++ return true;
3848 ++ return false;
3849 ++ }
3850 +
3851 + /* First block in the extent? */
3852 + if (map->m_len == 0) {
3853 + map->m_lblk = lblk;
3854 + map->m_len = 1;
3855 +- map->m_flags = b_state & BH_FLAGS;
3856 +- return 1;
3857 ++ map->m_flags = bh->b_state & BH_FLAGS;
3858 ++ return true;
3859 + }
3860 +
3861 ++ /* Don't go larger than mballoc is willing to allocate */
3862 ++ if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
3863 ++ return false;
3864 ++
3865 + /* Can we merge the block to our big extent? */
3866 + if (lblk == map->m_lblk + map->m_len &&
3867 +- (b_state & BH_FLAGS) == map->m_flags) {
3868 ++ (bh->b_state & BH_FLAGS) == map->m_flags) {
3869 + map->m_len++;
3870 +- return 1;
3871 ++ return true;
3872 + }
3873 +- return 0;
3874 ++ return false;
3875 + }
3876 +
3877 +-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
3878 +- struct buffer_head *head,
3879 +- struct buffer_head *bh,
3880 +- ext4_lblk_t lblk)
3881 ++/*
3882 ++ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
3883 ++ *
3884 ++ * @mpd - extent of blocks for mapping
3885 ++ * @head - the first buffer in the page
3886 ++ * @bh - buffer we should start processing from
3887 ++ * @lblk - logical number of the block in the file corresponding to @bh
3888 ++ *
3889 ++ * Walk through page buffers from @bh upto @head (exclusive) and either submit
3890 ++ * the page for IO if all buffers in this page were mapped and there's no
3891 ++ * accumulated extent of buffers to map or add buffers in the page to the
3892 ++ * extent of buffers to map. The function returns 1 if the caller can continue
3893 ++ * by processing the next page, 0 if it should stop adding buffers to the
3894 ++ * extent to map because we cannot extend it anymore. It can also return value
3895 ++ * < 0 in case of error during IO submission.
3896 ++ */
3897 ++static int mpage_process_page_bufs(struct mpage_da_data *mpd,
3898 ++ struct buffer_head *head,
3899 ++ struct buffer_head *bh,
3900 ++ ext4_lblk_t lblk)
3901 + {
3902 + struct inode *inode = mpd->inode;
3903 ++ int err;
3904 + ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
3905 + >> inode->i_blkbits;
3906 +
3907 + do {
3908 + BUG_ON(buffer_locked(bh));
3909 +
3910 +- if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
3911 +- (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
3912 +- lblk >= blocks) {
3913 ++ if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
3914 + /* Found extent to map? */
3915 + if (mpd->map.m_len)
3916 +- return false;
3917 +- if (lblk >= blocks)
3918 +- return true;
3919 +- continue;
3920 ++ return 0;
3921 ++ /* Everything mapped so far and we hit EOF */
3922 ++ break;
3923 + }
3924 +- if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
3925 +- return false;
3926 + } while (lblk++, (bh = bh->b_this_page) != head);
3927 +- return true;
3928 +-}
3929 +-
3930 +-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
3931 +-{
3932 +- int len;
3933 +- loff_t size = i_size_read(mpd->inode);
3934 +- int err;
3935 +-
3936 +- BUG_ON(page->index != mpd->first_page);
3937 +- if (page->index == size >> PAGE_CACHE_SHIFT)
3938 +- len = size & ~PAGE_CACHE_MASK;
3939 +- else
3940 +- len = PAGE_CACHE_SIZE;
3941 +- clear_page_dirty_for_io(page);
3942 +- err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
3943 +- if (!err)
3944 +- mpd->wbc->nr_to_write--;
3945 +- mpd->first_page++;
3946 +-
3947 +- return err;
3948 ++ /* So far everything mapped? Submit the page for IO. */
3949 ++ if (mpd->map.m_len == 0) {
3950 ++ err = mpage_submit_page(mpd, head->b_page);
3951 ++ if (err < 0)
3952 ++ return err;
3953 ++ }
3954 ++ return lblk < blocks;
3955 + }
3956 +
3957 + /*
3958 +@@ -2003,8 +2035,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
3959 + struct inode *inode = mpd->inode;
3960 + struct buffer_head *head, *bh;
3961 + int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
3962 +- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
3963 +- >> inode->i_blkbits;
3964 + pgoff_t start, end;
3965 + ext4_lblk_t lblk;
3966 + sector_t pblock;
3967 +@@ -2039,18 +2069,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
3968 + */
3969 + mpd->map.m_len = 0;
3970 + mpd->map.m_flags = 0;
3971 +- add_page_bufs_to_extent(mpd, head, bh,
3972 +- lblk);
3973 ++ /*
3974 ++ * FIXME: If dioread_nolock supports
3975 ++ * blocksize < pagesize, we need to make
3976 ++ * sure we add size mapped so far to
3977 ++ * io_end->size as the following call
3978 ++ * can submit the page for IO.
3979 ++ */
3980 ++ err = mpage_process_page_bufs(mpd, head,
3981 ++ bh, lblk);
3982 + pagevec_release(&pvec);
3983 +- return 0;
3984 ++ if (err > 0)
3985 ++ err = 0;
3986 ++ return err;
3987 + }
3988 + if (buffer_delay(bh)) {
3989 + clear_buffer_delay(bh);
3990 + bh->b_blocknr = pblock++;
3991 + }
3992 + clear_buffer_unwritten(bh);
3993 +- } while (++lblk < blocks &&
3994 +- (bh = bh->b_this_page) != head);
3995 ++ } while (lblk++, (bh = bh->b_this_page) != head);
3996 +
3997 + /*
3998 + * FIXME: This is going to break if dioread_nolock
3999 +@@ -2319,14 +2357,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
4000 + lblk = ((ext4_lblk_t)page->index) <<
4001 + (PAGE_CACHE_SHIFT - blkbits);
4002 + head = page_buffers(page);
4003 +- if (!add_page_bufs_to_extent(mpd, head, head, lblk))
4004 ++ err = mpage_process_page_bufs(mpd, head, head, lblk);
4005 ++ if (err <= 0)
4006 + goto out;
4007 +- /* So far everything mapped? Submit the page for IO. */
4008 +- if (mpd->map.m_len == 0) {
4009 +- err = mpage_submit_page(mpd, page);
4010 +- if (err < 0)
4011 +- goto out;
4012 +- }
4013 ++ err = 0;
4014 +
4015 + /*
4016 + * Accumulated enough dirty pages? This doesn't apply
4017 +@@ -4566,7 +4600,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4018 + ext4_journal_stop(handle);
4019 + }
4020 +
4021 +- if (attr->ia_valid & ATTR_SIZE) {
4022 ++ if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
4023 ++ handle_t *handle;
4024 ++ loff_t oldsize = inode->i_size;
4025 +
4026 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4027 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4028 +@@ -4574,73 +4610,60 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4029 + if (attr->ia_size > sbi->s_bitmap_maxbytes)
4030 + return -EFBIG;
4031 + }
4032 +- }
4033 +-
4034 +- if (S_ISREG(inode->i_mode) &&
4035 +- attr->ia_valid & ATTR_SIZE &&
4036 +- (attr->ia_size < inode->i_size)) {
4037 +- handle_t *handle;
4038 +-
4039 +- handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4040 +- if (IS_ERR(handle)) {
4041 +- error = PTR_ERR(handle);
4042 +- goto err_out;
4043 +- }
4044 +- if (ext4_handle_valid(handle)) {
4045 +- error = ext4_orphan_add(handle, inode);
4046 +- orphan = 1;
4047 +- }
4048 +- EXT4_I(inode)->i_disksize = attr->ia_size;
4049 +- rc = ext4_mark_inode_dirty(handle, inode);
4050 +- if (!error)
4051 +- error = rc;
4052 +- ext4_journal_stop(handle);
4053 +-
4054 +- if (ext4_should_order_data(inode)) {
4055 +- error = ext4_begin_ordered_truncate(inode,
4056 ++ if (S_ISREG(inode->i_mode) &&
4057 ++ (attr->ia_size < inode->i_size)) {
4058 ++ if (ext4_should_order_data(inode)) {
4059 ++ error = ext4_begin_ordered_truncate(inode,
4060 + attr->ia_size);
4061 +- if (error) {
4062 +- /* Do as much error cleanup as possible */
4063 +- handle = ext4_journal_start(inode,
4064 +- EXT4_HT_INODE, 3);
4065 +- if (IS_ERR(handle)) {
4066 +- ext4_orphan_del(NULL, inode);
4067 ++ if (error)
4068 + goto err_out;
4069 +- }
4070 +- ext4_orphan_del(handle, inode);
4071 +- orphan = 0;
4072 +- ext4_journal_stop(handle);
4073 ++ }
4074 ++ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4075 ++ if (IS_ERR(handle)) {
4076 ++ error = PTR_ERR(handle);
4077 ++ goto err_out;
4078 ++ }
4079 ++ if (ext4_handle_valid(handle)) {
4080 ++ error = ext4_orphan_add(handle, inode);
4081 ++ orphan = 1;
4082 ++ }
4083 ++ EXT4_I(inode)->i_disksize = attr->ia_size;
4084 ++ rc = ext4_mark_inode_dirty(handle, inode);
4085 ++ if (!error)
4086 ++ error = rc;
4087 ++ ext4_journal_stop(handle);
4088 ++ if (error) {
4089 ++ ext4_orphan_del(NULL, inode);
4090 + goto err_out;
4091 + }
4092 + }
4093 +- }
4094 +-
4095 +- if (attr->ia_valid & ATTR_SIZE) {
4096 +- if (attr->ia_size != inode->i_size) {
4097 +- loff_t oldsize = inode->i_size;
4098 +
4099 +- i_size_write(inode, attr->ia_size);
4100 +- /*
4101 +- * Blocks are going to be removed from the inode. Wait
4102 +- * for dio in flight. Temporarily disable
4103 +- * dioread_nolock to prevent livelock.
4104 +- */
4105 +- if (orphan) {
4106 +- if (!ext4_should_journal_data(inode)) {
4107 +- ext4_inode_block_unlocked_dio(inode);
4108 +- inode_dio_wait(inode);
4109 +- ext4_inode_resume_unlocked_dio(inode);
4110 +- } else
4111 +- ext4_wait_for_tail_page_commit(inode);
4112 +- }
4113 +- /*
4114 +- * Truncate pagecache after we've waited for commit
4115 +- * in data=journal mode to make pages freeable.
4116 +- */
4117 +- truncate_pagecache(inode, oldsize, inode->i_size);
4118 ++ i_size_write(inode, attr->ia_size);
4119 ++ /*
4120 ++ * Blocks are going to be removed from the inode. Wait
4121 ++ * for dio in flight. Temporarily disable
4122 ++ * dioread_nolock to prevent livelock.
4123 ++ */
4124 ++ if (orphan) {
4125 ++ if (!ext4_should_journal_data(inode)) {
4126 ++ ext4_inode_block_unlocked_dio(inode);
4127 ++ inode_dio_wait(inode);
4128 ++ ext4_inode_resume_unlocked_dio(inode);
4129 ++ } else
4130 ++ ext4_wait_for_tail_page_commit(inode);
4131 + }
4132 +- ext4_truncate(inode);
4133 ++ /*
4134 ++ * Truncate pagecache after we've waited for commit
4135 ++ * in data=journal mode to make pages freeable.
4136 ++ */
4137 ++ truncate_pagecache(inode, oldsize, inode->i_size);
4138 + }
4139 ++ /*
4140 ++ * We want to call ext4_truncate() even if attr->ia_size ==
4141 ++ * inode->i_size for cases like truncation of fallocated space
4142 ++ */
4143 ++ if (attr->ia_valid & ATTR_SIZE)
4144 ++ ext4_truncate(inode);
4145 +
4146 + if (!rc) {
4147 + setattr_copy(inode, attr);
4148 +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
4149 +index 72a5d5b..8fec28f 100644
4150 +--- a/fs/fuse/dir.c
4151 ++++ b/fs/fuse/dir.c
4152 +@@ -1174,6 +1174,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
4153 + return -EIO;
4154 + if (reclen > nbytes)
4155 + break;
4156 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
4157 ++ return -EIO;
4158 +
4159 + if (!dir_emit(ctx, dirent->name, dirent->namelen,
4160 + dirent->ino, dirent->type))
4161 +@@ -1320,6 +1322,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
4162 + return -EIO;
4163 + if (reclen > nbytes)
4164 + break;
4165 ++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
4166 ++ return -EIO;
4167 +
4168 + if (!over) {
4169 + /* We fill entries into dstbuf only as much as
4170 +@@ -1590,6 +1594,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
4171 + struct file *file)
4172 + {
4173 + struct fuse_conn *fc = get_fuse_conn(inode);
4174 ++ struct fuse_inode *fi = get_fuse_inode(inode);
4175 + struct fuse_req *req;
4176 + struct fuse_setattr_in inarg;
4177 + struct fuse_attr_out outarg;
4178 +@@ -1617,8 +1622,10 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
4179 + if (IS_ERR(req))
4180 + return PTR_ERR(req);
4181 +
4182 +- if (is_truncate)
4183 ++ if (is_truncate) {
4184 + fuse_set_nowrite(inode);
4185 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4186 ++ }
4187 +
4188 + memset(&inarg, 0, sizeof(inarg));
4189 + memset(&outarg, 0, sizeof(outarg));
4190 +@@ -1680,12 +1687,14 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
4191 + invalidate_inode_pages2(inode->i_mapping);
4192 + }
4193 +
4194 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4195 + return 0;
4196 +
4197 + error:
4198 + if (is_truncate)
4199 + fuse_release_nowrite(inode);
4200 +
4201 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4202 + return err;
4203 + }
4204 +
4205 +@@ -1749,6 +1758,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
4206 + fc->no_setxattr = 1;
4207 + err = -EOPNOTSUPP;
4208 + }
4209 ++ if (!err)
4210 ++ fuse_invalidate_attr(inode);
4211 + return err;
4212 + }
4213 +
4214 +@@ -1878,6 +1889,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
4215 + fc->no_removexattr = 1;
4216 + err = -EOPNOTSUPP;
4217 + }
4218 ++ if (!err)
4219 ++ fuse_invalidate_attr(inode);
4220 + return err;
4221 + }
4222 +
4223 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
4224 +index 5c121fe..d409dea 100644
4225 +--- a/fs/fuse/file.c
4226 ++++ b/fs/fuse/file.c
4227 +@@ -629,7 +629,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
4228 + struct fuse_inode *fi = get_fuse_inode(inode);
4229 +
4230 + spin_lock(&fc->lock);
4231 +- if (attr_ver == fi->attr_version && size < inode->i_size) {
4232 ++ if (attr_ver == fi->attr_version && size < inode->i_size &&
4233 ++ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
4234 + fi->attr_version = ++fc->attr_version;
4235 + i_size_write(inode, size);
4236 + }
4237 +@@ -1032,12 +1033,16 @@ static ssize_t fuse_perform_write(struct file *file,
4238 + {
4239 + struct inode *inode = mapping->host;
4240 + struct fuse_conn *fc = get_fuse_conn(inode);
4241 ++ struct fuse_inode *fi = get_fuse_inode(inode);
4242 + int err = 0;
4243 + ssize_t res = 0;
4244 +
4245 + if (is_bad_inode(inode))
4246 + return -EIO;
4247 +
4248 ++ if (inode->i_size < pos + iov_iter_count(ii))
4249 ++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4250 ++
4251 + do {
4252 + struct fuse_req *req;
4253 + ssize_t count;
4254 +@@ -1073,6 +1078,7 @@ static ssize_t fuse_perform_write(struct file *file,
4255 + if (res > 0)
4256 + fuse_write_update_size(inode, pos);
4257 +
4258 ++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
4259 + fuse_invalidate_attr(inode);
4260 +
4261 + return res > 0 ? res : err;
4262 +@@ -1529,7 +1535,6 @@ static int fuse_writepage_locked(struct page *page)
4263 +
4264 + inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
4265 + inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
4266 +- end_page_writeback(page);
4267 +
4268 + spin_lock(&fc->lock);
4269 + list_add(&req->writepages_entry, &fi->writepages);
4270 +@@ -1537,6 +1542,8 @@ static int fuse_writepage_locked(struct page *page)
4271 + fuse_flush_writepages(inode);
4272 + spin_unlock(&fc->lock);
4273 +
4274 ++ end_page_writeback(page);
4275 ++
4276 + return 0;
4277 +
4278 + err_free:
4279 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
4280 +index fde7249..5ced199 100644
4281 +--- a/fs/fuse/fuse_i.h
4282 ++++ b/fs/fuse/fuse_i.h
4283 +@@ -115,6 +115,8 @@ struct fuse_inode {
4284 + enum {
4285 + /** Advise readdirplus */
4286 + FUSE_I_ADVISE_RDPLUS,
4287 ++ /** An operation changing file size is in progress */
4288 ++ FUSE_I_SIZE_UNSTABLE,
4289 + };
4290 +
4291 + struct fuse_conn;
4292 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
4293 +index 0b57859..e0fe703 100644
4294 +--- a/fs/fuse/inode.c
4295 ++++ b/fs/fuse/inode.c
4296 +@@ -201,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
4297 + struct timespec old_mtime;
4298 +
4299 + spin_lock(&fc->lock);
4300 +- if (attr_version != 0 && fi->attr_version > attr_version) {
4301 ++ if ((attr_version != 0 && fi->attr_version > attr_version) ||
4302 ++ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
4303 + spin_unlock(&fc->lock);
4304 + return;
4305 + }
4306 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
4307 +index c348d6d..e5d408a 100644
4308 +--- a/fs/isofs/inode.c
4309 ++++ b/fs/isofs/inode.c
4310 +@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
4311 +
4312 + static int isofs_remount(struct super_block *sb, int *flags, char *data)
4313 + {
4314 +- /* we probably want a lot more here */
4315 +- *flags |= MS_RDONLY;
4316 ++ if (!(*flags & MS_RDONLY))
4317 ++ return -EROFS;
4318 + return 0;
4319 + }
4320 +
4321 +@@ -763,15 +763,6 @@ root_found:
4322 + */
4323 + s->s_maxbytes = 0x80000000000LL;
4324 +
4325 +- /*
4326 +- * The CDROM is read-only, has no nodes (devices) on it, and since
4327 +- * all of the files appear to be owned by root, we really do not want
4328 +- * to allow suid. (suid or devices will not show up unless we have
4329 +- * Rock Ridge extensions)
4330 +- */
4331 +-
4332 +- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
4333 +-
4334 + /* Set this for reference. Its not currently used except on write
4335 + which we don't have .. */
4336 +
4337 +@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
4338 + static struct dentry *isofs_mount(struct file_system_type *fs_type,
4339 + int flags, const char *dev_name, void *data)
4340 + {
4341 ++ /* We don't support read-write mounts */
4342 ++ if (!(flags & MS_RDONLY))
4343 ++ return ERR_PTR(-EACCES);
4344 + return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
4345 + }
4346 +
4347 +diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
4348 +index 2487116..8460647 100644
4349 +--- a/fs/ocfs2/extent_map.c
4350 ++++ b/fs/ocfs2/extent_map.c
4351 +@@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4352 + cpos = map_start >> osb->s_clustersize_bits;
4353 + mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
4354 + map_start + map_len);
4355 +- mapping_end -= cpos;
4356 + is_last = 0;
4357 + while (cpos < mapping_end && !is_last) {
4358 + u32 fe_flags;
4359 +diff --git a/fs/proc/root.c b/fs/proc/root.c
4360 +index e0a790d..0e0e83c 100644
4361 +--- a/fs/proc/root.c
4362 ++++ b/fs/proc/root.c
4363 +@@ -110,7 +110,8 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
4364 + ns = task_active_pid_ns(current);
4365 + options = data;
4366 +
4367 +- if (!current_user_ns()->may_mount_proc)
4368 ++ if (!current_user_ns()->may_mount_proc ||
4369 ++ !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
4370 + return ERR_PTR(-EPERM);
4371 + }
4372 +
4373 +diff --git a/include/linux/compat.h b/include/linux/compat.h
4374 +index 7f0c1dd..ec1aee4 100644
4375 +--- a/include/linux/compat.h
4376 ++++ b/include/linux/compat.h
4377 +@@ -669,6 +669,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
4378 +
4379 + int compat_restore_altstack(const compat_stack_t __user *uss);
4380 + int __compat_save_altstack(compat_stack_t __user *, unsigned long);
4381 ++#define compat_save_altstack_ex(uss, sp) do { \
4382 ++ compat_stack_t __user *__uss = uss; \
4383 ++ struct task_struct *t = current; \
4384 ++ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
4385 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
4386 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
4387 ++} while (0);
4388 +
4389 + asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
4390 + struct compat_timespec __user *interval);
4391 +diff --git a/include/linux/hid.h b/include/linux/hid.h
4392 +index 0c48991..ff545cc 100644
4393 +--- a/include/linux/hid.h
4394 ++++ b/include/linux/hid.h
4395 +@@ -393,10 +393,12 @@ struct hid_report {
4396 + struct hid_device *device; /* associated device */
4397 + };
4398 +
4399 ++#define HID_MAX_IDS 256
4400 ++
4401 + struct hid_report_enum {
4402 + unsigned numbered;
4403 + struct list_head report_list;
4404 +- struct hid_report *report_id_hash[256];
4405 ++ struct hid_report *report_id_hash[HID_MAX_IDS];
4406 + };
4407 +
4408 + #define HID_REPORT_TYPES 3
4409 +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4410 +index 3bed2e8..d1fe5d0 100644
4411 +--- a/include/linux/pci_ids.h
4412 ++++ b/include/linux/pci_ids.h
4413 +@@ -518,6 +518,8 @@
4414 + #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
4415 + #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
4416 + #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
4417 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
4418 ++#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
4419 + #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
4420 + #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
4421 + #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
4422 +diff --git a/include/linux/rculist.h b/include/linux/rculist.h
4423 +index f4b1001..4106721 100644
4424 +--- a/include/linux/rculist.h
4425 ++++ b/include/linux/rculist.h
4426 +@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
4427 + */
4428 + #define list_first_or_null_rcu(ptr, type, member) \
4429 + ({struct list_head *__ptr = (ptr); \
4430 +- struct list_head __rcu *__next = list_next_rcu(__ptr); \
4431 +- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
4432 ++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
4433 ++ likely(__ptr != __next) ? \
4434 ++ list_entry_rcu(__next, type, member) : NULL; \
4435 + })
4436 +
4437 + /**
4438 +diff --git a/include/linux/signal.h b/include/linux/signal.h
4439 +index d897484..2ac423b 100644
4440 +--- a/include/linux/signal.h
4441 ++++ b/include/linux/signal.h
4442 +@@ -434,6 +434,14 @@ void signals_init(void);
4443 + int restore_altstack(const stack_t __user *);
4444 + int __save_altstack(stack_t __user *, unsigned long);
4445 +
4446 ++#define save_altstack_ex(uss, sp) do { \
4447 ++ stack_t __user *__uss = uss; \
4448 ++ struct task_struct *t = current; \
4449 ++ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
4450 ++ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
4451 ++ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
4452 ++} while (0);
4453 ++
4454 + #ifdef CONFIG_PROC_FS
4455 + struct seq_file;
4456 + extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
4457 +diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
4458 +index 1e88377..3e541e6 100644
4459 +--- a/include/linux/usb/hcd.h
4460 ++++ b/include/linux/usb/hcd.h
4461 +@@ -411,7 +411,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
4462 + extern void usb_hcd_pci_remove(struct pci_dev *dev);
4463 + extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
4464 +
4465 +-#ifdef CONFIG_PM_SLEEP
4466 ++#ifdef CONFIG_PM
4467 + extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
4468 + #endif
4469 + #endif /* CONFIG_PCI */
4470 +diff --git a/ipc/msg.c b/ipc/msg.c
4471 +index 9f29d9e..b65fdf1 100644
4472 +--- a/ipc/msg.c
4473 ++++ b/ipc/msg.c
4474 +@@ -680,16 +680,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4475 + goto out_unlock1;
4476 + }
4477 +
4478 ++ ipc_lock_object(&msq->q_perm);
4479 ++
4480 + for (;;) {
4481 + struct msg_sender s;
4482 +
4483 + err = -EACCES;
4484 + if (ipcperms(ns, &msq->q_perm, S_IWUGO))
4485 +- goto out_unlock1;
4486 ++ goto out_unlock0;
4487 +
4488 + err = security_msg_queue_msgsnd(msq, msg, msgflg);
4489 + if (err)
4490 +- goto out_unlock1;
4491 ++ goto out_unlock0;
4492 +
4493 + if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
4494 + 1 + msq->q_qnum <= msq->q_qbytes) {
4495 +@@ -699,10 +701,9 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4496 + /* queue full, wait: */
4497 + if (msgflg & IPC_NOWAIT) {
4498 + err = -EAGAIN;
4499 +- goto out_unlock1;
4500 ++ goto out_unlock0;
4501 + }
4502 +
4503 +- ipc_lock_object(&msq->q_perm);
4504 + ss_add(msq, &s);
4505 +
4506 + if (!ipc_rcu_getref(msq)) {
4507 +@@ -730,10 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4508 + goto out_unlock0;
4509 + }
4510 +
4511 +- ipc_unlock_object(&msq->q_perm);
4512 + }
4513 +-
4514 +- ipc_lock_object(&msq->q_perm);
4515 + msq->q_lspid = task_tgid_vnr(current);
4516 + msq->q_stime = get_seconds();
4517 +
4518 +diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
4519 +index f356974..ad8e1bd 100644
4520 +--- a/kernel/events/uprobes.c
4521 ++++ b/kernel/events/uprobes.c
4522 +@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
4523 + tmp = ri;
4524 + ri = ri->next;
4525 + kfree(tmp);
4526 ++ utask->depth--;
4527 +
4528 + if (!chained)
4529 + break;
4530 +-
4531 +- utask->depth--;
4532 +-
4533 + BUG_ON(!ri);
4534 + }
4535 +
4536 +diff --git a/kernel/fork.c b/kernel/fork.c
4537 +index bf46287..200a7a2 100644
4538 +--- a/kernel/fork.c
4539 ++++ b/kernel/fork.c
4540 +@@ -1173,10 +1173,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
4541 + return ERR_PTR(-EINVAL);
4542 +
4543 + /*
4544 +- * If the new process will be in a different pid namespace
4545 +- * don't allow the creation of threads.
4546 ++ * If the new process will be in a different pid namespace don't
4547 ++ * allow it to share a thread group or signal handlers with the
4548 ++ * forking task.
4549 + */
4550 +- if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
4551 ++ if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
4552 + (task_active_pid_ns(current) !=
4553 + current->nsproxy->pid_ns_for_children))
4554 + return ERR_PTR(-EINVAL);
4555 +diff --git a/kernel/pid.c b/kernel/pid.c
4556 +index 66505c1..ebe5e80 100644
4557 +--- a/kernel/pid.c
4558 ++++ b/kernel/pid.c
4559 +@@ -265,6 +265,7 @@ void free_pid(struct pid *pid)
4560 + struct pid_namespace *ns = upid->ns;
4561 + hlist_del_rcu(&upid->pid_chain);
4562 + switch(--ns->nr_hashed) {
4563 ++ case 2:
4564 + case 1:
4565 + /* When all that is left in the pid namespace
4566 + * is the reaper wake up the reaper. The reaper
4567 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4568 +index a92012a..f2820fb 100644
4569 +--- a/mm/huge_memory.c
4570 ++++ b/mm/huge_memory.c
4571 +@@ -2296,6 +2296,8 @@ static void collapse_huge_page(struct mm_struct *mm,
4572 + goto out;
4573 +
4574 + vma = find_vma(mm, address);
4575 ++ if (!vma)
4576 ++ goto out;
4577 + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
4578 + hend = vma->vm_end & HPAGE_PMD_MASK;
4579 + if (address < hstart || address + HPAGE_PMD_SIZE > hend)
4580 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4581 +index 0878ff7..aa44621 100644
4582 +--- a/mm/memcontrol.c
4583 ++++ b/mm/memcontrol.c
4584 +@@ -5616,7 +5616,13 @@ static int compare_thresholds(const void *a, const void *b)
4585 + const struct mem_cgroup_threshold *_a = a;
4586 + const struct mem_cgroup_threshold *_b = b;
4587 +
4588 +- return _a->threshold - _b->threshold;
4589 ++ if (_a->threshold > _b->threshold)
4590 ++ return 1;
4591 ++
4592 ++ if (_a->threshold < _b->threshold)
4593 ++ return -1;
4594 ++
4595 ++ return 0;
4596 + }
4597 +
4598 + static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4599 +diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
4600 +index dd47889..dbc0a73 100644
4601 +--- a/net/ceph/osd_client.c
4602 ++++ b/net/ceph/osd_client.c
4603 +@@ -2129,6 +2129,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4604 + dout("osdc_start_request failed map, "
4605 + " will retry %lld\n", req->r_tid);
4606 + rc = 0;
4607 ++ } else {
4608 ++ __unregister_request(osdc, req);
4609 + }
4610 + goto out_unlock;
4611 + }
4612 +diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4613 +index 603ddd9..dbd9a47 100644
4614 +--- a/net/ceph/osdmap.c
4615 ++++ b/net/ceph/osdmap.c
4616 +@@ -1129,7 +1129,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
4617 +
4618 + /* pg_temp? */
4619 + pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
4620 +- pool->pgp_num_mask);
4621 ++ pool->pg_num_mask);
4622 + pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
4623 + if (pg) {
4624 + *num = pg->len;
4625 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4626 +index cc9e02d..7a98d52 100644
4627 +--- a/net/mac80211/mlme.c
4628 ++++ b/net/mac80211/mlme.c
4629 +@@ -2851,14 +2851,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
4630 + ieee80211_rx_bss_put(local, bss);
4631 + sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
4632 + }
4633 +-
4634 +- if (!sdata->u.mgd.associated ||
4635 +- !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
4636 +- return;
4637 +-
4638 +- ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
4639 +- elems, true);
4640 +-
4641 + }
4642 +
4643 +
4644 +@@ -3147,6 +3139,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
4645 +
4646 + ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
4647 +
4648 ++ ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
4649 ++ &elems, true);
4650 ++
4651 + if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
4652 + elems.wmm_param_len))
4653 + changed |= BSS_CHANGED_QOS;
4654 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4655 +index 8860dd5..9552da2 100644
4656 +--- a/sound/pci/hda/hda_intel.c
4657 ++++ b/sound/pci/hda/hda_intel.c
4658 +@@ -3376,6 +3376,7 @@ static struct snd_pci_quirk msi_black_list[] = {
4659 + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
4660 + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
4661 + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
4662 ++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
4663 + SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
4664 + SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
4665 + {}
4666 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4667 +index 9f35862..45850f6 100644
4668 +--- a/sound/pci/hda/patch_hdmi.c
4669 ++++ b/sound/pci/hda/patch_hdmi.c
4670 +@@ -67,6 +67,8 @@ struct hdmi_spec_per_pin {
4671 + struct delayed_work work;
4672 + struct snd_kcontrol *eld_ctl;
4673 + int repoll_count;
4674 ++ bool setup; /* the stream has been set up by prepare callback */
4675 ++ int channels; /* current number of channels */
4676 + bool non_pcm;
4677 + bool chmap_set; /* channel-map override by ALSA API? */
4678 + unsigned char chmap[8]; /* ALSA API channel-map */
4679 +@@ -551,6 +553,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
4680 + }
4681 + }
4682 +
4683 ++ if (!ca) {
4684 ++ /* if there was no match, select the regular ALSA channel
4685 ++ * allocation with the matching number of channels */
4686 ++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
4687 ++ if (channels == channel_allocations[i].channels) {
4688 ++ ca = channel_allocations[i].ca_index;
4689 ++ break;
4690 ++ }
4691 ++ }
4692 ++ }
4693 ++
4694 + snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
4695 + snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
4696 + ca, channels, buf);
4697 +@@ -868,18 +881,19 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
4698 + return true;
4699 + }
4700 +
4701 +-static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
4702 +- bool non_pcm,
4703 +- struct snd_pcm_substream *substream)
4704 ++static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
4705 ++ struct hdmi_spec_per_pin *per_pin,
4706 ++ bool non_pcm)
4707 + {
4708 +- struct hdmi_spec *spec = codec->spec;
4709 +- struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4710 + hda_nid_t pin_nid = per_pin->pin_nid;
4711 +- int channels = substream->runtime->channels;
4712 ++ int channels = per_pin->channels;
4713 + struct hdmi_eld *eld;
4714 + int ca;
4715 + union audio_infoframe ai;
4716 +
4717 ++ if (!channels)
4718 ++ return;
4719 ++
4720 + eld = &per_pin->sink_eld;
4721 + if (!eld->monitor_present)
4722 + return;
4723 +@@ -1329,6 +1343,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4724 + eld_changed = true;
4725 + }
4726 + if (update_eld) {
4727 ++ bool old_eld_valid = pin_eld->eld_valid;
4728 + pin_eld->eld_valid = eld->eld_valid;
4729 + eld_changed = pin_eld->eld_size != eld->eld_size ||
4730 + memcmp(pin_eld->eld_buffer, eld->eld_buffer,
4731 +@@ -1338,6 +1353,18 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4732 + eld->eld_size);
4733 + pin_eld->eld_size = eld->eld_size;
4734 + pin_eld->info = eld->info;
4735 ++
4736 ++ /* Haswell-specific workaround: re-setup when the transcoder is
4737 ++ * changed during the stream playback
4738 ++ */
4739 ++ if (codec->vendor_id == 0x80862807 &&
4740 ++ eld->eld_valid && !old_eld_valid && per_pin->setup) {
4741 ++ snd_hda_codec_write(codec, pin_nid, 0,
4742 ++ AC_VERB_SET_AMP_GAIN_MUTE,
4743 ++ AMP_OUT_UNMUTE);
4744 ++ hdmi_setup_audio_infoframe(codec, per_pin,
4745 ++ per_pin->non_pcm);
4746 ++ }
4747 + }
4748 + mutex_unlock(&pin_eld->lock);
4749 +
4750 +@@ -1510,14 +1537,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
4751 + hda_nid_t cvt_nid = hinfo->nid;
4752 + struct hdmi_spec *spec = codec->spec;
4753 + int pin_idx = hinfo_to_pin_index(spec, hinfo);
4754 +- hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
4755 ++ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4756 ++ hda_nid_t pin_nid = per_pin->pin_nid;
4757 + bool non_pcm;
4758 +
4759 + non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
4760 ++ per_pin->channels = substream->runtime->channels;
4761 ++ per_pin->setup = true;
4762 +
4763 + hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
4764 +
4765 +- hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
4766 ++ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
4767 +
4768 + return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
4769 + }
4770 +@@ -1557,6 +1587,9 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
4771 + snd_hda_spdif_ctls_unassign(codec, pin_idx);
4772 + per_pin->chmap_set = false;
4773 + memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
4774 ++
4775 ++ per_pin->setup = false;
4776 ++ per_pin->channels = 0;
4777 + }
4778 +
4779 + return 0;
4780 +@@ -1692,8 +1725,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
4781 + per_pin->chmap_set = true;
4782 + memcpy(per_pin->chmap, chmap, sizeof(chmap));
4783 + if (prepared)
4784 +- hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
4785 +- substream);
4786 ++ hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
4787 +
4788 + return 0;
4789 + }
4790 +diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
4791 +index 5402dfb..8a8d936 100644
4792 +--- a/sound/soc/codecs/mc13783.c
4793 ++++ b/sound/soc/codecs/mc13783.c
4794 +@@ -126,6 +126,10 @@ static int mc13783_write(struct snd_soc_codec *codec,
4795 +
4796 + ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4797 +
4798 ++ /* include errata fix for spi audio problems */
4799 ++ if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
4800 ++ ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4801 ++
4802 + mc13xxx_unlock(priv->mc13xxx);
4803 +
4804 + return ret;
4805 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
4806 +index 0a4ffdd..5e5af89 100644
4807 +--- a/sound/soc/codecs/wm8960.c
4808 ++++ b/sound/soc/codecs/wm8960.c
4809 +@@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
4810 + if (pll_div.k) {
4811 + reg |= 0x20;
4812 +
4813 +- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
4814 +- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
4815 +- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
4816 ++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
4817 ++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
4818 ++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
4819 + }
4820 + snd_soc_write(codec, WM8960_PLL1, reg);
4821 +
4822
4823 diff --git a/3.11.1/4420_grsecurity-2.9.1-3.11.1-201309221838.patch b/3.11.2/4420_grsecurity-2.9.1-3.11.2-201309281103.patch
4824 similarity index 99%
4825 rename from 3.11.1/4420_grsecurity-2.9.1-3.11.1-201309221838.patch
4826 rename to 3.11.2/4420_grsecurity-2.9.1-3.11.2-201309281103.patch
4827 index f7acb39..3abf324 100644
4828 --- a/3.11.1/4420_grsecurity-2.9.1-3.11.1-201309221838.patch
4829 +++ b/3.11.2/4420_grsecurity-2.9.1-3.11.2-201309281103.patch
4830 @@ -281,7 +281,7 @@ index 7f9d4f5..6d1afd6 100644
4831
4832 pcd. [PARIDE]
4833 diff --git a/Makefile b/Makefile
4834 -index efd2396..682975d 100644
4835 +index aede319..6bf55a4 100644
4836 --- a/Makefile
4837 +++ b/Makefile
4838 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
4839 @@ -2745,10 +2745,10 @@ index 2c7cc1e..ab2e911 100644
4840 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
4841 #endif
4842 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
4843 -index 85c3fb6..b3068b1 100644
4844 +index 85c3fb6..054c2dc 100644
4845 --- a/arch/arm/kernel/module.c
4846 +++ b/arch/arm/kernel/module.c
4847 -@@ -37,12 +37,37 @@
4848 +@@ -37,12 +37,39 @@
4849 #endif
4850
4851 #ifdef CONFIG_MMU
4852 @@ -2779,11 +2779,13 @@ index 85c3fb6..b3068b1 100644
4853 +{
4854 + module_free(mod, module_region);
4855 +}
4856 ++EXPORT_SYMBOL(module_free_exec);
4857 +
4858 +void *module_alloc_exec(unsigned long size)
4859 +{
4860 + return __module_alloc(size, PAGE_KERNEL_EXEC);
4861 +}
4862 ++EXPORT_SYMBOL(module_alloc_exec);
4863 +#endif
4864 #endif
4865
4866 @@ -3128,7 +3130,7 @@ index ab517fc..9adf2fa 100644
4867 /*
4868 * on V7-M there is no need to copy the vector table to a dedicated
4869 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
4870 -index 7bcee5c..64c9c5f 100644
4871 +index 7bcee5c..e2f3249 100644
4872 --- a/arch/arm/kernel/vmlinux.lds.S
4873 +++ b/arch/arm/kernel/vmlinux.lds.S
4874 @@ -8,7 +8,11 @@
4875 @@ -3144,6 +3146,15 @@ index 7bcee5c..64c9c5f 100644
4876 #define PROC_INFO \
4877 . = ALIGN(4); \
4878 VMLINUX_SYMBOL(__proc_info_begin) = .; \
4879 +@@ -34,7 +38,7 @@
4880 + #endif
4881 +
4882 + #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
4883 +- defined(CONFIG_GENERIC_BUG)
4884 ++ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
4885 + #define ARM_EXIT_KEEP(x) x
4886 + #define ARM_EXIT_DISCARD(x)
4887 + #else
4888 @@ -90,6 +94,11 @@ SECTIONS
4889 _text = .;
4890 HEAD_TEXT
4891 @@ -13233,7 +13244,7 @@ index bae3aba..c1788c1 100644
4892 set_fs(KERNEL_DS);
4893 has_dumped = 1;
4894 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
4895 -index bccfca6..a312009 100644
4896 +index 665a730..8e7a67a 100644
4897 --- a/arch/x86/ia32/ia32_signal.c
4898 +++ b/arch/x86/ia32/ia32_signal.c
4899 @@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
4900 @@ -13263,12 +13274,7 @@ index bccfca6..a312009 100644
4901 };
4902
4903 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
4904 -@@ -457,20 +457,22 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
4905 - else
4906 - put_user_ex(0, &frame->uc.uc_flags);
4907 - put_user_ex(0, &frame->uc.uc_link);
4908 -- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
4909 -+ __compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
4910 +@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
4911
4912 if (ksig->ka.sa.sa_flags & SA_RESTORER)
4913 restorer = ksig->ka.sa.sa_restorer;
4914 @@ -14738,7 +14744,7 @@ index 9863ee3..4a1f8e1 100644
4915 return _PAGE_CACHE_WC;
4916 else if (pg_flags == _PGMT_UC_MINUS)
4917 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
4918 -index 46fc474..b02b0f9 100644
4919 +index f50de69..2b0a458 100644
4920 --- a/arch/x86/include/asm/checksum_32.h
4921 +++ b/arch/x86/include/asm/checksum_32.h
4922 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
4923 @@ -14756,24 +14762,24 @@ index 46fc474..b02b0f9 100644
4924 /*
4925 * Note: when you get a NULL pointer exception here this means someone
4926 * passed in an incorrect kernel address to one of these functions.
4927 -@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
4928 - int *err_ptr)
4929 - {
4930 - might_sleep();
4931 -- return csum_partial_copy_generic((__force void *)src, dst,
4932 -+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
4933 - len, sum, err_ptr, NULL);
4934 - }
4935 +@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
4936
4937 -@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
4938 - {
4939 might_sleep();
4940 - if (access_ok(VERIFY_WRITE, dst, len))
4941 -- return csum_partial_copy_generic(src, (__force void *)dst,
4942 -+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
4943 - len, sum, NULL, err_ptr);
4944 + stac();
4945 +- ret = csum_partial_copy_generic((__force void *)src, dst,
4946 ++ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
4947 + len, sum, err_ptr, NULL);
4948 + clac();
4949
4950 - if (len)
4951 +@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
4952 + might_sleep();
4953 + if (access_ok(VERIFY_WRITE, dst, len)) {
4954 + stac();
4955 +- ret = csum_partial_copy_generic(src, (__force void *)dst,
4956 ++ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
4957 + len, sum, NULL, err_ptr);
4958 + clac();
4959 + return ret;
4960 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
4961 index d47786a..ce1b05d 100644
4962 --- a/arch/x86/include/asm/cmpxchg.h
4963 @@ -15760,7 +15766,7 @@ index 5f55e69..e20bfb1 100644
4964
4965 #ifdef CONFIG_SMP
4966 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
4967 -index cdbf367..ce8f82b 100644
4968 +index be12c53..2124e35 100644
4969 --- a/arch/x86/include/asm/mmu_context.h
4970 +++ b/arch/x86/include/asm/mmu_context.h
4971 @@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
4972 @@ -15838,13 +15844,12 @@ index cdbf367..ce8f82b 100644
4973 load_cr3(next->pgd);
4974 +#endif
4975
4976 - /* stop flush ipis for the previous mm */
4977 + /* Stop flush ipis for the previous mm */
4978 cpumask_clear_cpu(cpu, mm_cpumask(prev));
4979 -@@ -53,9 +106,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
4980 - */
4981 +@@ -51,9 +104,63 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
4982 + /* Load the LDT, if the LDT is different: */
4983 if (unlikely(prev->context.ldt != next->context.ldt))
4984 load_LDT_nolock(&next->context);
4985 -- }
4986 +
4987 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
4988 + if (!(__supported_pte_mask & _PAGE_NX)) {
4989 @@ -15859,14 +15864,14 @@ index cdbf367..ce8f82b 100644
4990 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
4991 + prev->context.user_cs_limit != next->context.user_cs_limit))
4992 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
4993 - #ifdef CONFIG_SMP
4994 ++#ifdef CONFIG_SMP
4995 + else if (unlikely(tlbstate != TLBSTATE_OK))
4996 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
4997 +#endif
4998 +#endif
4999 +
5000 -+ }
5001 - else {
5002 + }
5003 ++ else {
5004 +
5005 +#ifdef CONFIG_PAX_PER_CPU_PGD
5006 + pax_open_kernel();
5007 @@ -15901,11 +15906,12 @@ index cdbf367..ce8f82b 100644
5008 + load_cr3(get_cpu_pgd(cpu, kernel));
5009 +#endif
5010 +
5011 -+#ifdef CONFIG_SMP
5012 + #ifdef CONFIG_SMP
5013 +- else {
5014 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
5015 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
5016
5017 -@@ -64,11 +171,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
5018 +@@ -70,11 +177,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
5019 * tlb flush IPI delivery. We must reload CR3
5020 * to make sure to use no freed page tables.
5021 */
5022 @@ -25235,7 +25241,7 @@ index 5cdff03..80fa283 100644
5023 * Up to this point, the boot CPU has been using .init.data
5024 * area. Reload any changed state for the boot CPU.
5025 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
5026 -index cf91358..a7081ea 100644
5027 +index d859eea..44e17c4 100644
5028 --- a/arch/x86/kernel/signal.c
5029 +++ b/arch/x86/kernel/signal.c
5030 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
5031 @@ -25268,12 +25274,8 @@ index cf91358..a7081ea 100644
5032
5033 if (err)
5034 return -EFAULT;
5035 -@@ -358,10 +358,13 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
5036 - else
5037 - put_user_ex(0, &frame->uc.uc_flags);
5038 - put_user_ex(0, &frame->uc.uc_link);
5039 -- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
5040 -+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5041 +@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
5042 + save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5043
5044 /* Set up to return from userspace. */
5045 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5046 @@ -25293,15 +25295,6 @@ index cf91358..a7081ea 100644
5047 } put_user_catch(err);
5048
5049 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
5050 -@@ -423,7 +426,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
5051 - else
5052 - put_user_ex(0, &frame->uc.uc_flags);
5053 - put_user_ex(0, &frame->uc.uc_link);
5054 -- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
5055 -+ __save_altstack_ex(&frame->uc.uc_stack, regs->sp);
5056 -
5057 - /* Set up to return from userspace. If provided, use a stub
5058 - already in userspace. */
5059 @@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
5060 {
5061 int usig = signr_convert(ksig->sig);
5062 @@ -27941,38 +27934,37 @@ index 2419d5f..953ee51 100644
5063 CFI_RESTORE_STATE
5064
5065 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
5066 -index 25b7ae8..c40113e 100644
5067 +index 7609e0e..b449b98 100644
5068 --- a/arch/x86/lib/csum-wrappers_64.c
5069 +++ b/arch/x86/lib/csum-wrappers_64.c
5070 -@@ -52,8 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
5071 +@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
5072 len -= 2;
5073 }
5074 }
5075 -- isum = csum_partial_copy_generic((__force const void *)src,
5076 + pax_open_userland();
5077 -+ stac();
5078 + stac();
5079 +- isum = csum_partial_copy_generic((__force const void *)src,
5080 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
5081 dst, len, isum, errp, NULL);
5082 -+ clac();
5083 + clac();
5084 + pax_close_userland();
5085 if (unlikely(*errp))
5086 goto out_err;
5087
5088 -@@ -105,8 +109,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
5089 +@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
5090 }
5091
5092 *errp = 0;
5093 -- return csum_partial_copy_generic(src, (void __force *)dst,
5094 + pax_open_userland();
5095 -+ stac();
5096 -+ isum = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
5097 - len, isum, NULL, errp);
5098 -+ clac();
5099 + stac();
5100 +- ret = csum_partial_copy_generic(src, (void __force *)dst,
5101 ++ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
5102 + len, isum, NULL, errp);
5103 + clac();
5104 + pax_close_userland();
5105 -+ return isum;
5106 + return ret;
5107 }
5108 EXPORT_SYMBOL(csum_partial_copy_to_user);
5109 -
5110 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
5111 index a451235..1daa956 100644
5112 --- a/arch/x86/lib/getuser.S
5113 @@ -34319,7 +34311,7 @@ index 290792a..416f287 100644
5114 spin_lock_init(&blkcg->lock);
5115 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
5116 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
5117 -index 4b8d9b54..ff76220 100644
5118 +index 4b8d9b54..a7178c0 100644
5119 --- a/block/blk-iopoll.c
5120 +++ b/block/blk-iopoll.c
5121 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
5122 @@ -34327,7 +34319,7 @@ index 4b8d9b54..ff76220 100644
5123 EXPORT_SYMBOL(blk_iopoll_complete);
5124
5125 -static void blk_iopoll_softirq(struct softirq_action *h)
5126 -+static void blk_iopoll_softirq(void)
5127 ++static __latent_entropy void blk_iopoll_softirq(void)
5128 {
5129 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
5130 int rearm = 0, budget = blk_iopoll_budget;
5131 @@ -34345,7 +34337,7 @@ index 623e1cd..ca1e109 100644
5132 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
5133 else
5134 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
5135 -index ec9e606..2244d4e 100644
5136 +index ec9e606..3f38839 100644
5137 --- a/block/blk-softirq.c
5138 +++ b/block/blk-softirq.c
5139 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
5140 @@ -34353,7 +34345,7 @@ index ec9e606..2244d4e 100644
5141 * while passing them to the queue registered handler.
5142 */
5143 -static void blk_done_softirq(struct softirq_action *h)
5144 -+static void blk_done_softirq(void)
5145 ++static __latent_entropy void blk_done_softirq(void)
5146 {
5147 struct list_head *cpu_list, local_list;
5148
5149 @@ -34513,32 +34505,6 @@ index a5ffcc9..3cedc9c 100644
5150 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
5151 goto error;
5152
5153 -diff --git a/crypto/api.c b/crypto/api.c
5154 -index 3b61803..37c4c72 100644
5155 ---- a/crypto/api.c
5156 -+++ b/crypto/api.c
5157 -@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
5158 - BLOCKING_NOTIFIER_HEAD(crypto_chain);
5159 - EXPORT_SYMBOL_GPL(crypto_chain);
5160 -
5161 -+static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
5162 -+
5163 - struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
5164 - {
5165 - return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
5166 -@@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
5167 - }
5168 - up_write(&crypto_alg_sem);
5169 -
5170 -- if (alg != &larval->alg)
5171 -+ if (alg != &larval->alg) {
5172 - kfree(larval);
5173 -+ if (crypto_is_larval(alg))
5174 -+ alg = crypto_larval_wait(alg);
5175 -+ }
5176 -
5177 - return alg;
5178 - }
5179 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
5180 index 7bdd61b..afec999 100644
5181 --- a/crypto/cryptd.c
5182 @@ -35969,19 +35935,18 @@ index e8d11b6..7b1b36f 100644
5183 }
5184 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
5185 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
5186 -index 62b6c2c..4a11354 100644
5187 +index 62b6c2c..002d10f 100644
5188 --- a/drivers/block/cciss.c
5189 +++ b/drivers/block/cciss.c
5190 -@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
5191 +@@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
5192 int err;
5193 u32 cp;
5194
5195 + memset(&arg64, 0, sizeof(arg64));
5196 -+
5197 err = 0;
5198 err |=
5199 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5200 -@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
5201 +@@ -3010,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
5202 while (!list_empty(&h->reqQ)) {
5203 c = list_entry(h->reqQ.next, CommandList_struct, list);
5204 /* can't do anything if fifo is full */
5205 @@ -35990,7 +35955,7 @@ index 62b6c2c..4a11354 100644
5206 dev_warn(&h->pdev->dev, "fifo full\n");
5207 break;
5208 }
5209 -@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
5210 +@@ -3020,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
5211 h->Qdepth--;
5212
5213 /* Tell the controller execute command */
5214 @@ -35999,7 +35964,7 @@ index 62b6c2c..4a11354 100644
5215
5216 /* Put job onto the completed Q */
5217 addQ(&h->cmpQ, c);
5218 -@@ -3446,17 +3448,17 @@ startio:
5219 +@@ -3446,17 +3447,17 @@ startio:
5220
5221 static inline unsigned long get_next_completion(ctlr_info_t *h)
5222 {
5223 @@ -36020,7 +35985,7 @@ index 62b6c2c..4a11354 100644
5224 (h->interrupts_enabled == 0));
5225 }
5226
5227 -@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
5228 +@@ -3489,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
5229 u32 a;
5230
5231 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5232 @@ -36029,7 +35994,7 @@ index 62b6c2c..4a11354 100644
5233
5234 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
5235 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
5236 -@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
5237 +@@ -4046,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
5238 trans_support & CFGTBL_Trans_use_short_tags);
5239
5240 /* Change the access methods to the performant access methods */
5241 @@ -36038,7 +36003,7 @@ index 62b6c2c..4a11354 100644
5242 h->transMethod = CFGTBL_Trans_Performant;
5243
5244 return;
5245 -@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
5246 +@@ -4319,7 +4320,7 @@ static int cciss_pci_init(ctlr_info_t *h)
5247 if (prod_index < 0)
5248 return -ENODEV;
5249 h->product_name = products[prod_index].product_name;
5250 @@ -36047,7 +36012,7 @@ index 62b6c2c..4a11354 100644
5251
5252 if (cciss_board_disabled(h)) {
5253 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
5254 -@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
5255 +@@ -5051,7 +5052,7 @@ reinit_after_soft_reset:
5256 }
5257
5258 /* make sure the board interrupts are off */
5259 @@ -36056,7 +36021,7 @@ index 62b6c2c..4a11354 100644
5260 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
5261 if (rc)
5262 goto clean2;
5263 -@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
5264 +@@ -5101,7 +5102,7 @@ reinit_after_soft_reset:
5265 * fake ones to scoop up any residual completions.
5266 */
5267 spin_lock_irqsave(&h->lock, flags);
5268 @@ -36065,7 +36030,7 @@ index 62b6c2c..4a11354 100644
5269 spin_unlock_irqrestore(&h->lock, flags);
5270 free_irq(h->intr[h->intr_mode], h);
5271 rc = cciss_request_irq(h, cciss_msix_discard_completions,
5272 -@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
5273 +@@ -5121,9 +5122,9 @@ reinit_after_soft_reset:
5274 dev_info(&h->pdev->dev, "Board READY.\n");
5275 dev_info(&h->pdev->dev,
5276 "Waiting for stale completions to drain.\n");
5277 @@ -36077,7 +36042,7 @@ index 62b6c2c..4a11354 100644
5278
5279 rc = controller_reset_failed(h->cfgtable);
5280 if (rc)
5281 -@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
5282 +@@ -5146,7 +5147,7 @@ reinit_after_soft_reset:
5283 cciss_scsi_setup(h);
5284
5285 /* Turn the interrupts on so we can service requests */
5286 @@ -36086,7 +36051,7 @@ index 62b6c2c..4a11354 100644
5287
5288 /* Get the firmware version */
5289 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
5290 -@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
5291 +@@ -5218,7 +5219,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
5292 kfree(flush_buf);
5293 if (return_code != IO_OK)
5294 dev_warn(&h->pdev->dev, "Error flushing cache\n");
5295 @@ -38551,10 +38516,10 @@ index 3d92a7c..9a9cfd7 100644
5296 iir = I915_READ(IIR);
5297
5298 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
5299 -index be79f47..95e150b 100644
5300 +index ca40d1b..6baacfd 100644
5301 --- a/drivers/gpu/drm/i915/intel_display.c
5302 +++ b/drivers/gpu/drm/i915/intel_display.c
5303 -@@ -9418,13 +9418,13 @@ struct intel_quirk {
5304 +@@ -9431,13 +9431,13 @@ struct intel_quirk {
5305 int subsystem_vendor;
5306 int subsystem_device;
5307 void (*hook)(struct drm_device *dev);
5308 @@ -38570,7 +38535,7 @@ index be79f47..95e150b 100644
5309
5310 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
5311 {
5312 -@@ -9432,18 +9432,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
5313 +@@ -9445,18 +9445,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
5314 return 1;
5315 }
5316
5317 @@ -39427,41 +39392,10 @@ index 5360e5a..c2c0d26 100644
5318 err = drm_debugfs_create_files(dc->debugfs_files,
5319 ARRAY_SIZE(debugfs_files),
5320 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
5321 -index 36668d1..9f4ccb0 100644
5322 +index 5956445..1d30d7e 100644
5323 --- a/drivers/hid/hid-core.c
5324 +++ b/drivers/hid/hid-core.c
5325 -@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
5326 - struct hid_report_enum *report_enum = device->report_enum + type;
5327 - struct hid_report *report;
5328 -
5329 -+ if (id >= HID_MAX_IDS)
5330 -+ return NULL;
5331 - if (report_enum->report_id_hash[id])
5332 - return report_enum->report_id_hash[id];
5333 -
5334 -@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
5335 -
5336 - case HID_GLOBAL_ITEM_TAG_REPORT_ID:
5337 - parser->global.report_id = item_udata(item);
5338 -- if (parser->global.report_id == 0) {
5339 -- hid_err(parser->device, "report_id 0 is invalid\n");
5340 -+ if (parser->global.report_id == 0 ||
5341 -+ parser->global.report_id >= HID_MAX_IDS) {
5342 -+ hid_err(parser->device, "report_id %u is invalid\n",
5343 -+ parser->global.report_id);
5344 - return -1;
5345 - }
5346 - return 0;
5347 -@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
5348 - for (i = 0; i < HID_REPORT_TYPES; i++) {
5349 - struct hid_report_enum *report_enum = device->report_enum + i;
5350 -
5351 -- for (j = 0; j < 256; j++) {
5352 -+ for (j = 0; j < HID_MAX_IDS; j++) {
5353 - struct hid_report *report = report_enum->report_id_hash[j];
5354 - if (report)
5355 - hid_free_report(report);
5356 -@@ -755,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
5357 +@@ -759,6 +759,56 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
5358 }
5359 EXPORT_SYMBOL_GPL(hid_parse_report);
5360
5361 @@ -39518,21 +39452,7 @@ index 36668d1..9f4ccb0 100644
5362 /**
5363 * hid_open_report - open a driver-specific device report
5364 *
5365 -@@ -1152,7 +1206,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
5366 -
5367 - int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
5368 - {
5369 -- unsigned size = field->report_size;
5370 -+ unsigned size;
5371 -+
5372 -+ if (!field)
5373 -+ return -1;
5374 -+
5375 -+ size = field->report_size;
5376 -
5377 - hid_dump_input(field->report->device, field->usage + offset, value);
5378 -
5379 -@@ -2285,7 +2344,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
5380 +@@ -2295,7 +2345,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
5381
5382 int hid_add_device(struct hid_device *hdev)
5383 {
5384 @@ -39541,7 +39461,7 @@ index 36668d1..9f4ccb0 100644
5385 int ret;
5386
5387 if (WARN_ON(hdev->status & HID_STAT_ADDED))
5388 -@@ -2319,7 +2378,7 @@ int hid_add_device(struct hid_device *hdev)
5389 +@@ -2329,7 +2379,7 @@ int hid_add_device(struct hid_device *hdev)
5390 /* XXX hack, any other cleaner solution after the driver core
5391 * is converted to allow more than 20 bytes as the device name? */
5392 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
5393 @@ -39815,68 +39735,6 @@ index cb0e361..2aa275e 100644
5394 }
5395
5396 for (r = 0; r < report->maxfield; r++) {
5397 -diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
5398 -index ef95102..5482156 100644
5399 ---- a/drivers/hid/hid-ntrig.c
5400 -+++ b/drivers/hid/hid-ntrig.c
5401 -@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
5402 - struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
5403 - report_id_hash[0x0d];
5404 -
5405 -- if (!report)
5406 -+ if (!report || report->maxfield < 1 ||
5407 -+ report->field[0]->report_count < 1)
5408 - return -EINVAL;
5409 -
5410 - hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
5411 -diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
5412 -index b48092d..72bba1e 100644
5413 ---- a/drivers/hid/hid-picolcd_core.c
5414 -+++ b/drivers/hid/hid-picolcd_core.c
5415 -@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
5416 - buf += 10;
5417 - cnt -= 10;
5418 - }
5419 -- if (!report)
5420 -+ if (!report || report->maxfield < 1)
5421 - return -EINVAL;
5422 -
5423 - while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
5424 -diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
5425 -index d29112f..2dcd7d9 100644
5426 ---- a/drivers/hid/hid-pl.c
5427 -+++ b/drivers/hid/hid-pl.c
5428 -@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
5429 - strong = &report->field[0]->value[2];
5430 - weak = &report->field[0]->value[3];
5431 - debug("detected single-field device");
5432 -- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
5433 -- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
5434 -+ } else if (report->field[0]->maxusage == 1 &&
5435 -+ report->field[0]->usage[0].hid ==
5436 -+ (HID_UP_LED | 0x43) &&
5437 -+ report->maxfield >= 4 &&
5438 -+ report->field[0]->report_count >= 1 &&
5439 -+ report->field[1]->report_count >= 1 &&
5440 -+ report->field[2]->report_count >= 1 &&
5441 -+ report->field[3]->report_count >= 1) {
5442 - report->field[0]->value[0] = 0x00;
5443 - report->field[1]->value[0] = 0x00;
5444 - strong = &report->field[2]->value[0];
5445 -diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
5446 -index ca749810..aa34755 100644
5447 ---- a/drivers/hid/hid-sensor-hub.c
5448 -+++ b/drivers/hid/hid-sensor-hub.c
5449 -@@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
5450 -
5451 - mutex_lock(&data->mutex);
5452 - report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
5453 -- if (!report || (field_index >= report->maxfield)) {
5454 -+ if (!report || (field_index >= report->maxfield) ||
5455 -+ report->field[field_index]->report_count < 1) {
5456 - ret = -EINVAL;
5457 - goto done_proc;
5458 - }
5459 diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
5460 index d164911..ef42e86 100644
5461 --- a/drivers/hid/hid-steelseries.c
5462 @@ -47364,10 +47222,10 @@ index f379c7f..e8fc69c 100644
5463
5464 transport_setup_device(&rport->dev);
5465 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
5466 -index 86fcf2c..26d8594 100644
5467 +index 2783dd7..d20395b 100644
5468 --- a/drivers/scsi/sd.c
5469 +++ b/drivers/scsi/sd.c
5470 -@@ -2938,7 +2938,7 @@ static int sd_probe(struct device *dev)
5471 +@@ -2933,7 +2933,7 @@ static int sd_probe(struct device *dev)
5472 sdkp->disk = gd;
5473 sdkp->index = index;
5474 atomic_set(&sdkp->openers, 0);
5475 @@ -48853,10 +48711,10 @@ index d5cc3ac..3263411 100644
5476
5477 if (get_user(c, buf))
5478 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
5479 -index 366af83..6db51c3 100644
5480 +index 20689b9..7fd3a31 100644
5481 --- a/drivers/tty/tty_io.c
5482 +++ b/drivers/tty/tty_io.c
5483 -@@ -3467,7 +3467,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
5484 +@@ -3468,7 +3468,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
5485
5486 void tty_default_fops(struct file_operations *fops)
5487 {
5488 @@ -49287,7 +49145,7 @@ index 014dc99..4d25fd7 100644
5489 wake_up(&usb_kill_urb_queue);
5490 usb_put_urb(urb);
5491 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5492 -index 558313d..8cadfa5 100644
5493 +index 17c3785..deffb11 100644
5494 --- a/drivers/usb/core/hub.c
5495 +++ b/drivers/usb/core/hub.c
5496 @@ -27,6 +27,7 @@
5497 @@ -49298,7 +49156,7 @@ index 558313d..8cadfa5 100644
5498
5499 #include <asm/uaccess.h>
5500 #include <asm/byteorder.h>
5501 -@@ -4426,6 +4427,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
5502 +@@ -4421,6 +4422,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
5503 goto done;
5504 return;
5505 }
5506 @@ -49349,7 +49207,7 @@ index 7dad603..350f7a9 100644
5507 INIT_LIST_HEAD(&dev->ep0.urb_list);
5508 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
5509 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5510 -index f77083f..f3e2e34 100644
5511 +index 14d28d6..5f511ac 100644
5512 --- a/drivers/usb/dwc3/gadget.c
5513 +++ b/drivers/usb/dwc3/gadget.c
5514 @@ -550,8 +550,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
5515 @@ -52541,7 +52399,7 @@ index 89dec7f..361b0d75 100644
5516 fd_offset + ex.a_text);
5517 if (error != N_DATADDR(ex)) {
5518 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
5519 -index 100edcc..ed95731 100644
5520 +index 100edcc..244db37 100644
5521 --- a/fs/binfmt_elf.c
5522 +++ b/fs/binfmt_elf.c
5523 @@ -34,6 +34,7 @@
5524 @@ -52695,7 +52553,22 @@ index 100edcc..ed95731 100644
5525 error = -ENOMEM;
5526 goto out_close;
5527 }
5528 -@@ -538,6 +567,315 @@ out:
5529 +@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
5530 + elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
5531 +
5532 + /* Map the last of the bss segment */
5533 +- error = vm_brk(elf_bss, last_bss - elf_bss);
5534 +- if (BAD_ADDR(error))
5535 +- goto out_close;
5536 ++ if (last_bss > elf_bss) {
5537 ++ error = vm_brk(elf_bss, last_bss - elf_bss);
5538 ++ if (BAD_ADDR(error))
5539 ++ goto out_close;
5540 ++ }
5541 + }
5542 +
5543 + error = load_addr;
5544 +@@ -538,6 +569,315 @@ out:
5545 return error;
5546 }
5547
5548 @@ -53011,7 +52884,7 @@ index 100edcc..ed95731 100644
5549 /*
5550 * These are the functions used to load ELF style executables and shared
5551 * libraries. There is no binary dependent code anywhere else.
5552 -@@ -554,6 +892,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
5553 +@@ -554,6 +894,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
5554 {
5555 unsigned int random_variable = 0;
5556
5557 @@ -53023,7 +52896,7 @@ index 100edcc..ed95731 100644
5558 if ((current->flags & PF_RANDOMIZE) &&
5559 !(current->personality & ADDR_NO_RANDOMIZE)) {
5560 random_variable = get_random_int() & STACK_RND_MASK;
5561 -@@ -572,7 +915,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
5562 +@@ -572,7 +917,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
5563 unsigned long load_addr = 0, load_bias = 0;
5564 int load_addr_set = 0;
5565 char * elf_interpreter = NULL;
5566 @@ -53032,7 +52905,7 @@ index 100edcc..ed95731 100644
5567 struct elf_phdr *elf_ppnt, *elf_phdata;
5568 unsigned long elf_bss, elf_brk;
5569 int retval, i;
5570 -@@ -582,12 +925,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
5571 +@@ -582,12 +927,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
5572 unsigned long start_code, end_code, start_data, end_data;
5573 unsigned long reloc_func_desc __maybe_unused = 0;
5574 int executable_stack = EXSTACK_DEFAULT;
5575 @@ -53046,7 +52919,7 @@ index 100edcc..ed95731 100644
5576
5577 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
5578 if (!loc) {
5579 -@@ -723,11 +1066,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
5580 +@@ -723,11 +1068,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
5581 goto out_free_dentry;
5582
5583 /* OK, This is the point of no return */
5584 @@ -53129,7 +53002,7 @@ index 100edcc..ed95731 100644
5585 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
5586 current->personality |= READ_IMPLIES_EXEC;
5587
5588 -@@ -817,6 +1230,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
5589 +@@ -817,6 +1232,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
5590 #else
5591 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
5592 #endif
5593 @@ -53150,7 +53023,7 @@ index 100edcc..ed95731 100644
5594 }
5595
5596 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
5597 -@@ -849,9 +1276,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
5598 +@@ -849,9 +1278,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
5599 * allowed task size. Note that p_filesz must always be
5600 * <= p_memsz so it is only necessary to check p_memsz.
5601 */
5602 @@ -53163,7 +53036,7 @@ index 100edcc..ed95731 100644
5603 /* set_brk can never work. Avoid overflows. */
5604 send_sig(SIGKILL, current, 0);
5605 retval = -EINVAL;
5606 -@@ -890,17 +1317,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
5607 +@@ -890,17 +1319,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
5608 goto out_free_dentry;
5609 }
5610 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
5611 @@ -53215,7 +53088,7 @@ index 100edcc..ed95731 100644
5612 load_bias);
5613 if (!IS_ERR((void *)elf_entry)) {
5614 /*
5615 -@@ -1122,7 +1577,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
5616 +@@ -1122,7 +1579,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
5617 * Decide what to dump of a segment, part, all or none.
5618 */
5619 static unsigned long vma_dump_size(struct vm_area_struct *vma,
5620 @@ -53224,7 +53097,7 @@ index 100edcc..ed95731 100644
5621 {
5622 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
5623
5624 -@@ -1160,7 +1615,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
5625 +@@ -1160,7 +1617,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
5626 if (vma->vm_file == NULL)
5627 return 0;
5628
5629 @@ -53233,7 +53106,7 @@ index 100edcc..ed95731 100644
5630 goto whole;
5631
5632 /*
5633 -@@ -1385,9 +1840,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
5634 +@@ -1385,9 +1842,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
5635 {
5636 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
5637 int i = 0;
5638 @@ -53245,7 +53118,7 @@ index 100edcc..ed95731 100644
5639 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
5640 }
5641
5642 -@@ -1396,7 +1851,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
5643 +@@ -1396,7 +1853,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
5644 {
5645 mm_segment_t old_fs = get_fs();
5646 set_fs(KERNEL_DS);
5647 @@ -53254,7 +53127,7 @@ index 100edcc..ed95731 100644
5648 set_fs(old_fs);
5649 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
5650 }
5651 -@@ -2017,14 +2472,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
5652 +@@ -2017,14 +2474,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
5653 }
5654
5655 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
5656 @@ -53271,7 +53144,7 @@ index 100edcc..ed95731 100644
5657 return size;
5658 }
5659
5660 -@@ -2117,7 +2572,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5661 +@@ -2117,7 +2574,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5662
5663 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
5664
5665 @@ -53280,7 +53153,7 @@ index 100edcc..ed95731 100644
5666 offset += elf_core_extra_data_size();
5667 e_shoff = offset;
5668
5669 -@@ -2131,10 +2586,12 @@ static int elf_core_dump(struct coredump_params *cprm)
5670 +@@ -2131,10 +2588,12 @@ static int elf_core_dump(struct coredump_params *cprm)
5671 offset = dataoff;
5672
5673 size += sizeof(*elf);
5674 @@ -53293,7 +53166,7 @@ index 100edcc..ed95731 100644
5675 if (size > cprm->limit
5676 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
5677 goto end_coredump;
5678 -@@ -2148,7 +2605,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5679 +@@ -2148,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5680 phdr.p_offset = offset;
5681 phdr.p_vaddr = vma->vm_start;
5682 phdr.p_paddr = 0;
5683 @@ -53302,7 +53175,7 @@ index 100edcc..ed95731 100644
5684 phdr.p_memsz = vma->vm_end - vma->vm_start;
5685 offset += phdr.p_filesz;
5686 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
5687 -@@ -2159,6 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5688 +@@ -2159,6 +2618,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5689 phdr.p_align = ELF_EXEC_PAGESIZE;
5690
5691 size += sizeof(phdr);
5692 @@ -53310,7 +53183,7 @@ index 100edcc..ed95731 100644
5693 if (size > cprm->limit
5694 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
5695 goto end_coredump;
5696 -@@ -2183,7 +2641,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5697 +@@ -2183,7 +2643,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5698 unsigned long addr;
5699 unsigned long end;
5700
5701 @@ -53319,7 +53192,7 @@ index 100edcc..ed95731 100644
5702
5703 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
5704 struct page *page;
5705 -@@ -2192,6 +2650,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5706 +@@ -2192,6 +2652,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5707 page = get_dump_page(addr);
5708 if (page) {
5709 void *kaddr = kmap(page);
5710 @@ -53327,7 +53200,7 @@ index 100edcc..ed95731 100644
5711 stop = ((size += PAGE_SIZE) > cprm->limit) ||
5712 !dump_write(cprm->file, kaddr,
5713 PAGE_SIZE);
5714 -@@ -2209,6 +2668,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5715 +@@ -2209,6 +2670,7 @@ static int elf_core_dump(struct coredump_params *cprm)
5716
5717 if (e_phnum == PN_XNUM) {
5718 size += sizeof(*shdr4extnum);
5719 @@ -53335,7 +53208,7 @@ index 100edcc..ed95731 100644
5720 if (size > cprm->limit
5721 || !dump_write(cprm->file, shdr4extnum,
5722 sizeof(*shdr4extnum)))
5723 -@@ -2229,6 +2689,167 @@ out:
5724 +@@ -2229,6 +2691,167 @@ out:
5725
5726 #endif /* CONFIG_ELF_CORE */
5727
5728 @@ -53657,7 +53530,7 @@ index a4b38f9..f86a509 100644
5729 spin_lock_init(&delayed_root->lock);
5730 init_waitqueue_head(&delayed_root->wait);
5731 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
5732 -index 238a055..1e33cd5 100644
5733 +index 9877a2a..7ebf9ab 100644
5734 --- a/fs/btrfs/ioctl.c
5735 +++ b/fs/btrfs/ioctl.c
5736 @@ -3097,9 +3097,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
5737 @@ -57333,10 +57206,10 @@ index 1d55f94..088da65 100644
5738 }
5739
5740 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
5741 -index 72a5d5b..c991011 100644
5742 +index 8fec28f..cd40dba 100644
5743 --- a/fs/fuse/dir.c
5744 +++ b/fs/fuse/dir.c
5745 -@@ -1433,7 +1433,7 @@ static char *read_link(struct dentry *dentry)
5746 +@@ -1437,7 +1437,7 @@ static char *read_link(struct dentry *dentry)
5747 return link;
5748 }
5749
5750 @@ -60028,10 +59901,10 @@ index 7129046..f2779c6 100644
5751 kfree(ctl_table_arg);
5752 goto out;
5753 diff --git a/fs/proc/root.c b/fs/proc/root.c
5754 -index e0a790d..21e095e 100644
5755 +index 0e0e83c..005ba6a 100644
5756 --- a/fs/proc/root.c
5757 +++ b/fs/proc/root.c
5758 -@@ -182,7 +182,15 @@ void __init proc_root_init(void)
5759 +@@ -183,7 +183,15 @@ void __init proc_root_init(void)
5760 #ifdef CONFIG_PROC_DEVICETREE
5761 proc_device_tree_init();
5762 #endif
5763 @@ -67426,10 +67299,10 @@ index 0000000..a340c17
5764 +}
5765 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
5766 new file mode 100644
5767 -index 0000000..8132048
5768 +index 0000000..f056b81
5769 --- /dev/null
5770 +++ b/grsecurity/gracl_ip.c
5771 -@@ -0,0 +1,387 @@
5772 +@@ -0,0 +1,386 @@
5773 +#include <linux/kernel.h>
5774 +#include <asm/uaccess.h>
5775 +#include <asm/errno.h>
5776 @@ -67521,6 +67394,8 @@ index 0000000..8132048
5777 + return gr_sockfamilies[family];
5778 +}
5779 +
5780 ++extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
5781 ++
5782 +int
5783 +gr_search_socket(const int domain, const int type, const int protocol)
5784 +{
5785 @@ -67600,10 +67475,7 @@ index 0000000..8132048
5786 + if (domain == PF_INET)
5787 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
5788 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
5789 -+ else
5790 -+#ifndef CONFIG_IPV6
5791 -+ if (domain != PF_INET6)
5792 -+#endif
5793 ++ else if (rcu_access_pointer(net_families[domain]) != NULL)
5794 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
5795 + gr_socktype_to_name(type), protocol);
5796 +
5797 @@ -72535,7 +72407,7 @@ index 1ec14a7..d0654a2 100644
5798 /**
5799 * struct clk_init_data - holds init data that's common to all clocks and is
5800 diff --git a/include/linux/compat.h b/include/linux/compat.h
5801 -index 7f0c1dd..206ac34 100644
5802 +index ec1aee4..1077986 100644
5803 --- a/include/linux/compat.h
5804 +++ b/include/linux/compat.h
5805 @@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
5806 @@ -72556,14 +72428,6 @@ index 7f0c1dd..206ac34 100644
5807
5808 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
5809 /*
5810 -@@ -669,6 +669,7 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
5811 -
5812 - int compat_restore_altstack(const compat_stack_t __user *uss);
5813 - int __compat_save_altstack(compat_stack_t __user *, unsigned long);
5814 -+void __compat_save_altstack_ex(compat_stack_t __user *, unsigned long);
5815 -
5816 - asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
5817 - struct compat_timespec __user *interval);
5818 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
5819 index 842de22..7f3a41f 100644
5820 --- a/include/linux/compiler-gcc4.h
5821 @@ -73242,7 +73106,7 @@ index 1c804b0..1432c2b 100644
5822
5823 /*
5824 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
5825 -index 9f3c275..911b591 100644
5826 +index 9f3c275..8bdff5d 100644
5827 --- a/include/linux/genhd.h
5828 +++ b/include/linux/genhd.h
5829 @@ -194,7 +194,7 @@ struct gendisk {
5830 @@ -73254,6 +73118,15 @@ index 9f3c275..911b591 100644
5831 struct disk_events *ev;
5832 #ifdef CONFIG_BLK_DEV_INTEGRITY
5833 struct blk_integrity *integrity;
5834 +@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
5835 + extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
5836 +
5837 + /* drivers/char/random.c */
5838 +-extern void add_disk_randomness(struct gendisk *disk);
5839 ++extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
5840 + extern void rand_initialize_disk(struct gendisk *disk);
5841 +
5842 + static inline sector_t get_start_sect(struct block_device *bdev)
5843 diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
5844 index 023bc34..b02b46a 100644
5845 --- a/include/linux/genl_magic_func.h
5846 @@ -74589,24 +74462,10 @@ index 0000000..e7ffaaf
5847 +
5848 +#endif
5849 diff --git a/include/linux/hid.h b/include/linux/hid.h
5850 -index 0c48991..76e41d8 100644
5851 +index ff545cc..76e41d8 100644
5852 --- a/include/linux/hid.h
5853 +++ b/include/linux/hid.h
5854 -@@ -393,10 +393,12 @@ struct hid_report {
5855 - struct hid_device *device; /* associated device */
5856 - };
5857 -
5858 -+#define HID_MAX_IDS 256
5859 -+
5860 - struct hid_report_enum {
5861 - unsigned numbered;
5862 - struct list_head report_list;
5863 -- struct hid_report *report_id_hash[256];
5864 -+ struct hid_report *report_id_hash[HID_MAX_IDS];
5865 - };
5866 -
5867 - #define HID_REPORT_TYPES 3
5868 -@@ -747,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
5869 +@@ -749,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
5870 struct hid_device *hid_allocate_device(void);
5871 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
5872 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
5873 @@ -76143,10 +76002,10 @@ index 34a1e10..03a6d03 100644
5874 struct proc_ns {
5875 void *ns;
5876 diff --git a/include/linux/random.h b/include/linux/random.h
5877 -index 3b9377d..943ad4a 100644
5878 +index 3b9377d..e418336 100644
5879 --- a/include/linux/random.h
5880 +++ b/include/linux/random.h
5881 -@@ -10,6 +10,16 @@
5882 +@@ -10,9 +10,19 @@
5883
5884
5885 extern void add_device_randomness(const void *, unsigned int);
5886 @@ -76161,8 +76020,13 @@ index 3b9377d..943ad4a 100644
5887 +}
5888 +
5889 extern void add_input_randomness(unsigned int type, unsigned int code,
5890 - unsigned int value);
5891 - extern void add_interrupt_randomness(int irq, int irq_flags);
5892 +- unsigned int value);
5893 +-extern void add_interrupt_randomness(int irq, int irq_flags);
5894 ++ unsigned int value) __latent_entropy;
5895 ++extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
5896 +
5897 + extern void get_random_bytes(void *buf, int nbytes);
5898 + extern void get_random_bytes_arch(void *buf, int nbytes);
5899 @@ -32,6 +42,11 @@ void prandom_seed(u32 seed);
5900 u32 prandom_u32_state(struct rnd_state *);
5901 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
5902 @@ -76176,7 +76040,7 @@ index 3b9377d..943ad4a 100644
5903 * Handle minimum values for seeds
5904 */
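
The hunk above retags the entropy-gathering callbacks in include/linux/random.h (add_input_randomness(), add_interrupt_randomness()) with __latent_entropy, the marker consumed by the bundled tools/gcc/latent_entropy_plugin.c; the same annotation is applied to add_disk_randomness() and to the softirq handlers further down. A minimal C sketch of the pattern — the attribute name comes straight from the diff, but the LATENT_ENTROPY_PLUGIN guard and the empty fallback shown here are assumptions, not the patch's exact definition:

#ifdef LATENT_ENTROPY_PLUGIN                        /* assumed guard name */
#define __latent_entropy __attribute__((latent_entropy))
#else
#define __latent_entropy                            /* plugin absent: annotation compiles away */
#endif

/* Annotated the way the patch annotates add_interrupt_randomness(),
 * run_timer_softirq(), net_rx_action() and friends; when the plugin is
 * loaded it instruments the body to mix compile-time random constants
 * into the global latent_entropy variable. */
static __latent_entropy void example_softirq_handler(void)
{
    /* handler work happens here */
}
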
5905 diff --git a/include/linux/rculist.h b/include/linux/rculist.h
5906 -index f4b1001..8ddb2b6 100644
5907 +index 4106721..132d42c 100644
5908 --- a/include/linux/rculist.h
5909 +++ b/include/linux/rculist.h
5910 @@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
5911 @@ -76642,18 +76506,6 @@ index 429c199..4d42e38 100644
5912 };
5913
5914 /* shm_mode upper byte flags */
5915 -diff --git a/include/linux/signal.h b/include/linux/signal.h
5916 -index d897484..323ba98 100644
5917 ---- a/include/linux/signal.h
5918 -+++ b/include/linux/signal.h
5919 -@@ -433,6 +433,7 @@ void signals_init(void);
5920 -
5921 - int restore_altstack(const stack_t __user *);
5922 - int __save_altstack(stack_t __user *, unsigned long);
5923 -+void __save_altstack_ex(stack_t __user *, unsigned long);
5924 -
5925 - #ifdef CONFIG_PROC_FS
5926 - struct seq_file;
5927 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5928 index 3b71a4e..5c9f309 100644
5929 --- a/include/linux/skbuff.h
5930 @@ -79442,7 +79294,7 @@ index ae1996d..a35f2cc 100644
5931 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
5932 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
5933 diff --git a/ipc/msg.c b/ipc/msg.c
5934 -index 9f29d9e..8f284e0 100644
5935 +index b65fdf1..89ec2b1 100644
5936 --- a/ipc/msg.c
5937 +++ b/ipc/msg.c
5938 @@ -291,18 +291,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
5939 @@ -80358,7 +80210,7 @@ index ca65997..60df03d 100644
5940 /* Callchain handling */
5941 extern struct perf_callchain_entry *
5942 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
5943 -index f356974..cb8c570 100644
5944 +index ad8e1bd..fed7ba9 100644
5945 --- a/kernel/events/uprobes.c
5946 +++ b/kernel/events/uprobes.c
5947 @@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
5948 @@ -80431,7 +80283,7 @@ index a949819..a5f127d 100644
5949 {
5950 struct signal_struct *sig = current->signal;
5951 diff --git a/kernel/fork.c b/kernel/fork.c
5952 -index bf46287..2af185d 100644
5953 +index 200a7a2..43e52da 100644
5954 --- a/kernel/fork.c
5955 +++ b/kernel/fork.c
5956 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
5957 @@ -80680,7 +80532,7 @@ index bf46287..2af185d 100644
5958 unsigned long stack_start,
5959 unsigned long stack_size,
5960 int __user *child_tidptr,
5961 -@@ -1200,6 +1250,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
5962 +@@ -1201,6 +1251,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
5963 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
5964 #endif
5965 retval = -EAGAIN;
5966 @@ -80690,7 +80542,7 @@ index bf46287..2af185d 100644
5967 if (atomic_read(&p->real_cred->user->processes) >=
5968 task_rlimit(p, RLIMIT_NPROC)) {
5969 if (p->real_cred->user != INIT_USER &&
5970 -@@ -1449,6 +1502,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
5971 +@@ -1450,6 +1503,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
5972 goto bad_fork_free_pid;
5973 }
5974
5975 @@ -80702,7 +80554,7 @@ index bf46287..2af185d 100644
5976 if (likely(p->pid)) {
5977 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
5978
5979 -@@ -1534,6 +1592,8 @@ bad_fork_cleanup_count:
5980 +@@ -1535,6 +1593,8 @@ bad_fork_cleanup_count:
5981 bad_fork_free:
5982 free_task(p);
5983 fork_out:
5984 @@ -80711,7 +80563,7 @@ index bf46287..2af185d 100644
5985 return ERR_PTR(retval);
5986 }
5987
5988 -@@ -1604,6 +1664,7 @@ long do_fork(unsigned long clone_flags,
5989 +@@ -1605,6 +1665,7 @@ long do_fork(unsigned long clone_flags,
5990
5991 p = copy_process(clone_flags, stack_start, stack_size,
5992 child_tidptr, NULL, trace);
5993 @@ -80719,7 +80571,7 @@ index bf46287..2af185d 100644
5994 /*
5995 * Do this prior waking up the new thread - the thread pointer
5996 * might get invalid after that point, if the thread exits quickly.
5997 -@@ -1618,6 +1679,8 @@ long do_fork(unsigned long clone_flags,
5998 +@@ -1619,6 +1680,8 @@ long do_fork(unsigned long clone_flags,
5999 if (clone_flags & CLONE_PARENT_SETTID)
6000 put_user(nr, parent_tidptr);
6001
6002 @@ -80728,7 +80580,7 @@ index bf46287..2af185d 100644
6003 if (clone_flags & CLONE_VFORK) {
6004 p->vfork_done = &vfork;
6005 init_completion(&vfork);
6006 -@@ -1734,7 +1797,7 @@ void __init proc_caches_init(void)
6007 +@@ -1735,7 +1798,7 @@ void __init proc_caches_init(void)
6008 mm_cachep = kmem_cache_create("mm_struct",
6009 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
6010 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
6011 @@ -80737,7 +80589,7 @@ index bf46287..2af185d 100644
6012 mmap_init();
6013 nsproxy_cache_init();
6014 }
6015 -@@ -1774,7 +1837,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
6016 +@@ -1775,7 +1838,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
6017 return 0;
6018
6019 /* don't need lock here; in the worst case we'll do useless copy */
6020 @@ -80746,7 +80598,7 @@ index bf46287..2af185d 100644
6021 return 0;
6022
6023 *new_fsp = copy_fs_struct(fs);
6024 -@@ -1886,7 +1949,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
6025 +@@ -1887,7 +1950,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
6026 fs = current->fs;
6027 spin_lock(&fs->lock);
6028 current->fs = new_fs;
6029 @@ -80848,7 +80700,7 @@ index 9b22d03..6295b62 100644
6030 prev->next = info->next;
6031 else
6032 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
6033 -index 383319b..cd2b391 100644
6034 +index 383319b..56ebb13 100644
6035 --- a/kernel/hrtimer.c
6036 +++ b/kernel/hrtimer.c
6037 @@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
6038 @@ -80856,7 +80708,7 @@ index 383319b..cd2b391 100644
6039 }
6040
6041 -static void run_hrtimer_softirq(struct softirq_action *h)
6042 -+static void run_hrtimer_softirq(void)
6043 ++static __latent_entropy void run_hrtimer_softirq(void)
6044 {
6045 hrtimer_peek_ahead_timers();
6046 }
6047 @@ -82351,7 +82203,7 @@ index 8018646..b6a5b4f 100644
6048 }
6049 EXPORT_SYMBOL(__stack_chk_fail);
6050 diff --git a/kernel/pid.c b/kernel/pid.c
6051 -index 66505c1..87af12c 100644
6052 +index ebe5e80..5d6d634 100644
6053 --- a/kernel/pid.c
6054 +++ b/kernel/pid.c
6055 @@ -33,6 +33,7 @@
6056 @@ -82371,7 +82223,7 @@ index 66505c1..87af12c 100644
6057
6058 int pid_max_min = RESERVED_PIDS + 1;
6059 int pid_max_max = PID_MAX_LIMIT;
6060 -@@ -439,10 +440,18 @@ EXPORT_SYMBOL(pid_task);
6061 +@@ -440,10 +441,18 @@ EXPORT_SYMBOL(pid_task);
6062 */
6063 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
6064 {
6065 @@ -82391,7 +82243,7 @@ index 66505c1..87af12c 100644
6066 }
6067
6068 struct task_struct *find_task_by_vpid(pid_t vnr)
6069 -@@ -450,6 +459,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
6070 +@@ -451,6 +460,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
6071 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
6072 }
6073
6074 @@ -82789,7 +82641,7 @@ index cce6ba8..7c758b1f 100644
6075 }
6076 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
6077 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
6078 -index aa34411..78e5ccb 100644
6079 +index aa34411..4832cd4 100644
6080 --- a/kernel/rcutiny.c
6081 +++ b/kernel/rcutiny.c
6082 @@ -45,7 +45,7 @@
6083 @@ -82806,7 +82658,7 @@ index aa34411..78e5ccb 100644
6084 }
6085
6086 -static void rcu_process_callbacks(struct softirq_action *unused)
6087 -+static void rcu_process_callbacks(void)
6088 ++static __latent_entropy void rcu_process_callbacks(void)
6089 {
6090 __rcu_process_callbacks(&rcu_sched_ctrlblk);
6091 __rcu_process_callbacks(&rcu_bh_ctrlblk);
6092 @@ -82978,7 +82830,7 @@ index f4871e5..8ef5741 100644
6093 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
6094 per_cpu(rcu_torture_count, cpu)[i] = 0;
6095 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
6096 -index 068de3a..df7da65 100644
6097 +index 068de3a..5e7db2f 100644
6098 --- a/kernel/rcutree.c
6099 +++ b/kernel/rcutree.c
6100 @@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
6101 @@ -83107,7 +82959,7 @@ index 068de3a..df7da65 100644
6102 * Do RCU core processing for the current CPU.
6103 */
6104 -static void rcu_process_callbacks(struct softirq_action *unused)
6105 -+static void rcu_process_callbacks(void)
6106 ++static __latent_entropy void rcu_process_callbacks(void)
6107 {
6108 struct rcu_state *rsp;
6109
6110 @@ -83724,7 +83576,7 @@ index 05c39f0..442e6fe 100644
6111 #else
6112 static void register_sched_domain_sysctl(void)
6113 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6114 -index 68f1609..640ba13 100644
6115 +index 68f1609..2a9fe8a 100644
6116 --- a/kernel/sched/fair.c
6117 +++ b/kernel/sched/fair.c
6118 @@ -869,7 +869,7 @@ void task_numa_fault(int node, int pages, bool migrated)
6119 @@ -83741,7 +83593,7 @@ index 68f1609..640ba13 100644
6120 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
6121 */
6122 -static void run_rebalance_domains(struct softirq_action *h)
6123 -+static void run_rebalance_domains(void)
6124 ++static __latent_entropy void run_rebalance_domains(void)
6125 {
6126 int this_cpu = smp_processor_id();
6127 struct rq *this_rq = cpu_rq(this_cpu);
6128 @@ -83759,7 +83611,7 @@ index ef0a7b2..1b728c1 100644
6129 #define sched_class_highest (&stop_sched_class)
6130 #define for_each_class(class) \
6131 diff --git a/kernel/signal.c b/kernel/signal.c
6132 -index 50e4107..08bcb94 100644
6133 +index 50e4107..9409983 100644
6134 --- a/kernel/signal.c
6135 +++ b/kernel/signal.c
6136 @@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
6137 @@ -83885,24 +83737,7 @@ index 50e4107..08bcb94 100644
6138 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
6139 error = check_kill_permission(sig, info, p);
6140 /*
6141 -@@ -3219,6 +3250,16 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
6142 - __put_user(t->sas_ss_size, &uss->ss_size);
6143 - }
6144 -
6145 -+#ifdef CONFIG_X86
6146 -+void __save_altstack_ex(stack_t __user *uss, unsigned long sp)
6147 -+{
6148 -+ struct task_struct *t = current;
6149 -+ put_user_ex((void __user *)t->sas_ss_sp, &uss->ss_sp);
6150 -+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
6151 -+ put_user_ex(t->sas_ss_size, &uss->ss_size);
6152 -+}
6153 -+#endif
6154 -+
6155 - #ifdef CONFIG_COMPAT
6156 - COMPAT_SYSCALL_DEFINE2(sigaltstack,
6157 - const compat_stack_t __user *, uss_ptr,
6158 -@@ -3240,8 +3281,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
6159 +@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
6160 }
6161 seg = get_fs();
6162 set_fs(KERNEL_DS);
6163 @@ -83913,23 +83748,6 @@ index 50e4107..08bcb94 100644
6164 compat_user_stack_pointer());
6165 set_fs(seg);
6166 if (ret >= 0 && uoss_ptr) {
6167 -@@ -3268,6 +3309,16 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
6168 - __put_user(sas_ss_flags(sp), &uss->ss_flags) |
6169 - __put_user(t->sas_ss_size, &uss->ss_size);
6170 - }
6171 -+
6172 -+#ifdef CONFIG_X86
6173 -+void __compat_save_altstack_ex(compat_stack_t __user *uss, unsigned long sp)
6174 -+{
6175 -+ struct task_struct *t = current;
6176 -+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp);
6177 -+ put_user_ex(sas_ss_flags(sp), &uss->ss_flags);
6178 -+ put_user_ex(t->sas_ss_size, &uss->ss_size);
6179 -+}
6180 -+#endif
6181 - #endif
6182 -
6183 - #ifdef __ARCH_WANT_SYS_SIGPENDING
6184 diff --git a/kernel/smpboot.c b/kernel/smpboot.c
6185 index eb89e18..a4e6792 100644
6186 --- a/kernel/smpboot.c
6187 @@ -83953,7 +83771,7 @@ index eb89e18..a4e6792 100644
6188 mutex_unlock(&smpboot_threads_lock);
6189 put_online_cpus();
6190 diff --git a/kernel/softirq.c b/kernel/softirq.c
6191 -index be3d351..9e4d5f2 100644
6192 +index be3d351..e57af82 100644
6193 --- a/kernel/softirq.c
6194 +++ b/kernel/softirq.c
6195 @@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
6196 @@ -83993,7 +83811,7 @@ index be3d351..9e4d5f2 100644
6197 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
6198
6199 -static void tasklet_action(struct softirq_action *a)
6200 -+static void tasklet_action(void)
6201 ++static __latent_entropy void tasklet_action(void)
6202 {
6203 struct tasklet_struct *list;
6204
6205 @@ -84002,7 +83820,7 @@ index be3d351..9e4d5f2 100644
6206 }
6207
6208 -static void tasklet_hi_action(struct softirq_action *a)
6209 -+static void tasklet_hi_action(void)
6210 ++static __latent_entropy void tasklet_hi_action(void)
6211 {
6212 struct tasklet_struct *list;
6213
6214 @@ -84652,7 +84470,7 @@ index 0b537f2..40d6c20 100644
6215 return -ENOMEM;
6216 return 0;
6217 diff --git a/kernel/timer.c b/kernel/timer.c
6218 -index 4296d13..8998609 100644
6219 +index 4296d13..0164b04 100644
6220 --- a/kernel/timer.c
6221 +++ b/kernel/timer.c
6222 @@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
6223 @@ -84660,7 +84478,7 @@ index 4296d13..8998609 100644
6224 * This function runs timers and the timer-tq in bottom half context.
6225 */
6226 -static void run_timer_softirq(struct softirq_action *h)
6227 -+static void run_timer_softirq(void)
6228 ++static __latent_entropy void run_timer_softirq(void)
6229 {
6230 struct tvec_base *base = __this_cpu_read(tvec_bases);
6231
6232 @@ -85763,6 +85581,19 @@ index c24c2f7..06e070b 100644
6233 + pax_close_kernel();
6234 +}
6235 +EXPORT_SYMBOL(pax_list_del_rcu);
6236 +diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
6237 +index 7deeb62..144eb47 100644
6238 +--- a/lib/percpu-refcount.c
6239 ++++ b/lib/percpu-refcount.c
6240 +@@ -29,7 +29,7 @@
6241 + * can't hit 0 before we've added up all the percpu refs.
6242 + */
6243 +
6244 +-#define PCPU_COUNT_BIAS (1U << 31)
6245 ++#define PCPU_COUNT_BIAS (1U << 30)
6246 +
6247 + /**
6248 + * percpu_ref_init - initialize a percpu refcount
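
The new lib/percpu-refcount.c hunk halves PCPU_COUNT_BIAS from 1U << 31 to 1U << 30, presumably because a bias with the sign bit set reads as a negative value once the counter is treated as a signed int, which the PaX reference-count overflow protection would flag. A standalone check of the arithmetic (two's-complement int assumed):

#include <stdio.h>

int main(void)
{
    unsigned int bias_old = 1U << 31;   /* sign bit set */
    unsigned int bias_new = 1U << 30;   /* largest power of two that stays positive */

    printf("1U<<31 as int: %d\n", (int)bias_old);   /* -2147483648 on two's-complement */
    printf("1U<<30 as int: %d\n", (int)bias_new);   /* 1073741824 */
    return 0;
}
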
6249 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
6250 index e796429..6e38f9f 100644
6251 --- a/lib/radix-tree.c
6252 @@ -87290,7 +87121,7 @@ index 6f0c244..6d1ae32 100644
6253 err = -EPERM;
6254 goto out;
6255 diff --git a/mm/mlock.c b/mm/mlock.c
6256 -index 79b7cf7..9944291 100644
6257 +index 79b7cf7..37472bf 100644
6258 --- a/mm/mlock.c
6259 +++ b/mm/mlock.c
6260 @@ -13,6 +13,7 @@
6261 @@ -87340,7 +87171,7 @@ index 79b7cf7..9944291 100644
6262 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
6263 error = do_mlock(start, len, 1);
6264 up_write(&current->mm->mmap_sem);
6265 -@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
6266 +@@ -500,12 +510,18 @@ static int do_mlockall(int flags)
6267 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
6268 vm_flags_t newflags;
6269
6270 @@ -87352,7 +87183,14 @@ index 79b7cf7..9944291 100644
6271 newflags = vma->vm_flags & ~VM_LOCKED;
6272 if (flags & MCL_CURRENT)
6273 newflags |= VM_LOCKED;
6274 -@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
6275 +
6276 + /* Ignore errors */
6277 + mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
6278 ++ cond_resched();
6279 + }
6280 + out:
6281 + return 0;
6282 +@@ -532,6 +548,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
6283 lock_limit >>= PAGE_SHIFT;
6284
6285 ret = -ENOMEM;
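
The do_mlockall() hunk above also inserts a cond_resched() inside the per-VMA loop (the '++' line), so a task that has built up a huge number of mappings yields the CPU between iterations instead of running the whole walk without a scheduling point. A kernel-context sketch of the pattern, not a standalone program; walk_all_vmas() is an invented name:

#include <linux/mm.h>
#include <linux/sched.h>

/* Caller holds mm->mmap_sem for writing, as sys_mlockall() does. */
static void walk_all_vmas(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        /* per-VMA work; errors ignored, as in do_mlockall() */
        cond_resched();     /* voluntary preemption point on each iteration */
    }
}
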
6286 @@ -91477,7 +91315,7 @@ index 8ab48cd..57b1a80 100644
6287
6288 return err;
6289 diff --git a/net/core/dev.c b/net/core/dev.c
6290 -index 26755dd..5020ced 100644
6291 +index 26755dd..2a232de 100644
6292 --- a/net/core/dev.c
6293 +++ b/net/core/dev.c
6294 @@ -1680,14 +1680,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
6295 @@ -91520,7 +91358,7 @@ index 26755dd..5020ced 100644
6296 EXPORT_SYMBOL(netif_rx_ni);
6297
6298 -static void net_tx_action(struct softirq_action *h)
6299 -+static void net_tx_action(void)
6300 ++static __latent_entropy void net_tx_action(void)
6301 {
6302 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6303
6304 @@ -91538,7 +91376,7 @@ index 26755dd..5020ced 100644
6305 EXPORT_SYMBOL(netif_napi_del);
6306
6307 -static void net_rx_action(struct softirq_action *h)
6308 -+static void net_rx_action(void)
6309 ++static __latent_entropy void net_rx_action(void)
6310 {
6311 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6312 unsigned long time_limit = jiffies + 2;
6313 @@ -93358,6 +93196,96 @@ index 90747f1..505320d 100644
6314 .kind = "ip6gretap",
6315 .maxtype = IFLA_GRE_MAX,
6316 .policy = ip6gre_policy,
6317 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
6318 +index e7ceb6c..44df1c9 100644
6319 +--- a/net/ipv6/ip6_output.c
6320 ++++ b/net/ipv6/ip6_output.c
6321 +@@ -1040,6 +1040,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
6322 + * udp datagram
6323 + */
6324 + if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
6325 ++ struct frag_hdr fhdr;
6326 ++
6327 + skb = sock_alloc_send_skb(sk,
6328 + hh_len + fragheaderlen + transhdrlen + 20,
6329 + (flags & MSG_DONTWAIT), &err);
6330 +@@ -1061,12 +1063,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
6331 + skb->protocol = htons(ETH_P_IPV6);
6332 + skb->ip_summed = CHECKSUM_PARTIAL;
6333 + skb->csum = 0;
6334 +- }
6335 +-
6336 +- err = skb_append_datato_frags(sk,skb, getfrag, from,
6337 +- (length - transhdrlen));
6338 +- if (!err) {
6339 +- struct frag_hdr fhdr;
6340 +
6341 + /* Specify the length of each IPv6 datagram fragment.
6342 + * It has to be a multiple of 8.
6343 +@@ -1077,15 +1073,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
6344 + ipv6_select_ident(&fhdr, rt);
6345 + skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
6346 + __skb_queue_tail(&sk->sk_write_queue, skb);
6347 +-
6348 +- return 0;
6349 + }
6350 +- /* There is not enough support do UPD LSO,
6351 +- * so follow normal path
6352 +- */
6353 +- kfree_skb(skb);
6354 +
6355 +- return err;
6356 ++ return skb_append_datato_frags(sk, skb, getfrag, from,
6357 ++ (length - transhdrlen));
6358 + }
6359 +
6360 + static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
6361 +@@ -1252,27 +1243,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
6362 + * --yoshfuji
6363 + */
6364 +
6365 ++ if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
6366 ++ sk->sk_protocol == IPPROTO_RAW)) {
6367 ++ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
6368 ++ return -EMSGSIZE;
6369 ++ }
6370 ++
6371 ++ skb = skb_peek_tail(&sk->sk_write_queue);
6372 + cork->length += length;
6373 +- if (length > mtu) {
6374 +- int proto = sk->sk_protocol;
6375 +- if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
6376 +- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
6377 +- return -EMSGSIZE;
6378 +- }
6379 +-
6380 +- if (proto == IPPROTO_UDP &&
6381 +- (rt->dst.dev->features & NETIF_F_UFO)) {
6382 +-
6383 +- err = ip6_ufo_append_data(sk, getfrag, from, length,
6384 +- hh_len, fragheaderlen,
6385 +- transhdrlen, mtu, flags, rt);
6386 +- if (err)
6387 +- goto error;
6388 +- return 0;
6389 +- }
6390 ++ if (((length > mtu) ||
6391 ++ (skb && skb_is_gso(skb))) &&
6392 ++ (sk->sk_protocol == IPPROTO_UDP) &&
6393 ++ (rt->dst.dev->features & NETIF_F_UFO)) {
6394 ++ err = ip6_ufo_append_data(sk, getfrag, from, length,
6395 ++ hh_len, fragheaderlen,
6396 ++ transhdrlen, mtu, flags, rt);
6397 ++ if (err)
6398 ++ goto error;
6399 ++ return 0;
6400 + }
6401 +
6402 +- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
6403 ++ if (!skb)
6404 + goto alloc_new_skb;
6405 +
6406 + while (length > 0) {
6407 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
6408 index 46ba243..576f50e 100644
6409 --- a/net/ipv6/ip6_tunnel.c
6410 @@ -95749,7 +95677,7 @@ index 9a5c4c9..46e4b29 100644
6411
6412 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
6413 diff --git a/net/socket.c b/net/socket.c
6414 -index b2d7c62..04f19ea 100644
6415 +index b2d7c62..441a7ef 100644
6416 --- a/net/socket.c
6417 +++ b/net/socket.c
6418 @@ -88,6 +88,7 @@
6419 @@ -95769,6 +95697,15 @@ index b2d7c62..04f19ea 100644
6420 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
6421 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
6422 unsigned long nr_segs, loff_t pos);
6423 +@@ -162,7 +165,7 @@ static const struct file_operations socket_file_ops = {
6424 + */
6425 +
6426 + static DEFINE_SPINLOCK(net_family_lock);
6427 +-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
6428 ++const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
6429 +
6430 + /*
6431 + * Statistics counters of the socket lists
6432 @@ -327,7 +330,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
6433 &sockfs_dentry_operations, SOCKFS_MAGIC);
6434 }
6435 @@ -95787,24 +95724,28 @@ index b2d7c62..04f19ea 100644
6436
6437 /* Compatibility.
6438
6439 -@@ -1394,6 +1399,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
6440 - if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
6441 - flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
6442 +@@ -1283,6 +1288,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
6443 + if (err)
6444 + return err;
6445
6446 -+ if(!gr_search_socket(family, type, protocol)) {
6447 -+ retval = -EACCES;
6448 -+ goto out;
6449 ++ if(!kern && !gr_search_socket(family, type, protocol)) {
6450 ++ if (rcu_access_pointer(net_families[family]) == NULL)
6451 ++ return -EAFNOSUPPORT;
6452 ++ else
6453 ++ return -EACCES;
6454 + }
6455 +
6456 -+ if (gr_handle_sock_all(family, type, protocol)) {
6457 -+ retval = -EACCES;
6458 -+ goto out;
6459 ++ if (!kern && gr_handle_sock_all(family, type, protocol)) {
6460 ++ if (rcu_access_pointer(net_families[family]) == NULL)
6461 ++ return -EAFNOSUPPORT;
6462 ++ else
6463 ++ return -EACCES;
6464 + }
6465 +
6466 - retval = sock_create(family, type, protocol, &sock);
6467 - if (retval < 0)
6468 - goto out;
6469 -@@ -1521,6 +1536,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
6470 + /*
6471 + * Allocate the socket and allow the family to set things up. if
6472 + * the protocol is 0, the family is instructed to select an appropriate
6473 +@@ -1521,6 +1540,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
6474 if (sock) {
6475 err = move_addr_to_kernel(umyaddr, addrlen, &address);
6476 if (err >= 0) {
6477 @@ -95819,7 +95760,7 @@ index b2d7c62..04f19ea 100644
6478 err = security_socket_bind(sock,
6479 (struct sockaddr *)&address,
6480 addrlen);
6481 -@@ -1529,6 +1552,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
6482 +@@ -1529,6 +1556,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
6483 (struct sockaddr *)
6484 &address, addrlen);
6485 }
6486 @@ -95827,7 +95768,7 @@ index b2d7c62..04f19ea 100644
6487 fput_light(sock->file, fput_needed);
6488 }
6489 return err;
6490 -@@ -1552,10 +1576,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
6491 +@@ -1552,10 +1580,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
6492 if ((unsigned int)backlog > somaxconn)
6493 backlog = somaxconn;
6494
6495 @@ -95848,7 +95789,7 @@ index b2d7c62..04f19ea 100644
6496 fput_light(sock->file, fput_needed);
6497 }
6498 return err;
6499 -@@ -1599,6 +1633,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
6500 +@@ -1599,6 +1637,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
6501 newsock->type = sock->type;
6502 newsock->ops = sock->ops;
6503
6504 @@ -95867,7 +95808,7 @@ index b2d7c62..04f19ea 100644
6505 /*
6506 * We don't need try_module_get here, as the listening socket (sock)
6507 * has the protocol module (sock->ops->owner) held.
6508 -@@ -1644,6 +1690,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
6509 +@@ -1644,6 +1694,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
6510 fd_install(newfd, newfile);
6511 err = newfd;
6512
6513 @@ -95876,7 +95817,7 @@ index b2d7c62..04f19ea 100644
6514 out_put:
6515 fput_light(sock->file, fput_needed);
6516 out:
6517 -@@ -1676,6 +1724,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
6518 +@@ -1676,6 +1728,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
6519 int, addrlen)
6520 {
6521 struct socket *sock;
6522 @@ -95884,7 +95825,7 @@ index b2d7c62..04f19ea 100644
6523 struct sockaddr_storage address;
6524 int err, fput_needed;
6525
6526 -@@ -1686,6 +1735,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
6527 +@@ -1686,6 +1739,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
6528 if (err < 0)
6529 goto out_put;
6530
6531 @@ -95902,7 +95843,7 @@ index b2d7c62..04f19ea 100644
6532 err =
6533 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
6534 if (err)
6535 -@@ -1767,6 +1827,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
6536 +@@ -1767,6 +1831,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
6537 * the protocol.
6538 */
6539
6540 @@ -95911,7 +95852,7 @@ index b2d7c62..04f19ea 100644
6541 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
6542 unsigned int, flags, struct sockaddr __user *, addr,
6543 int, addr_len)
6544 -@@ -1833,7 +1895,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
6545 +@@ -1833,7 +1899,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
6546 struct socket *sock;
6547 struct iovec iov;
6548 struct msghdr msg;
6549 @@ -95920,7 +95861,7 @@ index b2d7c62..04f19ea 100644
6550 int err, err2;
6551 int fput_needed;
6552
6553 -@@ -2040,7 +2102,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
6554 +@@ -2040,7 +2106,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
6555 * checking falls down on this.
6556 */
6557 if (copy_from_user(ctl_buf,
6558 @@ -95929,7 +95870,7 @@ index b2d7c62..04f19ea 100644
6559 ctl_len))
6560 goto out_freectl;
6561 msg_sys->msg_control = ctl_buf;
6562 -@@ -2191,7 +2253,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
6563 +@@ -2191,7 +2257,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
6564 int err, total_len, len;
6565
6566 /* kernel mode address */
6567 @@ -95938,7 +95879,7 @@ index b2d7c62..04f19ea 100644
6568
6569 /* user mode address pointers */
6570 struct sockaddr __user *uaddr;
6571 -@@ -2219,7 +2281,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
6572 +@@ -2219,7 +2285,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
6573 * kernel msghdr to use the kernel address space)
6574 */
6575
6576 @@ -95947,7 +95888,7 @@ index b2d7c62..04f19ea 100644
6577 uaddr_len = COMPAT_NAMELEN(msg);
6578 if (MSG_CMSG_COMPAT & flags) {
6579 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
6580 -@@ -2974,7 +3036,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
6581 +@@ -2974,7 +3040,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
6582 old_fs = get_fs();
6583 set_fs(KERNEL_DS);
6584 err = dev_ioctl(net, cmd,
6585 @@ -95956,7 +95897,7 @@ index b2d7c62..04f19ea 100644
6586 set_fs(old_fs);
6587
6588 return err;
6589 -@@ -3083,7 +3145,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
6590 +@@ -3083,7 +3149,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
6591
6592 old_fs = get_fs();
6593 set_fs(KERNEL_DS);
6594 @@ -95965,7 +95906,7 @@ index b2d7c62..04f19ea 100644
6595 set_fs(old_fs);
6596
6597 if (cmd == SIOCGIFMAP && !err) {
6598 -@@ -3188,7 +3250,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
6599 +@@ -3188,7 +3254,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
6600 ret |= __get_user(rtdev, &(ur4->rt_dev));
6601 if (rtdev) {
6602 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
6603 @@ -95974,7 +95915,7 @@ index b2d7c62..04f19ea 100644
6604 devname[15] = 0;
6605 } else
6606 r4.rt_dev = NULL;
6607 -@@ -3414,8 +3476,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
6608 +@@ -3414,8 +3480,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
6609 int __user *uoptlen;
6610 int err;
6611
6612 @@ -95985,7 +95926,7 @@ index b2d7c62..04f19ea 100644
6613
6614 set_fs(KERNEL_DS);
6615 if (level == SOL_SOCKET)
6616 -@@ -3435,7 +3497,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
6617 +@@ -3435,7 +3501,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
6618 char __user *uoptval;
6619 int err;
6620
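
The __sock_create() hunk earlier in this net/socket.c section makes the grsecurity socket ACL return -EAFNOSUPPORT rather than -EACCES when it denies a family that has no handler registered in net_families[], so a confined process sees the same error an unpatched kernel reports for a protocol that simply is not built in. A trivial userspace demo of what that error looks like (the family number 123 is just an arbitrary value no kernel defines):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>

int main(void)
{
    int fd = socket(123 /* no such address family */, SOCK_DGRAM, 0);

    if (fd < 0)
        printf("socket(123, SOCK_DGRAM, 0): %s\n", strerror(errno));
    /* prints "Address family not supported by protocol" (EAFNOSUPPORT) */
    return 0;
}
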
6621 @@ -101154,7 +101095,7 @@ index 0000000..698da67
6622 +}
6623 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
6624 new file mode 100644
6625 -index 0000000..2ef6fd9
6626 +index 0000000..cd6c242
6627 --- /dev/null
6628 +++ b/tools/gcc/latent_entropy_plugin.c
6629 @@ -0,0 +1,321 @@
6630 @@ -101450,7 +101391,7 @@ index 0000000..2ef6fd9
6631 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
6632 + DECL_EXTERNAL(latent_entropy_decl) = 1;
6633 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
6634 -+ DECL_INITIAL(latent_entropy_decl) = NULL;
6635 ++ DECL_INITIAL(latent_entropy_decl) = build_int_cstu(long_long_unsigned_type_node, get_random_const());
6636 + lang_hooks.decls.pushdecl(latent_entropy_decl);
6637 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
6638 +// varpool_finalize_decl(latent_entropy_decl);
6639
6640 diff --git a/3.11.1/4425_grsec_remove_EI_PAX.patch b/3.11.2/4425_grsec_remove_EI_PAX.patch
6641 similarity index 100%
6642 rename from 3.11.1/4425_grsec_remove_EI_PAX.patch
6643 rename to 3.11.2/4425_grsec_remove_EI_PAX.patch
6644
6645 diff --git a/3.11.1/4427_force_XATTR_PAX_tmpfs.patch b/3.11.2/4427_force_XATTR_PAX_tmpfs.patch
6646 similarity index 100%
6647 rename from 3.11.1/4427_force_XATTR_PAX_tmpfs.patch
6648 rename to 3.11.2/4427_force_XATTR_PAX_tmpfs.patch
6649
6650 diff --git a/3.11.1/4430_grsec-remove-localversion-grsec.patch b/3.11.2/4430_grsec-remove-localversion-grsec.patch
6651 similarity index 100%
6652 rename from 3.11.1/4430_grsec-remove-localversion-grsec.patch
6653 rename to 3.11.2/4430_grsec-remove-localversion-grsec.patch
6654
6655 diff --git a/3.11.1/4435_grsec-mute-warnings.patch b/3.11.2/4435_grsec-mute-warnings.patch
6656 similarity index 100%
6657 rename from 3.11.1/4435_grsec-mute-warnings.patch
6658 rename to 3.11.2/4435_grsec-mute-warnings.patch
6659
6660 diff --git a/3.11.1/4440_grsec-remove-protected-paths.patch b/3.11.2/4440_grsec-remove-protected-paths.patch
6661 similarity index 100%
6662 rename from 3.11.1/4440_grsec-remove-protected-paths.patch
6663 rename to 3.11.2/4440_grsec-remove-protected-paths.patch
6664
6665 diff --git a/3.11.1/4450_grsec-kconfig-default-gids.patch b/3.11.2/4450_grsec-kconfig-default-gids.patch
6666 similarity index 100%
6667 rename from 3.11.1/4450_grsec-kconfig-default-gids.patch
6668 rename to 3.11.2/4450_grsec-kconfig-default-gids.patch
6669
6670 diff --git a/3.11.1/4465_selinux-avc_audit-log-curr_ip.patch b/3.11.2/4465_selinux-avc_audit-log-curr_ip.patch
6671 similarity index 100%
6672 rename from 3.11.1/4465_selinux-avc_audit-log-curr_ip.patch
6673 rename to 3.11.2/4465_selinux-avc_audit-log-curr_ip.patch
6674
6675 diff --git a/3.11.1/4470_disable-compat_vdso.patch b/3.11.2/4470_disable-compat_vdso.patch
6676 similarity index 100%
6677 rename from 3.11.1/4470_disable-compat_vdso.patch
6678 rename to 3.11.2/4470_disable-compat_vdso.patch
6679
6680 diff --git a/3.11.1/4475_emutramp_default_on.patch b/3.11.2/4475_emutramp_default_on.patch
6681 similarity index 100%
6682 rename from 3.11.1/4475_emutramp_default_on.patch
6683 rename to 3.11.2/4475_emutramp_default_on.patch
6684
6685 diff --git a/3.2.51/0000_README b/3.2.51/0000_README
6686 index cf0a0fe..e87b456 100644
6687 --- a/3.2.51/0000_README
6688 +++ b/3.2.51/0000_README
6689 @@ -122,7 +122,7 @@ Patch: 1050_linux-3.2.51.patch
6690 From: http://www.kernel.org
6691 Desc: Linux 3.2.51
6692
6693 -Patch: 4420_grsecurity-2.9.1-3.2.51-201309181906.patch
6694 +Patch: 4420_grsecurity-2.9.1-3.2.51-201309281102.patch
6695 From: http://www.grsecurity.net
6696 Desc: hardened-sources base patch from upstream grsecurity
6697
6698
6699 diff --git a/3.2.51/4420_grsecurity-2.9.1-3.2.51-201309181906.patch b/3.2.51/4420_grsecurity-2.9.1-3.2.51-201309281102.patch
6700 similarity index 99%
6701 rename from 3.2.51/4420_grsecurity-2.9.1-3.2.51-201309181906.patch
6702 rename to 3.2.51/4420_grsecurity-2.9.1-3.2.51-201309281102.patch
6703 index 6cc3546..79a6bf4 100644
6704 --- a/3.2.51/4420_grsecurity-2.9.1-3.2.51-201309181906.patch
6705 +++ b/3.2.51/4420_grsecurity-2.9.1-3.2.51-201309281102.patch
6706 @@ -30191,7 +30191,7 @@ index af00795..2bb8105 100644
6707 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
6708 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
6709 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
6710 -index 58916af..eb9dbcf6 100644
6711 +index 58916af..9b538a6 100644
6712 --- a/block/blk-iopoll.c
6713 +++ b/block/blk-iopoll.c
6714 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
6715 @@ -30199,7 +30199,7 @@ index 58916af..eb9dbcf6 100644
6716 EXPORT_SYMBOL(blk_iopoll_complete);
6717
6718 -static void blk_iopoll_softirq(struct softirq_action *h)
6719 -+static void blk_iopoll_softirq(void)
6720 ++static __latent_entropy void blk_iopoll_softirq(void)
6721 {
6722 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
6723 int rearm = 0, budget = blk_iopoll_budget;
6724 @@ -30226,7 +30226,7 @@ index 623e1cd..ca1e109 100644
6725 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
6726 else
6727 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
6728 -index 1366a89..dfb3871 100644
6729 +index 1366a89..88178fe 100644
6730 --- a/block/blk-softirq.c
6731 +++ b/block/blk-softirq.c
6732 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
6733 @@ -30234,7 +30234,7 @@ index 1366a89..dfb3871 100644
6734 * while passing them to the queue registered handler.
6735 */
6736 -static void blk_done_softirq(struct softirq_action *h)
6737 -+static void blk_done_softirq(void)
6738 ++static __latent_entropy void blk_done_softirq(void)
6739 {
6740 struct list_head *cpu_list, local_list;
6741
6742 @@ -31889,19 +31889,18 @@ index e8d11b6..7b1b36f 100644
6743 }
6744 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
6745 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
6746 -index d3446f6..12de1df 100644
6747 +index d3446f6..61ddf2c 100644
6748 --- a/drivers/block/cciss.c
6749 +++ b/drivers/block/cciss.c
6750 -@@ -1186,6 +1186,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
6751 +@@ -1186,6 +1186,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
6752 int err;
6753 u32 cp;
6754
6755 + memset(&arg64, 0, sizeof(arg64));
6756 -+
6757 err = 0;
6758 err |=
6759 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
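
Aside from the h->access indirection changes below, the functional change in this cciss hunk is the memset() of arg64 ahead of the field-by-field copy_from_user() calls, presumably so that struct padding and any fields the 32-bit compat path never fills cannot carry stale kernel stack bytes when arg64 is later copied back out. The same defensive idiom in plain C (the struct layout here is hypothetical):

#include <stdio.h>
#include <string.h>

struct passthru_args {          /* hypothetical layout with implicit padding */
    char cmd;                   /* padding bytes typically follow this member */
    long buf_addr;
    int  buf_len;
};

static void build_args(struct passthru_args *a, char cmd, long addr, int len)
{
    memset(a, 0, sizeof(*a));   /* padding and unset fields become 0, never stale data */
    a->cmd = cmd;
    a->buf_addr = addr;
    a->buf_len = len;
}

int main(void)
{
    struct passthru_args a;

    build_args(&a, 0x12, 0, 64);
    printf("sizeof(struct passthru_args) = %zu\n", sizeof(a));
    return 0;
}
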
6760 -@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
6761 +@@ -3007,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
6762 while (!list_empty(&h->reqQ)) {
6763 c = list_entry(h->reqQ.next, CommandList_struct, list);
6764 /* can't do anything if fifo is full */
6765 @@ -31910,7 +31909,7 @@ index d3446f6..12de1df 100644
6766 dev_warn(&h->pdev->dev, "fifo full\n");
6767 break;
6768 }
6769 -@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
6770 +@@ -3017,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
6771 h->Qdepth--;
6772
6773 /* Tell the controller execute command */
6774 @@ -31919,7 +31918,7 @@ index d3446f6..12de1df 100644
6775
6776 /* Put job onto the completed Q */
6777 addQ(&h->cmpQ, c);
6778 -@@ -3443,17 +3445,17 @@ startio:
6779 +@@ -3443,17 +3444,17 @@ startio:
6780
6781 static inline unsigned long get_next_completion(ctlr_info_t *h)
6782 {
6783 @@ -31940,7 +31939,7 @@ index d3446f6..12de1df 100644
6784 (h->interrupts_enabled == 0));
6785 }
6786
6787 -@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
6788 +@@ -3486,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
6789 u32 a;
6790
6791 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
6792 @@ -31949,7 +31948,7 @@ index d3446f6..12de1df 100644
6793
6794 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
6795 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
6796 -@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
6797 +@@ -4044,7 +4045,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
6798 trans_support & CFGTBL_Trans_use_short_tags);
6799
6800 /* Change the access methods to the performant access methods */
6801 @@ -31958,7 +31957,7 @@ index d3446f6..12de1df 100644
6802 h->transMethod = CFGTBL_Trans_Performant;
6803
6804 return;
6805 -@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
6806 +@@ -4316,7 +4317,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
6807 if (prod_index < 0)
6808 return -ENODEV;
6809 h->product_name = products[prod_index].product_name;
6810 @@ -31967,7 +31966,7 @@ index d3446f6..12de1df 100644
6811
6812 if (cciss_board_disabled(h)) {
6813 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
6814 -@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
6815 +@@ -5041,7 +5042,7 @@ reinit_after_soft_reset:
6816 }
6817
6818 /* make sure the board interrupts are off */
6819 @@ -31976,7 +31975,7 @@ index d3446f6..12de1df 100644
6820 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
6821 if (rc)
6822 goto clean2;
6823 -@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
6824 +@@ -5093,7 +5094,7 @@ reinit_after_soft_reset:
6825 * fake ones to scoop up any residual completions.
6826 */
6827 spin_lock_irqsave(&h->lock, flags);
6828 @@ -31985,7 +31984,7 @@ index d3446f6..12de1df 100644
6829 spin_unlock_irqrestore(&h->lock, flags);
6830 free_irq(h->intr[h->intr_mode], h);
6831 rc = cciss_request_irq(h, cciss_msix_discard_completions,
6832 -@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
6833 +@@ -5113,9 +5114,9 @@ reinit_after_soft_reset:
6834 dev_info(&h->pdev->dev, "Board READY.\n");
6835 dev_info(&h->pdev->dev,
6836 "Waiting for stale completions to drain.\n");
6837 @@ -31997,7 +31996,7 @@ index d3446f6..12de1df 100644
6838
6839 rc = controller_reset_failed(h->cfgtable);
6840 if (rc)
6841 -@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
6842 +@@ -5138,7 +5139,7 @@ reinit_after_soft_reset:
6843 cciss_scsi_setup(h);
6844
6845 /* Turn the interrupts on so we can service requests */
6846 @@ -32006,7 +32005,7 @@ index d3446f6..12de1df 100644
6847
6848 /* Get the firmware version */
6849 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
6850 -@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
6851 +@@ -5211,7 +5212,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
6852 kfree(flush_buf);
6853 if (return_code != IO_OK)
6854 dev_warn(&h->pdev->dev, "Error flushing cache\n");
6855 @@ -49227,7 +49226,7 @@ index a6395bd..f1e376a 100644
6856 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
6857 #ifdef __alpha__
6858 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
6859 -index 8dd615c..0d06360 100644
6860 +index 8dd615c..65b7958 100644
6861 --- a/fs/binfmt_elf.c
6862 +++ b/fs/binfmt_elf.c
6863 @@ -32,6 +32,7 @@
6864 @@ -49381,7 +49380,26 @@ index 8dd615c..0d06360 100644
6865 error = -ENOMEM;
6866 goto out_close;
6867 }
6868 -@@ -528,6 +557,315 @@ out:
6869 +@@ -513,11 +542,13 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
6870 + elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
6871 +
6872 + /* Map the last of the bss segment */
6873 +- down_write(&current->mm->mmap_sem);
6874 +- error = do_brk(elf_bss, last_bss - elf_bss);
6875 +- up_write(&current->mm->mmap_sem);
6876 +- if (BAD_ADDR(error))
6877 +- goto out_close;
6878 ++ if (last_bss > elf_bss) {
6879 ++ down_write(&current->mm->mmap_sem);
6880 ++ error = do_brk(elf_bss, last_bss - elf_bss);
6881 ++ up_write(&current->mm->mmap_sem);
6882 ++ if (BAD_ADDR(error))
6883 ++ goto out_close;
6884 ++ }
6885 + }
6886 +
6887 + error = load_addr;
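
The load_elf_interp() change above only calls do_brk() when last_bss really extends past the page-aligned elf_bss; without the guard, an interpreter whose zero-fill region ends inside its last file-backed page hands do_brk() a wrapped-around unsigned length. A standalone check of the condition (macros simplified, addresses made up):

#include <stdio.h>

#define ELF_MIN_ALIGN    4096UL
#define ELF_PAGESTART(v) ((v) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
    unsigned long elf_bss  = 0x40d234;  /* end of file-backed data */
    unsigned long last_bss = 0x40d800;  /* end of zero-fill region */

    elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);   /* round up: 0x40e000 */

    if (last_bss > elf_bss)
        printf("map %lu extra bytes of bss\n", last_bss - elf_bss);
    else
        printf("nothing left to map; unguarded length would be %#lx\n",
               last_bss - elf_bss);     /* wraps to 0xfffffffffffff800 on 64-bit */
    return 0;
}
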
6888 +@@ -528,6 +559,315 @@ out:
6889 return error;
6890 }
6891
6892 @@ -49697,7 +49715,7 @@ index 8dd615c..0d06360 100644
6893 /*
6894 * These are the functions used to load ELF style executables and shared
6895 * libraries. There is no binary dependent code anywhere else.
6896 -@@ -544,6 +882,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
6897 +@@ -544,6 +884,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
6898 {
6899 unsigned int random_variable = 0;
6900
6901 @@ -49709,7 +49727,7 @@ index 8dd615c..0d06360 100644
6902 if ((current->flags & PF_RANDOMIZE) &&
6903 !(current->personality & ADDR_NO_RANDOMIZE)) {
6904 random_variable = get_random_int() & STACK_RND_MASK;
6905 -@@ -562,7 +905,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6906 +@@ -562,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6907 unsigned long load_addr = 0, load_bias = 0;
6908 int load_addr_set = 0;
6909 char * elf_interpreter = NULL;
6910 @@ -49718,7 +49736,7 @@ index 8dd615c..0d06360 100644
6911 struct elf_phdr *elf_ppnt, *elf_phdata;
6912 unsigned long elf_bss, elf_brk;
6913 int retval, i;
6914 -@@ -572,11 +915,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6915 +@@ -572,11 +917,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6916 unsigned long start_code, end_code, start_data, end_data;
6917 unsigned long reloc_func_desc __maybe_unused = 0;
6918 int executable_stack = EXSTACK_DEFAULT;
6919 @@ -49731,7 +49749,7 @@ index 8dd615c..0d06360 100644
6920
6921 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
6922 if (!loc) {
6923 -@@ -713,11 +1056,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6924 +@@ -713,11 +1058,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6925
6926 /* OK, This is the point of no return */
6927 current->flags &= ~PF_FORKNOEXEC;
6928 @@ -49814,7 +49832,7 @@ index 8dd615c..0d06360 100644
6929 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
6930 current->personality |= READ_IMPLIES_EXEC;
6931
6932 -@@ -808,6 +1221,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6933 +@@ -808,6 +1223,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6934 #else
6935 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
6936 #endif
6937 @@ -49835,7 +49853,7 @@ index 8dd615c..0d06360 100644
6938 }
6939
6940 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
6941 -@@ -840,9 +1267,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6942 +@@ -840,9 +1269,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6943 * allowed task size. Note that p_filesz must always be
6944 * <= p_memsz so it is only necessary to check p_memsz.
6945 */
6946 @@ -49848,7 +49866,7 @@ index 8dd615c..0d06360 100644
6947 /* set_brk can never work. Avoid overflows. */
6948 send_sig(SIGKILL, current, 0);
6949 retval = -EINVAL;
6950 -@@ -881,17 +1308,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6951 +@@ -881,17 +1310,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
6952 goto out_free_dentry;
6953 }
6954 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
6955 @@ -49899,7 +49917,7 @@ index 8dd615c..0d06360 100644
6956 load_bias);
6957 if (!IS_ERR((void *)elf_entry)) {
6958 /*
6959 -@@ -1098,7 +1552,7 @@ out:
6960 +@@ -1098,7 +1554,7 @@ out:
6961 * Decide what to dump of a segment, part, all or none.
6962 */
6963 static unsigned long vma_dump_size(struct vm_area_struct *vma,
6964 @@ -49908,7 +49926,7 @@ index 8dd615c..0d06360 100644
6965 {
6966 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
6967
6968 -@@ -1132,7 +1586,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
6969 +@@ -1132,7 +1588,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
6970 if (vma->vm_file == NULL)
6971 return 0;
6972
6973 @@ -49917,7 +49935,7 @@ index 8dd615c..0d06360 100644
6974 goto whole;
6975
6976 /*
6977 -@@ -1354,9 +1808,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
6978 +@@ -1354,9 +1810,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
6979 {
6980 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
6981 int i = 0;
6982 @@ -49929,7 +49947,7 @@ index 8dd615c..0d06360 100644
6983 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
6984 }
6985
6986 -@@ -1851,14 +2305,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
6987 +@@ -1851,14 +2307,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
6988 }
6989
6990 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
6991 @@ -49946,7 +49964,7 @@ index 8dd615c..0d06360 100644
6992 return size;
6993 }
6994
6995 -@@ -1952,7 +2406,7 @@ static int elf_core_dump(struct coredump_params *cprm)
6996 +@@ -1952,7 +2408,7 @@ static int elf_core_dump(struct coredump_params *cprm)
6997
6998 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
6999
7000 @@ -49955,7 +49973,7 @@ index 8dd615c..0d06360 100644
7001 offset += elf_core_extra_data_size();
7002 e_shoff = offset;
7003
7004 -@@ -1966,10 +2420,12 @@ static int elf_core_dump(struct coredump_params *cprm)
7005 +@@ -1966,10 +2422,12 @@ static int elf_core_dump(struct coredump_params *cprm)
7006 offset = dataoff;
7007
7008 size += sizeof(*elf);
7009 @@ -49968,7 +49986,7 @@ index 8dd615c..0d06360 100644
7010 if (size > cprm->limit
7011 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
7012 goto end_coredump;
7013 -@@ -1983,7 +2439,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7014 +@@ -1983,7 +2441,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7015 phdr.p_offset = offset;
7016 phdr.p_vaddr = vma->vm_start;
7017 phdr.p_paddr = 0;
7018 @@ -49977,7 +49995,7 @@ index 8dd615c..0d06360 100644
7019 phdr.p_memsz = vma->vm_end - vma->vm_start;
7020 offset += phdr.p_filesz;
7021 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
7022 -@@ -1994,6 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7023 +@@ -1994,6 +2452,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7024 phdr.p_align = ELF_EXEC_PAGESIZE;
7025
7026 size += sizeof(phdr);
7027 @@ -49985,7 +50003,7 @@ index 8dd615c..0d06360 100644
7028 if (size > cprm->limit
7029 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
7030 goto end_coredump;
7031 -@@ -2018,7 +2475,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7032 +@@ -2018,7 +2477,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7033 unsigned long addr;
7034 unsigned long end;
7035
7036 @@ -49994,7 +50012,7 @@ index 8dd615c..0d06360 100644
7037
7038 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
7039 struct page *page;
7040 -@@ -2027,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7041 +@@ -2027,6 +2486,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7042 page = get_dump_page(addr);
7043 if (page) {
7044 void *kaddr = kmap(page);
7045 @@ -50002,7 +50020,7 @@ index 8dd615c..0d06360 100644
7046 stop = ((size += PAGE_SIZE) > cprm->limit) ||
7047 !dump_write(cprm->file, kaddr,
7048 PAGE_SIZE);
7049 -@@ -2044,6 +2502,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7050 +@@ -2044,6 +2504,7 @@ static int elf_core_dump(struct coredump_params *cprm)
7051
7052 if (e_phnum == PN_XNUM) {
7053 size += sizeof(*shdr4extnum);
7054 @@ -50010,7 +50028,7 @@ index 8dd615c..0d06360 100644
7055 if (size > cprm->limit
7056 || !dump_write(cprm->file, shdr4extnum,
7057 sizeof(*shdr4extnum)))
7058 -@@ -2064,6 +2523,167 @@ out:
7059 +@@ -2064,6 +2525,167 @@ out:
7060
7061 #endif /* CONFIG_ELF_CORE */
7062
7063 @@ -65286,10 +65304,10 @@ index 0000000..b20f6e9
7064 +}
7065 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
7066 new file mode 100644
7067 -index 0000000..db7cc23
7068 +index 0000000..35f8064
7069 --- /dev/null
7070 +++ b/grsecurity/gracl_ip.c
7071 -@@ -0,0 +1,387 @@
7072 +@@ -0,0 +1,386 @@
7073 +#include <linux/kernel.h>
7074 +#include <asm/uaccess.h>
7075 +#include <asm/errno.h>
7076 @@ -65381,6 +65399,8 @@ index 0000000..db7cc23
7077 + return gr_sockfamilies[family];
7078 +}
7079 +
7080 ++extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
7081 ++
7082 +int
7083 +gr_search_socket(const int domain, const int type, const int protocol)
7084 +{
7085 @@ -65460,10 +65480,7 @@ index 0000000..db7cc23
7086 + if (domain == PF_INET)
7087 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
7088 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
7089 -+ else
7090 -+#ifndef CONFIG_IPV6
7091 -+ if (domain != PF_INET6)
7092 -+#endif
7093 ++ else if (rcu_access_pointer(net_families[domain]) != NULL)
7094 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
7095 + gr_socktype_to_name(type), protocol);
7096 +
7097 @@ -71192,7 +71209,7 @@ index 82924bf..1aa58e7 100644
7098 int trace_set_clr_event(const char *system, const char *event, int set);
7099
7100 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
7101 -index 4eec461..84c73cf 100644
7102 +index 4eec461..4ff5db5 100644
7103 --- a/include/linux/genhd.h
7104 +++ b/include/linux/genhd.h
7105 @@ -185,7 +185,7 @@ struct gendisk {
7106 @@ -71204,6 +71221,15 @@ index 4eec461..84c73cf 100644
7107 struct disk_events *ev;
7108 #ifdef CONFIG_BLK_DEV_INTEGRITY
7109 struct blk_integrity *integrity;
7110 +@@ -420,7 +420,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
7111 + extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
7112 +
7113 + /* drivers/char/random.c */
7114 +-extern void add_disk_randomness(struct gendisk *disk);
7115 ++extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
7116 + extern void rand_initialize_disk(struct gendisk *disk);
7117 +
7118 + static inline sector_t get_start_sect(struct block_device *bdev)
7119 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
7120 index 3a76faf..c0592c7 100644
7121 --- a/include/linux/gfp.h
7122 @@ -74058,10 +74084,10 @@ index 800f113..12c82ec 100644
7123 }
7124
7125 diff --git a/include/linux/random.h b/include/linux/random.h
7126 -index 29e217a..a2b27bc 100644
7127 +index 29e217a..a76bcd0 100644
7128 --- a/include/linux/random.h
7129 +++ b/include/linux/random.h
7130 -@@ -51,6 +51,16 @@ struct rnd_state {
7131 +@@ -51,9 +51,19 @@ struct rnd_state {
7132 extern void rand_initialize_irq(int irq);
7133
7134 extern void add_device_randomness(const void *, unsigned int);
7135 @@ -74076,8 +74102,13 @@ index 29e217a..a2b27bc 100644
7136 +}
7137 +
7138 extern void add_input_randomness(unsigned int type, unsigned int code,
7139 - unsigned int value);
7140 - extern void add_interrupt_randomness(int irq, int irq_flags);
7141 +- unsigned int value);
7142 +-extern void add_interrupt_randomness(int irq, int irq_flags);
7143 ++ unsigned int value) __latent_entropy;
7144 ++extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
7145 +
7146 + extern void get_random_bytes(void *buf, int nbytes);
7147 + extern void get_random_bytes_arch(void *buf, int nbytes);
7148 @@ -71,12 +81,17 @@ void srandom32(u32 seed);
7149
7150 u32 prandom32(struct rnd_state *);
7151 @@ -78651,7 +78682,7 @@ index 9b22d03..6295b62 100644
7152 prev->next = info->next;
7153 else
7154 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
7155 -index 60f7e32..76ccd96 100644
7156 +index 60f7e32..d703ad4 100644
7157 --- a/kernel/hrtimer.c
7158 +++ b/kernel/hrtimer.c
7159 @@ -1414,7 +1414,7 @@ void hrtimer_peek_ahead_timers(void)
7160 @@ -78659,7 +78690,7 @@ index 60f7e32..76ccd96 100644
7161 }
7162
7163 -static void run_hrtimer_softirq(struct softirq_action *h)
7164 -+static void run_hrtimer_softirq(void)
7165 ++static __latent_entropy void run_hrtimer_softirq(void)
7166 {
7167 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
7168
7169 @@ -80654,7 +80685,7 @@ index 67fedad..32d32a04 100644
7170 }
7171
7172 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
7173 -index 636af6d..8af70ab 100644
7174 +index 636af6d..90b936f 100644
7175 --- a/kernel/rcutiny.c
7176 +++ b/kernel/rcutiny.c
7177 @@ -46,7 +46,7 @@
7178 @@ -80671,7 +80702,7 @@ index 636af6d..8af70ab 100644
7179 }
7180
7181 -static void rcu_process_callbacks(struct softirq_action *unused)
7182 -+static void rcu_process_callbacks(void)
7183 ++static __latent_entropy void rcu_process_callbacks(void)
7184 {
7185 __rcu_process_callbacks(&rcu_sched_ctrlblk);
7186 __rcu_process_callbacks(&rcu_bh_ctrlblk);
7187 @@ -80853,7 +80884,7 @@ index 764825c..3aa6ac4 100644
7188 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
7189 per_cpu(rcu_torture_count, cpu)[i] = 0;
7190 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
7191 -index 1aa52af..f2b89e8 100644
7192 +index 1aa52af..d2875ad 100644
7193 --- a/kernel/rcutree.c
7194 +++ b/kernel/rcutree.c
7195 @@ -369,9 +369,9 @@ void rcu_enter_nohz(void)
7196 @@ -80934,7 +80965,7 @@ index 1aa52af..f2b89e8 100644
7197 * Do RCU core processing for the current CPU.
7198 */
7199 -static void rcu_process_callbacks(struct softirq_action *unused)
7200 -+static void rcu_process_callbacks(void)
7201 ++static __latent_entropy void rcu_process_callbacks(void)
7202 {
7203 trace_rcu_utilization("Start RCU core");
7204 __rcu_process_callbacks(&rcu_sched_state,
7205 @@ -81369,7 +81400,7 @@ index f280df1..da1281d 100644
7206 #ifdef CONFIG_RT_GROUP_SCHED
7207 /*
7208 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
7209 -index 59474c5..490e67f 100644
7210 +index 59474c5..efcae8d 100644
7211 --- a/kernel/sched_fair.c
7212 +++ b/kernel/sched_fair.c
7213 @@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
7214 @@ -81377,7 +81408,7 @@ index 59474c5..490e67f 100644
7215 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
7216 */
7217 -static void run_rebalance_domains(struct softirq_action *h)
7218 -+static void run_rebalance_domains(void)
7219 ++static __latent_entropy void run_rebalance_domains(void)
7220 {
7221 int this_cpu = smp_processor_id();
7222 struct rq *this_rq = cpu_rq(this_cpu);
7223 @@ -81549,7 +81580,7 @@ index 9e800b2..1533ba5 100644
7224 raw_spin_unlock_irq(&call_function.lock);
7225 }
7226 diff --git a/kernel/softirq.c b/kernel/softirq.c
7227 -index 2c71d91..f6c64a4 100644
7228 +index 2c71d91..6b690a4 100644
7229 --- a/kernel/softirq.c
7230 +++ b/kernel/softirq.c
7231 @@ -52,11 +52,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
7232 @@ -81589,7 +81620,7 @@ index 2c71d91..f6c64a4 100644
7233 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
7234
7235 -static void tasklet_action(struct softirq_action *a)
7236 -+static void tasklet_action(void)
7237 ++static __latent_entropy void tasklet_action(void)
7238 {
7239 struct tasklet_struct *list;
7240
7241 @@ -81598,7 +81629,7 @@ index 2c71d91..f6c64a4 100644
7242 }
7243
7244 -static void tasklet_hi_action(struct softirq_action *a)
7245 -+static void tasklet_hi_action(void)
7246 ++static __latent_entropy void tasklet_hi_action(void)
7247 {
7248 struct tasklet_struct *list;
7249
7250 @@ -82468,7 +82499,7 @@ index 0b537f2..40d6c20 100644
7251 return -ENOMEM;
7252 return 0;
7253 diff --git a/kernel/timer.c b/kernel/timer.c
7254 -index f8b05a4..9769e5b 100644
7255 +index f8b05a4..ece06b3 100644
7256 --- a/kernel/timer.c
7257 +++ b/kernel/timer.c
7258 @@ -1308,7 +1308,7 @@ void update_process_times(int user_tick)
7259 @@ -82476,7 +82507,7 @@ index f8b05a4..9769e5b 100644
7260 * This function runs timers and the timer-tq in bottom half context.
7261 */
7262 -static void run_timer_softirq(struct softirq_action *h)
7263 -+static void run_timer_softirq(void)
7264 ++static __latent_entropy void run_timer_softirq(void)
7265 {
7266 struct tvec_base *base = __this_cpu_read(tvec_bases);
7267
7268 @@ -85175,7 +85206,7 @@ index 09d6a9d..c514c22 100644
7269 err = -EPERM;
7270 goto out;
7271 diff --git a/mm/mlock.c b/mm/mlock.c
7272 -index 4f4f53b..02d443a 100644
7273 +index 4f4f53b..dbc8aec 100644
7274 --- a/mm/mlock.c
7275 +++ b/mm/mlock.c
7276 @@ -13,6 +13,7 @@
7277 @@ -85225,7 +85256,7 @@ index 4f4f53b..02d443a 100644
7278 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
7279 error = do_mlock(start, len, 1);
7280 up_write(&current->mm->mmap_sem);
7281 -@@ -523,17 +533,22 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
7282 +@@ -523,23 +533,29 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
7283 static int do_mlockall(int flags)
7284 {
7285 struct vm_area_struct * vma, * prev = NULL;
7286 @@ -85251,7 +85282,14 @@ index 4f4f53b..02d443a 100644
7287 newflags = vma->vm_flags | VM_LOCKED;
7288 if (!(flags & MCL_CURRENT))
7289 newflags &= ~VM_LOCKED;
7290 -@@ -566,6 +581,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
7291 +
7292 + /* Ignore errors */
7293 + mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
7294 ++ cond_resched();
7295 + }
7296 + out:
7297 + return 0;
7298 +@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
7299 lock_limit >>= PAGE_SHIFT;
7300
7301 ret = -ENOMEM;
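Beyond bumping hunk offsets, the mm/mlock.c refresh reshapes the hardened do_mlockall() loop: the per-VMA flag handling stays, but each iteration now calls cond_resched() after mlock_fixup(), so walking a very long VMA list under mmap_sem can yield the CPU. Reconstructed from the hunk above, the resulting loop looks roughly as follows; the declarations and the MCL_FUTURE handling outside the hunk are assumptions based on the mainline function of that era, and the grsecurity-specific limit checks in the full hunk are not shown.

    static int do_mlockall(int flags)
    {
            struct vm_area_struct *vma, *prev = NULL;
            unsigned int def_flags = 0;

            if (flags & MCL_FUTURE)
                    def_flags = VM_LOCKED;
            current->mm->def_flags = def_flags;
            if (flags == MCL_FUTURE)
                    goto out;

            for (vma = current->mm->mmap; vma; vma = prev->vm_next) {
                    unsigned long newflags;

                    newflags = vma->vm_flags | VM_LOCKED;
                    if (!(flags & MCL_CURRENT))
                            newflags &= ~VM_LOCKED;

                    /* Ignore errors */
                    mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
                    cond_resched();     /* new in this refresh: yield between VMAs */
            }
    out:
            return 0;
    }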
7302 @@ -89880,7 +89918,7 @@ index 68bbf9f..5ef0d12 100644
7303
7304 return err;
7305 diff --git a/net/core/dev.c b/net/core/dev.c
7306 -index 8e455b8..0e05f5f 100644
7307 +index 8e455b8..4ebd90f 100644
7308 --- a/net/core/dev.c
7309 +++ b/net/core/dev.c
7310 @@ -1142,10 +1142,14 @@ void dev_load(struct net *net, const char *name)
7311 @@ -89939,7 +89977,7 @@ index 8e455b8..0e05f5f 100644
7312 EXPORT_SYMBOL(netif_rx_ni);
7313
7314 -static void net_tx_action(struct softirq_action *h)
7315 -+static void net_tx_action(void)
7316 ++static __latent_entropy void net_tx_action(void)
7317 {
7318 struct softnet_data *sd = &__get_cpu_var(softnet_data);
7319
7320 @@ -89957,7 +89995,7 @@ index 8e455b8..0e05f5f 100644
7321 EXPORT_SYMBOL(netif_napi_del);
7322
7323 -static void net_rx_action(struct softirq_action *h)
7324 -+static void net_rx_action(void)
7325 ++static __latent_entropy void net_rx_action(void)
7326 {
7327 struct softnet_data *sd = &__get_cpu_var(softnet_data);
7328 unsigned long time_limit = jiffies + 2;
7329 @@ -92001,7 +92039,7 @@ index 1567fb1..29af910 100644
7330 dst = NULL;
7331 }
7332 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
7333 -index db60043..33181b7 100644
7334 +index db60043..7f8a2c1 100644
7335 --- a/net/ipv6/ip6_output.c
7336 +++ b/net/ipv6/ip6_output.c
7337 @@ -600,8 +600,8 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
7338 @@ -92033,6 +92071,92 @@ index db60043..33181b7 100644
7339 }
7340
7341 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
7342 +@@ -1125,6 +1122,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
7343 + * udp datagram
7344 + */
7345 + if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
7346 ++ struct frag_hdr fhdr;
7347 ++
7348 + skb = sock_alloc_send_skb(sk,
7349 + hh_len + fragheaderlen + transhdrlen + 20,
7350 + (flags & MSG_DONTWAIT), &err);
7351 +@@ -1145,12 +1144,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
7352 +
7353 + skb->ip_summed = CHECKSUM_PARTIAL;
7354 + skb->csum = 0;
7355 +- }
7356 +-
7357 +- err = skb_append_datato_frags(sk,skb, getfrag, from,
7358 +- (length - transhdrlen));
7359 +- if (!err) {
7360 +- struct frag_hdr fhdr;
7361 +
7362 + /* Specify the length of each IPv6 datagram fragment.
7363 + * It has to be a multiple of 8.
7364 +@@ -1161,15 +1154,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
7365 + ipv6_select_ident(&fhdr, rt);
7366 + skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
7367 + __skb_queue_tail(&sk->sk_write_queue, skb);
7368 +-
7369 +- return 0;
7370 + }
7371 +- /* There is not enough support do UPD LSO,
7372 +- * so follow normal path
7373 +- */
7374 +- kfree_skb(skb);
7375 +
7376 +- return err;
7377 ++ return skb_append_datato_frags(sk, skb, getfrag, from,
7378 ++ (length - transhdrlen));
7379 + }
7380 +
7381 + static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
7382 +@@ -1342,27 +1330,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
7383 + * --yoshfuji
7384 + */
7385 +
7386 ++ if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
7387 ++ sk->sk_protocol == IPPROTO_RAW)) {
7388 ++ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
7389 ++ return -EMSGSIZE;
7390 ++ }
7391 ++
7392 ++ skb = skb_peek_tail(&sk->sk_write_queue);
7393 + cork->length += length;
7394 +- if (length > mtu) {
7395 +- int proto = sk->sk_protocol;
7396 +- if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
7397 +- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
7398 +- return -EMSGSIZE;
7399 +- }
7400 +-
7401 +- if (proto == IPPROTO_UDP &&
7402 +- (rt->dst.dev->features & NETIF_F_UFO)) {
7403 +-
7404 +- err = ip6_ufo_append_data(sk, getfrag, from, length,
7405 +- hh_len, fragheaderlen,
7406 +- transhdrlen, mtu, flags, rt);
7407 +- if (err)
7408 +- goto error;
7409 +- return 0;
7410 +- }
7411 ++ if (((length > mtu) ||
7412 ++ (skb && skb_is_gso(skb))) &&
7413 ++ (sk->sk_protocol == IPPROTO_UDP) &&
7414 ++ (rt->dst.dev->features & NETIF_F_UFO)) {
7415 ++ err = ip6_ufo_append_data(sk, getfrag, from, length,
7416 ++ hh_len, fragheaderlen,
7417 ++ transhdrlen, mtu, flags, rt);
7418 ++ if (err)
7419 ++ goto error;
7420 ++ return 0;
7421 + }
7422 +
7423 +- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
7424 ++ if (!skb)
7425 + goto alloc_new_skb;
7426 +
7427 + while (length > 0) {
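Taken together, the two new ip6_output.c hunks mirror the contemporaneous upstream fix for UDP fragmentation offload: ip6_ufo_append_data() now only allocates and queues the head skb when the write queue is empty and always finishes through skb_append_datato_frags(), while ip6_append_data() hoists the dontfrag -EMSGSIZE check ahead of the cork accounting and keeps routing follow-up writes through the UFO path once a GSO skb is already queued. Stripped of the +/- noise, the reshaped decision in ip6_append_data() reads roughly like this (surrounding declarations are assumed from context):

    if ((length > mtu) && dontfrag &&
        (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_RAW)) {
            ipv6_local_rxpmtu(sk, fl6, mtu - exthdrlen);
            return -EMSGSIZE;
    }

    skb = skb_peek_tail(&sk->sk_write_queue);
    cork->length += length;
    if (((length > mtu) || (skb && skb_is_gso(skb))) &&
        (sk->sk_protocol == IPPROTO_UDP) &&
        (rt->dst.dev->features & NETIF_F_UFO)) {
            /* Keep appending through the UFO path once a GSO skb is
             * queued, instead of falling back to normal fragmentation. */
            err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
                                      fragheaderlen, transhdrlen, mtu,
                                      flags, rt);
            if (err)
                    goto error;
            return 0;
    }

    if (!skb)
            goto alloc_new_skb;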
7428 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
7429 index b204df8..8f274f4 100644
7430 --- a/net/ipv6/ipv6_sockglue.c
7431 @@ -94607,7 +94731,7 @@ index 8da4481..d02565e 100644
7432 + (rtt >> sctp_rto_alpha);
7433 } else {
7434 diff --git a/net/socket.c b/net/socket.c
7435 -index cf546a3..f7c6c75 100644
7436 +index cf546a3..a9b550f 100644
7437 --- a/net/socket.c
7438 +++ b/net/socket.c
7439 @@ -88,6 +88,7 @@
7440 @@ -94627,6 +94751,15 @@ index cf546a3..f7c6c75 100644
7441 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
7442 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
7443 unsigned long nr_segs, loff_t pos);
7444 +@@ -156,7 +159,7 @@ static const struct file_operations socket_file_ops = {
7445 + */
7446 +
7447 + static DEFINE_SPINLOCK(net_family_lock);
7448 +-static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
7449 ++const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
7450 +
7451 + /*
7452 + * Statistics counters of the socket lists
7453 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
7454 &sockfs_dentry_operations, SOCKFS_MAGIC);
7455 }
7456 @@ -94645,24 +94778,28 @@ index cf546a3..f7c6c75 100644
7457
7458 /* Compatibility.
7459
7460 -@@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
7461 - if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
7462 - flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
7463 +@@ -1207,6 +1212,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
7464 + if (err)
7465 + return err;
7466
7467 -+ if(!gr_search_socket(family, type, protocol)) {
7468 -+ retval = -EACCES;
7469 -+ goto out;
7470 ++ if(!kern && !gr_search_socket(family, type, protocol)) {
7471 ++ if (rcu_access_pointer(net_families[family]) == NULL)
7472 ++ return -EAFNOSUPPORT;
7473 ++ else
7474 ++ return -EACCES;
7475 + }
7476 +
7477 -+ if (gr_handle_sock_all(family, type, protocol)) {
7478 -+ retval = -EACCES;
7479 -+ goto out;
7480 ++ if (!kern && gr_handle_sock_all(family, type, protocol)) {
7481 ++ if (rcu_access_pointer(net_families[family]) == NULL)
7482 ++ return -EAFNOSUPPORT;
7483 ++ else
7484 ++ return -EACCES;
7485 + }
7486 +
7487 - retval = sock_create(family, type, protocol, &sock);
7488 - if (retval < 0)
7489 - goto out;
7490 -@@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
7491 + /*
7492 + * Allocate the socket and allow the family to set things up. if
7493 + * the protocol is 0, the family is instructed to select an appropriate
7494 +@@ -1431,6 +1450,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
7495 if (sock) {
7496 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
7497 if (err >= 0) {
7498 @@ -94677,7 +94814,7 @@ index cf546a3..f7c6c75 100644
7499 err = security_socket_bind(sock,
7500 (struct sockaddr *)&address,
7501 addrlen);
7502 -@@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
7503 +@@ -1439,6 +1466,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
7504 (struct sockaddr *)
7505 &address, addrlen);
7506 }
7507 @@ -94685,7 +94822,7 @@ index cf546a3..f7c6c75 100644
7508 fput_light(sock->file, fput_needed);
7509 }
7510 return err;
7511 -@@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
7512 +@@ -1462,10 +1490,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
7513 if ((unsigned)backlog > somaxconn)
7514 backlog = somaxconn;
7515
7516 @@ -94706,7 +94843,7 @@ index cf546a3..f7c6c75 100644
7517 fput_light(sock->file, fput_needed);
7518 }
7519 return err;
7520 -@@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
7521 +@@ -1509,6 +1547,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
7522 newsock->type = sock->type;
7523 newsock->ops = sock->ops;
7524
7525 @@ -94725,7 +94862,7 @@ index cf546a3..f7c6c75 100644
7526 /*
7527 * We don't need try_module_get here, as the listening socket (sock)
7528 * has the protocol module (sock->ops->owner) held.
7529 -@@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
7530 +@@ -1547,6 +1597,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
7531 fd_install(newfd, newfile);
7532 err = newfd;
7533
7534 @@ -94734,7 +94871,7 @@ index cf546a3..f7c6c75 100644
7535 out_put:
7536 fput_light(sock->file, fput_needed);
7537 out:
7538 -@@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
7539 +@@ -1579,6 +1631,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
7540 int, addrlen)
7541 {
7542 struct socket *sock;
7543 @@ -94742,7 +94879,7 @@ index cf546a3..f7c6c75 100644
7544 struct sockaddr_storage address;
7545 int err, fput_needed;
7546
7547 -@@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
7548 +@@ -1589,6 +1642,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
7549 if (err < 0)
7550 goto out_put;
7551
7552 @@ -94760,7 +94897,7 @@ index cf546a3..f7c6c75 100644
7553 err =
7554 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
7555 if (err)
7556 -@@ -1670,6 +1730,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
7557 +@@ -1670,6 +1734,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
7558 * the protocol.
7559 */
7560
7561 @@ -94769,7 +94906,7 @@ index cf546a3..f7c6c75 100644
7562 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
7563 unsigned, flags, struct sockaddr __user *, addr,
7564 int, addr_len)
7565 -@@ -1736,7 +1798,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
7566 +@@ -1736,7 +1802,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
7567 struct socket *sock;
7568 struct iovec iov;
7569 struct msghdr msg;
7570 @@ -94778,7 +94915,7 @@ index cf546a3..f7c6c75 100644
7571 int err, err2;
7572 int fput_needed;
7573
7574 -@@ -1950,7 +2012,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
7575 +@@ -1950,7 +2016,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
7576 * checking falls down on this.
7577 */
7578 if (copy_from_user(ctl_buf,
7579 @@ -94787,7 +94924,7 @@ index cf546a3..f7c6c75 100644
7580 ctl_len))
7581 goto out_freectl;
7582 msg_sys->msg_control = ctl_buf;
7583 -@@ -2101,7 +2163,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
7584 +@@ -2101,7 +2167,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
7585 int err, iov_size, total_len, len;
7586
7587 /* kernel mode address */
7588 @@ -94796,7 +94933,7 @@ index cf546a3..f7c6c75 100644
7589
7590 /* user mode address pointers */
7591 struct sockaddr __user *uaddr;
7592 -@@ -2131,7 +2193,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
7593 +@@ -2131,7 +2197,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
7594 * kernel msghdr to use the kernel address space)
7595 */
7596
7597 @@ -94805,7 +94942,7 @@ index cf546a3..f7c6c75 100644
7598 uaddr_len = COMPAT_NAMELEN(msg);
7599 if (MSG_CMSG_COMPAT & flags) {
7600 err = verify_compat_iovec(msg_sys, iov,
7601 -@@ -2772,7 +2834,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7602 +@@ -2772,7 +2838,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7603 }
7604
7605 ifr = compat_alloc_user_space(buf_size);
7606 @@ -94814,7 +94951,7 @@ index cf546a3..f7c6c75 100644
7607
7608 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
7609 return -EFAULT;
7610 -@@ -2796,12 +2858,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7611 +@@ -2796,12 +2862,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7612 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
7613
7614 if (copy_in_user(rxnfc, compat_rxnfc,
7615 @@ -94831,7 +94968,7 @@ index cf546a3..f7c6c75 100644
7616 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
7617 sizeof(rxnfc->rule_cnt)))
7618 return -EFAULT;
7619 -@@ -2813,12 +2875,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7620 +@@ -2813,12 +2879,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
7621
7622 if (convert_out) {
7623 if (copy_in_user(compat_rxnfc, rxnfc,
7624 @@ -94848,7 +94985,7 @@ index cf546a3..f7c6c75 100644
7625 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
7626 sizeof(rxnfc->rule_cnt)))
7627 return -EFAULT;
7628 -@@ -2888,7 +2950,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
7629 +@@ -2888,7 +2954,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
7630 old_fs = get_fs();
7631 set_fs(KERNEL_DS);
7632 err = dev_ioctl(net, cmd,
7633 @@ -94857,7 +94994,7 @@ index cf546a3..f7c6c75 100644
7634 set_fs(old_fs);
7635
7636 return err;
7637 -@@ -2997,7 +3059,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
7638 +@@ -2997,7 +3063,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
7639
7640 old_fs = get_fs();
7641 set_fs(KERNEL_DS);
7642 @@ -94866,7 +95003,7 @@ index cf546a3..f7c6c75 100644
7643 set_fs(old_fs);
7644
7645 if (cmd == SIOCGIFMAP && !err) {
7646 -@@ -3102,7 +3164,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
7647 +@@ -3102,7 +3168,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
7648 ret |= __get_user(rtdev, &(ur4->rt_dev));
7649 if (rtdev) {
7650 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
7651 @@ -94875,7 +95012,7 @@ index cf546a3..f7c6c75 100644
7652 devname[15] = 0;
7653 } else
7654 r4.rt_dev = NULL;
7655 -@@ -3342,8 +3404,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
7656 +@@ -3342,8 +3408,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
7657 int __user *uoptlen;
7658 int err;
7659
7660 @@ -94886,7 +95023,7 @@ index cf546a3..f7c6c75 100644
7661
7662 set_fs(KERNEL_DS);
7663 if (level == SOL_SOCKET)
7664 -@@ -3363,7 +3425,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
7665 +@@ -3363,7 +3429,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
7666 char __user *uoptval;
7667 int err;
7668
7669 @@ -101206,7 +101343,7 @@ index 0000000..698da67
7670 +}
7671 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
7672 new file mode 100644
7673 -index 0000000..2ef6fd9
7674 +index 0000000..cd6c242
7675 --- /dev/null
7676 +++ b/tools/gcc/latent_entropy_plugin.c
7677 @@ -0,0 +1,321 @@
7678 @@ -101502,7 +101639,7 @@ index 0000000..2ef6fd9
7679 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
7680 + DECL_EXTERNAL(latent_entropy_decl) = 1;
7681 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
7682 -+ DECL_INITIAL(latent_entropy_decl) = NULL;
7683 ++ DECL_INITIAL(latent_entropy_decl) = build_int_cstu(long_long_unsigned_type_node, get_random_const());
7684 + lang_hooks.decls.pushdecl(latent_entropy_decl);
7685 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
7686 +// varpool_finalize_decl(latent_entropy_decl);
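The only functional change visible in the latent_entropy_plugin.c hunk is that the plugin-declared pool variable now receives a compile-time random initializer from the plugin's get_random_const() helper instead of a NULL (zero) initializer, presumably so the pool does not start from a known value before any real entropy has been mixed in. In C terms the effect corresponds roughly to the sketch below; both hex literals are placeholders for per-build random constants, and the instrumentation shown is a heavy simplification of what the plugin actually emits.

    /* Rough C-level picture of the change, not the plugin's real output. */
    volatile unsigned long long latent_entropy = 0x9e3779b97f4a7c15ULL;

    void instrumented_function_example(void)
    {
            /* code the plugin injects into each annotated function,
             * greatly simplified: stir a function-specific constant
             * into the global pool on every invocation */
            latent_entropy ^= 0x243f6a8885a308d3ULL;

            /* ... original function body ... */
    }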