Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Fri, 15 Feb 2019 12:46:14
Message-Id: 1550234743.28c7eceb0c1de020e71f9f62252291c7753fa792.mpagano@gentoo
1 commit: 28c7eceb0c1de020e71f9f62252291c7753fa792
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Feb 15 12:45:43 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Feb 15 12:45:43 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=28c7eceb
7
8 proj/linux-patches: Linux patch 4.9.157 and 4.9.158
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +
13 1156_linux-4.9.157.patch | 889 +++++++++++++++++++++++++++++++++++++++++++++++
14 1157_linux-4.9.158.patch | 34 ++
15 3 files changed, 931 insertions(+)
16
17 diff --git a/0000_README b/0000_README
18 index dc5a410..0d0c627 100644
19 --- a/0000_README
20 +++ b/0000_README
21 @@ -667,6 +667,14 @@ Patch: 1155_linux-4.9.156.patch
22 From: http://www.kernel.org
23 Desc: Linux 4.9.156
24
25 +Patch: 1156_linux-4.9.157.patch
26 +From: http://www.kernel.org
27 +Desc: Linux 4.9.157
28 +
29 +Patch: 1157_linux-4.9.158.patch
30 +From: http://www.kernel.org
31 +Desc: Linux 4.9.158
32 +
33 Patch: 1500_XATTR_USER_PREFIX.patch
34 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
35 Desc: Support for namespace user.pax.* on tmpfs.
36
37 diff --git a/1156_linux-4.9.157.patch b/1156_linux-4.9.157.patch
38 new file mode 100644
39 index 0000000..92b7e3e
40 --- /dev/null
41 +++ b/1156_linux-4.9.157.patch
42 @@ -0,0 +1,889 @@
43 +diff --git a/Makefile b/Makefile
44 +index 956923115f7e..4eb7a17e18f1 100644
45 +--- a/Makefile
46 ++++ b/Makefile
47 +@@ -1,6 +1,6 @@
48 + VERSION = 4
49 + PATCHLEVEL = 9
50 +-SUBLEVEL = 156
51 ++SUBLEVEL = 157
52 + EXTRAVERSION =
53 + NAME = Roaring Lionus
54 +
55 +diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
56 +index c1cd80ecc219..a904244264ce 100644
57 +--- a/arch/arm/mach-iop32x/n2100.c
58 ++++ b/arch/arm/mach-iop32x/n2100.c
59 +@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
60 + /*
61 + * N2100 PCI.
62 + */
63 +-static int __init
64 +-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
65 ++static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
66 + {
67 + int irq;
68 +
69 +diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
70 +index b05c6d6f99d0..08d813234b2d 100644
71 +--- a/arch/arm/mach-tango/pm.c
72 ++++ b/arch/arm/mach-tango/pm.c
73 +@@ -2,6 +2,7 @@
74 + #include <linux/suspend.h>
75 + #include <asm/suspend.h>
76 + #include "smc.h"
77 ++#include "pm.h"
78 +
79 + static int tango_pm_powerdown(unsigned long arg)
80 + {
81 +@@ -23,10 +24,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
82 + .valid = suspend_valid_only_mem,
83 + };
84 +
85 +-static int __init tango_pm_init(void)
86 ++void __init tango_pm_init(void)
87 + {
88 + suspend_set_ops(&tango_pm_ops);
89 +- return 0;
90 + }
91 +-
92 +-late_initcall(tango_pm_init);
93 +diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
94 +new file mode 100644
95 +index 000000000000..35ea705a0ee2
96 +--- /dev/null
97 ++++ b/arch/arm/mach-tango/pm.h
98 +@@ -0,0 +1,7 @@
99 ++/* SPDX-License-Identifier: GPL-2.0 */
100 ++
101 ++#ifdef CONFIG_SUSPEND
102 ++void __init tango_pm_init(void);
103 ++#else
104 ++#define tango_pm_init NULL
105 ++#endif
106 +diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
107 +index f14b6c7d255b..2b48e1098ea3 100644
108 +--- a/arch/arm/mach-tango/setup.c
109 ++++ b/arch/arm/mach-tango/setup.c
110 +@@ -1,6 +1,7 @@
111 + #include <asm/mach/arch.h>
112 + #include <asm/hardware/cache-l2x0.h>
113 + #include "smc.h"
114 ++#include "pm.h"
115 +
116 + static void tango_l2c_write(unsigned long val, unsigned int reg)
117 + {
118 +@@ -14,4 +15,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
119 + .dt_compat = tango_dt_compat,
120 + .l2c_aux_mask = ~0,
121 + .l2c_write_sec = tango_l2c_write,
122 ++ .init_late = tango_pm_init,
123 + MACHINE_END
124 +diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
125 +index 659e6d3ae335..60177a612cb1 100644
126 +--- a/arch/mips/kernel/mips-cm.c
127 ++++ b/arch/mips/kernel/mips-cm.c
128 +@@ -424,5 +424,5 @@ void mips_cm_error_report(void)
129 + }
130 +
131 + /* reprime cause register */
132 +- write_gcr_error_cause(0);
133 ++ write_gcr_error_cause(cm_error);
134 + }
135 +diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
136 +index 308d051fc45c..7c512834a8f1 100644
137 +--- a/arch/mips/pci/pci-octeon.c
138 ++++ b/arch/mips/pci/pci-octeon.c
139 +@@ -573,6 +573,11 @@ static int __init octeon_pci_setup(void)
140 + if (octeon_has_feature(OCTEON_FEATURE_PCIE))
141 + return 0;
142 +
143 ++ if (!octeon_is_pci_host()) {
144 ++ pr_notice("Not in host mode, PCI Controller not initialized\n");
145 ++ return 0;
146 ++ }
147 ++
148 + /* Point pcibios_map_irq() to the PCI version of it */
149 + octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
150 +
151 +@@ -584,11 +589,6 @@ static int __init octeon_pci_setup(void)
152 + else
153 + octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
154 +
155 +- if (!octeon_is_pci_host()) {
156 +- pr_notice("Not in host mode, PCI Controller not initialized\n");
157 +- return 0;
158 +- }
159 +-
160 + /* PCI I/O and PCI MEM values */
161 + set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
162 + ioport_resource.start = 0;
163 +diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
164 +index c3dc12a8b7d9..0b845cc7fbdc 100644
165 +--- a/arch/mips/vdso/Makefile
166 ++++ b/arch/mips/vdso/Makefile
167 +@@ -116,7 +116,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
168 + $(call cmd,force_checksrc)
169 + $(call if_changed_rule,cc_o_c)
170 +
171 +-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
172 ++$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
173 + $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
174 + $(call if_changed_dep,cpp_lds_S)
175 +
176 +@@ -156,7 +156,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
177 + $(call cmd,force_checksrc)
178 + $(call if_changed_rule,cc_o_c)
179 +
180 +-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
181 ++$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
182 + $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
183 + $(call if_changed_dep,cpp_lds_S)
184 +
185 +diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
186 +index e14366de0e6e..97387cfbbeb5 100644
187 +--- a/drivers/gpu/drm/drm_modes.c
188 ++++ b/drivers/gpu/drm/drm_modes.c
189 +@@ -753,7 +753,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
190 + if (mode->hsync)
191 + return mode->hsync;
192 +
193 +- if (mode->htotal < 0)
194 ++ if (mode->htotal <= 0)
195 + return 0;
196 +
197 + calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
198 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
199 +index 29abd28c19b3..4b556e698f13 100644
200 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
201 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
202 +@@ -605,13 +605,16 @@ out_fixup:
203 + static int vmw_dma_masks(struct vmw_private *dev_priv)
204 + {
205 + struct drm_device *dev = dev_priv->dev;
206 ++ int ret = 0;
207 +
208 +- if (intel_iommu_enabled &&
209 ++ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
210 ++ if (dev_priv->map_mode != vmw_dma_phys &&
211 + (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
212 + DRM_INFO("Restricting DMA addresses to 44 bits.\n");
213 +- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
214 ++ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
215 + }
216 +- return 0;
217 ++
218 ++ return ret;
219 + }
220 + #else
221 + static int vmw_dma_masks(struct vmw_private *dev_priv)
222 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
223 +index 81f5a552e32f..9fe8eda7c859 100644
224 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
225 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
226 +@@ -3769,7 +3769,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
227 + *p_fence = NULL;
228 + }
229 +
230 +- return 0;
231 ++ return ret;
232 + }
233 +
234 + /**
235 +diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
236 +index 29423691c105..d7179dd3c9ef 100644
237 +--- a/drivers/hid/hid-debug.c
238 ++++ b/drivers/hid/hid-debug.c
239 +@@ -30,6 +30,7 @@
240 +
241 + #include <linux/debugfs.h>
242 + #include <linux/seq_file.h>
243 ++#include <linux/kfifo.h>
244 + #include <linux/sched.h>
245 + #include <linux/export.h>
246 + #include <linux/slab.h>
247 +@@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) {
248 + char *buf = NULL;
249 +
250 + if (!f) {
251 +- buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
252 ++ buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
253 + if (!buf)
254 + return ERR_PTR(-ENOMEM);
255 + }
256 +@@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
257 + /* enqueue string to 'events' ring buffer */
258 + void hid_debug_event(struct hid_device *hdev, char *buf)
259 + {
260 +- unsigned i;
261 + struct hid_debug_list *list;
262 + unsigned long flags;
263 +
264 + spin_lock_irqsave(&hdev->debug_list_lock, flags);
265 +- list_for_each_entry(list, &hdev->debug_list, node) {
266 +- for (i = 0; buf[i]; i++)
267 +- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
268 +- buf[i];
269 +- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
270 +- }
271 ++ list_for_each_entry(list, &hdev->debug_list, node)
272 ++ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
273 + spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
274 +
275 + wake_up_interruptible(&hdev->debug_wait);
276 +@@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
277 + hid_debug_event(hdev, buf);
278 +
279 + kfree(buf);
280 +- wake_up_interruptible(&hdev->debug_wait);
281 +-
282 ++ wake_up_interruptible(&hdev->debug_wait);
283 + }
284 + EXPORT_SYMBOL_GPL(hid_dump_input);
285 +
286 +@@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
287 + goto out;
288 + }
289 +
290 +- if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
291 +- err = -ENOMEM;
292 ++ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
293 ++ if (err) {
294 + kfree(list);
295 + goto out;
296 + }
297 +@@ -1107,77 +1102,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
298 + size_t count, loff_t *ppos)
299 + {
300 + struct hid_debug_list *list = file->private_data;
301 +- int ret = 0, len;
302 ++ int ret = 0, copied;
303 + DECLARE_WAITQUEUE(wait, current);
304 +
305 + mutex_lock(&list->read_mutex);
306 +- while (ret == 0) {
307 +- if (list->head == list->tail) {
308 +- add_wait_queue(&list->hdev->debug_wait, &wait);
309 +- set_current_state(TASK_INTERRUPTIBLE);
310 +-
311 +- while (list->head == list->tail) {
312 +- if (file->f_flags & O_NONBLOCK) {
313 +- ret = -EAGAIN;
314 +- break;
315 +- }
316 +- if (signal_pending(current)) {
317 +- ret = -ERESTARTSYS;
318 +- break;
319 +- }
320 ++ if (kfifo_is_empty(&list->hid_debug_fifo)) {
321 ++ add_wait_queue(&list->hdev->debug_wait, &wait);
322 ++ set_current_state(TASK_INTERRUPTIBLE);
323 ++
324 ++ while (kfifo_is_empty(&list->hid_debug_fifo)) {
325 ++ if (file->f_flags & O_NONBLOCK) {
326 ++ ret = -EAGAIN;
327 ++ break;
328 ++ }
329 +
330 +- if (!list->hdev || !list->hdev->debug) {
331 +- ret = -EIO;
332 +- set_current_state(TASK_RUNNING);
333 +- goto out;
334 +- }
335 ++ if (signal_pending(current)) {
336 ++ ret = -ERESTARTSYS;
337 ++ break;
338 ++ }
339 +
340 +- /* allow O_NONBLOCK from other threads */
341 +- mutex_unlock(&list->read_mutex);
342 +- schedule();
343 +- mutex_lock(&list->read_mutex);
344 +- set_current_state(TASK_INTERRUPTIBLE);
345 ++ /* if list->hdev is NULL we cannot remove_wait_queue().
346 ++ * if list->hdev->debug is 0 then hid_debug_unregister()
347 ++ * was already called and list->hdev is being destroyed.
348 ++ * if we add remove_wait_queue() here we can hit a race.
349 ++ */
350 ++ if (!list->hdev || !list->hdev->debug) {
351 ++ ret = -EIO;
352 ++ set_current_state(TASK_RUNNING);
353 ++ goto out;
354 + }
355 +
356 +- set_current_state(TASK_RUNNING);
357 +- remove_wait_queue(&list->hdev->debug_wait, &wait);
358 ++ /* allow O_NONBLOCK from other threads */
359 ++ mutex_unlock(&list->read_mutex);
360 ++ schedule();
361 ++ mutex_lock(&list->read_mutex);
362 ++ set_current_state(TASK_INTERRUPTIBLE);
363 + }
364 +
365 +- if (ret)
366 +- goto out;
367 ++ __set_current_state(TASK_RUNNING);
368 ++ remove_wait_queue(&list->hdev->debug_wait, &wait);
369 +
370 +- /* pass the ringbuffer contents to userspace */
371 +-copy_rest:
372 +- if (list->tail == list->head)
373 ++ if (ret)
374 + goto out;
375 +- if (list->tail > list->head) {
376 +- len = list->tail - list->head;
377 +- if (len > count)
378 +- len = count;
379 +-
380 +- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
381 +- ret = -EFAULT;
382 +- goto out;
383 +- }
384 +- ret += len;
385 +- list->head += len;
386 +- } else {
387 +- len = HID_DEBUG_BUFSIZE - list->head;
388 +- if (len > count)
389 +- len = count;
390 +-
391 +- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
392 +- ret = -EFAULT;
393 +- goto out;
394 +- }
395 +- list->head = 0;
396 +- ret += len;
397 +- count -= len;
398 +- if (count > 0)
399 +- goto copy_rest;
400 +- }
401 +-
402 + }
403 ++
404 ++ /* pass the fifo content to userspace, locking is not needed with only
405 ++ * one concurrent reader and one concurrent writer
406 ++ */
407 ++ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
408 ++ if (ret)
409 ++ goto out;
410 ++ ret = copied;
411 + out:
412 + mutex_unlock(&list->read_mutex);
413 + return ret;
414 +@@ -1188,7 +1163,7 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
415 + struct hid_debug_list *list = file->private_data;
416 +
417 + poll_wait(file, &list->hdev->debug_wait, wait);
418 +- if (list->head != list->tail)
419 ++ if (!kfifo_is_empty(&list->hid_debug_fifo))
420 + return POLLIN | POLLRDNORM;
421 + if (!list->hdev->debug)
422 + return POLLERR | POLLHUP;
423 +@@ -1203,7 +1178,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
424 + spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
425 + list_del(&list->node);
426 + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
427 +- kfree(list->hid_debug_buf);
428 ++ kfifo_free(&list->hid_debug_fifo);
429 + kfree(list);
430 +
431 + return 0;
432 +@@ -1254,4 +1229,3 @@ void hid_debug_exit(void)
433 + {
434 + debugfs_remove_recursive(hid_debug_root);
435 + }
436 +-
437 +diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
438 +index ef761a508630..dad2a8be6830 100644
439 +--- a/drivers/iio/chemical/atlas-ph-sensor.c
440 ++++ b/drivers/iio/chemical/atlas-ph-sensor.c
441 +@@ -453,9 +453,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
442 + case IIO_CHAN_INFO_SCALE:
443 + switch (chan->type) {
444 + case IIO_TEMP:
445 +- *val = 1; /* 0.01 */
446 +- *val2 = 100;
447 +- break;
448 ++ *val = 10;
449 ++ return IIO_VAL_INT;
450 + case IIO_PH:
451 + *val = 1; /* 0.001 */
452 + *val2 = 1000;
453 +@@ -486,7 +485,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
454 + int val, int val2, long mask)
455 + {
456 + struct atlas_data *data = iio_priv(indio_dev);
457 +- __be32 reg = cpu_to_be32(val);
458 ++ __be32 reg = cpu_to_be32(val / 10);
459 +
460 + if (val2 != 0 || val < 0 || val > 20000)
461 + return -EINVAL;
462 +diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
463 +index c344483fa7d6..9f257c53e6d4 100644
464 +--- a/drivers/misc/vexpress-syscfg.c
465 ++++ b/drivers/misc/vexpress-syscfg.c
466 +@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
467 + int tries;
468 + long timeout;
469 +
470 +- if (WARN_ON(index > func->num_templates))
471 ++ if (WARN_ON(index >= func->num_templates))
472 + return -EINVAL;
473 +
474 + command = readl(syscfg->base + SYS_CFGCTRL);
475 +diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
476 +index 141bd70a49c2..b9509230ce4d 100644
477 +--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
478 ++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
479 +@@ -168,9 +168,10 @@ int gpmi_init(struct gpmi_nand_data *this)
480 +
481 + /*
482 + * Reset BCH here, too. We got failures otherwise :(
483 +- * See later BCH reset for explanation of MX23 handling
484 ++ * See later BCH reset for explanation of MX23 and MX28 handling
485 + */
486 +- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
487 ++ ret = gpmi_reset_block(r->bch_regs,
488 ++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
489 + if (ret)
490 + goto err_out;
491 +
492 +@@ -275,13 +276,11 @@ int bch_set_geometry(struct gpmi_nand_data *this)
493 +
494 + /*
495 + * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
496 +- * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
497 +- * On the other hand, the MX28 needs the reset, because one case has been
498 +- * seen where the BCH produced ECC errors constantly after 10000
499 +- * consecutive reboots. The latter case has not been seen on the MX23
500 +- * yet, still we don't know if it could happen there as well.
501 ++ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
502 ++ * and MX28.
503 + */
504 +- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
505 ++ ret = gpmi_reset_block(r->bch_regs,
506 ++ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
507 + if (ret)
508 + goto err_out;
509 +
510 +diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
511 +index 8bef27b8f85d..e7b478b49985 100644
512 +--- a/fs/cifs/Kconfig
513 ++++ b/fs/cifs/Kconfig
514 +@@ -111,7 +111,7 @@ config CIFS_XATTR
515 +
516 + config CIFS_POSIX
517 + bool "CIFS POSIX Extensions"
518 +- depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
519 ++ depends on CIFS_XATTR
520 + help
521 + Enabling this option will cause the cifs client to attempt to
522 + negotiate a newer dialect with servers, such as Samba 3.0.5
523 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
524 +index 3d7de9f4f545..77e9cd7a0137 100644
525 +--- a/fs/debugfs/inode.c
526 ++++ b/fs/debugfs/inode.c
527 +@@ -732,6 +732,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
528 + struct dentry *dentry = NULL, *trap;
529 + struct name_snapshot old_name;
530 +
531 ++ if (IS_ERR(old_dir))
532 ++ return old_dir;
533 ++ if (IS_ERR(new_dir))
534 ++ return new_dir;
535 ++ if (IS_ERR_OR_NULL(old_dentry))
536 ++ return old_dentry;
537 ++
538 + trap = lock_rename(new_dir, old_dir);
539 + /* Source or destination directories don't exist? */
540 + if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
541 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
542 +index 12d780718b48..3656f87d11e3 100644
543 +--- a/fs/nfsd/nfs4state.c
544 ++++ b/fs/nfsd/nfs4state.c
545 +@@ -1472,8 +1472,10 @@ free_session_slots(struct nfsd4_session *ses)
546 + {
547 + int i;
548 +
549 +- for (i = 0; i < ses->se_fchannel.maxreqs; i++)
550 ++ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
551 ++ free_svc_cred(&ses->se_slots[i]->sl_cred);
552 + kfree(ses->se_slots[i]);
553 ++ }
554 + }
555 +
556 + /*
557 +@@ -2344,14 +2346,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
558 +
559 + dprintk("--> %s slot %p\n", __func__, slot);
560 +
561 ++ slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
562 + slot->sl_opcnt = resp->opcnt;
563 + slot->sl_status = resp->cstate.status;
564 ++ free_svc_cred(&slot->sl_cred);
565 ++ copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
566 +
567 +- slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
568 +- if (nfsd4_not_cached(resp)) {
569 +- slot->sl_datalen = 0;
570 ++ if (!nfsd4_cache_this(resp)) {
571 ++ slot->sl_flags &= ~NFSD4_SLOT_CACHED;
572 + return;
573 + }
574 ++ slot->sl_flags |= NFSD4_SLOT_CACHED;
575 ++
576 + base = resp->cstate.data_offset;
577 + slot->sl_datalen = buf->len - base;
578 + if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
579 +@@ -2378,8 +2384,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
580 + op = &args->ops[resp->opcnt - 1];
581 + nfsd4_encode_operation(resp, op);
582 +
583 +- /* Return nfserr_retry_uncached_rep in next operation. */
584 +- if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
585 ++ if (slot->sl_flags & NFSD4_SLOT_CACHED)
586 ++ return op->status;
587 ++ if (args->opcnt == 1) {
588 ++ /*
589 ++ * The original operation wasn't a solo sequence--we
590 ++ * always cache those--so this retry must not match the
591 ++ * original:
592 ++ */
593 ++ op->status = nfserr_seq_false_retry;
594 ++ } else {
595 + op = &args->ops[resp->opcnt++];
596 + op->status = nfserr_retry_uncached_rep;
597 + nfsd4_encode_operation(resp, op);
598 +@@ -3039,6 +3053,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
599 + return xb->len > session->se_fchannel.maxreq_sz;
600 + }
601 +
602 ++static bool replay_matches_cache(struct svc_rqst *rqstp,
603 ++ struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
604 ++{
605 ++ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
606 ++
607 ++ if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
608 ++ (bool)seq->cachethis)
609 ++ return false;
610 ++ /*
611 ++ * If there's an error than the reply can have fewer ops than
612 ++ * the call. But if we cached a reply with *more* ops than the
613 ++ * call you're sending us now, then this new call is clearly not
614 ++ * really a replay of the old one:
615 ++ */
616 ++ if (slot->sl_opcnt < argp->opcnt)
617 ++ return false;
618 ++ /* This is the only check explicitly called by spec: */
619 ++ if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
620 ++ return false;
621 ++ /*
622 ++ * There may be more comparisons we could actually do, but the
623 ++ * spec doesn't require us to catch every case where the calls
624 ++ * don't match (that would require caching the call as well as
625 ++ * the reply), so we don't bother.
626 ++ */
627 ++ return true;
628 ++}
629 ++
630 + __be32
631 + nfsd4_sequence(struct svc_rqst *rqstp,
632 + struct nfsd4_compound_state *cstate,
633 +@@ -3098,6 +3140,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
634 + status = nfserr_seq_misordered;
635 + if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
636 + goto out_put_session;
637 ++ status = nfserr_seq_false_retry;
638 ++ if (!replay_matches_cache(rqstp, seq, slot))
639 ++ goto out_put_session;
640 + cstate->slot = slot;
641 + cstate->session = session;
642 + cstate->clp = clp;
643 +diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
644 +index 005c911b34ac..86aa92d200e1 100644
645 +--- a/fs/nfsd/state.h
646 ++++ b/fs/nfsd/state.h
647 +@@ -169,11 +169,13 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
648 + struct nfsd4_slot {
649 + u32 sl_seqid;
650 + __be32 sl_status;
651 ++ struct svc_cred sl_cred;
652 + u32 sl_datalen;
653 + u16 sl_opcnt;
654 + #define NFSD4_SLOT_INUSE (1 << 0)
655 + #define NFSD4_SLOT_CACHETHIS (1 << 1)
656 + #define NFSD4_SLOT_INITIALIZED (1 << 2)
657 ++#define NFSD4_SLOT_CACHED (1 << 3)
658 + u8 sl_flags;
659 + char sl_data[];
660 + };
661 +diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
662 +index 8fda4abdf3b1..448e74e32344 100644
663 +--- a/fs/nfsd/xdr4.h
664 ++++ b/fs/nfsd/xdr4.h
665 +@@ -645,9 +645,18 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
666 + return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
667 + }
668 +
669 +-static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
670 ++/*
671 ++ * The session reply cache only needs to cache replies that the client
672 ++ * actually asked us to. But it's almost free for us to cache compounds
673 ++ * consisting of only a SEQUENCE op, so we may as well cache those too.
674 ++ * Also, the protocol doesn't give us a convenient response in the case
675 ++ * of a replay of a solo SEQUENCE op that wasn't cached
676 ++ * (RETRY_UNCACHED_REP can only be returned in the second op of a
677 ++ * compound).
678 ++ */
679 ++static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp)
680 + {
681 +- return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
682 ++ return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
683 + || nfsd4_is_solo_sequence(resp);
684 + }
685 +
686 +diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
687 +index 8663f216c563..2d6100edf204 100644
688 +--- a/include/linux/hid-debug.h
689 ++++ b/include/linux/hid-debug.h
690 +@@ -24,7 +24,10 @@
691 +
692 + #ifdef CONFIG_DEBUG_FS
693 +
694 ++#include <linux/kfifo.h>
695 ++
696 + #define HID_DEBUG_BUFSIZE 512
697 ++#define HID_DEBUG_FIFOSIZE 512
698 +
699 + void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
700 + void hid_dump_report(struct hid_device *, int , u8 *, int);
701 +@@ -37,11 +40,8 @@ void hid_debug_init(void);
702 + void hid_debug_exit(void);
703 + void hid_debug_event(struct hid_device *, char *);
704 +
705 +-
706 + struct hid_debug_list {
707 +- char *hid_debug_buf;
708 +- int head;
709 +- int tail;
710 ++ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
711 + struct fasync_struct *fasync;
712 + struct hid_device *hdev;
713 + struct list_head node;
714 +@@ -64,4 +64,3 @@ struct hid_debug_list {
715 + #endif
716 +
717 + #endif
718 +-
719 +diff --git a/kernel/signal.c b/kernel/signal.c
720 +index 049929a5f4ce..798b8f495ae2 100644
721 +--- a/kernel/signal.c
722 ++++ b/kernel/signal.c
723 +@@ -696,6 +696,48 @@ static inline bool si_fromuser(const struct siginfo *info)
724 + (!is_si_special(info) && SI_FROMUSER(info));
725 + }
726 +
727 ++static int dequeue_synchronous_signal(siginfo_t *info)
728 ++{
729 ++ struct task_struct *tsk = current;
730 ++ struct sigpending *pending = &tsk->pending;
731 ++ struct sigqueue *q, *sync = NULL;
732 ++
733 ++ /*
734 ++ * Might a synchronous signal be in the queue?
735 ++ */
736 ++ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
737 ++ return 0;
738 ++
739 ++ /*
740 ++ * Return the first synchronous signal in the queue.
741 ++ */
742 ++ list_for_each_entry(q, &pending->list, list) {
743 ++ /* Synchronous signals have a postive si_code */
744 ++ if ((q->info.si_code > SI_USER) &&
745 ++ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
746 ++ sync = q;
747 ++ goto next;
748 ++ }
749 ++ }
750 ++ return 0;
751 ++next:
752 ++ /*
753 ++ * Check if there is another siginfo for the same signal.
754 ++ */
755 ++ list_for_each_entry_continue(q, &pending->list, list) {
756 ++ if (q->info.si_signo == sync->info.si_signo)
757 ++ goto still_pending;
758 ++ }
759 ++
760 ++ sigdelset(&pending->signal, sync->info.si_signo);
761 ++ recalc_sigpending();
762 ++still_pending:
763 ++ list_del_init(&sync->list);
764 ++ copy_siginfo(info, &sync->info);
765 ++ __sigqueue_free(sync);
766 ++ return info->si_signo;
767 ++}
768 ++
769 + /*
770 + * called with RCU read lock from check_kill_permission()
771 + */
772 +@@ -2198,6 +2240,11 @@ relock:
773 + goto relock;
774 + }
775 +
776 ++ /* Has this task already been marked for death? */
777 ++ ksig->info.si_signo = signr = SIGKILL;
778 ++ if (signal_group_exit(signal))
779 ++ goto fatal;
780 ++
781 + for (;;) {
782 + struct k_sigaction *ka;
783 +
784 +@@ -2211,7 +2258,15 @@ relock:
785 + goto relock;
786 + }
787 +
788 +- signr = dequeue_signal(current, &current->blocked, &ksig->info);
789 ++ /*
790 ++ * Signals generated by the execution of an instruction
791 ++ * need to be delivered before any other pending signals
792 ++ * so that the instruction pointer in the signal stack
793 ++ * frame points to the faulting instruction.
794 ++ */
795 ++ signr = dequeue_synchronous_signal(&ksig->info);
796 ++ if (!signr)
797 ++ signr = dequeue_signal(current, &current->blocked, &ksig->info);
798 +
799 + if (!signr)
800 + break; /* will return 0 */
801 +@@ -2293,6 +2348,7 @@ relock:
802 + continue;
803 + }
804 +
805 ++ fatal:
806 + spin_unlock_irq(&sighand->siglock);
807 +
808 + /*
809 +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
810 +index 08ce36147c4c..8f7883b7d717 100644
811 +--- a/net/batman-adv/hard-interface.c
812 ++++ b/net/batman-adv/hard-interface.c
813 +@@ -19,7 +19,6 @@
814 + #include "main.h"
815 +
816 + #include <linux/atomic.h>
817 +-#include <linux/bug.h>
818 + #include <linux/byteorder/generic.h>
819 + #include <linux/errno.h>
820 + #include <linux/fs.h>
821 +@@ -172,8 +171,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
822 + parent_dev = __dev_get_by_index((struct net *)parent_net,
823 + dev_get_iflink(net_dev));
824 + /* if we got a NULL parent_dev there is something broken.. */
825 +- if (WARN(!parent_dev, "Cannot find parent device"))
826 ++ if (!parent_dev) {
827 ++ pr_err("Cannot find parent device\n");
828 + return false;
829 ++ }
830 +
831 + if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
832 + return false;
833 +diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
834 +index 05bc176decf0..835af771a9fd 100644
835 +--- a/net/batman-adv/soft-interface.c
836 ++++ b/net/batman-adv/soft-interface.c
837 +@@ -211,6 +211,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
838 +
839 + netif_trans_update(soft_iface);
840 + vid = batadv_get_vid(skb, 0);
841 ++
842 ++ skb_reset_mac_header(skb);
843 + ethhdr = eth_hdr(skb);
844 +
845 + switch (ntohs(ethhdr->h_proto)) {
846 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
847 +index 5a8075d9f2e7..93eb606f7628 100644
848 +--- a/net/ceph/messenger.c
849 ++++ b/net/ceph/messenger.c
850 +@@ -3186,9 +3186,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
851 + dout("con_keepalive %p\n", con);
852 + mutex_lock(&con->mutex);
853 + clear_standby(con);
854 ++ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
855 + mutex_unlock(&con->mutex);
856 +- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
857 +- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
858 ++
859 ++ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
860 + queue_con(con);
861 + }
862 + EXPORT_SYMBOL(ceph_con_keepalive);
863 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
864 +index 6a0fb9dbc1ba..f8de166b788a 100644
865 +--- a/net/mac80211/tx.c
866 ++++ b/net/mac80211/tx.c
867 +@@ -1852,9 +1852,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
868 + int head_need, bool may_encrypt)
869 + {
870 + struct ieee80211_local *local = sdata->local;
871 ++ struct ieee80211_hdr *hdr;
872 ++ bool enc_tailroom;
873 + int tail_need = 0;
874 +
875 +- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
876 ++ hdr = (struct ieee80211_hdr *) skb->data;
877 ++ enc_tailroom = may_encrypt &&
878 ++ (sdata->crypto_tx_tailroom_needed_cnt ||
879 ++ ieee80211_is_mgmt(hdr->frame_control));
880 ++
881 ++ if (enc_tailroom) {
882 + tail_need = IEEE80211_ENCRYPT_TAILROOM;
883 + tail_need -= skb_tailroom(skb);
884 + tail_need = max_t(int, tail_need, 0);
885 +@@ -1862,8 +1869,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
886 +
887 + if (skb_cloned(skb) &&
888 + (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
889 +- !skb_clone_writable(skb, ETH_HLEN) ||
890 +- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
891 ++ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
892 + I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
893 + else if (head_need || tail_need)
894 + I802_DEBUG_INC(local->tx_expand_skb_head);
895 +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
896 +index 026770884d46..f6f91c3b2de0 100644
897 +--- a/net/xfrm/xfrm_user.c
898 ++++ b/net/xfrm/xfrm_user.c
899 +@@ -1408,10 +1408,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
900 + if (!ut[i].family)
901 + ut[i].family = family;
902 +
903 +- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
904 +- (ut[i].family != prev_family))
905 +- return -EINVAL;
906 +-
907 ++ switch (ut[i].mode) {
908 ++ case XFRM_MODE_TUNNEL:
909 ++ case XFRM_MODE_BEET:
910 ++ break;
911 ++ default:
912 ++ if (ut[i].family != prev_family)
913 ++ return -EINVAL;
914 ++ break;
915 ++ }
916 + if (ut[i].mode >= XFRM_MODE_MAX)
917 + return -EINVAL;
918 +
919 +diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
920 +index 57d0d871dcf7..bb9988914a56 100644
921 +--- a/samples/mei/mei-amt-version.c
922 ++++ b/samples/mei/mei-amt-version.c
923 +@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
924 +
925 + me->verbose = verbose;
926 +
927 +- me->fd = open("/dev/mei", O_RDWR);
928 ++ me->fd = open("/dev/mei0", O_RDWR);
929 + if (me->fd == -1) {
930 + mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
931 + goto err;
932
933 diff --git a/1157_linux-4.9.158.patch b/1157_linux-4.9.158.patch
934 new file mode 100644
935 index 0000000..b38b7c6
936 --- /dev/null
937 +++ b/1157_linux-4.9.158.patch
938 @@ -0,0 +1,34 @@
939 +diff --git a/Makefile b/Makefile
940 +index 4eb7a17e18f1..2b8434aaeece 100644
941 +--- a/Makefile
942 ++++ b/Makefile
943 +@@ -1,6 +1,6 @@
944 + VERSION = 4
945 + PATCHLEVEL = 9
946 +-SUBLEVEL = 157
947 ++SUBLEVEL = 158
948 + EXTRAVERSION =
949 + NAME = Roaring Lionus
950 +
951 +diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
952 +index 634bdbb23851..afdf4e3cafc2 100644
953 +--- a/fs/binfmt_script.c
954 ++++ b/fs/binfmt_script.c
955 +@@ -43,14 +43,10 @@ static int load_script(struct linux_binprm *bprm)
956 + fput(bprm->file);
957 + bprm->file = NULL;
958 +
959 +- for (cp = bprm->buf+2;; cp++) {
960 +- if (cp >= bprm->buf + BINPRM_BUF_SIZE)
961 +- return -ENOEXEC;
962 +- if (!*cp || (*cp == '\n'))
963 +- break;
964 +- }
965 ++ bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
966 ++ if ((cp = strchr(bprm->buf, '\n')) == NULL)
967 ++ cp = bprm->buf+BINPRM_BUF_SIZE-1;
968 + *cp = '\0';
969 +-
970 + while (cp > bprm->buf) {
971 + cp--;
972 + if ((*cp == ' ') || (*cp == '\t'))