Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 21 Nov 2018 15:02:32
Message-Id: 1542812453.a2141b383ab8f5a8830f801ed013ff31aec1034b.mpagano@gentoo
1 commit: a2141b383ab8f5a8830f801ed013ff31aec1034b
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Thu Jul 12 16:21:45 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 21 15:00:53 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a2141b38
7
8 linux kernel 4.4.140
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1138_linux-4.4.140.patch | 1823 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 1827 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index cfb7ea3..73e6c56 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -599,6 +599,10 @@ Patch: 1138_linux-4.4.139.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.4.139
23
24 +Patch: 1139_linux-4.4.140.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.4.140
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1138_linux-4.4.140.patch b/1138_linux-4.4.140.patch
33 new file mode 100644
34 index 0000000..a2e3d0e
35 --- /dev/null
36 +++ b/1138_linux-4.4.140.patch
37 @@ -0,0 +1,1823 @@
38 +diff --git a/Makefile b/Makefile
39 +index 20a11fd36656..b842298a5970 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,6 +1,6 @@
43 + VERSION = 4
44 + PATCHLEVEL = 4
45 +-SUBLEVEL = 139
46 ++SUBLEVEL = 140
47 + EXTRAVERSION =
48 + NAME = Blurry Fish Butt
49 +
50 +diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
51 +index 399103b8e2c9..c81fb8fdc41f 100644
52 +--- a/arch/arm/boot/dts/imx6q.dtsi
53 ++++ b/arch/arm/boot/dts/imx6q.dtsi
54 +@@ -95,7 +95,7 @@
55 + clocks = <&clks IMX6Q_CLK_ECSPI5>,
56 + <&clks IMX6Q_CLK_ECSPI5>;
57 + clock-names = "ipg", "per";
58 +- dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
59 ++ dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
60 + dma-names = "rx", "tx";
61 + status = "disabled";
62 + };
63 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
64 +index 5416d5d68308..4cad1adff16b 100644
65 +--- a/arch/s390/kernel/entry.S
66 ++++ b/arch/s390/kernel/entry.S
67 +@@ -1170,7 +1170,7 @@ cleanup_critical:
68 + jl 0f
69 + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
70 + jl .Lcleanup_load_fpu_regs
71 +-0: BR_EX %r14
72 ++0: BR_EX %r14,%r11
73 +
74 + .align 8
75 + .Lcleanup_table:
76 +@@ -1200,7 +1200,7 @@ cleanup_critical:
77 + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
78 + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
79 + larl %r9,sie_exit # skip forward to sie_exit
80 +- BR_EX %r14
81 ++ BR_EX %r14,%r11
82 + #endif
83 +
84 + .Lcleanup_system_call:
85 +diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
86 +index 6edb9530d7e9..ddc9b8125918 100644
87 +--- a/arch/x86/kernel/cpu/mcheck/mce.c
88 ++++ b/arch/x86/kernel/cpu/mcheck/mce.c
89 +@@ -980,11 +980,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
90 + int i;
91 + int worst = 0;
92 + int severity;
93 ++
94 + /*
95 + * Establish sequential order between the CPUs entering the machine
96 + * check handler.
97 + */
98 +- int order;
99 ++ int order = -1;
100 + /*
101 + * If no_way_out gets set, there is no safe way to recover from this
102 + * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
103 +@@ -1000,7 +1001,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
104 + char *msg = "Unknown";
105 + u64 recover_paddr = ~0ull;
106 + int flags = MF_ACTION_REQUIRED;
107 +- int lmce = 0;
108 ++
109 ++ /*
110 ++ * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
111 ++ * on Intel.
112 ++ */
113 ++ int lmce = 1;
114 +
115 + /* If this CPU is offline, just bail out. */
116 + if (cpu_is_offline(smp_processor_id())) {
117 +@@ -1039,17 +1045,23 @@ void do_machine_check(struct pt_regs *regs, long error_code)
118 + kill_it = 1;
119 +
120 + /*
121 +- * Check if this MCE is signaled to only this logical processor
122 ++ * Check if this MCE is signaled to only this logical processor,
123 ++ * on Intel only.
124 + */
125 +- if (m.mcgstatus & MCG_STATUS_LMCES)
126 +- lmce = 1;
127 +- else {
128 +- /*
129 +- * Go through all the banks in exclusion of the other CPUs.
130 +- * This way we don't report duplicated events on shared banks
131 +- * because the first one to see it will clear it.
132 +- * If this is a Local MCE, then no need to perform rendezvous.
133 +- */
134 ++ if (m.cpuvendor == X86_VENDOR_INTEL)
135 ++ lmce = m.mcgstatus & MCG_STATUS_LMCES;
136 ++
137 ++ /*
138 ++ * Local machine check may already know that we have to panic.
139 ++ * Broadcast machine check begins rendezvous in mce_start()
140 ++ * Go through all banks in exclusion of the other CPUs. This way we
141 ++ * don't report duplicated events on shared banks because the first one
142 ++ * to see it will clear it.
143 ++ */
144 ++ if (lmce) {
145 ++ if (no_way_out)
146 ++ mce_panic("Fatal local machine check", &m, msg);
147 ++ } else {
148 + order = mce_start(&no_way_out);
149 + }
150 +
151 +@@ -1128,12 +1140,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
152 + no_way_out = worst >= MCE_PANIC_SEVERITY;
153 + } else {
154 + /*
155 +- * Local MCE skipped calling mce_reign()
156 +- * If we found a fatal error, we need to panic here.
157 ++ * If there was a fatal machine check we should have
158 ++ * already called mce_panic earlier in this function.
159 ++ * Since we re-read the banks, we might have found
160 ++ * something new. Check again to see if we found a
161 ++ * fatal error. We call "mce_severity()" again to
162 ++ * make sure we have the right "msg".
163 + */
164 +- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
165 +- mce_panic("Machine check from unknown source",
166 +- NULL, NULL);
167 ++ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
168 ++ mce_severity(&m, cfg->tolerant, &msg, true);
169 ++ mce_panic("Local fatal machine check!", &m, msg);
170 ++ }
171 + }
172 +
173 + /*
174 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
175 +index df9be5b91270..1f5c47a49e35 100644
176 +--- a/arch/x86/kernel/kprobes/core.c
177 ++++ b/arch/x86/kernel/kprobes/core.c
178 +@@ -411,25 +411,38 @@ void free_insn_page(void *page)
179 + module_memfree(page);
180 + }
181 +
182 ++/* Prepare reljump right after instruction to boost */
183 ++static void prepare_boost(struct kprobe *p, int length)
184 ++{
185 ++ if (can_boost(p->ainsn.insn, p->addr) &&
186 ++ MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
187 ++ /*
188 ++ * These instructions can be executed directly if it
189 ++ * jumps back to correct address.
190 ++ */
191 ++ synthesize_reljump(p->ainsn.insn + length, p->addr + length);
192 ++ p->ainsn.boostable = 1;
193 ++ } else {
194 ++ p->ainsn.boostable = -1;
195 ++ }
196 ++}
197 ++
198 + static int arch_copy_kprobe(struct kprobe *p)
199 + {
200 +- int ret;
201 ++ int len;
202 +
203 + set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
204 +
205 + /* Copy an instruction with recovering if other optprobe modifies it.*/
206 +- ret = __copy_instruction(p->ainsn.insn, p->addr);
207 +- if (!ret)
208 ++ len = __copy_instruction(p->ainsn.insn, p->addr);
209 ++ if (!len)
210 + return -EINVAL;
211 +
212 + /*
213 + * __copy_instruction can modify the displacement of the instruction,
214 + * but it doesn't affect boostable check.
215 + */
216 +- if (can_boost(p->ainsn.insn, p->addr))
217 +- p->ainsn.boostable = 0;
218 +- else
219 +- p->ainsn.boostable = -1;
220 ++ prepare_boost(p, len);
221 +
222 + set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
223 +
224 +@@ -894,21 +907,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
225 + break;
226 + }
227 +
228 +- if (p->ainsn.boostable == 0) {
229 +- if ((regs->ip > copy_ip) &&
230 +- (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
231 +- /*
232 +- * These instructions can be executed directly if it
233 +- * jumps back to correct address.
234 +- */
235 +- synthesize_reljump((void *)regs->ip,
236 +- (void *)orig_ip + (regs->ip - copy_ip));
237 +- p->ainsn.boostable = 1;
238 +- } else {
239 +- p->ainsn.boostable = -1;
240 +- }
241 +- }
242 +-
243 + regs->ip += orig_ip - copy_ip;
244 +
245 + no_change:
246 +diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
247 +index a744506856b1..88ce150186c6 100644
248 +--- a/arch/x86/lib/cmdline.c
249 ++++ b/arch/x86/lib/cmdline.c
250 +@@ -21,12 +21,14 @@ static inline int myisspace(u8 c)
251 + * @option: option string to look for
252 + *
253 + * Returns the position of that @option (starts counting with 1)
254 +- * or 0 on not found.
255 ++ * or 0 on not found. @option will only be found if it is found
256 ++ * as an entire word in @cmdline. For instance, if @option="car"
257 ++ * then a cmdline which contains "cart" will not match.
258 + */
259 + int cmdline_find_option_bool(const char *cmdline, const char *option)
260 + {
261 + char c;
262 +- int len, pos = 0, wstart = 0;
263 ++ int pos = 0, wstart = 0;
264 + const char *opptr = NULL;
265 + enum {
266 + st_wordstart = 0, /* Start of word/after whitespace */
267 +@@ -37,11 +39,14 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
268 + if (!cmdline)
269 + return -1; /* No command line */
270 +
271 +- len = min_t(int, strlen(cmdline), COMMAND_LINE_SIZE);
272 +- if (!len)
273 ++ if (!strlen(cmdline))
274 + return 0;
275 +
276 +- while (len--) {
277 ++ /*
278 ++ * This 'pos' check ensures we do not overrun
279 ++ * a non-NULL-terminated 'cmdline'
280 ++ */
281 ++ while (pos < COMMAND_LINE_SIZE) {
282 + c = *(char *)cmdline++;
283 + pos++;
284 +
285 +@@ -58,17 +63,26 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
286 + /* fall through */
287 +
288 + case st_wordcmp:
289 +- if (!*opptr)
290 ++ if (!*opptr) {
291 ++ /*
292 ++ * We matched all the way to the end of the
293 ++ * option we were looking for. If the
294 ++ * command-line has a space _or_ ends, then
295 ++ * we matched!
296 ++ */
297 + if (!c || myisspace(c))
298 + return wstart;
299 + else
300 + state = st_wordskip;
301 +- else if (!c)
302 ++ } else if (!c) {
303 ++ /*
304 ++ * Hit the NULL terminator on the end of
305 ++ * cmdline.
306 ++ */
307 + return 0;
308 +- else if (c != *opptr++)
309 ++ } else if (c != *opptr++) {
310 + state = st_wordskip;
311 +- else if (!len) /* last word and is matching */
312 +- return wstart;
313 ++ }
314 + break;
315 +
316 + case st_wordskip:
317 +diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
318 +index 5578c1477ba6..8bfd4fd7e9ec 100644
319 +--- a/drivers/block/drbd/drbd_worker.c
320 ++++ b/drivers/block/drbd/drbd_worker.c
321 +@@ -256,8 +256,8 @@ void drbd_request_endio(struct bio *bio)
322 + } else
323 + what = COMPLETED_OK;
324 +
325 +- bio_put(req->private_bio);
326 + req->private_bio = ERR_PTR(bio->bi_error);
327 ++ bio_put(bio);
328 +
329 + /* not req_mod(), we need irqsave here! */
330 + spin_lock_irqsave(&device->resource->req_lock, flags);
331 +diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
332 +index 2886b645ced7..6c60f4b63d21 100644
333 +--- a/drivers/hid/hid-debug.c
334 ++++ b/drivers/hid/hid-debug.c
335 +@@ -1152,6 +1152,8 @@ copy_rest:
336 + goto out;
337 + if (list->tail > list->head) {
338 + len = list->tail - list->head;
339 ++ if (len > count)
340 ++ len = count;
341 +
342 + if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
343 + ret = -EFAULT;
344 +@@ -1161,6 +1163,8 @@ copy_rest:
345 + list->head += len;
346 + } else {
347 + len = HID_DEBUG_BUFSIZE - list->head;
348 ++ if (len > count)
349 ++ len = count;
350 +
351 + if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
352 + ret = -EFAULT;
353 +@@ -1168,7 +1172,9 @@ copy_rest:
354 + }
355 + list->head = 0;
356 + ret += len;
357 +- goto copy_rest;
358 ++ count -= len;
359 ++ if (count > 0)
360 ++ goto copy_rest;
361 + }
362 +
363 + }
364 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
365 +index 4c3ed078c6b9..a5fed668fde1 100644
366 +--- a/drivers/hid/i2c-hid/i2c-hid.c
367 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
368 +@@ -413,7 +413,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
369 + return;
370 + }
371 +
372 +- if ((ret_size > size) || (ret_size <= 2)) {
373 ++ if ((ret_size > size) || (ret_size < 2)) {
374 + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
375 + __func__, size, ret_size);
376 + return;
377 +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
378 +index 700145b15088..b59b15d4caa9 100644
379 +--- a/drivers/hid/usbhid/hiddev.c
380 ++++ b/drivers/hid/usbhid/hiddev.c
381 +@@ -35,6 +35,7 @@
382 + #include <linux/hiddev.h>
383 + #include <linux/compat.h>
384 + #include <linux/vmalloc.h>
385 ++#include <linux/nospec.h>
386 + #include "usbhid.h"
387 +
388 + #ifdef CONFIG_USB_DYNAMIC_MINORS
389 +@@ -478,10 +479,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
390 +
391 + if (uref->field_index >= report->maxfield)
392 + goto inval;
393 ++ uref->field_index = array_index_nospec(uref->field_index,
394 ++ report->maxfield);
395 +
396 + field = report->field[uref->field_index];
397 + if (uref->usage_index >= field->maxusage)
398 + goto inval;
399 ++ uref->usage_index = array_index_nospec(uref->usage_index,
400 ++ field->maxusage);
401 +
402 + uref->usage_code = field->usage[uref->usage_index].hid;
403 +
404 +@@ -508,6 +513,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
405 +
406 + if (uref->field_index >= report->maxfield)
407 + goto inval;
408 ++ uref->field_index = array_index_nospec(uref->field_index,
409 ++ report->maxfield);
410 +
411 + field = report->field[uref->field_index];
412 +
413 +@@ -761,6 +768,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
414 +
415 + if (finfo.field_index >= report->maxfield)
416 + break;
417 ++ finfo.field_index = array_index_nospec(finfo.field_index,
418 ++ report->maxfield);
419 +
420 + field = report->field[finfo.field_index];
421 + memset(&finfo, 0, sizeof(finfo));
422 +@@ -801,6 +810,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
423 +
424 + if (cinfo.index >= hid->maxcollection)
425 + break;
426 ++ cinfo.index = array_index_nospec(cinfo.index,
427 ++ hid->maxcollection);
428 +
429 + cinfo.type = hid->collection[cinfo.index].type;
430 + cinfo.usage = hid->collection[cinfo.index].usage;
431 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
432 +index 6f89484765e3..dfe1a53ce4ad 100644
433 +--- a/drivers/i2c/busses/i2c-rcar.c
434 ++++ b/drivers/i2c/busses/i2c-rcar.c
435 +@@ -484,6 +484,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
436 +
437 + pm_runtime_get_sync(dev);
438 +
439 ++ rcar_i2c_init(priv);
440 ++
441 + ret = rcar_i2c_bus_barrier(priv);
442 + if (ret < 0)
443 + goto out;
444 +@@ -624,7 +626,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
445 + if (ret < 0)
446 + goto out_pm_put;
447 +
448 +- rcar_i2c_init(priv);
449 + pm_runtime_put(dev);
450 +
451 + irq = platform_get_irq(pdev, 0);
452 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
453 +index 969c815c90b6..b1d5fa0bc8f7 100644
454 +--- a/drivers/md/dm-bufio.c
455 ++++ b/drivers/md/dm-bufio.c
456 +@@ -813,12 +813,14 @@ enum new_flag {
457 + static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
458 + {
459 + struct dm_buffer *b;
460 ++ bool tried_noio_alloc = false;
461 +
462 + /*
463 + * dm-bufio is resistant to allocation failures (it just keeps
464 + * one buffer reserved in cases all the allocations fail).
465 + * So set flags to not try too hard:
466 +- * GFP_NOIO: don't recurse into the I/O layer
467 ++ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
468 ++ * mutex and wait ourselves.
469 + * __GFP_NORETRY: don't retry and rather return failure
470 + * __GFP_NOMEMALLOC: don't use emergency reserves
471 + * __GFP_NOWARN: don't print a warning in case of failure
472 +@@ -828,7 +830,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
473 + */
474 + while (1) {
475 + if (dm_bufio_cache_size_latch != 1) {
476 +- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
477 ++ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
478 + if (b)
479 + return b;
480 + }
481 +@@ -836,6 +838,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
482 + if (nf == NF_PREFETCH)
483 + return NULL;
484 +
485 ++ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
486 ++ dm_bufio_unlock(c);
487 ++ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
488 ++ dm_bufio_lock(c);
489 ++ if (b)
490 ++ return b;
491 ++ tried_noio_alloc = true;
492 ++ }
493 ++
494 + if (!list_empty(&c->reserved_buffers)) {
495 + b = list_entry(c->reserved_buffers.next,
496 + struct dm_buffer, lru_list);
497 +@@ -1563,19 +1574,11 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
498 + static unsigned long
499 + dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
500 + {
501 +- struct dm_bufio_client *c;
502 +- unsigned long count;
503 +- unsigned long retain_target;
504 +-
505 +- c = container_of(shrink, struct dm_bufio_client, shrinker);
506 +- if (sc->gfp_mask & __GFP_FS)
507 +- dm_bufio_lock(c);
508 +- else if (!dm_bufio_trylock(c))
509 +- return 0;
510 ++ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
511 ++ unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
512 ++ READ_ONCE(c->n_buffers[LIST_DIRTY]);
513 ++ unsigned long retain_target = get_retain_buffers(c);
514 +
515 +- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
516 +- retain_target = get_retain_buffers(c);
517 +- dm_bufio_unlock(c);
518 + return (count < retain_target) ? 0 : (count - retain_target);
519 + }
520 +
521 +diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
522 +index a47ab1947cc4..17d217c3585a 100644
523 +--- a/drivers/media/i2c/cx25840/cx25840-core.c
524 ++++ b/drivers/media/i2c/cx25840/cx25840-core.c
525 +@@ -467,8 +467,13 @@ static void cx23885_initialize(struct i2c_client *client)
526 + {
527 + DEFINE_WAIT(wait);
528 + struct cx25840_state *state = to_state(i2c_get_clientdata(client));
529 ++ u32 clk_freq = 0;
530 + struct workqueue_struct *q;
531 +
532 ++ /* cx23885 sets hostdata to clk_freq pointer */
533 ++ if (v4l2_get_subdev_hostdata(&state->sd))
534 ++ clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd));
535 ++
536 + /*
537 + * Come out of digital power down
538 + * The CX23888, at least, needs this, otherwise registers aside from
539 +@@ -504,8 +509,13 @@ static void cx23885_initialize(struct i2c_client *client)
540 + * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
541 + * 572.73 MHz before post divide
542 + */
543 +- /* HVR1850 or 50MHz xtal */
544 +- cx25840_write(client, 0x2, 0x71);
545 ++ if (clk_freq == 25000000) {
546 ++ /* 888/ImpactVCBe or 25MHz xtal */
547 ++ ; /* nothing to do */
548 ++ } else {
549 ++ /* HVR1850 or 50MHz xtal */
550 ++ cx25840_write(client, 0x2, 0x71);
551 ++ }
552 + cx25840_write4(client, 0x11c, 0x01d1744c);
553 + cx25840_write4(client, 0x118, 0x00000416);
554 + cx25840_write4(client, 0x404, 0x0010253e);
555 +@@ -548,9 +558,15 @@ static void cx23885_initialize(struct i2c_client *client)
556 + /* HVR1850 */
557 + switch (state->id) {
558 + case CX23888_AV:
559 +- /* 888/HVR1250 specific */
560 +- cx25840_write4(client, 0x10c, 0x13333333);
561 +- cx25840_write4(client, 0x108, 0x00000515);
562 ++ if (clk_freq == 25000000) {
563 ++ /* 888/ImpactVCBe or 25MHz xtal */
564 ++ cx25840_write4(client, 0x10c, 0x01b6db7b);
565 ++ cx25840_write4(client, 0x108, 0x00000512);
566 ++ } else {
567 ++ /* 888/HVR1250 or 50MHz xtal */
568 ++ cx25840_write4(client, 0x10c, 0x13333333);
569 ++ cx25840_write4(client, 0x108, 0x00000515);
570 ++ }
571 + break;
572 + default:
573 + cx25840_write4(client, 0x10c, 0x002be2c9);
574 +@@ -577,7 +593,7 @@ static void cx23885_initialize(struct i2c_client *client)
575 + * 368.64 MHz before post divide
576 + * 122.88 MHz / 0xa = 12.288 MHz
577 + */
578 +- /* HVR1850 or 50MHz xtal */
579 ++ /* HVR1850 or 50MHz xtal or 25MHz xtal */
580 + cx25840_write4(client, 0x114, 0x017dbf48);
581 + cx25840_write4(client, 0x110, 0x000a030e);
582 + break;
583 +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
584 +index c484ca8c909c..fb5a3052f144 100644
585 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c
586 ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
587 +@@ -42,7 +42,7 @@
588 + #define AMD_BOOTLOC_BUG
589 + #define FORCE_WORD_WRITE 0
590 +
591 +-#define MAX_WORD_RETRIES 3
592 ++#define MAX_RETRIES 3
593 +
594 + #define SST49LF004B 0x0060
595 + #define SST49LF040B 0x0050
596 +@@ -1645,7 +1645,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
597 + map_write( map, CMD(0xF0), chip->start );
598 + /* FIXME - should have reset delay before continuing */
599 +
600 +- if (++retry_cnt <= MAX_WORD_RETRIES)
601 ++ if (++retry_cnt <= MAX_RETRIES)
602 + goto retry;
603 +
604 + ret = -EIO;
605 +@@ -2104,7 +2104,7 @@ retry:
606 + map_write(map, CMD(0xF0), chip->start);
607 + /* FIXME - should have reset delay before continuing */
608 +
609 +- if (++retry_cnt <= MAX_WORD_RETRIES)
610 ++ if (++retry_cnt <= MAX_RETRIES)
611 + goto retry;
612 +
613 + ret = -EIO;
614 +@@ -2239,6 +2239,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
615 + unsigned long int adr;
616 + DECLARE_WAITQUEUE(wait, current);
617 + int ret = 0;
618 ++ int retry_cnt = 0;
619 +
620 + adr = cfi->addr_unlock1;
621 +
622 +@@ -2256,6 +2257,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
623 + ENABLE_VPP(map);
624 + xip_disable(map, chip, adr);
625 +
626 ++ retry:
627 + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
628 + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
629 + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
630 +@@ -2292,12 +2294,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
631 + chip->erase_suspended = 0;
632 + }
633 +
634 +- if (chip_ready(map, adr))
635 ++ if (chip_good(map, adr, map_word_ff(map)))
636 + break;
637 +
638 + if (time_after(jiffies, timeo)) {
639 + printk(KERN_WARNING "MTD %s(): software timeout\n",
640 + __func__ );
641 ++ ret = -EIO;
642 + break;
643 + }
644 +
645 +@@ -2305,12 +2308,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
646 + UDELAY(map, chip, adr, 1000000/HZ);
647 + }
648 + /* Did we succeed? */
649 +- if (!chip_good(map, adr, map_word_ff(map))) {
650 ++ if (ret) {
651 + /* reset on all failures. */
652 + map_write( map, CMD(0xF0), chip->start );
653 + /* FIXME - should have reset delay before continuing */
654 +
655 +- ret = -EIO;
656 ++ if (++retry_cnt <= MAX_RETRIES) {
657 ++ ret = 0;
658 ++ goto retry;
659 ++ }
660 + }
661 +
662 + chip->state = FL_READY;
663 +@@ -2329,6 +2335,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
664 + unsigned long timeo = jiffies + HZ;
665 + DECLARE_WAITQUEUE(wait, current);
666 + int ret = 0;
667 ++ int retry_cnt = 0;
668 +
669 + adr += chip->start;
670 +
671 +@@ -2346,6 +2353,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
672 + ENABLE_VPP(map);
673 + xip_disable(map, chip, adr);
674 +
675 ++ retry:
676 + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
677 + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
678 + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
679 +@@ -2382,7 +2390,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
680 + chip->erase_suspended = 0;
681 + }
682 +
683 +- if (chip_ready(map, adr)) {
684 ++ if (chip_good(map, adr, map_word_ff(map))) {
685 + xip_enable(map, chip, adr);
686 + break;
687 + }
688 +@@ -2391,6 +2399,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
689 + xip_enable(map, chip, adr);
690 + printk(KERN_WARNING "MTD %s(): software timeout\n",
691 + __func__ );
692 ++ ret = -EIO;
693 + break;
694 + }
695 +
696 +@@ -2398,12 +2407,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
697 + UDELAY(map, chip, adr, 1000000/HZ);
698 + }
699 + /* Did we succeed? */
700 +- if (!chip_good(map, adr, map_word_ff(map))) {
701 ++ if (ret) {
702 + /* reset on all failures. */
703 + map_write( map, CMD(0xF0), chip->start );
704 + /* FIXME - should have reset delay before continuing */
705 +
706 +- ret = -EIO;
707 ++ if (++retry_cnt <= MAX_RETRIES) {
708 ++ ret = 0;
709 ++ goto retry;
710 ++ }
711 + }
712 +
713 + chip->state = FL_READY;
714 +diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
715 +index 136e73a3e07e..53fe795fd716 100644
716 +--- a/drivers/mtd/nand/mxc_nand.c
717 ++++ b/drivers/mtd/nand/mxc_nand.c
718 +@@ -49,7 +49,7 @@
719 + #define NFC_V1_V2_CONFIG (host->regs + 0x0a)
720 + #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
721 + #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
722 +-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
723 ++#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
724 + #define NFC_V1_V2_WRPROT (host->regs + 0x12)
725 + #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
726 + #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
727 +@@ -1034,6 +1034,9 @@ static void preset_v2(struct mtd_info *mtd)
728 + writew(config1, NFC_V1_V2_CONFIG1);
729 + /* preset operation */
730 +
731 ++ /* spare area size in 16-bit half-words */
732 ++ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
733 ++
734 + /* Unlock the internal RAM Buffer */
735 + writew(0x2, NFC_V1_V2_CONFIG);
736 +
737 +diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
738 +index 4dd0391d2942..c4a25c858c07 100644
739 +--- a/drivers/mtd/ubi/eba.c
740 ++++ b/drivers/mtd/ubi/eba.c
741 +@@ -350,6 +350,82 @@ out_unlock:
742 + return err;
743 + }
744 +
745 ++#ifdef CONFIG_MTD_UBI_FASTMAP
746 ++/**
747 ++ * check_mapping - check and fixup a mapping
748 ++ * @ubi: UBI device description object
749 ++ * @vol: volume description object
750 ++ * @lnum: logical eraseblock number
751 ++ * @pnum: physical eraseblock number
752 ++ *
753 ++ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
754 ++ * operations, if such an operation is interrupted the mapping still looks
755 ++ * good, but upon first read an ECC is reported to the upper layer.
756 ++ * Normally during the full-scan at attach time this is fixed, for Fastmap
757 ++ * we have to deal with it while reading.
758 ++ * If the PEB behind a LEB shows this symptom we change the mapping to
759 ++ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
760 ++ *
761 ++ * Returns 0 on success, negative error code in case of failure.
762 ++ */
763 ++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
764 ++ int *pnum)
765 ++{
766 ++ int err;
767 ++ struct ubi_vid_hdr *vid_hdr;
768 ++
769 ++ if (!ubi->fast_attach)
770 ++ return 0;
771 ++
772 ++ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
773 ++ if (!vid_hdr)
774 ++ return -ENOMEM;
775 ++
776 ++ err = ubi_io_read_vid_hdr(ubi, *pnum, vid_hdr, 0);
777 ++ if (err > 0 && err != UBI_IO_BITFLIPS) {
778 ++ int torture = 0;
779 ++
780 ++ switch (err) {
781 ++ case UBI_IO_FF:
782 ++ case UBI_IO_FF_BITFLIPS:
783 ++ case UBI_IO_BAD_HDR:
784 ++ case UBI_IO_BAD_HDR_EBADMSG:
785 ++ break;
786 ++ default:
787 ++ ubi_assert(0);
788 ++ }
789 ++
790 ++ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
791 ++ torture = 1;
792 ++
793 ++ down_read(&ubi->fm_eba_sem);
794 ++ vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
795 ++ up_read(&ubi->fm_eba_sem);
796 ++ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
797 ++
798 ++ *pnum = UBI_LEB_UNMAPPED;
799 ++ } else if (err < 0) {
800 ++ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
801 ++ *pnum, err);
802 ++
803 ++ goto out_free;
804 ++ }
805 ++
806 ++ err = 0;
807 ++
808 ++out_free:
809 ++ ubi_free_vid_hdr(ubi, vid_hdr);
810 ++
811 ++ return err;
812 ++}
813 ++#else
814 ++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
815 ++ int *pnum)
816 ++{
817 ++ return 0;
818 ++}
819 ++#endif
820 ++
821 + /**
822 + * ubi_eba_read_leb - read data.
823 + * @ubi: UBI device description object
824 +@@ -381,7 +457,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
825 + return err;
826 +
827 + pnum = vol->eba_tbl[lnum];
828 +- if (pnum < 0) {
829 ++ if (pnum >= 0) {
830 ++ err = check_mapping(ubi, vol, lnum, &pnum);
831 ++ if (err < 0)
832 ++ goto out_unlock;
833 ++ }
834 ++
835 ++ if (pnum == UBI_LEB_UNMAPPED) {
836 + /*
837 + * The logical eraseblock is not mapped, fill the whole buffer
838 + * with 0xFF bytes. The exception is static volumes for which
839 +@@ -696,6 +778,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
840 + return err;
841 +
842 + pnum = vol->eba_tbl[lnum];
843 ++ if (pnum >= 0) {
844 ++ err = check_mapping(ubi, vol, lnum, &pnum);
845 ++ if (err < 0) {
846 ++ leb_write_unlock(ubi, vol_id, lnum);
847 ++ return err;
848 ++ }
849 ++ }
850 ++
851 + if (pnum >= 0) {
852 + dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
853 + len, offset, vol_id, lnum, pnum);
854 +diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
855 +index d26cb37b1fbd..b32c47fe926d 100644
856 +--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
857 ++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
858 +@@ -1166,6 +1166,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
859 + size_t hdr_len, crypto_len;
860 + void *rfc1042;
861 + bool is_first, is_last, is_amsdu;
862 ++ int bytes_aligned = ar->hw_params.decap_align_bytes;
863 +
864 + rxd = (void *)msdu->data - sizeof(*rxd);
865 + hdr = (void *)rxd->rx_hdr_status;
866 +@@ -1182,8 +1183,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
867 + hdr_len = ieee80211_hdrlen(hdr->frame_control);
868 + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
869 +
870 +- rfc1042 += round_up(hdr_len, 4) +
871 +- round_up(crypto_len, 4);
872 ++ rfc1042 += round_up(hdr_len, bytes_aligned) +
873 ++ round_up(crypto_len, bytes_aligned);
874 + }
875 +
876 + if (is_amsdu)
877 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
878 +index e86fcc9e9852..01f47b68b6e7 100644
879 +--- a/drivers/nvme/host/pci.c
880 ++++ b/drivers/nvme/host/pci.c
881 +@@ -1589,11 +1589,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
882 + if (result < 0)
883 + goto release_cq;
884 +
885 ++ nvme_init_queue(nvmeq, qid);
886 + result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
887 + if (result < 0)
888 + goto release_sq;
889 +
890 +- nvme_init_queue(nvmeq, qid);
891 + return result;
892 +
893 + release_sq:
894 +@@ -1797,6 +1797,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
895 + goto free_nvmeq;
896 +
897 + nvmeq->cq_vector = 0;
898 ++ nvme_init_queue(nvmeq, 0);
899 + result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
900 + if (result) {
901 + nvmeq->cq_vector = -1;
902 +@@ -3165,7 +3166,6 @@ static void nvme_probe_work(struct work_struct *work)
903 + goto disable;
904 + }
905 +
906 +- nvme_init_queue(dev->queues[0], 0);
907 + result = nvme_alloc_admin_tags(dev);
908 + if (result)
909 + goto disable;
910 +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
911 +index 841f3fbec77c..4302880a20b3 100644
912 +--- a/drivers/scsi/sg.c
913 ++++ b/drivers/scsi/sg.c
914 +@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
915 + #include <linux/atomic.h>
916 + #include <linux/ratelimit.h>
917 + #include <linux/uio.h>
918 ++#include <linux/cred.h> /* for sg_check_file_access() */
919 +
920 + #include "scsi.h"
921 + #include <scsi/scsi_dbg.h>
922 +@@ -221,6 +222,33 @@ static void sg_device_destroy(struct kref *kref);
923 + sdev_prefix_printk(prefix, (sdp)->device, \
924 + (sdp)->disk->disk_name, fmt, ##a)
925 +
926 ++/*
927 ++ * The SCSI interfaces that use read() and write() as an asynchronous variant of
928 ++ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
929 ++ * to trigger read() and write() calls from various contexts with elevated
930 ++ * privileges. This can lead to kernel memory corruption (e.g. if these
931 ++ * interfaces are called through splice()) and privilege escalation inside
932 ++ * userspace (e.g. if a process with access to such a device passes a file
933 ++ * descriptor to a SUID binary as stdin/stdout/stderr).
934 ++ *
935 ++ * This function provides protection for the legacy API by restricting the
936 ++ * calling context.
937 ++ */
938 ++static int sg_check_file_access(struct file *filp, const char *caller)
939 ++{
940 ++ if (filp->f_cred != current_real_cred()) {
941 ++ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
942 ++ caller, task_tgid_vnr(current), current->comm);
943 ++ return -EPERM;
944 ++ }
945 ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
946 ++ pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
947 ++ caller, task_tgid_vnr(current), current->comm);
948 ++ return -EACCES;
949 ++ }
950 ++ return 0;
951 ++}
952 ++
953 + static int sg_allow_access(struct file *filp, unsigned char *cmd)
954 + {
955 + struct sg_fd *sfp = filp->private_data;
956 +@@ -405,6 +433,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
957 + struct sg_header *old_hdr = NULL;
958 + int retval = 0;
959 +
960 ++ /*
961 ++ * This could cause a response to be stranded. Close the associated
962 ++ * file descriptor to free up any resources being held.
963 ++ */
964 ++ retval = sg_check_file_access(filp, __func__);
965 ++ if (retval)
966 ++ return retval;
967 ++
968 + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
969 + return -ENXIO;
970 + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
971 +@@ -592,9 +628,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
972 + struct sg_header old_hdr;
973 + sg_io_hdr_t *hp;
974 + unsigned char cmnd[SG_MAX_CDB_SIZE];
975 ++ int retval;
976 +
977 +- if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
978 +- return -EINVAL;
979 ++ retval = sg_check_file_access(filp, __func__);
980 ++ if (retval)
981 ++ return retval;
982 +
983 + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
984 + return -ENXIO;
985 +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
986 +index ca15a87f6fd3..13a9b4c42b26 100644
987 +--- a/drivers/staging/android/ion/ion_heap.c
988 ++++ b/drivers/staging/android/ion/ion_heap.c
989 +@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
990 + struct page **tmp = pages;
991 +
992 + if (!pages)
993 +- return NULL;
994 ++ return ERR_PTR(-ENOMEM);
995 +
996 + if (buffer->flags & ION_FLAG_CACHED)
997 + pgprot = PAGE_KERNEL;
998 +diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
999 +index e9e43139157d..769a94015117 100644
1000 +--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
1001 ++++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
1002 +@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
1003 + /* Make sure D/A update mode is direct update */
1004 + outb(0, dev->iobase + DAQP_AUX_REG);
1005 +
1006 +- for (i = 0; i > insn->n; i++) {
1007 ++ for (i = 0; i < insn->n; i++) {
1008 + unsigned val = data[i];
1009 + int ret;
1010 +
1011 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1012 +index 190e5dc15738..b1ec202099b2 100644
1013 +--- a/drivers/tty/n_tty.c
1014 ++++ b/drivers/tty/n_tty.c
1015 +@@ -128,6 +128,8 @@ struct n_tty_data {
1016 + struct mutex output_lock;
1017 + };
1018 +
1019 ++#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
1020 ++
1021 + static inline size_t read_cnt(struct n_tty_data *ldata)
1022 + {
1023 + return ldata->read_head - ldata->read_tail;
1024 +@@ -145,6 +147,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
1025 +
1026 + static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
1027 + {
1028 ++ smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
1029 + return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
1030 + }
1031 +
1032 +@@ -322,9 +325,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
1033 + static void reset_buffer_flags(struct n_tty_data *ldata)
1034 + {
1035 + ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
1036 +- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
1037 + ldata->commit_head = 0;
1038 +- ldata->echo_mark = 0;
1039 + ldata->line_start = 0;
1040 +
1041 + ldata->erasing = 0;
1042 +@@ -645,12 +646,19 @@ static size_t __process_echoes(struct tty_struct *tty)
1043 + old_space = space = tty_write_room(tty);
1044 +
1045 + tail = ldata->echo_tail;
1046 +- while (ldata->echo_commit != tail) {
1047 ++ while (MASK(ldata->echo_commit) != MASK(tail)) {
1048 + c = echo_buf(ldata, tail);
1049 + if (c == ECHO_OP_START) {
1050 + unsigned char op;
1051 + int no_space_left = 0;
1052 +
1053 ++ /*
1054 ++ * Since add_echo_byte() is called without holding
1055 ++ * output_lock, we might see only portion of multi-byte
1056 ++ * operation.
1057 ++ */
1058 ++ if (MASK(ldata->echo_commit) == MASK(tail + 1))
1059 ++ goto not_yet_stored;
1060 + /*
1061 + * If the buffer byte is the start of a multi-byte
1062 + * operation, get the next byte, which is either the
1063 +@@ -662,6 +670,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1064 + unsigned int num_chars, num_bs;
1065 +
1066 + case ECHO_OP_ERASE_TAB:
1067 ++ if (MASK(ldata->echo_commit) == MASK(tail + 2))
1068 ++ goto not_yet_stored;
1069 + num_chars = echo_buf(ldata, tail + 2);
1070 +
1071 + /*
1072 +@@ -756,7 +766,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1073 + /* If the echo buffer is nearly full (so that the possibility exists
1074 + * of echo overrun before the next commit), then discard enough
1075 + * data at the tail to prevent a subsequent overrun */
1076 +- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1077 ++ while (ldata->echo_commit > tail &&
1078 ++ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1079 + if (echo_buf(ldata, tail) == ECHO_OP_START) {
1080 + if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
1081 + tail += 3;
1082 +@@ -766,6 +777,7 @@ static size_t __process_echoes(struct tty_struct *tty)
1083 + tail++;
1084 + }
1085 +
1086 ++ not_yet_stored:
1087 + ldata->echo_tail = tail;
1088 + return old_space - space;
1089 + }
1090 +@@ -776,6 +788,7 @@ static void commit_echoes(struct tty_struct *tty)
1091 + size_t nr, old, echoed;
1092 + size_t head;
1093 +
1094 ++ mutex_lock(&ldata->output_lock);
1095 + head = ldata->echo_head;
1096 + ldata->echo_mark = head;
1097 + old = ldata->echo_commit - ldata->echo_tail;
1098 +@@ -784,10 +797,12 @@ static void commit_echoes(struct tty_struct *tty)
1099 + * is over the threshold (and try again each time another
1100 + * block is accumulated) */
1101 + nr = head - ldata->echo_tail;
1102 +- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
1103 ++ if (nr < ECHO_COMMIT_WATERMARK ||
1104 ++ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
1105 ++ mutex_unlock(&ldata->output_lock);
1106 + return;
1107 ++ }
1108 +
1109 +- mutex_lock(&ldata->output_lock);
1110 + ldata->echo_commit = head;
1111 + echoed = __process_echoes(tty);
1112 + mutex_unlock(&ldata->output_lock);
1113 +@@ -838,7 +853,9 @@ static void flush_echoes(struct tty_struct *tty)
1114 +
1115 + static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
1116 + {
1117 +- *echo_buf_addr(ldata, ldata->echo_head++) = c;
1118 ++ *echo_buf_addr(ldata, ldata->echo_head) = c;
1119 ++ smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
1120 ++ ldata->echo_head++;
1121 + }
1122 +
1123 + /**
1124 +@@ -1006,14 +1023,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1125 + }
1126 +
1127 + seen_alnums = 0;
1128 +- while (ldata->read_head != ldata->canon_head) {
1129 ++ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
1130 + head = ldata->read_head;
1131 +
1132 + /* erase a single possibly multibyte character */
1133 + do {
1134 + head--;
1135 + c = read_buf(ldata, head);
1136 +- } while (is_continuation(c, tty) && head != ldata->canon_head);
1137 ++ } while (is_continuation(c, tty) &&
1138 ++ MASK(head) != MASK(ldata->canon_head));
1139 +
1140 + /* do not partially erase */
1141 + if (is_continuation(c, tty))
1142 +@@ -1055,7 +1073,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1143 + * This info is used to go back the correct
1144 + * number of columns.
1145 + */
1146 +- while (tail != ldata->canon_head) {
1147 ++ while (MASK(tail) != MASK(ldata->canon_head)) {
1148 + tail--;
1149 + c = read_buf(ldata, tail);
1150 + if (c == '\t') {
1151 +@@ -1332,7 +1350,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1152 + finish_erasing(ldata);
1153 + echo_char(c, tty);
1154 + echo_char_raw('\n', ldata);
1155 +- while (tail != ldata->read_head) {
1156 ++ while (MASK(tail) != MASK(ldata->read_head)) {
1157 + echo_char(read_buf(ldata, tail), tty);
1158 + tail++;
1159 + }
1160 +@@ -1917,31 +1935,22 @@ static int n_tty_open(struct tty_struct *tty)
1161 + struct n_tty_data *ldata;
1162 +
1163 + /* Currently a malloc failure here can panic */
1164 +- ldata = vmalloc(sizeof(*ldata));
1165 ++ ldata = vzalloc(sizeof(*ldata));
1166 + if (!ldata)
1167 +- goto err;
1168 ++ return -ENOMEM;
1169 +
1170 + ldata->overrun_time = jiffies;
1171 + mutex_init(&ldata->atomic_read_lock);
1172 + mutex_init(&ldata->output_lock);
1173 +
1174 + tty->disc_data = ldata;
1175 +- reset_buffer_flags(tty->disc_data);
1176 +- ldata->column = 0;
1177 +- ldata->canon_column = 0;
1178 + ldata->minimum_to_wake = 1;
1179 +- ldata->num_overrun = 0;
1180 +- ldata->no_room = 0;
1181 +- ldata->lnext = 0;
1182 + tty->closing = 0;
1183 + /* indicate buffer work may resume */
1184 + clear_bit(TTY_LDISC_HALTED, &tty->flags);
1185 + n_tty_set_termios(tty, NULL);
1186 + tty_unthrottle(tty);
1187 +-
1188 + return 0;
1189 +-err:
1190 +- return -ENOMEM;
1191 + }
1192 +
1193 + static inline int input_available_p(struct tty_struct *tty, int poll)
1194 +@@ -2479,7 +2488,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
1195 + tail = ldata->read_tail;
1196 + nr = head - tail;
1197 + /* Skip EOF-chars.. */
1198 +- while (head != tail) {
1199 ++ while (MASK(head) != MASK(tail)) {
1200 + if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
1201 + read_buf(ldata, tail) == __DISABLED_CHAR)
1202 + nr--;
1203 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1204 +index edd8ef4ee502..7ed30d0b5273 100644
1205 +--- a/drivers/usb/class/cdc-acm.c
1206 ++++ b/drivers/usb/class/cdc-acm.c
1207 +@@ -1698,6 +1698,9 @@ static const struct usb_device_id acm_ids[] = {
1208 + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1209 + .driver_info = SINGLE_RX_URB,
1210 + },
1211 ++ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
1212 ++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1213 ++ },
1214 + { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1215 + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1216 + },
1217 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1218 +index e7a051386b32..73835027a7cc 100644
1219 +--- a/drivers/usb/serial/cp210x.c
1220 ++++ b/drivers/usb/serial/cp210x.c
1221 +@@ -91,6 +91,9 @@ static const struct usb_device_id id_table[] = {
1222 + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1223 + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
1224 + { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
1225 ++ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
1226 ++ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
1227 ++ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
1228 + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
1229 + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
1230 + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
1231 +@@ -108,6 +111,9 @@ static const struct usb_device_id id_table[] = {
1232 + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
1233 + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
1234 + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
1235 ++ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
1236 ++ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
1237 ++ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
1238 + { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
1239 + { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
1240 + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1241 +@@ -120,7 +126,9 @@ static const struct usb_device_id id_table[] = {
1242 + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
1243 + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1244 + { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
1245 ++ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
1246 + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
1247 ++ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
1248 + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
1249 + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
1250 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1251 +@@ -130,17 +138,23 @@ static const struct usb_device_id id_table[] = {
1252 + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
1253 + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1254 + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1255 ++ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
1256 ++ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
1257 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
1258 + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
1259 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
1260 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
1261 ++ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
1262 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
1263 + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
1264 + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
1265 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1266 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1267 ++ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
1268 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1269 + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
1270 ++ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
1271 ++ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
1272 + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
1273 + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
1274 + { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
1275 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
1276 +index 8632380d2b94..63aea21e6298 100644
1277 +--- a/fs/cifs/cifssmb.c
1278 ++++ b/fs/cifs/cifssmb.c
1279 +@@ -150,8 +150,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
1280 + * greater than cifs socket timeout which is 7 seconds
1281 + */
1282 + while (server->tcpStatus == CifsNeedReconnect) {
1283 +- wait_event_interruptible_timeout(server->response_q,
1284 +- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
1285 ++ rc = wait_event_interruptible_timeout(server->response_q,
1286 ++ (server->tcpStatus != CifsNeedReconnect),
1287 ++ 10 * HZ);
1288 ++ if (rc < 0) {
1289 ++ cifs_dbg(FYI, "%s: aborting reconnect due to a received"
1290 ++ " signal by the process\n", __func__);
1291 ++ return -ERESTARTSYS;
1292 ++ }
1293 +
1294 + /* are we still trying to reconnect? */
1295 + if (server->tcpStatus != CifsNeedReconnect)
1296 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1297 +index 807e989f436a..5f5ba807b414 100644
1298 +--- a/fs/cifs/smb2pdu.c
1299 ++++ b/fs/cifs/smb2pdu.c
1300 +@@ -158,7 +158,7 @@ out:
1301 + static int
1302 + smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1303 + {
1304 +- int rc = 0;
1305 ++ int rc;
1306 + struct nls_table *nls_codepage;
1307 + struct cifs_ses *ses;
1308 + struct TCP_Server_Info *server;
1309 +@@ -169,10 +169,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1310 + * for those three - in the calling routine.
1311 + */
1312 + if (tcon == NULL)
1313 +- return rc;
1314 ++ return 0;
1315 +
1316 + if (smb2_command == SMB2_TREE_CONNECT)
1317 +- return rc;
1318 ++ return 0;
1319 +
1320 + if (tcon->tidStatus == CifsExiting) {
1321 + /*
1322 +@@ -215,8 +215,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1323 + return -EAGAIN;
1324 + }
1325 +
1326 +- wait_event_interruptible_timeout(server->response_q,
1327 +- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
1328 ++ rc = wait_event_interruptible_timeout(server->response_q,
1329 ++ (server->tcpStatus != CifsNeedReconnect),
1330 ++ 10 * HZ);
1331 ++ if (rc < 0) {
1332 ++ cifs_dbg(FYI, "%s: aborting reconnect due to a received"
1333 ++ " signal by the process\n", __func__);
1334 ++ return -ERESTARTSYS;
1335 ++ }
1336 +
1337 + /* are we still trying to reconnect? */
1338 + if (server->tcpStatus != CifsNeedReconnect)
1339 +@@ -234,7 +240,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
1340 + }
1341 +
1342 + if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
1343 +- return rc;
1344 ++ return 0;
1345 +
1346 + nls_codepage = load_nls_default();
1347 +
1348 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
1349 +index c57a94f1c198..092da164bdc0 100644
1350 +--- a/fs/ext4/balloc.c
1351 ++++ b/fs/ext4/balloc.c
1352 +@@ -183,7 +183,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
1353 + unsigned int bit, bit_max;
1354 + struct ext4_sb_info *sbi = EXT4_SB(sb);
1355 + ext4_fsblk_t start, tmp;
1356 +- int flex_bg = 0;
1357 + struct ext4_group_info *grp;
1358 +
1359 + J_ASSERT_BH(bh, buffer_locked(bh));
1360 +@@ -216,22 +215,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
1361 +
1362 + start = ext4_group_first_block_no(sb, block_group);
1363 +
1364 +- if (ext4_has_feature_flex_bg(sb))
1365 +- flex_bg = 1;
1366 +-
1367 + /* Set bits for block and inode bitmaps, and inode table */
1368 + tmp = ext4_block_bitmap(sb, gdp);
1369 +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
1370 ++ if (ext4_block_in_group(sb, tmp, block_group))
1371 + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
1372 +
1373 + tmp = ext4_inode_bitmap(sb, gdp);
1374 +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
1375 ++ if (ext4_block_in_group(sb, tmp, block_group))
1376 + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
1377 +
1378 + tmp = ext4_inode_table(sb, gdp);
1379 + for (; tmp < ext4_inode_table(sb, gdp) +
1380 + sbi->s_itb_per_group; tmp++) {
1381 +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
1382 ++ if (ext4_block_in_group(sb, tmp, block_group))
1383 + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
1384 + }
1385 +
1386 +@@ -454,7 +450,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
1387 + goto verify;
1388 + }
1389 + ext4_lock_group(sb, block_group);
1390 +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1391 ++ if (ext4_has_group_desc_csum(sb) &&
1392 ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
1393 ++ if (block_group == 0) {
1394 ++ ext4_unlock_group(sb, block_group);
1395 ++ unlock_buffer(bh);
1396 ++ ext4_error(sb, "Block bitmap for bg 0 marked "
1397 ++ "uninitialized");
1398 ++ err = -EFSCORRUPTED;
1399 ++ goto out;
1400 ++ }
1401 + err = ext4_init_block_bitmap(sb, bh, block_group, desc);
1402 + set_bitmap_uptodate(bh);
1403 + set_buffer_uptodate(bh);
1404 +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1405 +index c8ad14c697c4..f5d9f82b173a 100644
1406 +--- a/fs/ext4/ext4.h
1407 ++++ b/fs/ext4/ext4.h
1408 +@@ -1468,11 +1468,6 @@ static inline struct timespec ext4_current_time(struct inode *inode)
1409 + static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
1410 + {
1411 + return ino == EXT4_ROOT_INO ||
1412 +- ino == EXT4_USR_QUOTA_INO ||
1413 +- ino == EXT4_GRP_QUOTA_INO ||
1414 +- ino == EXT4_BOOT_LOADER_INO ||
1415 +- ino == EXT4_JOURNAL_INO ||
1416 +- ino == EXT4_RESIZE_INO ||
1417 + (ino >= EXT4_FIRST_INO(sb) &&
1418 + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
1419 + }
1420 +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
1421 +index 3c9381547094..2d8e73793512 100644
1422 +--- a/fs/ext4/ext4_extents.h
1423 ++++ b/fs/ext4/ext4_extents.h
1424 +@@ -103,6 +103,7 @@ struct ext4_extent_header {
1425 + };
1426 +
1427 + #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
1428 ++#define EXT4_MAX_EXTENT_DEPTH 5
1429 +
1430 + #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
1431 + (sizeof(struct ext4_extent_header) + \
1432 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
1433 +index 4705c21f9d03..1708597659a1 100644
1434 +--- a/fs/ext4/extents.c
1435 ++++ b/fs/ext4/extents.c
1436 +@@ -876,6 +876,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
1437 +
1438 + eh = ext_inode_hdr(inode);
1439 + depth = ext_depth(inode);
1440 ++ if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
1441 ++ EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
1442 ++ depth);
1443 ++ ret = -EFSCORRUPTED;
1444 ++ goto err;
1445 ++ }
1446 +
1447 + if (path) {
1448 + ext4_ext_drop_refs(path);
1449 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1450 +index 9fe55b7d4c2c..48d818eba9c3 100644
1451 +--- a/fs/ext4/ialloc.c
1452 ++++ b/fs/ext4/ialloc.c
1453 +@@ -152,7 +152,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
1454 + }
1455 +
1456 + ext4_lock_group(sb, block_group);
1457 +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
1458 ++ if (ext4_has_group_desc_csum(sb) &&
1459 ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
1460 ++ if (block_group == 0) {
1461 ++ ext4_unlock_group(sb, block_group);
1462 ++ unlock_buffer(bh);
1463 ++ ext4_error(sb, "Inode bitmap for bg 0 marked "
1464 ++ "uninitialized");
1465 ++ err = -EFSCORRUPTED;
1466 ++ goto out;
1467 ++ }
1468 + memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
1469 + ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
1470 + sb->s_blocksize * 8, bh->b_data);
1471 +@@ -919,7 +928,8 @@ got:
1472 +
1473 + /* recheck and clear flag under lock if we still need to */
1474 + ext4_lock_group(sb, group);
1475 +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1476 ++ if (ext4_has_group_desc_csum(sb) &&
1477 ++ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
1478 + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1479 + ext4_free_group_clusters_set(sb, gdp,
1480 + ext4_free_clusters_after_init(sb, group, gdp));
1481 +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
1482 +index 3006b81c107f..e72f53a89764 100644
1483 +--- a/fs/ext4/inline.c
1484 ++++ b/fs/ext4/inline.c
1485 +@@ -434,6 +434,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
1486 +
1487 + memset((void *)ext4_raw_inode(&is.iloc)->i_block,
1488 + 0, EXT4_MIN_INLINE_DATA_SIZE);
1489 ++ memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
1490 +
1491 + if (ext4_has_feature_extents(inode->i_sb)) {
1492 + if (S_ISDIR(inode->i_mode) ||
1493 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1494 +index 30efeb656c1e..b507de0e4bbf 100644
1495 +--- a/fs/ext4/inode.c
1496 ++++ b/fs/ext4/inode.c
1497 +@@ -380,9 +380,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
1498 + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
1499 + map->m_len)) {
1500 + ext4_error_inode(inode, func, line, map->m_pblk,
1501 +- "lblock %lu mapped to illegal pblock "
1502 ++ "lblock %lu mapped to illegal pblock %llu "
1503 + "(length %d)", (unsigned long) map->m_lblk,
1504 +- map->m_len);
1505 ++ map->m_pblk, map->m_len);
1506 + return -EFSCORRUPTED;
1507 + }
1508 + return 0;
1509 +@@ -3991,7 +3991,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
1510 + int inodes_per_block, inode_offset;
1511 +
1512 + iloc->bh = NULL;
1513 +- if (!ext4_valid_inum(sb, inode->i_ino))
1514 ++ if (inode->i_ino < EXT4_ROOT_INO ||
1515 ++ inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
1516 + return -EFSCORRUPTED;
1517 +
1518 + iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
1519 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1520 +index d98ff184d94a..75f79ff29ce0 100644
1521 +--- a/fs/ext4/mballoc.c
1522 ++++ b/fs/ext4/mballoc.c
1523 +@@ -2445,7 +2445,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
1524 + * initialize bb_free to be able to skip
1525 + * empty groups without initialization
1526 + */
1527 +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1528 ++ if (ext4_has_group_desc_csum(sb) &&
1529 ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
1530 + meta_group_info[i]->bb_free =
1531 + ext4_free_clusters_after_init(sb, group, desc);
1532 + } else {
1533 +@@ -2966,7 +2967,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
1534 + #endif
1535 + ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
1536 + ac->ac_b_ex.fe_len);
1537 +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
1538 ++ if (ext4_has_group_desc_csum(sb) &&
1539 ++ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
1540 + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1541 + ext4_free_group_clusters_set(sb, gdp,
1542 + ext4_free_clusters_after_init(sb,
1543 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1544 +index 0e0438b5ddbe..49af3c50b263 100644
1545 +--- a/fs/ext4/super.c
1546 ++++ b/fs/ext4/super.c
1547 +@@ -2102,6 +2102,7 @@ static int ext4_check_descriptors(struct super_block *sb,
1548 + struct ext4_sb_info *sbi = EXT4_SB(sb);
1549 + ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1550 + ext4_fsblk_t last_block;
1551 ++ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
1552 + ext4_fsblk_t block_bitmap;
1553 + ext4_fsblk_t inode_bitmap;
1554 + ext4_fsblk_t inode_table;
1555 +@@ -2134,6 +2135,14 @@ static int ext4_check_descriptors(struct super_block *sb,
1556 + if (!(sb->s_flags & MS_RDONLY))
1557 + return 0;
1558 + }
1559 ++ if (block_bitmap >= sb_block + 1 &&
1560 ++ block_bitmap <= last_bg_block) {
1561 ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1562 ++ "Block bitmap for group %u overlaps "
1563 ++ "block group descriptors", i);
1564 ++ if (!(sb->s_flags & MS_RDONLY))
1565 ++ return 0;
1566 ++ }
1567 + if (block_bitmap < first_block || block_bitmap > last_block) {
1568 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1569 + "Block bitmap for group %u not in group "
1570 +@@ -2148,6 +2157,14 @@ static int ext4_check_descriptors(struct super_block *sb,
1571 + if (!(sb->s_flags & MS_RDONLY))
1572 + return 0;
1573 + }
1574 ++ if (inode_bitmap >= sb_block + 1 &&
1575 ++ inode_bitmap <= last_bg_block) {
1576 ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1577 ++ "Inode bitmap for group %u overlaps "
1578 ++ "block group descriptors", i);
1579 ++ if (!(sb->s_flags & MS_RDONLY))
1580 ++ return 0;
1581 ++ }
1582 + if (inode_bitmap < first_block || inode_bitmap > last_block) {
1583 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1584 + "Inode bitmap for group %u not in group "
1585 +@@ -2162,6 +2179,14 @@ static int ext4_check_descriptors(struct super_block *sb,
1586 + if (!(sb->s_flags & MS_RDONLY))
1587 + return 0;
1588 + }
1589 ++ if (inode_table >= sb_block + 1 &&
1590 ++ inode_table <= last_bg_block) {
1591 ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1592 ++ "Inode table for group %u overlaps "
1593 ++ "block group descriptors", i);
1594 ++ if (!(sb->s_flags & MS_RDONLY))
1595 ++ return 0;
1596 ++ }
1597 + if (inode_table < first_block ||
1598 + inode_table + sbi->s_itb_per_group - 1 > last_block) {
1599 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1600 +@@ -2842,13 +2867,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
1601 + ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
1602 + struct ext4_group_desc *gdp = NULL;
1603 +
1604 ++ if (!ext4_has_group_desc_csum(sb))
1605 ++ return ngroups;
1606 ++
1607 + for (group = 0; group < ngroups; group++) {
1608 + gdp = ext4_get_group_desc(sb, group, NULL);
1609 + if (!gdp)
1610 + continue;
1611 +
1612 +- if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
1613 ++ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1614 ++ continue;
1615 ++ if (group != 0)
1616 + break;
1617 ++ ext4_error(sb, "Inode table for bg 0 marked as "
1618 ++ "needing zeroing");
1619 ++ if (sb->s_flags & MS_RDONLY)
1620 ++ return ngroups;
1621 + }
1622 +
1623 + return group;
1624 +@@ -3451,6 +3485,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1625 + le32_to_cpu(es->s_log_block_size));
1626 + goto failed_mount;
1627 + }
1628 ++ if (le32_to_cpu(es->s_log_cluster_size) >
1629 ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1630 ++ ext4_msg(sb, KERN_ERR,
1631 ++ "Invalid log cluster size: %u",
1632 ++ le32_to_cpu(es->s_log_cluster_size));
1633 ++ goto failed_mount;
1634 ++ }
1635 +
1636 + if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
1637 + ext4_msg(sb, KERN_ERR,
1638 +@@ -3515,6 +3556,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1639 + } else {
1640 + sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
1641 + sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
1642 ++ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
1643 ++ ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
1644 ++ sbi->s_first_ino);
1645 ++ goto failed_mount;
1646 ++ }
1647 + if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
1648 + (!is_power_of_2(sbi->s_inode_size)) ||
1649 + (sbi->s_inode_size > blocksize)) {
1650 +@@ -3591,13 +3637,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1651 + "block size (%d)", clustersize, blocksize);
1652 + goto failed_mount;
1653 + }
1654 +- if (le32_to_cpu(es->s_log_cluster_size) >
1655 +- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
1656 +- ext4_msg(sb, KERN_ERR,
1657 +- "Invalid log cluster size: %u",
1658 +- le32_to_cpu(es->s_log_cluster_size));
1659 +- goto failed_mount;
1660 +- }
1661 + sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
1662 + le32_to_cpu(es->s_log_block_size);
1663 + sbi->s_clusters_per_group =
1664 +@@ -3618,10 +3657,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1665 + }
1666 + } else {
1667 + if (clustersize != blocksize) {
1668 +- ext4_warning(sb, "fragment/cluster size (%d) != "
1669 +- "block size (%d)", clustersize,
1670 +- blocksize);
1671 +- clustersize = blocksize;
1672 ++ ext4_msg(sb, KERN_ERR,
1673 ++ "fragment/cluster size (%d) != "
1674 ++ "block size (%d)", clustersize, blocksize);
1675 ++ goto failed_mount;
1676 + }
1677 + if (sbi->s_blocks_per_group > blocksize * 8) {
1678 + ext4_msg(sb, KERN_ERR,
1679 +@@ -3675,6 +3714,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1680 + ext4_blocks_count(es));
1681 + goto failed_mount;
1682 + }
1683 ++ if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
1684 ++ (sbi->s_cluster_ratio == 1)) {
1685 ++ ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
1686 ++ "block is 0 with a 1k block and cluster size");
1687 ++ goto failed_mount;
1688 ++ }
1689 ++
1690 + blocks_count = (ext4_blocks_count(es) -
1691 + le32_to_cpu(es->s_first_data_block) +
1692 + EXT4_BLOCKS_PER_GROUP(sb) - 1);
1693 +@@ -3710,6 +3756,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1694 + ret = -ENOMEM;
1695 + goto failed_mount;
1696 + }
1697 ++ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
1698 ++ le32_to_cpu(es->s_inodes_count)) {
1699 ++ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
1700 ++ le32_to_cpu(es->s_inodes_count),
1701 ++ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
1702 ++ ret = -EINVAL;
1703 ++ goto failed_mount;
1704 ++ }
1705 +
1706 + bgl_lock_init(sbi->s_blockgroup_lock);
1707 +
1708 +@@ -4388,6 +4442,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
1709 +
1710 + if (!sbh || block_device_ejected(sb))
1711 + return error;
1712 ++
1713 ++ /*
1714 ++ * The superblock bh should be mapped, but it might not be if the
1715 ++ * device was hot-removed. Not much we can do but fail the I/O.
1716 ++ */
1717 ++ if (!buffer_mapped(sbh))
1718 ++ return error;
1719 ++
1720 + if (buffer_write_io_error(sbh)) {
1721 + /*
1722 + * Oh, dear. A previous attempt to write the
1723 +diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
1724 +index f3a31f55f372..bce343febb9e 100644
1725 +--- a/fs/jbd2/transaction.c
1726 ++++ b/fs/jbd2/transaction.c
1727 +@@ -1363,6 +1363,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1728 + if (jh->b_transaction == transaction &&
1729 + jh->b_jlist != BJ_Metadata) {
1730 + jbd_lock_bh_state(bh);
1731 ++ if (jh->b_transaction == transaction &&
1732 ++ jh->b_jlist != BJ_Metadata)
1733 ++ pr_err("JBD2: assertion failure: h_type=%u "
1734 ++ "h_line_no=%u block_no=%llu jlist=%u\n",
1735 ++ handle->h_type, handle->h_line_no,
1736 ++ (unsigned long long) bh->b_blocknr,
1737 ++ jh->b_jlist);
1738 + J_ASSERT_JH(jh, jh->b_transaction != transaction ||
1739 + jh->b_jlist == BJ_Metadata);
1740 + jbd_unlock_bh_state(bh);
1741 +@@ -1382,11 +1389,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1742 + * of the transaction. This needs to be done
1743 + * once a transaction -bzzz
1744 + */
1745 +- jh->b_modified = 1;
1746 + if (handle->h_buffer_credits <= 0) {
1747 + ret = -ENOSPC;
1748 + goto out_unlock_bh;
1749 + }
1750 ++ jh->b_modified = 1;
1751 + handle->h_buffer_credits--;
1752 + }
1753 +
1754 +diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
1755 +index 7fd6f5a26143..e212ec4cfb4e 100644
1756 +--- a/kernel/trace/trace_functions_graph.c
1757 ++++ b/kernel/trace/trace_functions_graph.c
1758 +@@ -768,6 +768,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
1759 + struct ftrace_graph_ret *graph_ret;
1760 + struct ftrace_graph_ent *call;
1761 + unsigned long long duration;
1762 ++ int cpu = iter->cpu;
1763 + int i;
1764 +
1765 + graph_ret = &ret_entry->ret;
1766 +@@ -776,7 +777,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
1767 +
1768 + if (data) {
1769 + struct fgraph_cpu_data *cpu_data;
1770 +- int cpu = iter->cpu;
1771 +
1772 + cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1773 +
1774 +@@ -806,6 +806,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
1775 +
1776 + trace_seq_printf(s, "%ps();\n", (void *)call->func);
1777 +
1778 ++ print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
1779 ++ cpu, iter->ent->pid, flags);
1780 ++
1781 + return trace_handle_return(s);
1782 + }
1783 +
1784 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1785 +index 7294301d8495..a813b03021b7 100644
1786 +--- a/mm/hugetlb.c
1787 ++++ b/mm/hugetlb.c
1788 +@@ -2038,6 +2038,7 @@ static void __init gather_bootmem_prealloc(void)
1789 + */
1790 + if (hstate_is_gigantic(h))
1791 + adjust_managed_page_count(page, 1 << h->order);
1792 ++ cond_resched();
1793 + }
1794 + }
1795 +
1796 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1797 +index a4c9cd80c7b6..fd75e27c9b40 100644
1798 +--- a/mm/page_alloc.c
1799 ++++ b/mm/page_alloc.c
1800 +@@ -3109,8 +3109,6 @@ retry:
1801 + * the allocation is high priority and these type of
1802 + * allocations are system rather than user orientated
1803 + */
1804 +- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
1805 +-
1806 + page = __alloc_pages_high_priority(gfp_mask, order, ac);
1807 +
1808 + if (page) {
1809 +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
1810 +index 9d144cbd4e62..03ebff3950d8 100644
1811 +--- a/net/ipv4/fib_semantics.c
1812 ++++ b/net/ipv4/fib_semantics.c
1813 +@@ -980,7 +980,7 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
1814 + return -EINVAL;
1815 + } else {
1816 + if (nla_len(nla) != sizeof(u32))
1817 +- return false;
1818 ++ return -EINVAL;
1819 + val = nla_get_u32(nla);
1820 + }
1821 + if (type == RTAX_ADVMSS && val > 65535 - 40)
1822 +diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
1823 +index 2c89f90cd7bc..f94a2e1172f0 100644
1824 +--- a/net/netfilter/nf_log.c
1825 ++++ b/net/netfilter/nf_log.c
1826 +@@ -422,14 +422,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
1827 + rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
1828 + mutex_unlock(&nf_log_mutex);
1829 + } else {
1830 ++ struct ctl_table tmp = *table;
1831 ++
1832 ++ tmp.data = buf;
1833 + mutex_lock(&nf_log_mutex);
1834 + logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
1835 + if (!logger)
1836 +- table->data = "NONE";
1837 ++ strlcpy(buf, "NONE", sizeof(buf));
1838 + else
1839 +- table->data = logger->name;
1840 +- r = proc_dostring(table, write, buffer, lenp, ppos);
1841 ++ strlcpy(buf, logger->name, sizeof(buf));
1842 + mutex_unlock(&nf_log_mutex);
1843 ++ r = proc_dostring(&tmp, write, buffer, lenp, ppos);
1844 + }
1845 +
1846 + return r;
1847 +diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
1848 +index f3695a497408..99bc2f87a974 100644
1849 +--- a/net/netfilter/nf_tables_core.c
1850 ++++ b/net/netfilter/nf_tables_core.c
1851 +@@ -167,7 +167,8 @@ next_rule:
1852 +
1853 + switch (regs.verdict.code) {
1854 + case NFT_JUMP:
1855 +- BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
1856 ++ if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
1857 ++ return NF_DROP;
1858 + jumpstack[stackptr].chain = chain;
1859 + jumpstack[stackptr].rule = rule;
1860 + jumpstack[stackptr].rulenum = rulenum;