Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Wed, 23 Jan 2019 11:30:36
Message-Id: 1548243009.45b8c6fc82cd3910d7ebdc1532a9001f27b5ed55.mpagano@gentoo
commit: 45b8c6fc82cd3910d7ebdc1532a9001f27b5ed55
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 23 11:30:09 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 23 11:30:09 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=45b8c6fc

proj/linux-patches: Linux patch 4.14.95

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1094_linux-4.14.95.patch | 2077 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2081 insertions(+)

diff --git a/0000_README b/0000_README
index 41aba45..e1a1f75 100644
--- a/0000_README
+++ b/0000_README
@@ -419,6 +419,10 @@ Patch: 1093_4.14.94.patch
From: http://www.kernel.org
Desc: Linux 4.14.94

+Patch: 1094_4.14.95.patch
+From: http://www.kernel.org
+Desc: Linux 4.14.95
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1094_linux-4.14.95.patch b/1094_linux-4.14.95.patch
new file mode 100644
index 0000000..efaf68a
--- /dev/null
+++ b/1094_linux-4.14.95.patch
@@ -0,0 +1,2077 @@
+diff --git a/Makefile b/Makefile
+index e9a138dd964a..70cc37cb3e99 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 94
++SUBLEVEL = 95
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 73cc4309fe01..1d6d980f80ac 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -23,6 +23,8 @@
+ #include <asm/types.h>
+
+ /* Hyp Configuration Register (HCR) bits */
++#define HCR_API (UL(1) << 41)
++#define HCR_APK (UL(1) << 40)
+ #define HCR_E2H (UL(1) << 34)
+ #define HCR_ID (UL(1) << 33)
+ #define HCR_CD (UL(1) << 32)
+@@ -82,6 +84,7 @@
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
+ #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+ #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+
+ /* TCR_EL2 Registers bits */
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 261f3f88364c..ec393275ba04 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -414,10 +414,9 @@ CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ #endif
+
+ /* Hyp configuration. */
+- mov x0, #HCR_RW // 64-bit EL1
++ mov_q x0, HCR_HOST_NVHE_FLAGS
+ cbz x2, set_hcr
+- orr x0, x0, #HCR_TGE // Enable Host Extensions
+- orr x0, x0, #HCR_E2H
++ mov_q x0, HCR_HOST_VHE_FLAGS
+ set_hcr:
+ msr hcr_el2, x0
+ isb
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index 47080c49cc7e..2bda224e8e71 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -14,6 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/types.h>
+
++#include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+ #include <asm/kernel-pgtable.h>
+ #include <asm/memory.h>
+@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
+ return ret;
+ }
+
+-static __init const u8 *get_cmdline(void *fdt)
++static __init const u8 *kaslr_get_cmdline(void *fdt)
+ {
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+- cmdline = get_cmdline(fdt);
++ cmdline = kaslr_get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ return 0;
+@@ -180,5 +181,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
++ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
++
+ return offset;
+ }
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index b2f1992c6234..44845996b554 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -127,7 +127,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
+ mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+
+ write_sysreg(mdcr_el2, mdcr_el2);
+- write_sysreg(HCR_RW, hcr_el2);
++ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+ write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+ }
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 23e3d3e0ee5b..ae4450e891ab 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -3153,6 +3153,7 @@ config MIPS32_O32
+ config MIPS32_N32
+ bool "Kernel support for n32 binaries"
+ depends on 64BIT
++ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index f0bc3312ed11..c4ef1c31e0c4 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
+ .irq_set_type = ltq_eiu_settype,
+ };
+
+-static void ltq_hw_irqdispatch(int module)
++static void ltq_hw_irq_handler(struct irq_desc *desc)
+ {
++ int module = irq_desc_get_irq(desc) - 2;
+ u32 irq;
++ int hwirq;
+
+ irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
+ if (irq == 0)
+@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
+ * other bits might be bogus
+ */
+ irq = __fls(irq);
+- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
++ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
++ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+ /* if this is a EBU irq, we need to ack it or get a deadlock */
+ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
+@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
+ LTQ_EBU_PCC_ISTAT);
+ }
+
+-#define DEFINE_HWx_IRQDISPATCH(x) \
+- static void ltq_hw ## x ## _irqdispatch(void) \
+- { \
+- ltq_hw_irqdispatch(x); \
+- }
+-DEFINE_HWx_IRQDISPATCH(0)
+-DEFINE_HWx_IRQDISPATCH(1)
+-DEFINE_HWx_IRQDISPATCH(2)
+-DEFINE_HWx_IRQDISPATCH(3)
+-DEFINE_HWx_IRQDISPATCH(4)
+-
+-#if MIPS_CPU_TIMER_IRQ == 7
+-static void ltq_hw5_irqdispatch(void)
+-{
+- do_IRQ(MIPS_CPU_TIMER_IRQ);
+-}
+-#else
+-DEFINE_HWx_IRQDISPATCH(5)
+-#endif
+-
+-static void ltq_hw_irq_handler(struct irq_desc *desc)
+-{
+- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
+-}
+-
+-asmlinkage void plat_irq_dispatch(void)
+-{
+- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+- int irq;
+-
+- if (!pending) {
+- spurious_interrupt();
+- return;
+- }
+-
+- pending >>= CAUSEB_IP;
+- while (pending) {
+- irq = fls(pending) - 1;
+- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+- pending &= ~BIT(irq);
+- }
+-}
+-
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+ {
+ struct irq_chip *chip = &ltq_irq_type;
+@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ for (i = 0; i < MAX_IM; i++)
+ irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+
+- if (cpu_has_vint) {
+- pr_info("Setting up vectored interrupts\n");
+- set_vi_handler(2, ltq_hw0_irqdispatch);
+- set_vi_handler(3, ltq_hw1_irqdispatch);
+- set_vi_handler(4, ltq_hw2_irqdispatch);
+- set_vi_handler(5, ltq_hw3_irqdispatch);
+- set_vi_handler(6, ltq_hw4_irqdispatch);
+- set_vi_handler(7, ltq_hw5_irqdispatch);
+- }
+-
+ ltq_domain = irq_domain_add_linear(node,
+ (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
+ &irq_domain_ops, 0);
+
+-#ifndef CONFIG_MIPS_MT_SMP
+- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
+- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
+-#else
+- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
+- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
+-#endif
+-
+ /* tell oprofile which irq to use */
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
+index 2a5bb849b10e..288b58b00dc8 100644
+--- a/arch/mips/pci/msi-octeon.c
++++ b/arch/mips/pci/msi-octeon.c
+@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
+ int irq;
+ struct irq_chip *msi;
+
+- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
++ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
++ return 0;
++ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
+ msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
+ msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 0db344d5a01a..053287dfad65 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
++
++ /*
++ * RTA_OK() didn't align the rtattr's payload when validating that it
++ * fits in the buffer. Yet, the keys should start on the next 4-byte
++ * aligned boundary. To avoid confusion, require that the rtattr
++ * payload be exactly the param struct, which has a 4-byte aligned size.
++ */
++ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
++ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
++ key += rta->rta_len;
++ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
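To make the layout behind crypto_authenc_extractkeys() concrete: the key blob is an rtattr header, a 4-byte param carrying the big-endian cipher key length, then the auth key followed by the cipher key. A minimal userspace sketch, with simplified stand-ins for the kernel's rtattr and crypto_authenc_key_param structs:

    #include <arpa/inet.h>   /* ntohl() */
    #include <stdint.h>

    struct rta_hdr   { uint16_t rta_len; uint16_t rta_type; }; /* 4 bytes */
    struct key_param { uint32_t enckeylen; };                  /* 4 bytes, BE */

    /* Blob: | rta_hdr | key_param | auth key | enc key |
     * Requiring the payload to be exactly sizeof(key_param) keeps
     * rta_len 4-byte aligned, so no RTA_ALIGN() rounding is needed. */
    static int split_authenc_key(const uint8_t *key, unsigned int keylen,
                                 const uint8_t **enckey, uint32_t *enclen)
    {
        const struct rta_hdr *rta = (const void *)key;
        const struct key_param *p = (const void *)(rta + 1);

        if (keylen < sizeof(*rta) + sizeof(*p) ||
            rta->rta_len != sizeof(*rta) + sizeof(*p))
            return -1;
        *enclen = ntohl(p->enckeylen);
        key    += rta->rta_len;               /* skip header + param */
        keylen -= rta->rta_len;
        if (keylen < *enclen)
            return -1;
        *enckey = key + (keylen - *enclen);   /* enc key follows auth key */
        return 0;
    }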
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 6de852ce4cf8..4ba4470deee1 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ struct aead_request *req = areq->data;
+
+ err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+- aead_request_complete(req, err);
++ authenc_esn_request_complete(req, err);
+ }
+
+ static int crypto_authenc_esn_decrypt(struct aead_request *req)
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 6d61633a7f89..7910dd8b1d3a 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -81,7 +81,7 @@
+ #include <linux/uaccess.h>
+
+ static DEFINE_IDR(loop_index_idr);
+-static DEFINE_MUTEX(loop_index_mutex);
++static DEFINE_MUTEX(loop_ctl_mutex);
+
+ static int max_part;
+ static int part_shift;
+@@ -1018,7 +1018,7 @@ static int loop_clr_fd(struct loop_device *lo)
+ */
+ if (atomic_read(&lo->lo_refcnt) > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+
+@@ -1070,12 +1070,12 @@ static int loop_clr_fd(struct loop_device *lo)
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ loop_unprepare_queue(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ /*
+- * Need not hold lo_ctl_mutex to fput backing file.
+- * Calling fput holding lo_ctl_mutex triggers a circular
++ * Need not hold loop_ctl_mutex to fput backing file.
++ * Calling fput holding loop_ctl_mutex triggers a circular
+ * lock dependency possibility warning as fput can take
+- * bd_mutex which is usually taken before lo_ctl_mutex.
++ * bd_mutex which is usually taken before loop_ctl_mutex.
+ */
+ fput(filp);
+ return 0;
+@@ -1097,6 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+ return -EINVAL;
+
++ if (lo->lo_offset != info->lo_offset ||
++ lo->lo_sizelimit != info->lo_sizelimit) {
++ sync_blockdev(lo->lo_device);
++ kill_bdev(lo->lo_device);
++ }
++
+ /* I/O need to be drained during transfer transition */
+ blk_mq_freeze_queue(lo->lo_queue);
+
+@@ -1125,6 +1131,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+
+ if (lo->lo_offset != info->lo_offset ||
+ lo->lo_sizelimit != info->lo_sizelimit) {
++ /* kill_bdev should have truncated all the pages */
++ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
++ err = -EAGAIN;
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
++ __func__, lo->lo_number, lo->lo_file_name,
++ lo->lo_device->bd_inode->i_mapping->nrpages);
++ goto exit;
++ }
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+ err = -EFBIG;
+ goto exit;
+@@ -1175,12 +1189,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ static int
+ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ {
+- struct file *file;
++ struct path path;
+ struct kstat stat;
+ int ret;
+
+ if (lo->lo_state != Lo_bound) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return -ENXIO;
+ }
+
+@@ -1199,17 +1213,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ lo->lo_encrypt_key_size);
+ }
+
+- /* Drop lo_ctl_mutex while we call into the filesystem. */
+- file = get_file(lo->lo_backing_file);
+- mutex_unlock(&lo->lo_ctl_mutex);
+- ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
+- AT_STATX_SYNC_AS_STAT);
++ /* Drop loop_ctl_mutex while we call into the filesystem. */
++ path = lo->lo_backing_file->f_path;
++ path_get(&path);
++ mutex_unlock(&loop_ctl_mutex);
++ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
+ if (!ret) {
+ info->lo_device = huge_encode_dev(stat.dev);
+ info->lo_inode = stat.ino;
+ info->lo_rdevice = huge_encode_dev(stat.rdev);
+ }
+- fput(file);
++ path_put(&path);
+ return ret;
+ }
+
+@@ -1294,7 +1308,7 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1312,7 +1326,7 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1346,22 +1360,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+
+ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ {
++ int err = 0;
++
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
++ if (lo->lo_queue->limits.logical_block_size != arg) {
++ sync_blockdev(lo->lo_device);
++ kill_bdev(lo->lo_device);
++ }
++
+ blk_mq_freeze_queue(lo->lo_queue);
+
++ /* kill_bdev should have truncated all the pages */
++ if (lo->lo_queue->limits.logical_block_size != arg &&
++ lo->lo_device->bd_inode->i_mapping->nrpages) {
++ err = -EAGAIN;
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
++ __func__, lo->lo_number, lo->lo_file_name,
++ lo->lo_device->bd_inode->i_mapping->nrpages);
++ goto out_unfreeze;
++ }
++
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ blk_queue_physical_block_size(lo->lo_queue, arg);
+ blk_queue_io_min(lo->lo_queue, arg);
+ loop_update_dio(lo);
+-
++out_unfreeze:
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+- return 0;
++ return err;
+ }
+
+ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+@@ -1370,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ struct loop_device *lo = bdev->bd_disk->private_data;
+ int err;
+
+- mutex_lock_nested(&lo->lo_ctl_mutex, 1);
++ mutex_lock_nested(&loop_ctl_mutex, 1);
+ switch (cmd) {
+ case LOOP_SET_FD:
+ err = loop_set_fd(lo, mode, bdev, arg);
+@@ -1379,7 +1410,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ err = loop_change_fd(lo, bdev, arg);
+ break;
+ case LOOP_CLR_FD:
+- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
++ /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
+ err = loop_clr_fd(lo);
+ if (!err)
+ goto out_unlocked;
+@@ -1392,7 +1423,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS:
+ err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
++ /* loop_get_status() unlocks loop_ctl_mutex */
+ goto out_unlocked;
+ case LOOP_SET_STATUS64:
+ err = -EPERM;
+@@ -1402,7 +1433,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ break;
+ case LOOP_GET_STATUS64:
+ err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
++ /* loop_get_status() unlocks loop_ctl_mutex */
+ goto out_unlocked;
+ case LOOP_SET_CAPACITY:
+ err = -EPERM;
+@@ -1422,7 +1453,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ default:
+ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ }
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ out_unlocked:
+ return err;
+@@ -1539,7 +1570,7 @@ loop_get_status_compat(struct loop_device *lo,
+ int err;
+
+ if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return -EINVAL;
+ }
+ err = loop_get_status(lo, &info64);
+@@ -1556,16 +1587,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+
+ switch(cmd) {
+ case LOOP_SET_STATUS:
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_set_status_compat(
+ lo, (const struct compat_loop_info __user *) arg);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ break;
+ case LOOP_GET_STATUS:
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_get_status_compat(
+ lo, (struct compat_loop_info __user *) arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
++ /* loop_get_status() unlocks loop_ctl_mutex */
+ break;
+ case LOOP_SET_CAPACITY:
+ case LOOP_CLR_FD:
+@@ -1587,9 +1618,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ static int lo_open(struct block_device *bdev, fmode_t mode)
+ {
+ struct loop_device *lo;
+- int err = 0;
++ int err;
+
+- mutex_lock(&loop_index_mutex);
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
+ lo = bdev->bd_disk->private_data;
+ if (!lo) {
+ err = -ENXIO;
+@@ -1598,18 +1631,20 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+
+ atomic_inc(&lo->lo_refcnt);
+ out:
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return err;
+ }
+
+-static void __lo_release(struct loop_device *lo)
++static void lo_release(struct gendisk *disk, fmode_t mode)
+ {
++ struct loop_device *lo;
+ int err;
+
++ mutex_lock(&loop_ctl_mutex);
++ lo = disk->private_data;
+ if (atomic_dec_return(&lo->lo_refcnt))
+- return;
++ goto out_unlock;
+
+- mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ /*
+ * In autoclear mode, stop the loop thread
+@@ -1627,14 +1662,8 @@ static void __lo_release(struct loop_device *lo)
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ }
+
+- mutex_unlock(&lo->lo_ctl_mutex);
+-}
+-
+-static void lo_release(struct gendisk *disk, fmode_t mode)
+-{
+- mutex_lock(&loop_index_mutex);
+- __lo_release(disk->private_data);
+- mutex_unlock(&loop_index_mutex);
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
+ }
+
+ static const struct block_device_operations lo_fops = {
+@@ -1673,10 +1702,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+
+@@ -1849,7 +1878,6 @@ static int loop_add(struct loop_device **l, int i)
+ if (!part_shift)
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ disk->flags |= GENHD_FL_EXT_DEVT;
+- mutex_init(&lo->lo_ctl_mutex);
+ atomic_set(&lo->lo_refcnt, 0);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
+@@ -1928,7 +1956,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ struct kobject *kobj;
+ int err;
+
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+@@ -1936,7 +1964,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ kobj = NULL;
+ else
+ kobj = get_disk(lo->lo_disk);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ *part = 0;
+ return kobj;
+@@ -1946,9 +1974,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ unsigned long parm)
+ {
+ struct loop_device *lo;
+- int ret = -ENOSYS;
++ int ret;
++
++ ret = mutex_lock_killable(&loop_ctl_mutex);
++ if (ret)
++ return ret;
+
+- mutex_lock(&loop_index_mutex);
++ ret = -ENOSYS;
+ switch (cmd) {
+ case LOOP_CTL_ADD:
+ ret = loop_lookup(&lo, parm);
+@@ -1962,19 +1994,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+- mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (atomic_read(&lo->lo_refcnt) > 0) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+@@ -1984,7 +2012,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ break;
+ ret = loop_add(&lo, -1);
+ }
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ return ret;
+ }
+@@ -2068,10 +2096,10 @@ static int __init loop_init(void)
+ THIS_MODULE, loop_probe, NULL, NULL);
+
+ /* pre-create number of devices given by config or max_loop */
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ for (i = 0; i < nr; i++)
+ loop_add(&lo, i);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ printk(KERN_INFO "loop: module loaded\n");
+ return 0;
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index dfc54ceba410..b2251752452b 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -54,7 +54,6 @@ struct loop_device {
+
+ spinlock_t lo_lock;
+ int lo_state;
+- struct mutex lo_ctl_mutex;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ bool use_dio;
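The loop rework above collapses the per-device lo_ctl_mutex and the global loop_index_mutex into a single loop_ctl_mutex, and takes it killably in lo_open(). The open-side pattern, sketched with a hypothetical driver-global lock:

    #include <linux/blkdev.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(ctl_mutex);   /* hypothetical driver-global lock */

    static int my_open(struct block_device *bdev, fmode_t mode)
    {
            int err;

            /* Killable: a fatal signal gets -EINTR instead of an
             * uninterruptible wait behind a slow ioctl. */
            err = mutex_lock_killable(&ctl_mutex);
            if (err)
                    return err;
            if (!bdev->bd_disk->private_data)
                    err = -ENXIO;             /* device went away */
            /* ... otherwise take a reference on the device ... */
            mutex_unlock(&ctl_mutex);
            return err;
    }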
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index fe1414df0f33..d32cd943dff2 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -275,9 +275,10 @@ static void nbd_size_update(struct nbd_device *nbd)
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ set_capacity(nbd->disk, config->bytesize >> 9);
+ if (bdev) {
+- if (bdev->bd_disk)
++ if (bdev->bd_disk) {
+ bd_set_size(bdev, config->bytesize);
+- else
++ set_blocksize(bdev, config->blksize);
++ } else
+ bdev->bd_invalidated = 1;
+ bdput(bdev);
+ }
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 143f8bc403b9..342bc777841c 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -679,6 +679,7 @@ config CRYPTO_DEV_BCM_SPU
+ depends on ARCH_BCM_IPROC
+ depends on MAILBOX
+ default m
++ select CRYPTO_AUTHENC
+ select CRYPTO_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index ee52c355bee0..b6be383a51a6 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -2846,44 +2846,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ struct spu_hw *spu = &iproc_priv.spu;
+ struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+- struct rtattr *rta = (void *)key;
+- struct crypto_authenc_key_param *param;
+- const u8 *origkey = key;
+- const unsigned int origkeylen = keylen;
+-
+- int ret = 0;
++ struct crypto_authenc_keys keys;
++ int ret;
+
+ flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
+ keylen);
+ flow_dump(" key: ", key, keylen);
+
+- if (!RTA_OK(rta, keylen))
+- goto badkey;
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- goto badkey;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
++ ret = crypto_authenc_extractkeys(&keys, key, keylen);
++ if (ret)
+ goto badkey;
+
+- param = RTA_DATA(rta);
+- ctx->enckeylen = be32_to_cpu(param->enckeylen);
+-
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+-
+- if (keylen < ctx->enckeylen)
+- goto badkey;
+- if (ctx->enckeylen > MAX_KEY_SIZE)
++ if (keys.enckeylen > MAX_KEY_SIZE ||
++ keys.authkeylen > MAX_KEY_SIZE)
+ goto badkey;
+
+- ctx->authkeylen = keylen - ctx->enckeylen;
+-
+- if (ctx->authkeylen > MAX_KEY_SIZE)
+- goto badkey;
++ ctx->enckeylen = keys.enckeylen;
++ ctx->authkeylen = keys.authkeylen;
+
+- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
++ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ /* May end up padding auth key. So make sure it's zeroed. */
+ memset(ctx->authkey, 0, sizeof(ctx->authkey));
+- memcpy(ctx->authkey, key, ctx->authkeylen);
++ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+
+ switch (ctx->alg->cipher_info.alg) {
+ case CIPHER_ALG_DES:
+@@ -2891,7 +2875,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ u32 tmp[DES_EXPKEY_WORDS];
+ u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
+
+- if (des_ekey(tmp, key) == 0) {
++ if (des_ekey(tmp, keys.enckey) == 0) {
+ if (crypto_aead_get_flags(cipher) &
+ CRYPTO_TFM_REQ_WEAK_KEY) {
+ crypto_aead_set_flags(cipher, flags);
+@@ -2906,7 +2890,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ break;
+ case CIPHER_ALG_3DES:
+ if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
+- const u32 *K = (const u32 *)key;
++ const u32 *K = (const u32 *)keys.enckey;
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+
+ if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+@@ -2957,9 +2941,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->fallback_cipher->base.crt_flags |=
+ tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+- ret =
+- crypto_aead_setkey(ctx->fallback_cipher, origkey,
+- origkeylen);
++ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
+ if (ret) {
+ flow_log(" fallback setkey() returned:%d\n", ret);
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 698580b60b2f..8fa35bc75870 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1109,13 +1109,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
+
+ desc = edesc->hw_desc;
+
+- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, state->buf_dma)) {
+- dev_err(jrdev, "unable to map src\n");
+- goto unmap;
+- }
++ if (buflen) {
++ state->buf_dma = dma_map_single(jrdev, buf, buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, state->buf_dma)) {
++ dev_err(jrdev, "unable to map src\n");
++ goto unmap;
++ }
+
+- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ }
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
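The caamhash hunk guards the mapping because a zero-length dma_map_single() is not a valid request; when there is no buffered data, both the mapping and the matching append_seq_in_ptr() can be skipped. The shape of that guard, sketched generically (map_if_nonempty() is an illustrative name, not a driver function):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Sketch: map @buf only when it holds data; callers treat a zero
     * @len as "nothing to hand to the engine". */
    static int map_if_nonempty(struct device *dev, void *buf, size_t len,
                               dma_addr_t *handle)
    {
            if (!len)
                    return 0;
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            return dma_mapping_error(dev, *handle) ? -ENOMEM : 0;
    }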
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 57e1b203cf36..4388f4e3840c 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1347,23 +1347,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
+- void *err;
+
+ if (cryptlen + authsize > max_len) {
+ dev_err(dev, "length exceeds h/w max limit\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (ivsize)
+- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+-
+ if (!dst || dst == src) {
+ src_len = assoclen + cryptlen + authsize;
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+@@ -1373,16 +1368,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+ dst_nents = sg_nents_for_len(dst, dst_len);
+ if (dst_nents < 0) {
+ dev_err(dev, "Invalid number of dst SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
+@@ -1405,12 +1398,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ dma_len = 0;
+ alloc_len += icv_stashing ? authsize : 0;
+ }
++ alloc_len += ivsize;
+
+ edesc = kmalloc(alloc_len, GFP_DMA | flags);
+- if (!edesc) {
+- dev_err(dev, "could not allocate edescriptor\n");
+- err = ERR_PTR(-ENOMEM);
+- goto error_sg;
++ if (!edesc)
++ return ERR_PTR(-ENOMEM);
++ if (ivsize) {
++ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
++ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ }
+
+ edesc->src_nents = src_nents;
+@@ -1423,10 +1418,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ DMA_BIDIRECTIONAL);
+
+ return edesc;
+-error_sg:
+- if (iv_dma)
+- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+- return err;
+ }
+
+ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
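In the talitos hunk the IV is copied into spare space reserved at the tail of the freshly allocated edesc before it is DMA-mapped, which lets every earlier error path plain-return with nothing to unmap. A rough sketch of that allocation layout (alloc_desc_with_iv() is an illustrative name, not a driver function, and @alloc_len is assumed to already include @ivsize as in the patch):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void *alloc_desc_with_iv(struct device *dev, size_t alloc_len,
                                    u8 *iv, unsigned int ivsize,
                                    dma_addr_t *iv_dma)
    {
            void *edesc = kmalloc(alloc_len, GFP_DMA | GFP_KERNEL);

            if (!edesc)
                    return NULL;
            if (ivsize) {
                    /* The IV lives inside the allocation it belongs to,
                     * so freeing the edesc frees the IV with it. */
                    iv = memcpy((u8 *)edesc + alloc_len - ivsize, iv, ivsize);
                    *iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
            }
            return edesc;
    }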
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index ad6812baa611..f1259a0c2883 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1578,9 +1578,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+- if (var->pixclock != 0 || in_dbg_master())
++ if (in_dbg_master())
+ return -EINVAL;
+
++ if (var->pixclock != 0) {
++ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
++ var->pixclock = 0;
++ }
++
+ /*
+ * Changes struct fb_var_screeninfo are currently not pushed back
+ * to KMS, hence fail if different settings are requested.
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
+index 6ca71aabb576..d300e5e7eadc 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
+@@ -877,8 +877,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
+ "%s-vid-cap", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_cap)) {
++ int err = PTR_ERR(dev->kthread_vid_cap);
++
++ dev->kthread_vid_cap = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+- return PTR_ERR(dev->kthread_vid_cap);
++ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
+index 98eed5889bc1..7c8d75852816 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
+@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
+ "%s-vid-out", dev->v4l2_dev.name);
+
+ if (IS_ERR(dev->kthread_vid_out)) {
++ int err = PTR_ERR(dev->kthread_vid_out);
++
++ dev->kthread_vid_out = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+- return PTR_ERR(dev->kthread_vid_out);
++ return err;
+ }
+ *pstreaming = true;
+ vivid_grab_controls(dev, true);
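Both vivid hunks apply the same rule: a failed kthread_run() leaves an ERR_PTR in the struct field, and anything that later calls kthread_stop() on it would dereference a poisoned pointer, so the field must be reset to NULL before returning. The pattern, with a hypothetical my_dev/worker field:

    #include <linux/err.h>
    #include <linux/kthread.h>

    struct my_dev { struct task_struct *worker; };   /* hypothetical */
    extern int my_thread_fn(void *data);             /* hypothetical */

    static int start_worker(struct my_dev *dev)
    {
            dev->worker = kthread_run(my_thread_fn, dev, "my-worker");
            if (IS_ERR(dev->worker)) {
                    int err = PTR_ERR(dev->worker);

                    dev->worker = NULL;   /* never leave an ERR_PTR behind */
                    return err;
            }
            return 0;
    }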
+diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
+index 6f6d4df1e8a8..11b014bbacd8 100644
+--- a/drivers/media/platform/vivid/vivid-vid-common.c
++++ b/drivers/media/platform/vivid/vivid-vid-common.c
+@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
++ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
+diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
+index 92a74bc34527..bd8de78e0ffd 100644
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -900,8 +900,6 @@ static int em28xx_enable_analog_tuner(struct em28xx *dev)
+ if (!mdev || !v4l2->decoder)
+ return 0;
+
+- dev->v4l2->field_count = 0;
+-
+ /*
+ * This will find the tuner that is connected into the decoder.
+ * Technically, this is not 100% correct, as the device may be
+@@ -1074,6 +1072,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
+
+ em28xx_videodbg("%s\n", __func__);
+
++ dev->v4l2->field_count = 0;
++
+ /* Make sure streaming is not already in progress for this type
+ of filehandle (e.g. video, vbi) */
+ rc = res_get(dev, vq->type);
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index d06941cc6a55..f1ef4e97238e 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -249,6 +249,7 @@ static void v4l_print_format(const void *arg, bool write_only)
+ const struct v4l2_window *win;
+ const struct v4l2_sdr_format *sdr;
+ const struct v4l2_meta_format *meta;
++ u32 planes;
+ unsigned i;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+@@ -279,7 +280,8 @@ static void v4l_print_format(const void *arg, bool write_only)
+ prt_names(mp->field, v4l2_field_names),
+ mp->colorspace, mp->num_planes, mp->flags,
+ mp->ycbcr_enc, mp->quantization, mp->xfer_func);
+- for (i = 0; i < mp->num_planes; i++)
++ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
++ for (i = 0; i < planes; i++)
+ printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
+ mp->plane_fmt[i].bytesperline,
+ mp->plane_fmt[i].sizeimage);
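The v4l_print_format() fix is the standard clamp-before-index idiom: num_planes comes from userspace, so the loop bound is capped at the size of the fixed plane_fmt[] array instead of being trusted. Generically (MAX_PLANES, my_plane and print_plane() are stand-ins, not V4L2 names):

    #include <linux/kernel.h>   /* min_t() */

    #define MAX_PLANES 8                                  /* stand-in */

    struct my_plane { u32 bytesperline, sizeimage; };     /* stand-in */
    extern void print_plane(const struct my_plane *p);    /* stand-in */

    static void print_planes(const struct my_plane *fmt, u32 num_planes)
    {
            /* Clamp the untrusted count before using it as an index. */
            u32 i, planes = min_t(u32, num_planes, MAX_PLANES);

            for (i = 0; i < planes; i++)
                    print_plane(&fmt[i]);
    }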
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 43522a09b11d..f1725da2a90d 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1925,9 +1925,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ return -EINVAL;
+ }
+ }
++
++ mutex_lock(&q->mmap_lock);
++
+ if (vb2_fileio_is_active(q)) {
+ dprintk(1, "mmap: file io in progress\n");
+- return -EBUSY;
++ ret = -EBUSY;
++ goto unlock;
+ }
+
+ /*
+@@ -1935,7 +1939,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+- return ret;
++ goto unlock;
+
+ vb = q->bufs[buffer];
+
+@@ -1948,11 +1952,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+ if (length < (vma->vm_end - vma->vm_start)) {
+ dprintk(1,
+ "MMAP invalid, as it would overflow buffer length\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto unlock;
+ }
+
+- mutex_lock(&q->mmap_lock);
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
++
++unlock:
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
+index 5628a6b5b19b..c5c320efc7b4 100644
+--- a/drivers/mfd/tps6586x.c
++++ b/drivers/mfd/tps6586x.c
+@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
+ return 0;
+ }
+
++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
++{
++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++ if (tps6586x->client->irq)
++ disable_irq(tps6586x->client->irq);
++
++ return 0;
++}
++
++static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
++{
++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
++
++ if (tps6586x->client->irq)
++ enable_irq(tps6586x->client->irq);
++
++ return 0;
++}
++
++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
++ tps6586x_i2c_resume);
++
+ static const struct i2c_device_id tps6586x_id_table[] = {
+ { "tps6586x", 0 },
+ { },
+@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
+ .driver = {
+ .name = "tps6586x",
+ .of_match_table = of_match_ptr(tps6586x_of_match),
++ .pm = &tps6586x_pm_ops,
+ },
+ .probe = tps6586x_i2c_probe,
+ .remove = tps6586x_i2c_remove,
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 92c483ec6cb2..192844b50c69 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -138,6 +138,8 @@ struct sdhci_msm_host {
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool use_cdclp533;
++ bool use_cdr;
++ u32 transfer_mode;
+ };
+
+ static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
+@@ -815,6 +817,23 @@ out:
+ return ret;
+ }
+
++static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
++{
++ u32 config, oldconfig = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
++
++ config = oldconfig;
++ if (enable) {
++ config |= CORE_CDR_EN;
++ config &= ~CORE_CDR_EXT_EN;
++ } else {
++ config &= ~CORE_CDR_EN;
++ config |= CORE_CDR_EXT_EN;
++ }
++
++ if (config != oldconfig)
++ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
++}
++
+ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+@@ -832,8 +851,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !(ios.timing == MMC_TIMING_MMC_HS400 ||
+ ios.timing == MMC_TIMING_MMC_HS200 ||
+- ios.timing == MMC_TIMING_UHS_SDR104))
++ ios.timing == MMC_TIMING_UHS_SDR104)) {
++ msm_host->use_cdr = false;
++ sdhci_msm_set_cdr(host, false);
+ return 0;
++ }
++
++ /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
++ msm_host->use_cdr = true;
+
+ /*
+ * For HS400 tuning in HS200 timing requires:
+@@ -1092,6 +1117,29 @@ out:
+ __sdhci_msm_set_clock(host, clock);
+ }
+
++static void sdhci_msm_write_w(struct sdhci_host *host, u16 val, int reg)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++
++ switch (reg) {
++ case SDHCI_TRANSFER_MODE:
++ msm_host->transfer_mode = val;
++ break;
++ case SDHCI_COMMAND:
++ if (!msm_host->use_cdr)
++ break;
++ if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
++ (SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200) &&
++ (SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK))
++ sdhci_msm_set_cdr(host, true);
++ else
++ sdhci_msm_set_cdr(host, false);
++ break;
++ }
++ writew(val, host->ioaddr + reg);
++}
++
+ static const struct of_device_id sdhci_msm_dt_match[] = {
+ { .compatible = "qcom,sdhci-msm-v4" },
+ {},
+@@ -1107,6 +1155,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .voltage_switch = sdhci_msm_voltage_switch,
++ .write_w = sdhci_msm_write_w,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_msm_pdata = {
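sdhci_msm_set_cdr() above is a read-modify-write that skips the register write when nothing changes, and sdhci_msm_write_w() latches SDHCI_TRANSFER_MODE so the CDR decision can be made per command at SDHCI_COMMAND time. The write-only-if-changed helper, in general form (rmw_bits_relaxed() is an illustrative name):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch: set or clear @bits in the 32-bit register at @addr, but
     * only touch the bus when the value actually changes. */
    static void rmw_bits_relaxed(void __iomem *addr, u32 bits, bool set)
    {
            u32 old = readl_relaxed(addr);
            u32 val = set ? (old | bits) : (old & ~bits);

            if (val != old)
                    writel_relaxed(val, addr);
    }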
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index cf64a365362b..65c5a65af0ba 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1928,6 +1928,9 @@ static int __bond_release_one(struct net_device *bond_dev,
+ if (!bond_has_slaves(bond)) {
+ bond_set_carrier(bond);
+ eth_hw_addr_random(bond_dev);
++ bond->nest_level = SINGLE_DEPTH_NESTING;
++ } else {
++ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
+ }
+
+ unblock_netpoll_tx();
+diff --git a/drivers/of/property.c b/drivers/of/property.c
+index 264c355ba1ff..fd9b734fff33 100644
+--- a/drivers/of/property.c
++++ b/drivers/of/property.c
+@@ -810,6 +810,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
+
+ if (!of_device_is_available(remote)) {
+ pr_debug("not available for remote node\n");
++ of_node_put(remote);
+ return NULL;
+ }
+
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index b44c1bb687a2..ebc193f7f7dd 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
+
+ if (err == 0) {
+ pm_runtime_disable(dev);
+- pm_runtime_set_active(dev);
++ err = pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
++
++ /*
++ * Forcibly set runtime PM status of request queue to "active"
++ * to make sure we can again get requests from the queue
++ * (see also blk_pm_peek_request()).
++ *
++ * The resume hook will correct runtime PM status of the disk.
++ */
++ if (!err && scsi_is_sdev_device(dev)) {
++ struct scsi_device *sdev = to_scsi_device(dev);
++
++ if (sdev->request_queue->dev)
++ blk_set_runtime_active(sdev->request_queue);
++ }
+ }
+
+ return err;
+@@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
+ else
+ fn = NULL;
+
+- /*
+- * Forcibly set runtime PM status of request queue to "active" to
+- * make sure we can again get requests from the queue (see also
+- * blk_pm_peek_request()).
+- *
+- * The resume hook will correct runtime PM status of the disk.
+- */
+- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
+- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
+-
+ if (fn) {
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 39754cc90043..048fccc72e03 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
+ sp = buffer_data[0] & 0x80 ? 1 : 0;
+ buffer_data[0] &= ~0x80;
+
++ /*
++ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
++ * received mode parameter buffer before doing MODE SELECT.
++ */
++ data.device_specific = 0;
++
+ if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, &sshdr)) {
+ if (scsi_sense_valid(&sshdr))
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 83376caa571b..417b81c67fe9 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1254,7 +1254,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
+ static int tty_reopen(struct tty_struct *tty)
+ {
+ struct tty_driver *driver = tty->driver;
+- int retval;
++ struct tty_ldisc *ld;
++ int retval = 0;
+
+ if (driver->type == TTY_DRIVER_TYPE_PTY &&
+ driver->subtype == PTY_TYPE_MASTER)
+@@ -1266,14 +1267,21 @@ static int tty_reopen(struct tty_struct *tty)
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
+- tty->count++;
++ ld = tty_ldisc_ref_wait(tty);
++ if (ld) {
++ tty_ldisc_deref(ld);
++ } else {
++ retval = tty_ldisc_lock(tty, 5 * HZ);
++ if (retval)
++ return retval;
+
+- if (tty->ldisc)
+- return 0;
++ if (!tty->ldisc)
++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
++ tty_ldisc_unlock(tty);
++ }
+
+- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+- if (retval)
+- tty->count--;
++ if (retval == 0)
++ tty->count++;
+
+ return retval;
+ }
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 52b7baef4f7a..5c2cec298816 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
+ if (!locked)
+ ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
+ list_del(&waiter.list);
++
++ /*
++ * In case of timeout, wake up every reader who gave the right of way
++ * to writer. Prevent separation readers into two groups:
++ * one that helds semaphore and another that sleeps.
++ * (in case of no contention with a writer)
++ */
++ if (!locked && list_empty(&sem->write_wait))
++ __ldsem_wake_readers(sem);
++
+ raw_spin_unlock_irq(&sem->wait_lock);
+
+ __set_current_state(TASK_RUNNING);
+diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+index a3edb20ea4c3..a846d32ee653 100644
+--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
+
+ int r = 0;
+
++ memset(&p, 0, sizeof(p));
++
+ switch (cmd) {
+ case OMAPFB_SYNC_GFX:
+ DBG("ioctl SYNC_GFX\n");
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 3323eec5c164..3911c1a80219 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -116,6 +116,20 @@ void invalidate_bdev(struct block_device *bdev)
+ }
+ EXPORT_SYMBOL(invalidate_bdev);
+
++static void set_init_blocksize(struct block_device *bdev)
++{
++ unsigned bsize = bdev_logical_block_size(bdev);
++ loff_t size = i_size_read(bdev->bd_inode);
++
++ while (bsize < PAGE_SIZE) {
++ if (size & bsize)
++ break;
++ bsize <<= 1;
++ }
++ bdev->bd_block_size = bsize;
++ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
++}
++
+ int set_blocksize(struct block_device *bdev, int size)
+ {
+ /* Size must be a power of two, and between 512 and PAGE_SIZE */
+@@ -1393,18 +1407,9 @@ EXPORT_SYMBOL(check_disk_change);
+
+ void bd_set_size(struct block_device *bdev, loff_t size)
+ {
+- unsigned bsize = bdev_logical_block_size(bdev);
+-
+ inode_lock(bdev->bd_inode);
+ i_size_write(bdev->bd_inode, size);
+ inode_unlock(bdev->bd_inode);
+- while (bsize < PAGE_SIZE) {
+- if (size & bsize)
+- break;
+- bsize <<= 1;
+- }
+- bdev->bd_block_size = bsize;
+- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+ }
+ EXPORT_SYMBOL(bd_set_size);
+
+@@ -1482,8 +1487,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ }
+ }
+
+- if (!ret)
++ if (!ret) {
+ bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
++ set_init_blocksize(bdev);
++ }
+
+ /*
+ * If the device is invalidated, rescan partition
+@@ -1518,6 +1525,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ goto out_clear;
+ }
+ bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
++ set_init_blocksize(bdev);
+ }
+
+ if (bdev->bd_bdi == &noop_backing_dev_info)
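set_init_blocksize() above picks the largest power-of-two block size, capped at PAGE_SIZE, that still divides the device size, starting from the logical block size. The loop in isolation, as a small userspace-checkable sketch:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Same loop as set_init_blocksize(): grow bsize while it is below
     * PAGE_SIZE and the device size is still a multiple of it. */
    static unsigned init_blocksize(uint64_t size, unsigned bsize)
    {
        while (bsize < PAGE_SIZE) {
            if (size & bsize)
                break;
            bsize <<= 1;
        }
        return bsize;
    }

    int main(void)
    {
        assert(init_blocksize(1 << 20, 512) == PAGE_SIZE); /* 1 MiB disk */
        assert(init_blocksize(3 * 512, 512) == 512);       /* odd-sized  */
        return 0;
    }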
1461 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1462 +index 858d5812eb8f..e0bdc0c902e4 100644
1463 +--- a/fs/btrfs/disk-io.c
1464 ++++ b/fs/btrfs/disk-io.c
1465 +@@ -4115,6 +4115,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
1466 + spin_lock(&fs_info->ordered_root_lock);
1467 + }
1468 + spin_unlock(&fs_info->ordered_root_lock);
1469 ++
1470 ++ /*
1471 ++ * We need this here because if we've been flipped read-only we won't
1472 ++ * get sync() from the umount, so we need to make sure any ordered
1473 ++ * extents that haven't had their dirty pages IO start writeout yet
1474 ++ * actually get run and error out properly.
1475 ++ */
1476 ++ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1477 + }
1478 +
1479 + static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
1480 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1481 +index 09829e8d759e..909f7ea92e0b 100644
1482 +--- a/fs/btrfs/inode.c
1483 ++++ b/fs/btrfs/inode.c
1484 +@@ -3170,9 +3170,6 @@ out:
1485 + /* once for the tree */
1486 + btrfs_put_ordered_extent(ordered_extent);
1487 +
1488 +- /* Try to release some metadata so we don't get an OOM but don't wait */
1489 +- btrfs_btree_balance_dirty_nodelay(fs_info);
1490 +-
1491 + return ret;
1492 + }
1493 +
1494 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
1495 +index 9f7e546d7050..f371e03cf3bf 100644
1496 +--- a/fs/pstore/ram.c
1497 ++++ b/fs/pstore/ram.c
1498 +@@ -711,18 +711,15 @@ static int ramoops_probe(struct platform_device *pdev)
1499 + {
1500 + struct device *dev = &pdev->dev;
1501 + struct ramoops_platform_data *pdata = dev->platform_data;
1502 ++ struct ramoops_platform_data pdata_local;
1503 + struct ramoops_context *cxt = &oops_cxt;
1504 + size_t dump_mem_sz;
1505 + phys_addr_t paddr;
1506 + int err = -EINVAL;
1507 +
1508 + if (dev_of_node(dev) && !pdata) {
1509 +- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1510 +- if (!pdata) {
1511 +- pr_err("cannot allocate platform data buffer\n");
1512 +- err = -ENOMEM;
1513 +- goto fail_out;
1514 +- }
1515 ++ pdata = &pdata_local;
1516 ++ memset(pdata, 0, sizeof(*pdata));
1517 +
1518 + err = ramoops_parse_dt(pdev, pdata);
1519 + if (err < 0)
1520 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
1521 +index 6e108af21481..f33b24080b1c 100644
1522 +--- a/kernel/sched/fair.c
1523 ++++ b/kernel/sched/fair.c
1524 +@@ -4087,6 +4087,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
1525 + now = sched_clock_cpu(smp_processor_id());
1526 + cfs_b->runtime = cfs_b->quota;
1527 + cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
1528 ++ cfs_b->expires_seq++;
1529 + }
1530 +
1531 + static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
1532 +@@ -4109,6 +4110,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1533 + struct task_group *tg = cfs_rq->tg;
1534 + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
1535 + u64 amount = 0, min_amount, expires;
1536 ++ int expires_seq;
1537 +
1538 + /* note: this is a positive sum as runtime_remaining <= 0 */
1539 + min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
1540 +@@ -4125,6 +4127,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1541 + cfs_b->idle = 0;
1542 + }
1543 + }
1544 ++ expires_seq = cfs_b->expires_seq;
1545 + expires = cfs_b->runtime_expires;
1546 + raw_spin_unlock(&cfs_b->lock);
1547 +
1548 +@@ -4134,8 +4137,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1549 + * spread between our sched_clock and the one on which runtime was
1550 + * issued.
1551 + */
1552 +- if ((s64)(expires - cfs_rq->runtime_expires) > 0)
1553 ++ if (cfs_rq->expires_seq != expires_seq) {
1554 ++ cfs_rq->expires_seq = expires_seq;
1555 + cfs_rq->runtime_expires = expires;
1556 ++ }
1557 +
1558 + return cfs_rq->runtime_remaining > 0;
1559 + }
1560 +@@ -4161,12 +4166,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1561 + * has not truly expired.
1562 + *
1563 + * Fortunately we can check determine whether this the case by checking
1564 +- * whether the global deadline has advanced. It is valid to compare
1565 +- * cfs_b->runtime_expires without any locks since we only care about
1566 +- * exact equality, so a partial write will still work.
1567 ++ * whether the global deadline (cfs_b->expires_seq) has advanced.
1568 + */
1569 +-
1570 +- if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
1571 ++ if (cfs_rq->expires_seq == cfs_b->expires_seq) {
1572 + /* extend local deadline, drift is bounded above by 2 ticks */
1573 + cfs_rq->runtime_expires += TICK_NSEC;
1574 + } else {
1575 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
1576 +index b3ba6e5e99f2..452b56923c6d 100644
1577 +--- a/kernel/sched/sched.h
1578 ++++ b/kernel/sched/sched.h
1579 +@@ -281,8 +281,9 @@ struct cfs_bandwidth {
1580 + u64 quota, runtime;
1581 + s64 hierarchical_quota;
1582 + u64 runtime_expires;
1583 ++ int expires_seq;
1584 +
1585 +- int idle, period_active;
1586 ++ short idle, period_active;
1587 + struct hrtimer period_timer, slack_timer;
1588 + struct list_head throttled_cfs_rq;
1589 +
1590 +@@ -488,6 +489,7 @@ struct cfs_rq {
1591 +
1592 + #ifdef CONFIG_CFS_BANDWIDTH
1593 + int runtime_enabled;
1594 ++ int expires_seq;
1595 + u64 runtime_expires;
1596 + s64 runtime_remaining;
1597 +
1598 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
1599 +index 7582f28ab306..3f3859b8d49f 100644
1600 +--- a/net/bridge/br_netfilter_hooks.c
1601 ++++ b/net/bridge/br_netfilter_hooks.c
1602 +@@ -275,7 +275,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
1603 + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1604 + int ret;
1605 +
1606 +- if (neigh->hh.hh_len) {
1607 ++ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
1608 + neigh_hh_bridge(&neigh->hh, skb);
1609 + skb->dev = nf_bridge->physindev;
1610 + ret = br_handle_frame_finish(net, sk, skb);
1611 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1612 +index 54c7fe68040f..22e4c15a1fc3 100644
1613 +--- a/net/bridge/netfilter/ebtables.c
1614 ++++ b/net/bridge/netfilter/ebtables.c
1615 +@@ -1134,14 +1134,16 @@ static int do_replace(struct net *net, const void __user *user,
1616 + tmp.name[sizeof(tmp.name) - 1] = 0;
1617 +
1618 + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1619 +- newinfo = vmalloc(sizeof(*newinfo) + countersize);
1620 ++ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
1621 ++ PAGE_KERNEL);
1622 + if (!newinfo)
1623 + return -ENOMEM;
1624 +
1625 + if (countersize)
1626 + memset(newinfo->counters, 0, countersize);
1627 +
1628 +- newinfo->entries = vmalloc(tmp.entries_size);
1629 ++ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
1630 ++ PAGE_KERNEL);
1631 + if (!newinfo->entries) {
1632 + ret = -ENOMEM;
1633 + goto free_newinfo;
1634 +diff --git a/net/can/gw.c b/net/can/gw.c
1635 +index 73a02af4b5d7..5114b8f07fd4 100644
1636 +--- a/net/can/gw.c
1637 ++++ b/net/can/gw.c
1638 +@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
1639 + while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
1640 + (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
1641 +
1642 +- /* check for checksum updates when the CAN frame has been modified */
1643 ++ /* Has the CAN frame been modified? */
1644 + if (modidx) {
1645 +- if (gwj->mod.csumfunc.crc8)
1646 ++ /* get available space for the processed CAN frame type */
1647 ++ int max_len = nskb->len - offsetof(struct can_frame, data);
1648 ++
1649 ++ /* dlc may have changed, make sure it fits the CAN frame */
1650 ++ if (cf->can_dlc > max_len)
1651 ++ goto out_delete;
1652 ++
1653 ++ /* check for checksum updates in classic CAN length only */
1654 ++ if (gwj->mod.csumfunc.crc8) {
1655 ++ if (cf->can_dlc > 8)
1656 ++ goto out_delete;
1657 ++
1658 + (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
1659 ++ }
1660 ++
1661 ++ if (gwj->mod.csumfunc.xor) {
1662 ++ if (cf->can_dlc > 8)
1663 ++ goto out_delete;
1664 +
1665 +- if (gwj->mod.csumfunc.xor)
1666 + (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
1667 ++ }
1668 + }
1669 +
1670 + /* clear the skb timestamp if not configured the other way */
1671 +@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
1672 + gwj->dropped_frames++;
1673 + else
1674 + gwj->handled_frames++;
1675 ++
1676 ++ return;
1677 ++
1678 ++ out_delete:
1679 ++ /* delete frame due to misconfiguration */
1680 ++ gwj->deleted_frames++;
1681 ++ kfree_skb(nskb);
1682 ++ return;
1683 + }
1684 +
1685 + static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
1686 +diff --git a/net/core/filter.c b/net/core/filter.c
1687 +index d5158a10ac8f..542fd04bc44d 100644
1688 +--- a/net/core/filter.c
1689 ++++ b/net/core/filter.c
1690 +@@ -1714,18 +1714,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1691 + static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1692 + u32 flags)
1693 + {
1694 +- /* skb->mac_len is not set on normal egress */
1695 +- unsigned int mlen = skb->network_header - skb->mac_header;
1696 ++ unsigned int mlen = skb_network_offset(skb);
1697 +
1698 +- __skb_pull(skb, mlen);
1699 ++ if (mlen) {
1700 ++ __skb_pull(skb, mlen);
1701 +
1702 +- /* At ingress, the mac header has already been pulled once.
1703 +- * At egress, skb_pospull_rcsum has to be done in case that
1704 +- * the skb is originated from ingress (i.e. a forwarded skb)
1705 +- * to ensure that rcsum starts at net header.
1706 +- */
1707 +- if (!skb_at_tc_ingress(skb))
1708 +- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1709 ++ /* At ingress, the mac header has already been pulled once.
1710 ++ * At egress, skb_postpull_rcsum has to be done in case that
1711 ++ * the skb originated from ingress (i.e. a forwarded skb)
1712 ++ * to ensure that rcsum starts at net header.
1713 ++ */
1714 ++ if (!skb_at_tc_ingress(skb))
1715 ++ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1716 ++ }
1717 + skb_pop_mac_header(skb);
1718 + skb_reset_mac_len(skb);
1719 + return flags & BPF_F_INGRESS ?
1720 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
1721 +index 832d69649cb6..65313c766ab3 100644
1722 +--- a/net/core/lwt_bpf.c
1723 ++++ b/net/core/lwt_bpf.c
1724 +@@ -65,6 +65,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
1725 + lwt->name ? : "<unknown>");
1726 + ret = BPF_OK;
1727 + } else {
1728 ++ skb_reset_mac_header(skb);
1729 + ret = skb_do_redirect(skb);
1730 + if (ret == 0)
1731 + ret = BPF_REDIRECT;
1732 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1733 +index 4ef92ebc4f6d..d1081eac3b49 100644
1734 +--- a/net/ipv4/ip_sockglue.c
1735 ++++ b/net/ipv4/ip_sockglue.c
1736 +@@ -146,19 +146,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
1737 +
1738 + static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
1739 + {
1740 ++ __be16 _ports[2], *ports;
1741 + struct sockaddr_in sin;
1742 +- __be16 *ports;
1743 +- int end;
1744 +-
1745 +- end = skb_transport_offset(skb) + 4;
1746 +- if (end > 0 && !pskb_may_pull(skb, end))
1747 +- return;
1748 +
1749 + /* All current transport protocols have the port numbers in the
1750 + * first four bytes of the transport header and this function is
1751 + * written with this assumption in mind.
1752 + */
1753 +- ports = (__be16 *)skb_transport_header(skb);
1754 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
1755 ++ sizeof(_ports), &_ports);
1756 ++ if (!ports)
1757 ++ return;
1758 +
1759 + sin.sin_family = AF_INET;
1760 + sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
1761 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
1762 +index 461825e0680f..1ee3e0d2b587 100644
1763 +--- a/net/ipv6/datagram.c
1764 ++++ b/net/ipv6/datagram.c
1765 +@@ -349,6 +349,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
1766 + skb_reset_network_header(skb);
1767 + iph = ipv6_hdr(skb);
1768 + iph->daddr = fl6->daddr;
1769 ++ ip6_flow_hdr(iph, 0, 0);
1770 +
1771 + serr = SKB_EXT_ERR(skb);
1772 + serr->ee.ee_errno = err;
1773 +@@ -708,17 +709,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
1774 + }
1775 + if (np->rxopt.bits.rxorigdstaddr) {
1776 + struct sockaddr_in6 sin6;
1777 +- __be16 *ports;
1778 +- int end;
1779 ++ __be16 _ports[2], *ports;
1780 +
1781 +- end = skb_transport_offset(skb) + 4;
1782 +- if (end <= 0 || pskb_may_pull(skb, end)) {
1783 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
1784 ++ sizeof(_ports), &_ports);
1785 ++ if (ports) {
1786 + /* All current transport protocols have the port numbers in the
1787 + * first four bytes of the transport header and this function is
1788 + * written with this assumption in mind.
1789 + */
1790 +- ports = (__be16 *)skb_transport_header(skb);
1791 +-
1792 + sin6.sin6_family = AF_INET6;
1793 + sin6.sin6_addr = ipv6_hdr(skb)->daddr;
1794 + sin6.sin6_port = ports[1];
1795 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1796 +index 91a323f99d47..44a093c75567 100644
1797 +--- a/net/packet/af_packet.c
1798 ++++ b/net/packet/af_packet.c
1799 +@@ -2666,7 +2666,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1800 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
1801 + dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
1802 + if (addr && dev && saddr->sll_halen < dev->addr_len)
1803 +- goto out;
1804 ++ goto out_put;
1805 + }
1806 +
1807 + err = -ENXIO;
1808 +@@ -2866,7 +2866,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1809 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
1810 + dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1811 + if (addr && dev && saddr->sll_halen < dev->addr_len)
1812 +- goto out;
1813 ++ goto out_unlock;
1814 + }
1815 +
1816 + err = -ENXIO;
1817 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1818 +index 8002a72aae1a..7eb06fa75730 100644
1819 +--- a/net/sctp/ipv6.c
1820 ++++ b/net/sctp/ipv6.c
1821 +@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
1822 +
1823 + switch (ev) {
1824 + case NETDEV_UP:
1825 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
1826 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1827 + if (addr) {
1828 + addr->a.v6.sin6_family = AF_INET6;
1829 +- addr->a.v6.sin6_port = 0;
1830 +- addr->a.v6.sin6_flowinfo = 0;
1831 + addr->a.v6.sin6_addr = ifa->addr;
1832 + addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
1833 + addr->valid = 1;
1834 +@@ -415,7 +413,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
1835 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1836 + if (addr) {
1837 + addr->a.v6.sin6_family = AF_INET6;
1838 +- addr->a.v6.sin6_port = 0;
1839 + addr->a.v6.sin6_addr = ifp->addr;
1840 + addr->a.v6.sin6_scope_id = dev->ifindex;
1841 + addr->valid = 1;
1842 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1843 +index df22a9c352ad..cbb04d66f564 100644
1844 +--- a/net/sctp/protocol.c
1845 ++++ b/net/sctp/protocol.c
1846 +@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
1847 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1848 + if (addr) {
1849 + addr->a.v4.sin_family = AF_INET;
1850 +- addr->a.v4.sin_port = 0;
1851 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
1852 + addr->valid = 1;
1853 + INIT_LIST_HEAD(&addr->list);
1854 +@@ -782,10 +781,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
1855 +
1856 + switch (ev) {
1857 + case NETDEV_UP:
1858 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
1859 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
1860 + if (addr) {
1861 + addr->a.v4.sin_family = AF_INET;
1862 +- addr->a.v4.sin_port = 0;
1863 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
1864 + addr->valid = 1;
1865 + spin_lock_bh(&net->sctp.local_addr_lock);
1866 +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
1867 +index ea0676f199c8..da21efac80f4 100644
1868 +--- a/net/sunrpc/rpcb_clnt.c
1869 ++++ b/net/sunrpc/rpcb_clnt.c
1870 +@@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
1871 + case RPCBVERS_3:
1872 + map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
1873 + map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
1874 ++ if (!map->r_addr) {
1875 ++ status = -ENOMEM;
1876 ++ dprintk("RPC: %5u %s: no memory available\n",
1877 ++ task->tk_pid, __func__);
1878 ++ goto bailout_free_args;
1879 ++ }
1880 + map->r_owner = "";
1881 + break;
1882 + case RPCBVERS_2:
1883 +@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
1884 + rpc_put_task(child);
1885 + return;
1886 +
1887 ++bailout_free_args:
1888 ++ kfree(map);
1889 + bailout_release_client:
1890 + rpc_release_client(rpcb_clnt);
1891 + bailout_nofree:
1892 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
1893 +index e48f0b2c01b9..73895daf8943 100644
1894 +--- a/net/tipc/netlink_compat.c
1895 ++++ b/net/tipc/netlink_compat.c
1896 +@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
1897 + return limit;
1898 + }
1899 +
1900 ++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
1901 ++{
1902 ++ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
1903 ++}
1904 ++
1905 + static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
1906 + {
1907 + struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
1908 +@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
1909 + return buf;
1910 + }
1911 +
1912 ++static inline bool string_is_valid(char *s, int len)
1913 ++{
1914 ++ return memchr(s, '\0', len) ? true : false;
1915 ++}
1916 ++
1917 + static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
1918 + struct tipc_nl_compat_msg *msg,
1919 + struct sk_buff *arg)
1920 +@@ -370,6 +380,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
1921 + struct nlattr *prop;
1922 + struct nlattr *bearer;
1923 + struct tipc_bearer_config *b;
1924 ++ int len;
1925 +
1926 + b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
1927 +
1928 +@@ -377,6 +388,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
1929 + if (!bearer)
1930 + return -EMSGSIZE;
1931 +
1932 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
1933 ++ if (!string_is_valid(b->name, len))
1934 ++ return -EINVAL;
1935 ++
1936 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
1937 + return -EMSGSIZE;
1938 +
1939 +@@ -402,6 +417,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
1940 + {
1941 + char *name;
1942 + struct nlattr *bearer;
1943 ++ int len;
1944 +
1945 + name = (char *)TLV_DATA(msg->req);
1946 +
1947 +@@ -409,6 +425,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
1948 + if (!bearer)
1949 + return -EMSGSIZE;
1950 +
1951 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
1952 ++ if (!string_is_valid(name, len))
1953 ++ return -EINVAL;
1954 ++
1955 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
1956 + return -EMSGSIZE;
1957 +
1958 +@@ -469,6 +489,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
1959 + struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
1960 + struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
1961 + int err;
1962 ++ int len;
1963 +
1964 + if (!attrs[TIPC_NLA_LINK])
1965 + return -EINVAL;
1966 +@@ -495,6 +516,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
1967 + return err;
1968 +
1969 + name = (char *)TLV_DATA(msg->req);
1970 ++
1971 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
1972 ++ if (!string_is_valid(name, len))
1973 ++ return -EINVAL;
1974 ++
1975 + if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
1976 + return 0;
1977 +
1978 +@@ -635,6 +661,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
1979 + struct nlattr *prop;
1980 + struct nlattr *media;
1981 + struct tipc_link_config *lc;
1982 ++ int len;
1983 +
1984 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
1985 +
1986 +@@ -642,6 +669,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
1987 + if (!media)
1988 + return -EMSGSIZE;
1989 +
1990 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
1991 ++ if (!string_is_valid(lc->name, len))
1992 ++ return -EINVAL;
1993 ++
1994 + if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
1995 + return -EMSGSIZE;
1996 +
1997 +@@ -662,6 +693,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
1998 + struct nlattr *prop;
1999 + struct nlattr *bearer;
2000 + struct tipc_link_config *lc;
2001 ++ int len;
2002 +
2003 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
2004 +
2005 +@@ -669,6 +701,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
2006 + if (!bearer)
2007 + return -EMSGSIZE;
2008 +
2009 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
2010 ++ if (!string_is_valid(lc->name, len))
2011 ++ return -EINVAL;
2012 ++
2013 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
2014 + return -EMSGSIZE;
2015 +
2016 +@@ -717,9 +753,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
2017 + struct tipc_link_config *lc;
2018 + struct tipc_bearer *bearer;
2019 + struct tipc_media *media;
2020 ++ int len;
2021 +
2022 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
2023 +
2024 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
2025 ++ if (!string_is_valid(lc->name, len))
2026 ++ return -EINVAL;
2027 ++
2028 + media = tipc_media_find(lc->name);
2029 + if (media) {
2030 + cmd->doit = &tipc_nl_media_set;
2031 +@@ -741,6 +782,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
2032 + {
2033 + char *name;
2034 + struct nlattr *link;
2035 ++ int len;
2036 +
2037 + name = (char *)TLV_DATA(msg->req);
2038 +
2039 +@@ -748,6 +790,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
2040 + if (!link)
2041 + return -EMSGSIZE;
2042 +
2043 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
2044 ++ if (!string_is_valid(name, len))
2045 ++ return -EINVAL;
2046 ++
2047 + if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
2048 + return -EMSGSIZE;
2049 +
2050 +@@ -769,6 +815,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
2051 + };
2052 +
2053 + ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
2054 ++ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
2055 ++ return -EINVAL;
2056 +
2057 + depth = ntohl(ntq->depth);
2058 +
2059 +@@ -1192,7 +1240,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
2060 + }
2061 +
2062 + len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
2063 +- if (len && !TLV_OK(msg.req, len)) {
2064 ++ if (!len || !TLV_OK(msg.req, len)) {
2065 + msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
2066 + err = -EOPNOTSUPP;
2067 + goto send;
2068 +diff --git a/security/security.c b/security/security.c
2069 +index 95a1a0f52880..4fbe4e495c02 100644
2070 +--- a/security/security.c
2071 ++++ b/security/security.c
2072 +@@ -993,6 +993,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
2073 +
2074 + void security_cred_free(struct cred *cred)
2075 + {
2076 ++ /*
2077 ++ * There is a failure case in prepare_creds() that
2078 ++ * may result in a call here with ->security being NULL.
2079 ++ */
2080 ++ if (unlikely(cred->security == NULL))
2081 ++ return;
2082 ++
2083 + call_void_hook(cred_free, cred);
2084 + }
2085 +
2086 +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
2087 +index ffeb644bfecd..524068d71bc1 100644
2088 +--- a/security/selinux/ss/policydb.c
2089 ++++ b/security/selinux/ss/policydb.c
2090 +@@ -730,7 +730,8 @@ static int sens_destroy(void *key, void *datum, void *p)
2091 + kfree(key);
2092 + if (datum) {
2093 + levdatum = datum;
2094 +- ebitmap_destroy(&levdatum->level->cat);
2095 ++ if (levdatum->level)
2096 ++ ebitmap_destroy(&levdatum->level->cat);
2097 + kfree(levdatum->level);
2098 + }
2099 + kfree(datum);
2100 +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
2101 +index 8298e094f4f7..7d5541c6a225 100644
2102 +--- a/security/yama/yama_lsm.c
2103 ++++ b/security/yama/yama_lsm.c
2104 +@@ -373,7 +373,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
2105 + break;
2106 + case YAMA_SCOPE_RELATIONAL:
2107 + rcu_read_lock();
2108 +- if (!task_is_descendant(current, child) &&
2109 ++ if (!pid_alive(child))
2110 ++ rc = -EPERM;
2111 ++ if (!rc && !task_is_descendant(current, child) &&
2112 + !ptracer_exception_found(current, child) &&
2113 + !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
2114 + rc = -EPERM;