From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 22 Jan 2019 23:06:49
Message-Id: 1548198370.4c1637502cb24d5975265fabab265c23bb5cbb3e.mpagano@gentoo
commit: 4c1637502cb24d5975265fabab265c23bb5cbb3e
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 22 23:06:10 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan 22 23:06:10 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4c163750

proj/linux-patches: Linux patch 4.19.17

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README | 4 +
 1016_linux-4.19.17.patch | 3869 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3873 insertions(+)

diff --git a/0000_README b/0000_README
index 3ddd86b..f7061f2 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-4.19.16.patch
 From: http://www.kernel.org
 Desc: Linux 4.19.16
 
+Patch: 1016_linux-4.19.17.patch
+From: http://www.kernel.org
+Desc: Linux 4.19.17
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
diff --git a/1016_linux-4.19.17.patch b/1016_linux-4.19.17.patch
new file mode 100644
index 0000000..592d0e3
--- /dev/null
+++ b/1016_linux-4.19.17.patch
@@ -0,0 +1,3869 @@
+diff --git a/Makefile b/Makefile
+index e8cb4875b86d..4b0bce87a36b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+index 176e38d54872..ec0da5b3d7fd 100644
+--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+@@ -27,6 +27,23 @@
+ method = "smc";
+ };
+
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ /*
++ * This area matches the mapping done with a
++ * mainline U-Boot, and should be updated by the
++ * bootloader.
++ */
++
++ psci-area@4000000 {
++ reg = <0x0 0x4000000 0x0 0x200000>;
++ no-map;
++ };
++ };
++
+ ap806 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 95e3fa7ded8b..8b284cbf8162 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -24,6 +24,8 @@
+
+ /* Hyp Configuration Register (HCR) bits */
+ #define HCR_FWB (UL(1) << 46)
++#define HCR_API (UL(1) << 41)
++#define HCR_APK (UL(1) << 40)
+ #define HCR_TEA (UL(1) << 37)
+ #define HCR_TERR (UL(1) << 36)
+ #define HCR_TLOR (UL(1) << 35)
+@@ -87,6 +89,7 @@
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
+ HCR_FMO | HCR_IMO)
+ #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+
+ /* TCR_EL2 Registers bits */
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index b0853069702f..651a06b1980f 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -494,10 +494,9 @@ ENTRY(el2_setup)
+ #endif
+
+ /* Hyp configuration. */
+- mov x0, #HCR_RW // 64-bit EL1
++ mov_q x0, HCR_HOST_NVHE_FLAGS
+ cbz x2, set_hcr
+- orr x0, x0, #HCR_TGE // Enable Host Extensions
+- orr x0, x0, #HCR_E2H
++ mov_q x0, HCR_HOST_VHE_FLAGS
+ set_hcr:
+ msr hcr_el2, x0
+ isb
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index f0e6ab8abe9c..ba6b41790fcd 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -14,6 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/types.h>
+
++#include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+ #include <asm/kernel-pgtable.h>
+ #include <asm/memory.h>
+@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
+ return ret;
+ }
+
+-static __init const u8 *get_cmdline(void *fdt)
++static __init const u8 *kaslr_get_cmdline(void *fdt)
+ {
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+
+@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+- cmdline = get_cmdline(fdt);
++ cmdline = kaslr_get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ return 0;
+@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
++ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
++
+ return offset;
+ }
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index ca46153d7915..a1c32c1f2267 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
+ mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+
+ write_sysreg(mdcr_el2, mdcr_el2);
+- write_sysreg(HCR_RW, hcr_el2);
++ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+ write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+ }
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 35511999156a..154b811d5894 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -3149,6 +3149,7 @@ config MIPS32_O32
+ config MIPS32_N32
+ bool "Kernel support for n32 binaries"
+ depends on 64BIT
++ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
+diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
+index 6054d49e608e..fe3773539eff 100644
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
+ pm_power_off = bcm47xx_machine_halt;
+ }
+
++#ifdef CONFIG_BCM47XX_BCMA
++static struct device * __init bcm47xx_setup_device(void)
++{
++ struct device *dev;
++ int err;
++
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
++ if (!dev)
++ return NULL;
++
++ err = dev_set_name(dev, "bcm47xx_soc");
++ if (err) {
++ pr_err("Failed to set SoC device name: %d\n", err);
++ kfree(dev);
++ return NULL;
++ }
++
++ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ if (err)
++ pr_err("Failed to set SoC DMA mask: %d\n", err);
++
++ return dev;
++}
++#endif
++
+ /*
+ * This finishes bus initialization doing things that were not possible without
+ * kmalloc. Make sure to call it late enough (after mm_init).
+@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
+ int err;
+
++ bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
++ if (!bcm47xx_bus.bcma.dev)
++ panic("Failed to setup SoC device\n");
++
+ err = bcma_host_soc_init(&bcm47xx_bus.bcma);
+ if (err)
+ panic("Failed to initialize BCMA bus (err %d)", err);
+@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
+ #endif
+ #ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
++ if (device_register(bcm47xx_bus.bcma.dev))
++ pr_err("Failed to register SoC device\n");
+ bcma_bus_register(&bcm47xx_bus.bcma.bus);
+ break;
+ #endif
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index f0bc3312ed11..c4ef1c31e0c4 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
+ .irq_set_type = ltq_eiu_settype,
+ };
+
+-static void ltq_hw_irqdispatch(int module)
++static void ltq_hw_irq_handler(struct irq_desc *desc)
+ {
++ int module = irq_desc_get_irq(desc) - 2;
+ u32 irq;
++ int hwirq;
+
+ irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
+ if (irq == 0)
+@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
+ * other bits might be bogus
+ */
+ irq = __fls(irq);
+- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
++ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
++ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+ /* if this is a EBU irq, we need to ack it or get a deadlock */
+ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
+@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
+ LTQ_EBU_PCC_ISTAT);
+ }
+
+-#define DEFINE_HWx_IRQDISPATCH(x) \
+- static void ltq_hw ## x ## _irqdispatch(void) \
+- { \
+- ltq_hw_irqdispatch(x); \
+- }
+-DEFINE_HWx_IRQDISPATCH(0)
+-DEFINE_HWx_IRQDISPATCH(1)
+-DEFINE_HWx_IRQDISPATCH(2)
+-DEFINE_HWx_IRQDISPATCH(3)
+-DEFINE_HWx_IRQDISPATCH(4)
+-
+-#if MIPS_CPU_TIMER_IRQ == 7
+-static void ltq_hw5_irqdispatch(void)
+-{
+- do_IRQ(MIPS_CPU_TIMER_IRQ);
+-}
+-#else
+-DEFINE_HWx_IRQDISPATCH(5)
+-#endif
+-
+-static void ltq_hw_irq_handler(struct irq_desc *desc)
+-{
+- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
+-}
+-
+-asmlinkage void plat_irq_dispatch(void)
+-{
+- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+- int irq;
+-
+- if (!pending) {
+- spurious_interrupt();
+- return;
+- }
+-
+- pending >>= CAUSEB_IP;
+- while (pending) {
+- irq = fls(pending) - 1;
+- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+- pending &= ~BIT(irq);
+- }
+-}
+-
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+ {
+ struct irq_chip *chip = &ltq_irq_type;
+@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ for (i = 0; i < MAX_IM; i++)
+ irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+
+- if (cpu_has_vint) {
+- pr_info("Setting up vectored interrupts\n");
+- set_vi_handler(2, ltq_hw0_irqdispatch);
+- set_vi_handler(3, ltq_hw1_irqdispatch);
+- set_vi_handler(4, ltq_hw2_irqdispatch);
+- set_vi_handler(5, ltq_hw3_irqdispatch);
+- set_vi_handler(6, ltq_hw4_irqdispatch);
+- set_vi_handler(7, ltq_hw5_irqdispatch);
+- }
+-
+ ltq_domain = irq_domain_add_linear(node,
+ (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
+ &irq_domain_ops, 0);
+
+-#ifndef CONFIG_MIPS_MT_SMP
+- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
+- IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
+-#else
+- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
+- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
+-#endif
+-
+ /* tell oprofile which irq to use */
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
+
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
+index 2a5bb849b10e..288b58b00dc8 100644
+--- a/arch/mips/pci/msi-octeon.c
++++ b/arch/mips/pci/msi-octeon.c
+@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
+ int irq;
+ struct irq_chip *msi;
+
+- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
++ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
++ return 0;
++ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
+ msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
+ msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index c84f1e039d84..01dcccf9185f 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -361,8 +361,6 @@ void xen_timer_resume(void)
+ {
+ int cpu;
+
+- pvclock_resume();
+-
+ if (xen_clockevent != &xen_vcpuop_clockevent)
+ return;
+
+@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
+ };
+
+ static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
++static u64 xen_clock_value_saved;
+
+ void xen_save_time_memory_area(void)
+ {
+ struct vcpu_register_time_memory_area t;
+ int ret;
+
++ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
++
+ if (!xen_clock)
+ return;
+
+@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
+ int ret;
+
+ if (!xen_clock)
+- return;
++ goto out;
+
+ t.addr.v = &xen_clock->pvti;
+
+@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
+ if (ret != 0)
+ pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
+ ret);
++
++out:
++ /* Need pvclock_resume() before using xen_clocksource_read(). */
++ pvclock_resume();
++ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
+ }
+
+ static void xen_setup_vsyscall_time_info(void)
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index d3d14e81fb12..5f8db5c5140f 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -249,9 +249,10 @@ struct device_type part_type = {
+ .uevent = part_uevent,
+ };
+
+-static void delete_partition_rcu_cb(struct rcu_head *head)
++static void delete_partition_work_fn(struct work_struct *work)
+ {
+- struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
++ struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
++ rcu_work);
+
+ part->start_sect = 0;
+ part->nr_sects = 0;
+@@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
+ void __delete_partition(struct percpu_ref *ref)
+ {
+ struct hd_struct *part = container_of(ref, struct hd_struct, ref);
+- call_rcu(&part->rcu_head, delete_partition_rcu_cb);
++ INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
++ queue_rcu_work(system_wq, &part->rcu_work);
+ }
+
+ /*
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 4fa8d40d947b..3ee10fc25aff 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ return -EINVAL;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ return -EINVAL;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
++
++ /*
++ * RTA_OK() didn't align the rtattr's payload when validating that it
++ * fits in the buffer. Yet, the keys should start on the next 4-byte
++ * aligned boundary. To avoid confusion, require that the rtattr
++ * payload be exactly the param struct, which has a 4-byte aligned size.
++ */
++ if (RTA_PAYLOAD(rta) != sizeof(*param))
+ return -EINVAL;
++ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
+
+ param = RTA_DATA(rta);
+ keys->enckeylen = be32_to_cpu(param->enckeylen);
+
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
++ key += rta->rta_len;
++ keylen -= rta->rta_len;
+
+ if (keylen < keys->enckeylen)
+ return -EINVAL;
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c
+index 50b804747e20..4eff4be6bd12 100644
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+ struct aead_request *req = areq->data;
+
+ err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+- aead_request_complete(req, err);
++ authenc_esn_request_complete(req, err);
+ }
+
+ static int crypto_authenc_esn_decrypt(struct aead_request *req)
+diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
+index 9a5c60f08aad..c0cf87ae7ef6 100644
+--- a/crypto/sm3_generic.c
++++ b/crypto/sm3_generic.c
+@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
+
+ for (i = 0; i <= 63; i++) {
+
+- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
++ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
+
+ ss2 = ss1 ^ rol32(a, 12);
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index ea9debf59b22..c9c2bcc36e26 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -83,7 +83,7 @@
+ #include <linux/uaccess.h>
+
+ static DEFINE_IDR(loop_index_idr);
+-static DEFINE_MUTEX(loop_index_mutex);
++static DEFINE_MUTEX(loop_ctl_mutex);
+
+ static int max_part;
+ static int part_shift;
+@@ -631,18 +631,7 @@ static void loop_reread_partitions(struct loop_device *lo,
+ {
+ int rc;
+
+- /*
+- * bd_mutex has been held already in release path, so don't
+- * acquire it if this function is called in such case.
+- *
+- * If the reread partition isn't from release path, lo_refcnt
+- * must be at least one and it can only become zero when the
+- * current holder is released.
+- */
+- if (!atomic_read(&lo->lo_refcnt))
+- rc = __blkdev_reread_part(bdev);
+- else
+- rc = blkdev_reread_part(bdev);
++ rc = blkdev_reread_part(bdev);
+ if (rc)
+ pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
+ __func__, lo->lo_number, lo->lo_file_name, rc);
+@@ -689,26 +678,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
+ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ unsigned int arg)
+ {
+- struct file *file, *old_file;
++ struct file *file = NULL, *old_file;
+ int error;
++ bool partscan;
+
++ error = mutex_lock_killable(&loop_ctl_mutex);
++ if (error)
++ return error;
+ error = -ENXIO;
+ if (lo->lo_state != Lo_bound)
+- goto out;
++ goto out_err;
+
+ /* the loop device has to be read-only */
+ error = -EINVAL;
+ if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
+- goto out;
++ goto out_err;
+
+ error = -EBADF;
+ file = fget(arg);
+ if (!file)
+- goto out;
++ goto out_err;
+
+ error = loop_validate_file(file, bdev);
+ if (error)
+- goto out_putf;
++ goto out_err;
+
+ old_file = lo->lo_backing_file;
+
+@@ -716,7 +709,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+
+ /* size of the new backing store needs to be the same */
+ if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+- goto out_putf;
++ goto out_err;
+
+ /* and ... switch */
+ blk_mq_freeze_queue(lo->lo_queue);
+@@ -727,15 +720,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
+ lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_update_dio(lo);
+ blk_mq_unfreeze_queue(lo->lo_queue);
+-
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
++ mutex_unlock(&loop_ctl_mutex);
++ /*
++ * We must drop file reference outside of loop_ctl_mutex as dropping
++ * the file ref can take bd_mutex which creates circular locking
++ * dependency.
++ */
+ fput(old_file);
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
++ if (partscan)
+ loop_reread_partitions(lo, bdev);
+ return 0;
+
+- out_putf:
+- fput(file);
+- out:
++out_err:
++ mutex_unlock(&loop_ctl_mutex);
++ if (file)
++ fput(file);
+ return error;
+ }
+
+@@ -910,6 +910,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ int lo_flags = 0;
+ int error;
+ loff_t size;
++ bool partscan;
+
+ /* This is safe, since we have a reference from open(). */
+ __module_get(THIS_MODULE);
+@@ -919,13 +920,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ if (!file)
+ goto out;
+
++ error = mutex_lock_killable(&loop_ctl_mutex);
++ if (error)
++ goto out_putf;
++
+ error = -EBUSY;
+ if (lo->lo_state != Lo_unbound)
+- goto out_putf;
++ goto out_unlock;
+
+ error = loop_validate_file(file, bdev);
+ if (error)
+- goto out_putf;
++ goto out_unlock;
+
+ mapping = file->f_mapping;
+ inode = mapping->host;
+@@ -937,10 +942,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ error = -EFBIG;
+ size = get_loop_size(lo, file);
+ if ((loff_t)(sector_t)size != size)
+- goto out_putf;
++ goto out_unlock;
+ error = loop_prepare_queue(lo);
+ if (error)
+- goto out_putf;
++ goto out_unlock;
+
+ error = 0;
+
+@@ -972,18 +977,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ lo->lo_state = Lo_bound;
+ if (part_shift)
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+- loop_reread_partitions(lo, bdev);
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+
+ /* Grab the block_device to prevent its destruction after we
+- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
++ * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
+ */
+ bdgrab(bdev);
++ mutex_unlock(&loop_ctl_mutex);
++ if (partscan)
++ loop_reread_partitions(lo, bdev);
+ return 0;
+
+- out_putf:
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
++out_putf:
+ fput(file);
+- out:
++out:
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return error;
+@@ -1026,39 +1035,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
+ return err;
+ }
+
+-static int loop_clr_fd(struct loop_device *lo)
++static int __loop_clr_fd(struct loop_device *lo, bool release)
+ {
+- struct file *filp = lo->lo_backing_file;
++ struct file *filp = NULL;
+ gfp_t gfp = lo->old_gfp_mask;
+ struct block_device *bdev = lo->lo_device;
++ int err = 0;
++ bool partscan = false;
++ int lo_number;
+
+- if (lo->lo_state != Lo_bound)
+- return -ENXIO;
+-
+- /*
+- * If we've explicitly asked to tear down the loop device,
+- * and it has an elevated reference count, set it for auto-teardown when
+- * the last reference goes away. This stops $!~#$@ udev from
+- * preventing teardown because it decided that it needs to run blkid on
+- * the loopback device whenever they appear. xfstests is notorious for
+- * failing tests because blkid via udev races with a losetup
+- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+- * command to fail with EBUSY.
+- */
+- if (atomic_read(&lo->lo_refcnt) > 1) {
+- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+- mutex_unlock(&lo->lo_ctl_mutex);
+- return 0;
++ mutex_lock(&loop_ctl_mutex);
++ if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
++ err = -ENXIO;
++ goto out_unlock;
+ }
+
+- if (filp == NULL)
+- return -EINVAL;
++ filp = lo->lo_backing_file;
++ if (filp == NULL) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
+
+ /* freeze request queue during the transition */
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ spin_lock_irq(&lo->lo_lock);
+- lo->lo_state = Lo_rundown;
+ lo->lo_backing_file = NULL;
+ spin_unlock_irq(&lo->lo_lock);
+
+@@ -1094,21 +1095,73 @@ static int loop_clr_fd(struct loop_device *lo)
+ module_put(THIS_MODULE);
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
+- loop_reread_partitions(lo, bdev);
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
++ lo_number = lo->lo_number;
+ lo->lo_flags = 0;
+ if (!part_shift)
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ loop_unprepare_queue(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
++ if (partscan) {
++ /*
++ * bd_mutex has been held already in release path, so don't
++ * acquire it if this function is called in such case.
++ *
++ * If the reread partition isn't from release path, lo_refcnt
++ * must be at least one and it can only become zero when the
++ * current holder is released.
++ */
++ if (release)
++ err = __blkdev_reread_part(bdev);
++ else
++ err = blkdev_reread_part(bdev);
++ pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
++ __func__, lo_number, err);
++ /* Device is gone, no point in returning error */
++ err = 0;
++ }
+ /*
+- * Need not hold lo_ctl_mutex to fput backing file.
+- * Calling fput holding lo_ctl_mutex triggers a circular
++ * Need not hold loop_ctl_mutex to fput backing file.
++ * Calling fput holding loop_ctl_mutex triggers a circular
+ * lock dependency possibility warning as fput can take
+- * bd_mutex which is usually taken before lo_ctl_mutex.
++ * bd_mutex which is usually taken before loop_ctl_mutex.
+ */
+- fput(filp);
+- return 0;
++ if (filp)
++ fput(filp);
++ return err;
++}
++
++static int loop_clr_fd(struct loop_device *lo)
++{
++ int err;
++
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
++ if (lo->lo_state != Lo_bound) {
++ mutex_unlock(&loop_ctl_mutex);
++ return -ENXIO;
++ }
++ /*
++ * If we've explicitly asked to tear down the loop device,
++ * and it has an elevated reference count, set it for auto-teardown when
++ * the last reference goes away. This stops $!~#$@ udev from
++ * preventing teardown because it decided that it needs to run blkid on
++ * the loopback device whenever they appear. xfstests is notorious for
++ * failing tests because blkid via udev races with a losetup
++ * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
++ * command to fail with EBUSY.
++ */
++ if (atomic_read(&lo->lo_refcnt) > 1) {
++ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
++ mutex_unlock(&loop_ctl_mutex);
++ return 0;
++ }
++ lo->lo_state = Lo_rundown;
++ mutex_unlock(&loop_ctl_mutex);
++
++ return __loop_clr_fd(lo, false);
+ }
+
+ static int
+@@ -1117,47 +1170,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ int err;
+ struct loop_func_table *xfer;
+ kuid_t uid = current_uid();
++ struct block_device *bdev;
++ bool partscan = false;
+
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
+ if (lo->lo_encrypt_key_size &&
+ !uid_eq(lo->lo_key_owner, uid) &&
+- !capable(CAP_SYS_ADMIN))
+- return -EPERM;
+- if (lo->lo_state != Lo_bound)
+- return -ENXIO;
+- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+- return -EINVAL;
++ !capable(CAP_SYS_ADMIN)) {
++ err = -EPERM;
++ goto out_unlock;
++ }
++ if (lo->lo_state != Lo_bound) {
++ err = -ENXIO;
++ goto out_unlock;
++ }
++ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
++
++ if (lo->lo_offset != info->lo_offset ||
++ lo->lo_sizelimit != info->lo_sizelimit) {
++ sync_blockdev(lo->lo_device);
++ kill_bdev(lo->lo_device);
++ }
+
+ /* I/O need to be drained during transfer transition */
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ err = loop_release_xfer(lo);
+ if (err)
+- goto exit;
++ goto out_unfreeze;
+
+ if (info->lo_encrypt_type) {
+ unsigned int type = info->lo_encrypt_type;
+
+ if (type >= MAX_LO_CRYPT) {
+ err = -EINVAL;
+- goto exit;
++ goto out_unfreeze;
+ }
+ xfer = xfer_funcs[type];
+ if (xfer == NULL) {
+ err = -EINVAL;
+- goto exit;
++ goto out_unfreeze;
+ }
+ } else
+ xfer = NULL;
+
+ err = loop_init_xfer(lo, xfer, info);
+ if (err)
+- goto exit;
++ goto out_unfreeze;
+
+ if (lo->lo_offset != info->lo_offset ||
+ lo->lo_sizelimit != info->lo_sizelimit) {
++ /* kill_bdev should have truncated all the pages */
++ if (lo->lo_device->bd_inode->i_mapping->nrpages) {
++ err = -EAGAIN;
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
++ __func__, lo->lo_number, lo->lo_file_name,
++ lo->lo_device->bd_inode->i_mapping->nrpages);
++ goto out_unfreeze;
++ }
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+ err = -EFBIG;
+- goto exit;
++ goto out_unfreeze;
+ }
+ }
+
+@@ -1189,15 +1267,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ /* update dio if lo_offset or transfer is changed */
+ __loop_update_dio(lo, lo->use_dio);
+
+- exit:
++out_unfreeze:
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+ if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
+ !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+- loop_reread_partitions(lo, lo->lo_device);
++ bdev = lo->lo_device;
++ partscan = true;
+ }
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
++ if (partscan)
++ loop_reread_partitions(lo, bdev);
+
+ return err;
+ }
+@@ -1205,12 +1288,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ static int
+ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ {
+- struct file *file;
++ struct path path;
+ struct kstat stat;
+ int ret;
+
++ ret = mutex_lock_killable(&loop_ctl_mutex);
++ if (ret)
++ return ret;
+ if (lo->lo_state != Lo_bound) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return -ENXIO;
+ }
+
+@@ -1229,17 +1315,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ lo->lo_encrypt_key_size);
+ }
+
+- /* Drop lo_ctl_mutex while we call into the filesystem. */
+- file = get_file(lo->lo_backing_file);
+- mutex_unlock(&lo->lo_ctl_mutex);
+- ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
+- AT_STATX_SYNC_AS_STAT);
++ /* Drop loop_ctl_mutex while we call into the filesystem. */
++ path = lo->lo_backing_file->f_path;
++ path_get(&path);
++ mutex_unlock(&loop_ctl_mutex);
++ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
+ if (!ret) {
+ info->lo_device = huge_encode_dev(stat.dev);
+ info->lo_inode = stat.ino;
+ info->lo_rdevice = huge_encode_dev(stat.rdev);
+ }
+- fput(file);
++ path_put(&path);
+ return ret;
+ }
+
+@@ -1323,10 +1409,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ struct loop_info64 info64;
+ int err;
+
+- if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ if (!arg)
+ return -EINVAL;
+- }
+ err = loop_get_status(lo, &info64);
+ if (!err)
+ err = loop_info64_to_old(&info64, &info);
+@@ -1341,10 +1425,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+ struct loop_info64 info64;
+ int err;
+
+- if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ if (!arg)
+ return -EINVAL;
+- }
+ err = loop_get_status(lo, &info64);
+ if (!err && copy_to_user(arg, &info64, sizeof(info64)))
+ err = -EFAULT;
+@@ -1376,22 +1458,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
+
+ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+ {
++ int err = 0;
++
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
++ if (lo->lo_queue->limits.logical_block_size != arg) {
++ sync_blockdev(lo->lo_device);
++ kill_bdev(lo->lo_device);
++ }
++
+ blk_mq_freeze_queue(lo->lo_queue);
+
++ /* kill_bdev should have truncated all the pages */
++ if (lo->lo_queue->limits.logical_block_size != arg &&
++ lo->lo_device->bd_inode->i_mapping->nrpages) {
++ err = -EAGAIN;
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
++ __func__, lo->lo_number, lo->lo_file_name,
++ lo->lo_device->bd_inode->i_mapping->nrpages);
++ goto out_unfreeze;
++ }
++
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ blk_queue_physical_block_size(lo->lo_queue, arg);
+ blk_queue_io_min(lo->lo_queue, arg);
+ loop_update_dio(lo);
+-
++out_unfreeze:
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+- return 0;
++ return err;
++}
++
++static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
++ unsigned long arg)
++{
++ int err;
++
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
++ switch (cmd) {
++ case LOOP_SET_CAPACITY:
++ err = loop_set_capacity(lo);
++ break;
++ case LOOP_SET_DIRECT_IO:
++ err = loop_set_dio(lo, arg);
++ break;
++ case LOOP_SET_BLOCK_SIZE:
++ err = loop_set_block_size(lo, arg);
++ break;
++ default:
++ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
++ }
++ mutex_unlock(&loop_ctl_mutex);
++ return err;
+ }
+
+ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+@@ -1400,64 +1524,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ struct loop_device *lo = bdev->bd_disk->private_data;
+ int err;
+
+- err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
+- if (err)
+- goto out_unlocked;
+-
+ switch (cmd) {
+ case LOOP_SET_FD:
+- err = loop_set_fd(lo, mode, bdev, arg);
+- break;
++ return loop_set_fd(lo, mode, bdev, arg);
+ case LOOP_CHANGE_FD:
+- err = loop_change_fd(lo, bdev, arg);
+- break;
++ return loop_change_fd(lo, bdev, arg);
+ case LOOP_CLR_FD:
+- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
+- err = loop_clr_fd(lo);
+- if (!err)
+- goto out_unlocked;
+- break;
++ return loop_clr_fd(lo);
+ case LOOP_SET_STATUS:
+ err = -EPERM;
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
++ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
+ err = loop_set_status_old(lo,
+ (struct loop_info __user *)arg);
++ }
+ break;
+ case LOOP_GET_STATUS:
+- err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
+- goto out_unlocked;
++ return loop_get_status_old(lo, (struct loop_info __user *) arg);
+ case LOOP_SET_STATUS64:
+ err = -EPERM;
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
++ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
+ err = loop_set_status64(lo,
+ (struct loop_info64 __user *) arg);
++ }
+ break;
+ case LOOP_GET_STATUS64:
+- err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
+- goto out_unlocked;
++ return loop_get_status64(lo, (struct loop_info64 __user *) arg);
+ case LOOP_SET_CAPACITY:
+- err = -EPERM;
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+- err = loop_set_capacity(lo);
+- break;
+ case LOOP_SET_DIRECT_IO:
+- err = -EPERM;
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+- err = loop_set_dio(lo, arg);
+- break;
+ case LOOP_SET_BLOCK_SIZE:
+- err = -EPERM;
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+- err = loop_set_block_size(lo, arg);
+- break;
++ if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ /* Fall through */
+ default:
+- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
++ err = lo_simple_ioctl(lo, cmd, arg);
++ break;
+ }
+- mutex_unlock(&lo->lo_ctl_mutex);
+
+-out_unlocked:
+ return err;
+ }
+
+@@ -1571,10 +1673,8 @@ loop_get_status_compat(struct loop_device *lo,
+ struct loop_info64 info64;
+ int err;
+
+- if (!arg) {
+- mutex_unlock(&lo->lo_ctl_mutex);
++ if (!arg)
+ return -EINVAL;
+- }
+ err = loop_get_status(lo, &info64);
+ if (!err)
+ err = loop_info64_to_compat(&info64, arg);
+@@ -1589,20 +1689,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+
+ switch(cmd) {
+ case LOOP_SET_STATUS:
+- err = mutex_lock_killable(&lo->lo_ctl_mutex);
+- if (!err) {
+- err = loop_set_status_compat(lo,
+- (const struct compat_loop_info __user *)arg);
+- mutex_unlock(&lo->lo_ctl_mutex);
+- }
++ err = loop_set_status_compat(lo,
++ (const struct compat_loop_info __user *)arg);
+ break;
+ case LOOP_GET_STATUS:
+- err = mutex_lock_killable(&lo->lo_ctl_mutex);
+- if (!err) {
+- err = loop_get_status_compat(lo,
+- (struct compat_loop_info __user *)arg);
+- /* loop_get_status() unlocks lo_ctl_mutex */
+- }
++ err = loop_get_status_compat(lo,
++ (struct compat_loop_info __user *)arg);
+ break;
+ case LOOP_SET_CAPACITY:
+ case LOOP_CLR_FD:
+@@ -1626,9 +1718,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ static int lo_open(struct block_device *bdev, fmode_t mode)
+ {
+ struct loop_device *lo;
+- int err = 0;
++ int err;
+
+- mutex_lock(&loop_index_mutex);
++ err = mutex_lock_killable(&loop_ctl_mutex);
++ if (err)
++ return err;
+ lo = bdev->bd_disk->private_data;
+ if (!lo) {
+ err = -ENXIO;
+@@ -1637,26 +1731,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+
+ atomic_inc(&lo->lo_refcnt);
+ out:
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return err;
+ }
+
+-static void __lo_release(struct loop_device *lo)
++static void lo_release(struct gendisk *disk, fmode_t mode)
+ {
+- int err;
++ struct loop_device *lo;
+
++ mutex_lock(&loop_ctl_mutex);
++ lo = disk->private_data;
+ if (atomic_dec_return(&lo->lo_refcnt))
+- return;
++ goto out_unlock;
+
+- mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
++ if (lo->lo_state != Lo_bound)
++ goto out_unlock;
++ lo->lo_state = Lo_rundown;
++ mutex_unlock(&loop_ctl_mutex);
+ /*
+ * In autoclear mode, stop the loop thread
+ * and remove configuration after last close.
+ */
+- err = loop_clr_fd(lo);
+- if (!err)
+- return;
++ __loop_clr_fd(lo, true);
++ return;
+ } else if (lo->lo_state == Lo_bound) {
+ /*
+ * Otherwise keep thread (if running) and config,
+@@ -1666,14 +1764,8 @@ static void __lo_release(struct loop_device *lo)
+ blk_mq_unfreeze_queue(lo->lo_queue);
+ }
+
+- mutex_unlock(&lo->lo_ctl_mutex);
+-}
+-
+-static void lo_release(struct gendisk *disk, fmode_t mode)
+-{
+- mutex_lock(&loop_index_mutex);
+- __lo_release(disk->private_data);
+- mutex_unlock(&loop_index_mutex);
++out_unlock:
++ mutex_unlock(&loop_ctl_mutex);
+ }
+
+ static const struct block_device_operations lo_fops = {
+@@ -1712,10 +1804,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+- mutex_lock(&lo->lo_ctl_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+- mutex_unlock(&lo->lo_ctl_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+
+@@ -1896,7 +1988,6 @@ static int loop_add(struct loop_device **l, int i)
+ if (!part_shift)
+ disk->flags |= GENHD_FL_NO_PART_SCAN;
+ disk->flags |= GENHD_FL_EXT_DEVT;
+- mutex_init(&lo->lo_ctl_mutex);
+ atomic_set(&lo->lo_refcnt, 0);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
+@@ -1975,7 +2066,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ struct kobject *kobj;
+ int err;
+
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+@@ -1983,7 +2074,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ kobj = NULL;
+ else
+ kobj = get_disk_and_module(lo->lo_disk);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ *part = 0;
+ return kobj;
+@@ -1993,9 +2084,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ unsigned long parm)
+ {
+ struct loop_device *lo;
+- int ret = -ENOSYS;
++ int ret;
++
++ ret = mutex_lock_killable(&loop_ctl_mutex);
++ if (ret)
++ return ret;
+
+- mutex_lock(&loop_index_mutex);
++ ret = -ENOSYS;
+ switch (cmd) {
+ case LOOP_CTL_ADD:
+ ret = loop_lookup(&lo, parm);
+@@ -2009,21 +2104,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+- ret = mutex_lock_killable(&lo->lo_ctl_mutex);
+- if (ret)
+- break;
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (atomic_read(&lo->lo_refcnt) > 0) {
+ ret = -EBUSY;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+- mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+@@ -2033,7 +2122,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ break;
+ ret = loop_add(&lo, -1);
+ }
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ return ret;
+ }
+@@ -2117,10 +2206,10 @@ static int __init loop_init(void)
+ THIS_MODULE, loop_probe, NULL, NULL);
+
+ /* pre-create number of devices given by config or max_loop */
+- mutex_lock(&loop_index_mutex);
++ mutex_lock(&loop_ctl_mutex);
+ for (i = 0; i < nr; i++)
+ loop_add(&lo, i);
+- mutex_unlock(&loop_index_mutex);
++ mutex_unlock(&loop_ctl_mutex);
+
+ printk(KERN_INFO "loop: module loaded\n");
+ return 0;
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index 4d42c7af7de7..af75a5ee4094 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -54,7 +54,6 @@ struct loop_device {
+
+ spinlock_t lo_lock;
+ int lo_state;
+- struct mutex lo_ctl_mutex;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ bool use_dio;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 14a51254c3db..c13a6d1796a7 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ set_capacity(nbd->disk, config->bytesize >> 9);
+ if (bdev) {
+- if (bdev->bd_disk)
++ if (bdev->bd_disk) {
+ bd_set_size(bdev, config->bytesize);
+- else
++ set_blocksize(bdev, config->blksize);
++ } else
+ bdev->bd_invalidated = 1;
+ bdput(bdev);
+ }
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index a8c4ce07fc9d..a825b6444459 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -681,6 +681,7 @@ config CRYPTO_DEV_BCM_SPU
+ depends on ARCH_BCM_IPROC
+ depends on MAILBOX
+ default m
++ select CRYPTO_AUTHENC
+ select CRYPTO_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 2d1f1db9f807..cd464637b0cb 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ struct spu_hw *spu = &iproc_priv.spu;
+ struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+- struct rtattr *rta = (void *)key;
+- struct crypto_authenc_key_param *param;
+- const u8 *origkey = key;
+- const unsigned int origkeylen = keylen;
+-
+- int ret = 0;
++ struct crypto_authenc_keys keys;
++ int ret;
+
+ flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
+ keylen);
+ flow_dump(" key: ", key, keylen);
+
+- if (!RTA_OK(rta, keylen))
+- goto badkey;
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- goto badkey;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
++ ret = crypto_authenc_extractkeys(&keys, key, keylen);
++ if (ret)
+ goto badkey;
+
+- param = RTA_DATA(rta);
+- ctx->enckeylen = be32_to_cpu(param->enckeylen);
+-
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+-
+- if (keylen < ctx->enckeylen)
+- goto badkey;
+- if (ctx->enckeylen > MAX_KEY_SIZE)
++ if (keys.enckeylen > MAX_KEY_SIZE ||
++ keys.authkeylen > MAX_KEY_SIZE)
+ goto badkey;
+
+- ctx->authkeylen = keylen - ctx->enckeylen;
+-
+- if (ctx->authkeylen > MAX_KEY_SIZE)
+- goto badkey;
++ ctx->enckeylen = keys.enckeylen;
++ ctx->authkeylen = keys.authkeylen;
+
+- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
++ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ /* May end up padding auth key. So make sure it's zeroed. */
+ memset(ctx->authkey, 0, sizeof(ctx->authkey));
+- memcpy(ctx->authkey, key, ctx->authkeylen);
++ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+
+ switch (ctx->alg->cipher_info.alg) {
+ case CIPHER_ALG_DES:
+@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ u32 tmp[DES_EXPKEY_WORDS];
+ u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
+
+- if (des_ekey(tmp, key) == 0) {
++ if (des_ekey(tmp, keys.enckey) == 0) {
+ if (crypto_aead_get_flags(cipher) &
+ CRYPTO_TFM_REQ_WEAK_KEY) {
+ crypto_aead_set_flags(cipher, flags);
+@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ break;
+ case CIPHER_ALG_3DES:
+ if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
+- const u32 *K = (const u32 *)key;
++ const u32 *K = (const u32 *)keys.enckey;
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+
+ if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
+ ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->fallback_cipher->base.crt_flags |=
+ tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+- ret =
+- crypto_aead_setkey(ctx->fallback_cipher, origkey,
+- origkeylen);
++ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
+ if (ret) {
+ flow_log(" fallback setkey() returned:%d\n", ret);
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 43975ab5f09c..f84ca2ff61de 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1131,13 +1131,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
+
+ desc = edesc->hw_desc;
+
+- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, state->buf_dma)) {
+- dev_err(jrdev, "unable to map src\n");
+- goto unmap;
+- }
++ if (buflen) {
++ state->buf_dma = dma_map_single(jrdev, buf, buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, state->buf_dma)) {
++ dev_err(jrdev, "unable to map src\n");
++ goto unmap;
++ }
+
+- append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
++ }
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
+index 01b82b82f8b8..5852d29ae2da 100644
+--- a/drivers/crypto/ccree/cc_aead.c
++++ b/drivers/crypto/ccree/cc_aead.c
+@@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+- struct rtattr *rta = (struct rtattr *)key;
+ struct cc_crypto_req cc_req = {};
+- struct crypto_authenc_key_param *param;
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
+- int rc = -EINVAL;
+ unsigned int seq_len = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
++ const u8 *enckey, *authkey;
++ int rc;
+
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
+@@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ /* STAT_PHASE_0: Init and sanity checks */
+
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
+- if (!RTA_OK(rta, keylen))
+- goto badkey;
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+- goto badkey;
+- if (RTA_PAYLOAD(rta) < sizeof(*param))
+- goto badkey;
+- param = RTA_DATA(rta);
+- ctx->enc_keylen = be32_to_cpu(param->enckeylen);
+- key += RTA_ALIGN(rta->rta_len);
+- keylen -= RTA_ALIGN(rta->rta_len);
+- if (keylen < ctx->enc_keylen)
++ struct crypto_authenc_keys keys;
++
++ rc = crypto_authenc_extractkeys(&keys, key, keylen);
++ if (rc)
+ goto badkey;
+- ctx->auth_keylen = keylen - ctx->enc_keylen;
++ enckey = keys.enckey;
++ authkey = keys.authkey;
++ ctx->enc_keylen = keys.enckeylen;
++ ctx->auth_keylen = keys.authkeylen;
+
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ /* the nonce is stored in bytes at end of key */
++ rc = -EINVAL;
+ if (ctx->enc_keylen <
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
+ goto badkey;
+ /* Copy nonce from last 4 bytes in CTR key to
+ * first 4 bytes in CTR IV
+ */
+- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
+- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
+- CTR_RFC3686_NONCE_SIZE);
++ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
++ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ /* Set CTR key size */
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+ } else { /* non-authenc - has just one key */
++ enckey = key;
++ authkey = NULL;
+ ctx->enc_keylen = keylen;
+ ctx->auth_keylen = 0;
+ }
+@@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ /* STAT_PHASE_1: Copy key to ctx */
+
+ /* Get key material */
+- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
++ memcpy(ctx->enckey, enckey, ctx->enc_keylen);
+ if (ctx->enc_keylen == 24)
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
+- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
++ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
++ ctx->auth_keylen);
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
+- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
++ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
+ if (rc)
+ goto badkey;
+ }
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 6988012deca4..f4f3e9a5851e 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
+- void *err;
+
+ if (cryptlen + authsize > max_len) {
+ dev_err(dev, "length exceeds h/w max limit\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (ivsize)
+- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+-
+ if (!dst || dst == src) {
+ src_len = assoclen + cryptlen + authsize;
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_nents = dst ? src_nents : 0;
+@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ src_nents = sg_nents_for_len(src, src_len);
+ if (src_nents < 0) {
+ dev_err(dev, "Invalid number of src SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ src_nents = (src_nents == 1) ? 0 : src_nents;
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+ dst_nents = sg_nents_for_len(dst, dst_len);
+ if (dst_nents < 0) {
+ dev_err(dev, "Invalid number of dst SG.\n");
+- err = ERR_PTR(-EINVAL);
+- goto error_sg;
++ return ERR_PTR(-EINVAL);
+ }
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+ }
+@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ /* if its a ahash, add space for a second desc next to the first one */
+ if (is_sec1 && !dst)
+ alloc_len += sizeof(struct talitos_desc);
++ alloc_len += ivsize;
+
+ edesc = kmalloc(alloc_len, GFP_DMA | flags);
+- if (!edesc) {
+- err = ERR_PTR(-ENOMEM);
+- goto error_sg;
++ if (!edesc)
++ return ERR_PTR(-ENOMEM);
++ if (ivsize) {
++ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
++ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ }
+ memset(&edesc->desc, 0, sizeof(edesc->desc));
+
+@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ DMA_BIDIRECTIONAL);
1638 + }
1639 + return edesc;
1640 +-error_sg:
1641 +- if (iv_dma)
1642 +- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1643 +- return err;
1644 + }
1645 +
1646 + static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1647 +diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1648 +index b5b9f15549c2..1bda809a7289 100644
1649 +--- a/drivers/gpu/drm/drm_fb_helper.c
1650 ++++ b/drivers/gpu/drm/drm_fb_helper.c
1651 +@@ -1690,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1652 + struct drm_fb_helper *fb_helper = info->par;
1653 + struct drm_framebuffer *fb = fb_helper->fb;
1654 +
1655 +- if (var->pixclock != 0 || in_dbg_master())
1656 ++ if (in_dbg_master())
1657 + return -EINVAL;
1658 +
1659 ++ if (var->pixclock != 0) {
1660 ++ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
1661 ++ var->pixclock = 0;
1662 ++ }
1663 ++
1664 + /*
1665 + * Changes struct fb_var_screeninfo are currently not pushed back
1666 + * to KMS, hence fail if different settings are requested.
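Rather than failing FBIOPUT_VSCREENINFO whenever userspace fills in pixclock, the helper now warns and normalizes the field, since fbdev emulation cannot honor a pixel clock anyway. A hedged plain-C sketch of that tolerant-validation idea, with fprintf standing in for DRM_DEBUG:

    #include <stdio.h>

    struct var_screeninfo {
        unsigned int pixclock;
    };

    /* Normalize instead of reject: userspace that always fills in
     * pixclock keeps working, and the log records that the value
     * is ignored rather than applied. */
    static int check_var(struct var_screeninfo *var)
    {
        if (var->pixclock != 0) {
            fprintf(stderr, "pixclock ignored by fbdev emulation\n");
            var->pixclock = 0;
        }
        return 0;
    }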
1667 +diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
1668 +index 9ad89e38f6c0..12e4203c06db 100644
1669 +--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
1670 ++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
1671 +@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1672 + {
1673 + unsigned int index;
1674 + u64 virtaddr;
1675 +- unsigned long req_size, pgoff = 0;
1676 ++ unsigned long req_size, pgoff, req_start;
1677 + pgprot_t pg_prot;
1678 + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1679 +
1680 +@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1681 + pg_prot = vma->vm_page_prot;
1682 + virtaddr = vma->vm_start;
1683 + req_size = vma->vm_end - vma->vm_start;
1684 +- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
1685 ++ pgoff = vma->vm_pgoff &
1686 ++ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1687 ++ req_start = pgoff << PAGE_SHIFT;
1688 ++
1689 ++ if (!intel_vgpu_in_aperture(vgpu, req_start))
1690 ++ return -EINVAL;
1691 ++ if (req_start + req_size >
1692 ++ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1693 ++ return -EINVAL;
1694 ++
1695 ++ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1696 +
1697 + return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1698 + }
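The kvmgt change stops hardcoding the aperture base and instead derives the page offset from vma->vm_pgoff, validating the requested window against the vGPU aperture before anything reaches remap_pfn_range(). A small standalone sketch of that pre-mapping bounds check, with hypothetical aperture parameters; the second comparison is written so the sum cannot wrap, a detail worth keeping whenever the sizes come from userspace:

    #include <stdbool.h>
    #include <stdint.h>

    /* Accept only [req_off, req_off + req_sz) fully inside the window
     * [ap_off, ap_off + ap_sz); the subtraction form avoids overflow
     * in the upper-bound test. */
    static bool range_in_aperture(uint64_t ap_off, uint64_t ap_sz,
                                  uint64_t req_off, uint64_t req_sz)
    {
        if (req_off < ap_off || req_off - ap_off >= ap_sz)
            return false;
        if (req_sz > ap_sz - (req_off - ap_off))
            return false;
        return true;
    }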
1699 +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1700 +index 0385ab438320..f6fa9b115fda 100644
1701 +--- a/drivers/infiniband/core/nldev.c
1702 ++++ b/drivers/infiniband/core/nldev.c
1703 +@@ -579,10 +579,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
1704 + if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
1705 + atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
1706 + goto err;
1707 +- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
1708 +- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
1709 +- pd->unsafe_global_rkey))
1710 +- goto err;
1711 +
1712 + if (fill_res_name_pid(msg, res))
1713 + goto err;
1714 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1715 +index 42b8685c997e..3c633ab58052 100644
1716 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1717 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
1718 +@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
1719 +
1720 + static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
1721 + {
1722 +- return (enum pvrdma_wr_opcode)op;
1723 ++ switch (op) {
1724 ++ case IB_WR_RDMA_WRITE:
1725 ++ return PVRDMA_WR_RDMA_WRITE;
1726 ++ case IB_WR_RDMA_WRITE_WITH_IMM:
1727 ++ return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
1728 ++ case IB_WR_SEND:
1729 ++ return PVRDMA_WR_SEND;
1730 ++ case IB_WR_SEND_WITH_IMM:
1731 ++ return PVRDMA_WR_SEND_WITH_IMM;
1732 ++ case IB_WR_RDMA_READ:
1733 ++ return PVRDMA_WR_RDMA_READ;
1734 ++ case IB_WR_ATOMIC_CMP_AND_SWP:
1735 ++ return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
1736 ++ case IB_WR_ATOMIC_FETCH_AND_ADD:
1737 ++ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
1738 ++ case IB_WR_LSO:
1739 ++ return PVRDMA_WR_LSO;
1740 ++ case IB_WR_SEND_WITH_INV:
1741 ++ return PVRDMA_WR_SEND_WITH_INV;
1742 ++ case IB_WR_RDMA_READ_WITH_INV:
1743 ++ return PVRDMA_WR_RDMA_READ_WITH_INV;
1744 ++ case IB_WR_LOCAL_INV:
1745 ++ return PVRDMA_WR_LOCAL_INV;
1746 ++ case IB_WR_REG_MR:
1747 ++ return PVRDMA_WR_FAST_REG_MR;
1748 ++ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
1749 ++ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
1750 ++ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
1751 ++ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
1752 ++ case IB_WR_REG_SIG_MR:
1753 ++ return PVRDMA_WR_REG_SIG_MR;
1754 ++ default:
1755 ++ return PVRDMA_WR_ERROR;
1756 ++ }
1757 + }
1758 +
1759 + static inline enum ib_wc_status pvrdma_wc_status_to_ib(
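Replacing the cast with an exhaustive switch decouples the IB core's opcode numbering from the device ABI and gives every unmapped opcode a sentinel, PVRDMA_WR_ERROR, which the pvrdma_qp.c hunk below rejects at post-send time. A toy example of why casting between independently defined enums is fragile; the values here are invented to show the failure mode:

    enum wire_op { WIRE_SEND = 0, WIRE_WRITE = 1, WIRE_ERROR = 255 };
    enum api_op  { API_WRITE = 0, API_SEND  = 1 };  /* different order */

    /* A cast would silently encode API_SEND (1) as WIRE_WRITE (1);
     * an explicit table keeps the two numberings independent and
     * turns anything unmapped into an error sentinel. */
    static enum wire_op to_wire(enum api_op op)
    {
        switch (op) {
        case API_SEND:  return WIRE_SEND;
        case API_WRITE: return WIRE_WRITE;
        default:        return WIRE_ERROR;
        }
    }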
1760 +diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1761 +index 60083c0363a5..9aeb33093279 100644
1762 +--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1763 ++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
1764 +@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1765 + wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1766 + wqe_hdr->ex.imm_data = wr->ex.imm_data;
1767 +
1768 ++ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
1769 ++ *bad_wr = wr;
1770 ++ ret = -EINVAL;
1771 ++ goto out;
1772 ++ }
1773 ++
1774 + switch (qp->ibqp.qp_type) {
1775 + case IB_QPT_GSI:
1776 + case IB_QPT_UD:
1777 +diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
1778 +index 1faa64abc74f..6889c25c62cb 100644
1779 +--- a/drivers/media/common/videobuf2/videobuf2-core.c
1780 ++++ b/drivers/media/common/videobuf2/videobuf2-core.c
1781 +@@ -1933,9 +1933,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1782 + return -EINVAL;
1783 + }
1784 + }
1785 ++
1786 ++ mutex_lock(&q->mmap_lock);
1787 ++
1788 + if (vb2_fileio_is_active(q)) {
1789 + dprintk(1, "mmap: file io in progress\n");
1790 +- return -EBUSY;
1791 ++ ret = -EBUSY;
1792 ++ goto unlock;
1793 + }
1794 +
1795 + /*
1796 +@@ -1943,7 +1947,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1797 + */
1798 + ret = __find_plane_by_offset(q, off, &buffer, &plane);
1799 + if (ret)
1800 +- return ret;
1801 ++ goto unlock;
1802 +
1803 + vb = q->bufs[buffer];
1804 +
1805 +@@ -1956,11 +1960,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1806 + if (length < (vma->vm_end - vma->vm_start)) {
1807 + dprintk(1,
1808 + "MMAP invalid, as it would overflow buffer length\n");
1809 +- return -EINVAL;
1810 ++ ret = -EINVAL;
1811 ++ goto unlock;
1812 + }
1813 +
1814 +- mutex_lock(&q->mmap_lock);
1815 + ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
1816 ++
1817 ++unlock:
1818 + mutex_unlock(&q->mmap_lock);
1819 + if (ret)
1820 + return ret;
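The vb2_mmap() rework takes mmap_lock before the first check that reads queue state and funnels every exit through a single unlock label, closing the window in which file I/O could start between the check and the mapping. A compact pthread sketch of the same take-early, single-exit discipline; the two flags are stand-ins for the fileio and plane-lookup state the real code inspects:

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static int fileio_active;
    static int plane_valid;

    /* Every state check runs under the lock, and every path out goes
     * through the one unlock site, so no early return can leak the
     * lock or race a concurrent state change. */
    static int do_mmap(void)
    {
        int ret = 0;

        pthread_mutex_lock(&q_lock);
        if (fileio_active) {
            ret = -EBUSY;
            goto unlock;
        }
        if (!plane_valid) {
            ret = -EINVAL;
            goto unlock;
        }
        /* ... perform the mapping while still holding the lock ... */
    unlock:
        pthread_mutex_unlock(&q_lock);
        return ret;
    }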
1821 +diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
1822 +index f06003bb8e42..2a92e5aac9ed 100644
1823 +--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
1824 ++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
1825 +@@ -865,8 +865,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
1826 + "%s-vid-cap", dev->v4l2_dev.name);
1827 +
1828 + if (IS_ERR(dev->kthread_vid_cap)) {
1829 ++ int err = PTR_ERR(dev->kthread_vid_cap);
1830 ++
1831 ++ dev->kthread_vid_cap = NULL;
1832 + v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1833 +- return PTR_ERR(dev->kthread_vid_cap);
1834 ++ return err;
1835 + }
1836 + *pstreaming = true;
1837 + vivid_grab_controls(dev, true);
1838 +diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
1839 +index 9981e7548019..488590594150 100644
1840 +--- a/drivers/media/platform/vivid/vivid-kthread-out.c
1841 ++++ b/drivers/media/platform/vivid/vivid-kthread-out.c
1842 +@@ -236,8 +236,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
1843 + "%s-vid-out", dev->v4l2_dev.name);
1844 +
1845 + if (IS_ERR(dev->kthread_vid_out)) {
1846 ++ int err = PTR_ERR(dev->kthread_vid_out);
1847 ++
1848 ++ dev->kthread_vid_out = NULL;
1849 + v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
1850 +- return PTR_ERR(dev->kthread_vid_out);
1851 ++ return err;
1852 + }
1853 + *pstreaming = true;
1854 + vivid_grab_controls(dev, true);
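Both vivid hunks fix the same hazard: on kthread_create() failure the ERR_PTR cookie stayed cached in dev->kthread_vid_cap or dev->kthread_vid_out, where a later stop path could mistake it for a live thread. A sketch of the record-error-then-NULL pattern, with IS_ERR()/PTR_ERR() re-declared locally so it builds outside the kernel:

    #define MAX_ERRNO   4095
    #define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p)  ((long)(p))

    struct dev_state {
        void *worker;   /* models dev->kthread_vid_cap/out */
    };

    /* Stash the errno, reset the cached pointer, then return: any
     * teardown that tests dev->worker afterwards sees NULL instead
     * of an error cookie it might try to stop. */
    static long start_stream(struct dev_state *dev, void *candidate)
    {
        dev->worker = candidate;
        if (IS_ERR(dev->worker)) {
            long err = PTR_ERR(dev->worker);

            dev->worker = NULL;
            return err;
        }
        return 0;
    }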
1855 +diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
1856 +index be531caa2cdf..2079861d2270 100644
1857 +--- a/drivers/media/platform/vivid/vivid-vid-common.c
1858 ++++ b/drivers/media/platform/vivid/vivid-vid-common.c
1859 +@@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
1860 + .type = V4L2_DV_BT_656_1120,
1861 + /* keep this initialization for compatibility with GCC < 4.4.6 */
1862 + .reserved = { 0 },
1863 +- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
1864 ++ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
1865 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
1866 + V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
1867 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
1868 +diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
1869 +index 54afc9c7ee6e..a4d3e94a400c 100644
1870 +--- a/drivers/media/v4l2-core/v4l2-ioctl.c
1871 ++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
1872 +@@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only)
1873 + const struct v4l2_window *win;
1874 + const struct v4l2_sdr_format *sdr;
1875 + const struct v4l2_meta_format *meta;
1876 ++ u32 planes;
1877 + unsigned i;
1878 +
1879 + pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
1880 +@@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only)
1881 + prt_names(mp->field, v4l2_field_names),
1882 + mp->colorspace, mp->num_planes, mp->flags,
1883 + mp->ycbcr_enc, mp->quantization, mp->xfer_func);
1884 +- for (i = 0; i < mp->num_planes; i++)
1885 ++ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
1886 ++ for (i = 0; i < planes; i++)
1887 + printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
1888 + mp->plane_fmt[i].bytesperline,
1889 + mp->plane_fmt[i].sizeimage);
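In v4l_print_format() the num_planes value arrives straight from the ioctl caller, so the debug printer has to clamp it to the plane_fmt[] bound before indexing, which is all the min_t() does. The same clamp in standalone C, with MAX_PLANES standing in for VIDEO_MAX_PLANES:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PLANES 8    /* stand-in for VIDEO_MAX_PLANES */

    struct mplane_fmt {
        uint32_t num_planes;    /* caller-supplied, untrusted */
        uint32_t sizeimage[MAX_PLANES];
    };

    /* Clamp the untrusted count to the array it indexes: printing one
     * plane too few is harmless, reading past sizeimage[] is not. */
    static void print_planes(const struct mplane_fmt *f)
    {
        uint32_t i, n;

        n = f->num_planes < MAX_PLANES ? f->num_planes : MAX_PLANES;
        for (i = 0; i < n; i++)
            printf("plane %u: sizeimage=%u\n", i, f->sizeimage[i]);
    }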
1890 +diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
1891 +index b89379782741..9c7925ca13cf 100644
1892 +--- a/drivers/mfd/tps6586x.c
1893 ++++ b/drivers/mfd/tps6586x.c
1894 +@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
1895 + return 0;
1896 + }
1897 +
1898 ++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
1899 ++{
1900 ++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
1901 ++
1902 ++ if (tps6586x->client->irq)
1903 ++ disable_irq(tps6586x->client->irq);
1904 ++
1905 ++ return 0;
1906 ++}
1907 ++
1908 ++static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
1909 ++{
1910 ++ struct tps6586x *tps6586x = dev_get_drvdata(dev);
1911 ++
1912 ++ if (tps6586x->client->irq)
1913 ++ enable_irq(tps6586x->client->irq);
1914 ++
1915 ++ return 0;
1916 ++}
1917 ++
1918 ++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
1919 ++ tps6586x_i2c_resume);
1920 ++
1921 + static const struct i2c_device_id tps6586x_id_table[] = {
1922 + { "tps6586x", 0 },
1923 + { },
1924 +@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
1925 + .driver = {
1926 + .name = "tps6586x",
1927 + .of_match_table = of_match_ptr(tps6586x_of_match),
1928 ++ .pm = &tps6586x_pm_ops,
1929 + },
1930 + .probe = tps6586x_i2c_probe,
1931 + .remove = tps6586x_i2c_remove,
1932 +diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
1933 +index 3cc8bfee6c18..8594659cb592 100644
1934 +--- a/drivers/mmc/host/sdhci-msm.c
1935 ++++ b/drivers/mmc/host/sdhci-msm.c
1936 +@@ -258,6 +258,8 @@ struct sdhci_msm_host {
1937 + bool mci_removed;
1938 + const struct sdhci_msm_variant_ops *var_ops;
1939 + const struct sdhci_msm_offset *offset;
1940 ++ bool use_cdr;
1941 ++ u32 transfer_mode;
1942 + };
1943 +
1944 + static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
1945 +@@ -1025,6 +1027,26 @@ out:
1946 + return ret;
1947 + }
1948 +
1949 ++static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
1950 ++{
1951 ++ const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
1952 ++ u32 config, oldconfig = readl_relaxed(host->ioaddr +
1953 ++ msm_offset->core_dll_config);
1954 ++
1955 ++ config = oldconfig;
1956 ++ if (enable) {
1957 ++ config |= CORE_CDR_EN;
1958 ++ config &= ~CORE_CDR_EXT_EN;
1959 ++ } else {
1960 ++ config &= ~CORE_CDR_EN;
1961 ++ config |= CORE_CDR_EXT_EN;
1962 ++ }
1963 ++
1964 ++ if (config != oldconfig)
1965 ++ writel_relaxed(config, host->ioaddr +
1966 ++ msm_offset->core_dll_config);
1967 ++}
1968 ++
1969 + static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
1970 + {
1971 + struct sdhci_host *host = mmc_priv(mmc);
1972 +@@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
1973 + if (host->clock <= CORE_FREQ_100MHZ ||
1974 + !(ios.timing == MMC_TIMING_MMC_HS400 ||
1975 + ios.timing == MMC_TIMING_MMC_HS200 ||
1976 +- ios.timing == MMC_TIMING_UHS_SDR104))
1977 ++ ios.timing == MMC_TIMING_UHS_SDR104)) {
1978 ++ msm_host->use_cdr = false;
1979 ++ sdhci_msm_set_cdr(host, false);
1980 + return 0;
1981 ++ }
1982 ++
1983 ++ /* Clock-Data-Recovery is used to dynamically adjust the RX sampling point */
1984 ++ msm_host->use_cdr = true;
1985 +
1986 + /*
1987 + * For HS400 tuning in HS200 timing requires:
1988 +@@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
1989 + case SDHCI_POWER_CONTROL:
1990 + req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
1991 + break;
1992 ++ case SDHCI_TRANSFER_MODE:
1993 ++ msm_host->transfer_mode = val;
1994 ++ break;
1995 ++ case SDHCI_COMMAND:
1996 ++ if (!msm_host->use_cdr)
1997 ++ break;
1998 ++ if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
1999 ++ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
2000 ++ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
2001 ++ sdhci_msm_set_cdr(host, true);
2002 ++ else
2003 ++ sdhci_msm_set_cdr(host, false);
2004 ++ break;
2005 + }
2006 +
2007 + if (req_type) {
2008 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
2009 +index 3c597569cfae..a6fcc5c96070 100644
2010 +--- a/drivers/net/bonding/bond_main.c
2011 ++++ b/drivers/net/bonding/bond_main.c
2012 +@@ -1947,6 +1947,9 @@ static int __bond_release_one(struct net_device *bond_dev,
2013 + if (!bond_has_slaves(bond)) {
2014 + bond_set_carrier(bond);
2015 + eth_hw_addr_random(bond_dev);
2016 ++ bond->nest_level = SINGLE_DEPTH_NESTING;
2017 ++ } else {
2018 ++ bond->nest_level = dev_get_nest_level(bond_dev) + 1;
2019 + }
2020 +
2021 + unblock_netpoll_tx();
2022 +diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
2023 +index b4b839a1d095..ad41ec63cc9f 100644
2024 +--- a/drivers/net/dsa/realtek-smi.c
2025 ++++ b/drivers/net/dsa/realtek-smi.c
2026 +@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
2027 + struct device_node *mdio_np;
2028 + int ret;
2029 +
2030 +- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
2031 +- "realtek,smi-mdio");
2032 ++ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
2033 + if (!mdio_np) {
2034 + dev_err(smi->dev, "no MDIO bus node\n");
2035 + return -ENODEV;
2036 + }
2037 +
2038 + smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
2039 +- if (!smi->slave_mii_bus)
2040 +- return -ENOMEM;
2041 ++ if (!smi->slave_mii_bus) {
2042 ++ ret = -ENOMEM;
2043 ++ goto err_put_node;
2044 ++ }
2045 + smi->slave_mii_bus->priv = smi;
2046 + smi->slave_mii_bus->name = "SMI slave MII";
2047 + smi->slave_mii_bus->read = realtek_smi_mdio_read;
2048 +@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
2049 + if (ret) {
2050 + dev_err(smi->dev, "unable to register MDIO bus %s\n",
2051 + smi->slave_mii_bus->id);
2052 +- of_node_put(mdio_np);
2053 ++ goto err_put_node;
2054 + }
2055 +
2056 + return 0;
2057 ++
2058 ++err_put_node:
2059 ++ of_node_put(mdio_np);
2060 ++
2061 ++ return ret;
2062 + }
2063 +
2064 + static int realtek_smi_probe(struct platform_device *pdev)
2065 +@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
2066 + struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
2067 +
2068 + dsa_unregister_switch(smi->ds);
2069 ++ if (smi->slave_mii_bus)
2070 ++ of_node_put(smi->slave_mii_bus->dev.of_node);
2071 + gpiod_set_value(smi->reset, 1);
2072 +
2073 + return 0;
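The realtek-smi fix converts scattered early returns into a single err_put_node label so the reference taken by of_get_compatible_child() is dropped on every failure path, while on success it is deliberately kept and released later in realtek_smi_remove(). A self-contained sketch of that one-release-site shape; node_get()/node_put() are toy stand-ins for the OF refcounting API:

    #include <errno.h>
    #include <stdlib.h>

    struct node { int dummy; };

    static struct node *node_get(void)   /* models of_get_compatible_child() */
    {
        return malloc(sizeof(struct node));
    }

    static void node_put(struct node *n) /* models of_node_put() */
    {
        free(n);
    }

    static int bus_register(struct node *n)
    {
        (void)n;
        return 0;   /* pretend registration succeeds */
    }

    /* Every failure after node_get() exits through the label that
     * drops the reference; on success, ownership passes out and the
     * reference is dropped at teardown instead. */
    static int setup_mdio(struct node **out)
    {
        struct node *np = node_get();
        int ret;

        if (!np)
            return -ENODEV;

        ret = bus_register(np);
        if (ret)
            goto err_put_node;

        *out = np;
        return 0;

    err_put_node:
        node_put(np);
        return ret;
    }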
2074 +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
2075 +index 1393252c6e3c..42f5bfa33694 100644
2076 +--- a/drivers/net/ethernet/microchip/lan743x_main.c
2077 ++++ b/drivers/net/ethernet/microchip/lan743x_main.c
2078 +@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
2079 +
2080 + memset(&ksettings, 0, sizeof(ksettings));
2081 + phy_ethtool_get_link_ksettings(netdev, &ksettings);
2082 +- local_advertisement = phy_read(phydev, MII_ADVERTISE);
2083 +- if (local_advertisement < 0)
2084 +- return;
2085 +-
2086 +- remote_advertisement = phy_read(phydev, MII_LPA);
2087 +- if (remote_advertisement < 0)
2088 +- return;
2089 ++ local_advertisement =
2090 ++ ethtool_adv_to_mii_adv_t(phydev->advertising);
2091 ++ remote_advertisement =
2092 ++ ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
2093 +
2094 + lan743x_phy_update_flowcontrol(adapter,
2095 + ksettings.base.duplex,
2096 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2097 +index 9fc8a2bc0ff1..07f3080eca18 100644
2098 +--- a/drivers/net/ethernet/realtek/r8169.c
2099 ++++ b/drivers/net/ethernet/realtek/r8169.c
2100 +@@ -717,6 +717,7 @@ module_param(use_dac, int, 0);
2101 + MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
2102 + module_param_named(debug, debug.msg_enable, int, 0);
2103 + MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
2104 ++MODULE_SOFTDEP("pre: realtek");
2105 + MODULE_LICENSE("GPL");
2106 + MODULE_FIRMWARE(FIRMWARE_8168D_1);
2107 + MODULE_FIRMWARE(FIRMWARE_8168D_2);
2108 +@@ -1730,11 +1731,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
2109 +
2110 + static bool rtl8169_update_counters(struct rtl8169_private *tp)
2111 + {
2112 ++ u8 val = RTL_R8(tp, ChipCmd);
2113 ++
2114 + /*
2115 + * Some chips are unable to dump tally counters when the receiver
2116 +- * is disabled.
2117 ++ * is disabled. If 0xff, the chip may be in a PCI power-save state.
2118 + */
2119 +- if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
2120 ++ if (!(val & CmdRxEnb) || val == 0xff)
2121 + return true;
2122 +
2123 + return rtl8169_do_counters(tp, CounterDump);
2124 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
2125 +index 8c1abcba4cbd..33978b0cdac8 100644
2126 +--- a/drivers/net/tun.c
2127 ++++ b/drivers/net/tun.c
2128 +@@ -859,10 +859,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2129 + err = 0;
2130 + }
2131 +
2132 +- rcu_assign_pointer(tfile->tun, tun);
2133 +- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2134 +- tun->numqueues++;
2135 +-
2136 + if (tfile->detached) {
2137 + tun_enable_queue(tfile);
2138 + } else {
2139 +@@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
2140 + * refcnt.
2141 + */
2142 +
2143 ++ /* Publish tfile->tun and tun->tfiles only after we've fully
2144 ++ * initialized tfile; otherwise we risk using a half-initialized
2145 ++ * object.
2146 ++ */
2147 ++ rcu_assign_pointer(tfile->tun, tun);
2148 ++ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
2149 ++ tun->numqueues++;
2150 + out:
2151 + return err;
2152 + }
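The tun fix is a textbook RCU publication-ordering bug: tfile has to be fully initialized before rcu_assign_pointer() makes it reachable, because that assignment is the release barrier readers pair with. The same ordering modeled with C11 atomics, where memory_order_release plays the role of rcu_assign_pointer(); the struct fields are illustrative only:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct tfile {
        int queue_index;
        int flags;
    };

    static _Atomic(struct tfile *) published;

    /* Complete every store into the object, then publish it with a
     * release store; a reader that observes the pointer is then
     * guaranteed to observe the initialized fields as well. */
    static int attach(void)
    {
        struct tfile *f = malloc(sizeof(*f));

        if (!f)
            return -1;

        f->queue_index = 0;   /* all initialization first ... */
        f->flags = 0;

        atomic_store_explicit(&published, f, memory_order_release);
        return 0;             /* ... publication strictly last */
    }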
2153 +diff --git a/drivers/of/property.c b/drivers/of/property.c
2154 +index f46828e3b082..43720c2de138 100644
2155 +--- a/drivers/of/property.c
2156 ++++ b/drivers/of/property.c
2157 +@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
2158 +
2159 + if (!of_device_is_available(remote)) {
2160 + pr_debug("not available for remote node\n");
2161 ++ of_node_put(remote);
2162 + return NULL;
2163 + }
2164 +
2165 +diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
2166 +index b44c1bb687a2..ebc193f7f7dd 100644
2167 +--- a/drivers/scsi/scsi_pm.c
2168 ++++ b/drivers/scsi/scsi_pm.c
2169 +@@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
2170 +
2171 + if (err == 0) {
2172 + pm_runtime_disable(dev);
2173 +- pm_runtime_set_active(dev);
2174 ++ err = pm_runtime_set_active(dev);
2175 + pm_runtime_enable(dev);
2176 ++
2177 ++ /*
2178 ++ * Forcibly set runtime PM status of request queue to "active"
2179 ++ * to make sure we can again get requests from the queue
2180 ++ * (see also blk_pm_peek_request()).
2181 ++ *
2182 ++ * The resume hook will correct runtime PM status of the disk.
2183 ++ */
2184 ++ if (!err && scsi_is_sdev_device(dev)) {
2185 ++ struct scsi_device *sdev = to_scsi_device(dev);
2186 ++
2187 ++ if (sdev->request_queue->dev)
2188 ++ blk_set_runtime_active(sdev->request_queue);
2189 ++ }
2190 + }
2191 +
2192 + return err;
2193 +@@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
2194 + else
2195 + fn = NULL;
2196 +
2197 +- /*
2198 +- * Forcibly set runtime PM status of request queue to "active" to
2199 +- * make sure we can again get requests from the queue (see also
2200 +- * blk_pm_peek_request()).
2201 +- *
2202 +- * The resume hook will correct runtime PM status of the disk.
2203 +- */
2204 +- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
2205 +- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
2206 +-
2207 + if (fn) {
2208 + async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
2209 +
2210 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2211 +index 5c9acb634ff7..0a27917263aa 100644
2212 +--- a/drivers/scsi/sd.c
2213 ++++ b/drivers/scsi/sd.c
2214 +@@ -205,6 +205,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
2215 + sp = buffer_data[0] & 0x80 ? 1 : 0;
2216 + buffer_data[0] &= ~0x80;
2217 +
2218 ++ /*
2219 ++ * Ensure WP, DPOFUA, and RESERVED fields are cleared in the
2220 ++ * received mode parameter buffer before doing MODE SELECT.
2221 ++ */
2222 ++ data.device_specific = 0;
2223 ++
2224 + if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
2225 + SD_MAX_RETRIES, &data, &sshdr)) {
2226 + if (scsi_sense_valid(&sshdr))
2227 +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2228 +index b289b90ae6dc..b19c960d5490 100644
2229 +--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2230 ++++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
2231 +@@ -598,9 +598,12 @@ out:
2232 + mutex_unlock(&cdev_list_lock);
2233 + }
2234 +
2235 ++static void __cxgbit_free_conn(struct cxgbit_sock *csk);
2236 ++
2237 + void cxgbit_free_np(struct iscsi_np *np)
2238 + {
2239 + struct cxgbit_np *cnp = np->np_context;
2240 ++ struct cxgbit_sock *csk, *tmp;
2241 +
2242 + cnp->com.state = CSK_STATE_DEAD;
2243 + if (cnp->com.cdev)
2244 +@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
2245 + else
2246 + cxgbit_free_all_np(cnp);
2247 +
2248 ++ spin_lock_bh(&cnp->np_accept_lock);
2249 ++ list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
2250 ++ list_del_init(&csk->accept_node);
2251 ++ __cxgbit_free_conn(csk);
2252 ++ }
2253 ++ spin_unlock_bh(&cnp->np_accept_lock);
2254 ++
2255 + np->np_context = NULL;
2256 + cxgbit_put_cnp(cnp);
2257 + }
2258 +@@ -708,9 +718,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
2259 + csk->tid, 600, __func__);
2260 + }
2261 +
2262 +-void cxgbit_free_conn(struct iscsi_conn *conn)
2263 ++static void __cxgbit_free_conn(struct cxgbit_sock *csk)
2264 + {
2265 +- struct cxgbit_sock *csk = conn->context;
2266 ++ struct iscsi_conn *conn = csk->conn;
2267 + bool release = false;
2268 +
2269 + pr_debug("%s: state %d\n",
2270 +@@ -719,7 +729,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
2271 + spin_lock_bh(&csk->lock);
2272 + switch (csk->com.state) {
2273 + case CSK_STATE_ESTABLISHED:
2274 +- if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
2275 ++ if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
2276 + csk->com.state = CSK_STATE_CLOSING;
2277 + cxgbit_send_halfclose(csk);
2278 + } else {
2279 +@@ -744,6 +754,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
2280 + cxgbit_put_csk(csk);
2281 + }
2282 +
2283 ++void cxgbit_free_conn(struct iscsi_conn *conn)
2284 ++{
2285 ++ __cxgbit_free_conn(conn->context);
2286 ++}
2287 ++
2288 + static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
2289 + {
2290 + csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
2291 +@@ -806,6 +821,7 @@ void _cxgbit_free_csk(struct kref *kref)
2292 + spin_unlock_bh(&cdev->cskq.lock);
2293 +
2294 + cxgbit_free_skb(csk);
2295 ++ cxgbit_put_cnp(csk->cnp);
2296 + cxgbit_put_cdev(cdev);
2297 +
2298 + kfree(csk);
2299 +@@ -1354,6 +1370,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
2300 + goto rel_skb;
2301 + }
2302 +
2303 ++ cxgbit_get_cnp(cnp);
2304 + cxgbit_get_cdev(cdev);
2305 +
2306 + spin_lock(&cdev->cskq.lock);
2307 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2308 +index d6f42b528277..052ec16a4e84 100644
2309 +--- a/drivers/tty/tty_io.c
2310 ++++ b/drivers/tty/tty_io.c
2311 +@@ -1255,7 +1255,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
2312 + static int tty_reopen(struct tty_struct *tty)
2313 + {
2314 + struct tty_driver *driver = tty->driver;
2315 +- int retval;
2316 ++ struct tty_ldisc *ld;
2317 ++ int retval = 0;
2318 +
2319 + if (driver->type == TTY_DRIVER_TYPE_PTY &&
2320 + driver->subtype == PTY_TYPE_MASTER)
2321 +@@ -1267,14 +1268,21 @@ static int tty_reopen(struct tty_struct *tty)
2322 + if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
2323 + return -EBUSY;
2324 +
2325 +- tty->count++;
2326 ++ ld = tty_ldisc_ref_wait(tty);
2327 ++ if (ld) {
2328 ++ tty_ldisc_deref(ld);
2329 ++ } else {
2330 ++ retval = tty_ldisc_lock(tty, 5 * HZ);
2331 ++ if (retval)
2332 ++ return retval;
2333 +
2334 +- if (tty->ldisc)
2335 +- return 0;
2336 ++ if (!tty->ldisc)
2337 ++ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2338 ++ tty_ldisc_unlock(tty);
2339 ++ }
2340 +
2341 +- retval = tty_ldisc_reinit(tty, tty->termios.c_line);
2342 +- if (retval)
2343 +- tty->count--;
2344 ++ if (retval == 0)
2345 ++ tty->count++;
2346 +
2347 + return retval;
2348 + }
2349 +diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
2350 +index 0c98d88f795a..b989ca26fc78 100644
2351 +--- a/drivers/tty/tty_ldsem.c
2352 ++++ b/drivers/tty/tty_ldsem.c
2353 +@@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
2354 + if (!locked)
2355 + atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
2356 + list_del(&waiter.list);
2357 ++
2358 ++ /*
2359 ++ * In case of timeout, wake up every reader who gave the right of way
2360 ++ * to the writer. Prevent separating the readers into two groups:
2361 ++ * one that holds the semaphore and another that sleeps.
2362 ++ * (in case of no contention with a writer)
2363 ++ */
2364 ++ if (!locked && list_empty(&sem->write_wait))
2365 ++ __ldsem_wake_readers(sem);
2366 ++
2367 + raw_spin_unlock_irq(&sem->wait_lock);
2368 +
2369 + __set_current_state(TASK_RUNNING);
2370 +diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2371 +index a3edb20ea4c3..a846d32ee653 100644
2372 +--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2373 ++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
2374 +@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
2375 +
2376 + int r = 0;
2377 +
2378 ++ memset(&p, 0, sizeof(p));
2379 ++
2380 + switch (cmd) {
2381 + case OMAPFB_SYNC_GFX:
2382 + DBG("ioctl SYNC_GFX\n");
2383 +diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2384 +index e6c1934734b7..fe1f16351f94 100644
2385 +--- a/drivers/xen/events/events_base.c
2386 ++++ b/drivers/xen/events/events_base.c
2387 +@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
2388 + xen_have_vector_callback = 0;
2389 + return;
2390 + }
2391 +- pr_info("Xen HVM callback vector for event delivery is enabled\n");
2392 ++ pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
2393 + alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
2394 + xen_hvm_callback_vector);
2395 + }
2396 +diff --git a/fs/block_dev.c b/fs/block_dev.c
2397 +index 38b8ce05cbc7..cdbb888a8d4a 100644
2398 +--- a/fs/block_dev.c
2399 ++++ b/fs/block_dev.c
2400 +@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
2401 + }
2402 + EXPORT_SYMBOL(invalidate_bdev);
2403 +
2404 ++static void set_init_blocksize(struct block_device *bdev)
2405 ++{
2406 ++ unsigned bsize = bdev_logical_block_size(bdev);
2407 ++ loff_t size = i_size_read(bdev->bd_inode);
2408 ++
2409 ++ while (bsize < PAGE_SIZE) {
2410 ++ if (size & bsize)
2411 ++ break;
2412 ++ bsize <<= 1;
2413 ++ }
2414 ++ bdev->bd_block_size = bsize;
2415 ++ bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2416 ++}
2417 ++
2418 + int set_blocksize(struct block_device *bdev, int size)
2419 + {
2420 + /* Size must be a power of two, and between 512 and PAGE_SIZE */
2421 +@@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change);
2422 +
2423 + void bd_set_size(struct block_device *bdev, loff_t size)
2424 + {
2425 +- unsigned bsize = bdev_logical_block_size(bdev);
2426 +-
2427 + inode_lock(bdev->bd_inode);
2428 + i_size_write(bdev->bd_inode, size);
2429 + inode_unlock(bdev->bd_inode);
2430 +- while (bsize < PAGE_SIZE) {
2431 +- if (size & bsize)
2432 +- break;
2433 +- bsize <<= 1;
2434 +- }
2435 +- bdev->bd_block_size = bsize;
2436 +- bdev->bd_inode->i_blkbits = blksize_bits(bsize);
2437 + }
2438 + EXPORT_SYMBOL(bd_set_size);
2439 +
2440 +@@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2441 + }
2442 + }
2443 +
2444 +- if (!ret)
2445 ++ if (!ret) {
2446 + bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
2447 ++ set_init_blocksize(bdev);
2448 ++ }
2449 +
2450 + /*
2451 + * If the device is invalidated, rescan partition
2452 +@@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2453 + goto out_clear;
2454 + }
2455 + bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
2456 ++ set_init_blocksize(bdev);
2457 + }
2458 +
2459 + if (bdev->bd_bdi == &noop_backing_dev_info)
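set_init_blocksize() chooses the largest power-of-two block size, capped at the page size, that still divides the device size: testing size & bsize asks whether size is an odd multiple of bsize. A runnable version with a worked value, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096u   /* assumed page size for the example */

    /* Double bsize while it still divides size. For size = 7168 and a
     * 512-byte logical block this settles on 1024, since
     * 7168 = 7 * 1024 and 7 is odd. */
    static unsigned int init_blocksize(unsigned long long size,
                                       unsigned int logical_block_size)
    {
        unsigned int bsize = logical_block_size;

        while (bsize < PAGE_SIZE) {
            if (size & bsize)
                break;
            bsize <<= 1;
        }
        return bsize;
    }

    int main(void)
    {
        printf("%u\n", init_blocksize(7168, 512));   /* prints 1024 */
        return 0;
    }

Moving the computation out of bd_set_size() and into open also means a later capacity update no longer clobbers a block size that set_blocksize() had already configured.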
2460 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2461 +index d4a7f7ca4145..d96d1390068a 100644
2462 +--- a/fs/btrfs/disk-io.c
2463 ++++ b/fs/btrfs/disk-io.c
2464 +@@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
2465 + spin_lock(&fs_info->ordered_root_lock);
2466 + }
2467 + spin_unlock(&fs_info->ordered_root_lock);
2468 ++
2469 ++ /*
2470 ++ * We need this here because if we've been flipped read-only we won't
2471 ++ * get sync() from the umount, so we need to make sure any ordered
2472 ++ * extents whose dirty pages haven't started writeout yet
2473 ++ * actually get run and error out properly.
2474 ++ */
2475 ++ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2476 + }
2477 +
2478 + static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2479 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2480 +index 14c85e61134d..4f6dc56b4f4d 100644
2481 +--- a/fs/btrfs/inode.c
2482 ++++ b/fs/btrfs/inode.c
2483 +@@ -3151,9 +3151,6 @@ out:
2484 + /* once for the tree */
2485 + btrfs_put_ordered_extent(ordered_extent);
2486 +
2487 +- /* Try to release some metadata so we don't get an OOM but don't wait */
2488 +- btrfs_btree_balance_dirty_nodelay(fs_info);
2489 +-
2490 + return ret;
2491 + }
2492 +
2493 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
2494 +index 03cd59375abe..eb67bb7f04de 100644
2495 +--- a/fs/pstore/ram.c
2496 ++++ b/fs/pstore/ram.c
2497 +@@ -713,18 +713,15 @@ static int ramoops_probe(struct platform_device *pdev)
2498 + {
2499 + struct device *dev = &pdev->dev;
2500 + struct ramoops_platform_data *pdata = dev->platform_data;
2501 ++ struct ramoops_platform_data pdata_local;
2502 + struct ramoops_context *cxt = &oops_cxt;
2503 + size_t dump_mem_sz;
2504 + phys_addr_t paddr;
2505 + int err = -EINVAL;
2506 +
2507 + if (dev_of_node(dev) && !pdata) {
2508 +- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2509 +- if (!pdata) {
2510 +- pr_err("cannot allocate platform data buffer\n");
2511 +- err = -ENOMEM;
2512 +- goto fail_out;
2513 +- }
2514 ++ pdata = &pdata_local;
2515 ++ memset(pdata, 0, sizeof(*pdata));
2516 +
2517 + err = ramoops_parse_dt(pdev, pdata);
2518 + if (err < 0)
2519 +diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
2520 +index 7cca5f859a90..f3c43519baa7 100644
2521 +--- a/include/linux/bcma/bcma_soc.h
2522 ++++ b/include/linux/bcma/bcma_soc.h
2523 +@@ -6,6 +6,7 @@
2524 +
2525 + struct bcma_soc {
2526 + struct bcma_bus bus;
2527 ++ struct device *dev;
2528 + };
2529 +
2530 + int __init bcma_host_soc_register(struct bcma_soc *soc);
2531 +diff --git a/include/linux/genhd.h b/include/linux/genhd.h
2532 +index 25c08c6c7f99..f767293b00e6 100644
2533 +--- a/include/linux/genhd.h
2534 ++++ b/include/linux/genhd.h
2535 +@@ -129,7 +129,7 @@ struct hd_struct {
2536 + struct disk_stats dkstats;
2537 + #endif
2538 + struct percpu_ref ref;
2539 +- struct rcu_head rcu_head;
2540 ++ struct rcu_work rcu_work;
2541 + };
2542 +
2543 + #define GENHD_FL_REMOVABLE 1
2544 +diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
2545 +index 4b2b2baf8ab4..f32fc8289473 100644
2546 +--- a/include/net/netfilter/nf_conntrack_count.h
2547 ++++ b/include/net/netfilter/nf_conntrack_count.h
2548 +@@ -5,17 +5,10 @@
2549 +
2550 + struct nf_conncount_data;
2551 +
2552 +-enum nf_conncount_list_add {
2553 +- NF_CONNCOUNT_ADDED, /* list add was ok */
2554 +- NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
2555 +- NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
2556 +-};
2557 +-
2558 + struct nf_conncount_list {
2559 + spinlock_t list_lock;
2560 + struct list_head head; /* connections with the same filtering key */
2561 + unsigned int count; /* length of list */
2562 +- bool dead;
2563 + };
2564 +
2565 + struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
2566 +@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
2567 + const struct nf_conntrack_tuple *tuple,
2568 + const struct nf_conntrack_zone *zone);
2569 +
2570 +-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
2571 +- const struct nf_conntrack_tuple *tuple,
2572 +- const struct nf_conntrack_zone *zone,
2573 +- bool *addit);
2574 ++int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
2575 ++ const struct nf_conntrack_tuple *tuple,
2576 ++ const struct nf_conntrack_zone *zone);
2577 +
2578 + void nf_conncount_list_init(struct nf_conncount_list *list);
2579 +
2580 +-enum nf_conncount_list_add
2581 +-nf_conncount_add(struct nf_conncount_list *list,
2582 +- const struct nf_conntrack_tuple *tuple,
2583 +- const struct nf_conntrack_zone *zone);
2584 +-
2585 + bool nf_conncount_gc_list(struct net *net,
2586 + struct nf_conncount_list *list);
2587 +
2588 +diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
2589 +index d13fd490b66d..6e73f0274e41 100644
2590 +--- a/include/uapi/rdma/vmw_pvrdma-abi.h
2591 ++++ b/include/uapi/rdma/vmw_pvrdma-abi.h
2592 +@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
2593 + PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
2594 + PVRDMA_WR_BIND_MW,
2595 + PVRDMA_WR_REG_SIG_MR,
2596 ++ PVRDMA_WR_ERROR,
2597 + };
2598 +
2599 + enum pvrdma_wc_status {
2600 +diff --git a/init/Kconfig b/init/Kconfig
2601 +index 317d5ccb5191..864af10bb1b9 100644
2602 +--- a/init/Kconfig
2603 ++++ b/init/Kconfig
2604 +@@ -1102,6 +1102,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
2605 + bool "Dead code and data elimination (EXPERIMENTAL)"
2606 + depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
2607 + depends on EXPERT
2608 ++ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
2609 + depends on $(cc-option,-ffunction-sections -fdata-sections)
2610 + depends on $(ld-option,--gc-sections)
2611 + help
2612 +diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
2613 +index 14436f4ca6bd..30e0f9770f88 100644
2614 +--- a/lib/int_sqrt.c
2615 ++++ b/lib/int_sqrt.c
2616 +@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
2617 + if (x <= ULONG_MAX)
2618 + return int_sqrt((unsigned long) x);
2619 +
2620 +- m = 1ULL << (fls64(x) & ~1ULL);
2621 ++ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
2622 + while (m != 0) {
2623 + b = y + m;
2624 + y >>= 1;
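The one-line int_sqrt64() change fixes the initial probe of the digit-by-digit square root. fls64() is 1-based, so for any x with bit 63 set it returns 64 and the old fls64(x) & ~1ULL shifted by 64 bits, which is undefined in C (on x86 it degrades to a shift by zero, so m started at 1 and the result was wrong). (fls64(x) - 1) & ~1 always lands on the even bit position at or below the top set bit. A standalone version with the corrected probe, re-implementing fls64() via __builtin_clzll for userspace:

    #include <stdint.h>
    #include <stdio.h>

    static int fls64(uint64_t x)   /* 1-based index of the highest set bit */
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* Digit-by-digit square root; m walks down the even bit positions
     * starting at or below the most significant set bit of x. */
    static uint32_t int_sqrt64(uint64_t x)
    {
        uint64_t b, m, y = 0;

        if (x <= 1)
            return (uint32_t)x;

        m = 1ULL << ((fls64(x) - 1) & ~1ULL);
        while (m != 0) {
            b = y + m;
            y >>= 1;

            if (x >= b) {
                x -= b;
                y += m;
            }
            m >>= 2;
        }
        return (uint32_t)y;
    }

    int main(void)
    {
        printf("%u\n", int_sqrt64(1ULL << 63));   /* prints 3037000499 */
        return 0;
    }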
2625 +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
2626 +index 37278dc280eb..e07a7e62c705 100644
2627 +--- a/net/bridge/br_netfilter_hooks.c
2628 ++++ b/net/bridge/br_netfilter_hooks.c
2629 +@@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
2630 + struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
2631 + int ret;
2632 +
2633 +- if (neigh->hh.hh_len) {
2634 ++ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
2635 + neigh_hh_bridge(&neigh->hh, skb);
2636 + skb->dev = nf_bridge->physindev;
2637 + ret = br_handle_frame_finish(net, sk, skb);
2638 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2639 +index 491828713e0b..5e55cef0cec3 100644
2640 +--- a/net/bridge/netfilter/ebtables.c
2641 ++++ b/net/bridge/netfilter/ebtables.c
2642 +@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
2643 + tmp.name[sizeof(tmp.name) - 1] = 0;
2644 +
2645 + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2646 +- newinfo = vmalloc(sizeof(*newinfo) + countersize);
2647 ++ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
2648 ++ PAGE_KERNEL);
2649 + if (!newinfo)
2650 + return -ENOMEM;
2651 +
2652 + if (countersize)
2653 + memset(newinfo->counters, 0, countersize);
2654 +
2655 +- newinfo->entries = vmalloc(tmp.entries_size);
2656 ++ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
2657 ++ PAGE_KERNEL);
2658 + if (!newinfo->entries) {
2659 + ret = -ENOMEM;
2660 + goto free_newinfo;
2661 +diff --git a/net/can/gw.c b/net/can/gw.c
2662 +index faa3da88a127..53859346dc9a 100644
2663 +--- a/net/can/gw.c
2664 ++++ b/net/can/gw.c
2665 +@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
2666 + while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
2667 + (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
2668 +
2669 +- /* check for checksum updates when the CAN frame has been modified */
2670 ++ /* Has the CAN frame been modified? */
2671 + if (modidx) {
2672 +- if (gwj->mod.csumfunc.crc8)
2673 ++ /* get available space for the processed CAN frame type */
2674 ++ int max_len = nskb->len - offsetof(struct can_frame, data);
2675 ++
2676 ++ /* dlc may have changed, make sure it fits the CAN frame */
2677 ++ if (cf->can_dlc > max_len)
2678 ++ goto out_delete;
2679 ++
2680 ++ /* check for checksum updates in classic CAN length only */
2681 ++ if (gwj->mod.csumfunc.crc8) {
2682 ++ if (cf->can_dlc > 8)
2683 ++ goto out_delete;
2684 ++
2685 + (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
2686 ++ }
2687 ++
2688 ++ if (gwj->mod.csumfunc.xor) {
2689 ++ if (cf->can_dlc > 8)
2690 ++ goto out_delete;
2691 +
2692 +- if (gwj->mod.csumfunc.xor)
2693 + (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
2694 ++ }
2695 + }
2696 +
2697 + /* clear the skb timestamp if not configured the other way */
2698 +@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
2699 + gwj->dropped_frames++;
2700 + else
2701 + gwj->handled_frames++;
2702 ++
2703 ++ return;
2704 ++
2705 ++ out_delete:
2706 ++ /* delete frame due to misconfiguration */
2707 ++ gwj->deleted_frames++;
2708 ++ kfree_skb(nskb);
2709 ++ return;
2710 + }
2711 +
2712 + static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
2713 +diff --git a/net/core/filter.c b/net/core/filter.c
2714 +index 5e00f2b85a56..8c2411fb2509 100644
2715 +--- a/net/core/filter.c
2716 ++++ b/net/core/filter.c
2717 +@@ -2018,18 +2018,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2718 + static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2719 + u32 flags)
2720 + {
2721 +- /* skb->mac_len is not set on normal egress */
2722 +- unsigned int mlen = skb->network_header - skb->mac_header;
2723 ++ unsigned int mlen = skb_network_offset(skb);
2724 +
2725 +- __skb_pull(skb, mlen);
2726 ++ if (mlen) {
2727 ++ __skb_pull(skb, mlen);
2728 +
2729 +- /* At ingress, the mac header has already been pulled once.
2730 +- * At egress, skb_pospull_rcsum has to be done in case that
2731 +- * the skb is originated from ingress (i.e. a forwarded skb)
2732 +- * to ensure that rcsum starts at net header.
2733 +- */
2734 +- if (!skb_at_tc_ingress(skb))
2735 +- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2736 ++ /* At ingress, the mac header has already been pulled once.
2737 ++ * At egress, skb_postpull_rcsum has to be done in case
2738 ++ * the skb originates from ingress (i.e. a forwarded skb)
2739 ++ * to ensure that rcsum starts at net header.
2740 ++ */
2741 ++ if (!skb_at_tc_ingress(skb))
2742 ++ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2743 ++ }
2744 + skb_pop_mac_header(skb);
2745 + skb_reset_mac_len(skb);
2746 + return flags & BPF_F_INGRESS ?
2747 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
2748 +index 3e85437f7106..a648568c5e8f 100644
2749 +--- a/net/core/lwt_bpf.c
2750 ++++ b/net/core/lwt_bpf.c
2751 +@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
2752 + lwt->name ? : "<unknown>");
2753 + ret = BPF_OK;
2754 + } else {
2755 ++ skb_reset_mac_header(skb);
2756 + ret = skb_do_redirect(skb);
2757 + if (ret == 0)
2758 + ret = BPF_REDIRECT;
2759 +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
2760 +index 26c36cccabdc..b7a26120d552 100644
2761 +--- a/net/ipv4/ip_sockglue.c
2762 ++++ b/net/ipv4/ip_sockglue.c
2763 +@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
2764 +
2765 + static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
2766 + {
2767 ++ __be16 _ports[2], *ports;
2768 + struct sockaddr_in sin;
2769 +- __be16 *ports;
2770 +- int end;
2771 +-
2772 +- end = skb_transport_offset(skb) + 4;
2773 +- if (end > 0 && !pskb_may_pull(skb, end))
2774 +- return;
2775 +
2776 + /* All current transport protocols have the port numbers in the
2777 + * first four bytes of the transport header and this function is
2778 + * written with this assumption in mind.
2779 + */
2780 +- ports = (__be16 *)skb_transport_header(skb);
2781 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
2782 ++ sizeof(_ports), &_ports);
2783 ++ if (!ports)
2784 ++ return;
2785 +
2786 + sin.sin_family = AF_INET;
2787 + sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
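skb_header_pointer() either returns a pointer into the linear packet data or copies the requested span into the caller's _ports buffer, so a packet too short to carry port numbers now yields NULL instead of a read through an unchecked transport-header offset. A flat-buffer model of that contract (a hypothetical helper, not the kernel implementation); the ipv6/datagram.c hunk below applies the identical conversion on the IPv6 side:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Either the span fits and is copied into storage the caller
     * owns, or NULL comes back; a short packet can no longer hand
     * out a pointer past its end. */
    static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
                                      size_t off, size_t len, void *buf)
    {
        if (off > pkt_len || len > pkt_len - off)
            return NULL;
        memcpy(buf, pkt + off, len);
        return buf;
    }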
2788 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
2789 +index 57eae8d70ba1..b1b5a648def6 100644
2790 +--- a/net/ipv4/tcp_timer.c
2791 ++++ b/net/ipv4/tcp_timer.c
2792 +@@ -224,7 +224,7 @@ static int tcp_write_timeout(struct sock *sk)
2793 + if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2794 + if (icsk->icsk_retransmits) {
2795 + dst_negative_advice(sk);
2796 +- } else if (!tp->syn_data && !tp->syn_fastopen) {
2797 ++ } else {
2798 + sk_rethink_txhash(sk);
2799 + }
2800 + retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
2801 +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
2802 +index 1ede7a16a0be..cb24850d2c7f 100644
2803 +--- a/net/ipv6/datagram.c
2804 ++++ b/net/ipv6/datagram.c
2805 +@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
2806 + skb_reset_network_header(skb);
2807 + iph = ipv6_hdr(skb);
2808 + iph->daddr = fl6->daddr;
2809 ++ ip6_flow_hdr(iph, 0, 0);
2810 +
2811 + serr = SKB_EXT_ERR(skb);
2812 + serr->ee.ee_errno = err;
2813 +@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
2814 + }
2815 + if (np->rxopt.bits.rxorigdstaddr) {
2816 + struct sockaddr_in6 sin6;
2817 +- __be16 *ports;
2818 +- int end;
2819 ++ __be16 _ports[2], *ports;
2820 +
2821 +- end = skb_transport_offset(skb) + 4;
2822 +- if (end <= 0 || pskb_may_pull(skb, end)) {
2823 ++ ports = skb_header_pointer(skb, skb_transport_offset(skb),
2824 ++ sizeof(_ports), &_ports);
2825 ++ if (ports) {
2826 + /* All current transport protocols have the port numbers in the
2827 + * first four bytes of the transport header and this function is
2828 + * written with this assumption in mind.
2829 + */
2830 +- ports = (__be16 *)skb_transport_header(skb);
2831 +-
2832 + sin6.sin6_family = AF_INET6;
2833 + sin6.sin6_addr = ipv6_hdr(skb)->daddr;
2834 + sin6.sin6_port = ports[1];
2835 +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
2836 +index c9c53ade55c3..6d14cbe443f8 100644
2837 +--- a/net/ipv6/icmp.c
2838 ++++ b/net/ipv6/icmp.c
2839 +@@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb)
2840 + static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
2841 + const struct in6_addr *force_saddr)
2842 + {
2843 +- struct net *net = dev_net(skb->dev);
2844 + struct inet6_dev *idev = NULL;
2845 + struct ipv6hdr *hdr = ipv6_hdr(skb);
2846 + struct sock *sk;
2847 ++ struct net *net;
2848 + struct ipv6_pinfo *np;
2849 + const struct in6_addr *saddr = NULL;
2850 + struct dst_entry *dst;
2851 +@@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
2852 + int iif = 0;
2853 + int addr_type = 0;
2854 + int len;
2855 +- u32 mark = IP6_REPLY_MARK(net, skb->mark);
2856 ++ u32 mark;
2857 +
2858 + if ((u8 *)hdr < skb->head ||
2859 + (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
2860 + return;
2861 +
2862 ++ if (!skb->dev)
2863 ++ return;
2864 ++ net = dev_net(skb->dev);
2865 ++ mark = IP6_REPLY_MARK(net, skb->mark);
2866 + /*
2867 + * Make sure we respect the rules
2868 + * i.e. RFC 1885 2.4(e)
2869 +diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
2870 +index 9cd180bda092..7554c56b2e63 100644
2871 +--- a/net/netfilter/nf_conncount.c
2872 ++++ b/net/netfilter/nf_conncount.c
2873 +@@ -33,12 +33,6 @@
2874 +
2875 + #define CONNCOUNT_SLOTS 256U
2876 +
2877 +-#ifdef CONFIG_LOCKDEP
2878 +-#define CONNCOUNT_LOCK_SLOTS 8U
2879 +-#else
2880 +-#define CONNCOUNT_LOCK_SLOTS 256U
2881 +-#endif
2882 +-
2883 + #define CONNCOUNT_GC_MAX_NODES 8
2884 + #define MAX_KEYLEN 5
2885 +
2886 +@@ -49,8 +43,6 @@ struct nf_conncount_tuple {
2887 + struct nf_conntrack_zone zone;
2888 + int cpu;
2889 + u32 jiffies32;
2890 +- bool dead;
2891 +- struct rcu_head rcu_head;
2892 + };
2893 +
2894 + struct nf_conncount_rb {
2895 +@@ -60,7 +52,7 @@ struct nf_conncount_rb {
2896 + struct rcu_head rcu_head;
2897 + };
2898 +
2899 +-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
2900 ++static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
2901 +
2902 + struct nf_conncount_data {
2903 + unsigned int keylen;
2904 +@@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
2905 + return memcmp(a, b, klen * sizeof(u32));
2906 + }
2907 +
2908 +-enum nf_conncount_list_add
2909 +-nf_conncount_add(struct nf_conncount_list *list,
2910 +- const struct nf_conntrack_tuple *tuple,
2911 +- const struct nf_conntrack_zone *zone)
2912 +-{
2913 +- struct nf_conncount_tuple *conn;
2914 +-
2915 +- if (WARN_ON_ONCE(list->count > INT_MAX))
2916 +- return NF_CONNCOUNT_ERR;
2917 +-
2918 +- conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
2919 +- if (conn == NULL)
2920 +- return NF_CONNCOUNT_ERR;
2921 +-
2922 +- conn->tuple = *tuple;
2923 +- conn->zone = *zone;
2924 +- conn->cpu = raw_smp_processor_id();
2925 +- conn->jiffies32 = (u32)jiffies;
2926 +- conn->dead = false;
2927 +- spin_lock_bh(&list->list_lock);
2928 +- if (list->dead == true) {
2929 +- kmem_cache_free(conncount_conn_cachep, conn);
2930 +- spin_unlock_bh(&list->list_lock);
2931 +- return NF_CONNCOUNT_SKIP;
2932 +- }
2933 +- list_add_tail(&conn->node, &list->head);
2934 +- list->count++;
2935 +- spin_unlock_bh(&list->list_lock);
2936 +- return NF_CONNCOUNT_ADDED;
2937 +-}
2938 +-EXPORT_SYMBOL_GPL(nf_conncount_add);
2939 +-
2940 +-static void __conn_free(struct rcu_head *h)
2941 +-{
2942 +- struct nf_conncount_tuple *conn;
2943 +-
2944 +- conn = container_of(h, struct nf_conncount_tuple, rcu_head);
2945 +- kmem_cache_free(conncount_conn_cachep, conn);
2946 +-}
2947 +-
2948 +-static bool conn_free(struct nf_conncount_list *list,
2949 ++static void conn_free(struct nf_conncount_list *list,
2950 + struct nf_conncount_tuple *conn)
2951 + {
2952 +- bool free_entry = false;
2953 +-
2954 +- spin_lock_bh(&list->list_lock);
2955 +-
2956 +- if (conn->dead) {
2957 +- spin_unlock_bh(&list->list_lock);
2958 +- return free_entry;
2959 +- }
2960 ++ lockdep_assert_held(&list->list_lock);
2961 +
2962 + list->count--;
2963 +- conn->dead = true;
2964 +- list_del_rcu(&conn->node);
2965 +- if (list->count == 0) {
2966 +- list->dead = true;
2967 +- free_entry = true;
2968 +- }
2969 ++ list_del(&conn->node);
2970 +
2971 +- spin_unlock_bh(&list->list_lock);
2972 +- call_rcu(&conn->rcu_head, __conn_free);
2973 +- return free_entry;
2974 ++ kmem_cache_free(conncount_conn_cachep, conn);
2975 + }
2976 +
2977 + static const struct nf_conntrack_tuple_hash *
2978 + find_or_evict(struct net *net, struct nf_conncount_list *list,
2979 +- struct nf_conncount_tuple *conn, bool *free_entry)
2980 ++ struct nf_conncount_tuple *conn)
2981 + {
2982 + const struct nf_conntrack_tuple_hash *found;
2983 + unsigned long a, b;
2984 + int cpu = raw_smp_processor_id();
2985 +- __s32 age;
2986 ++ u32 age;
2987 +
2988 + found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
2989 + if (found)
2990 +@@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
2991 + */
2992 + age = a - b;
2993 + if (conn->cpu == cpu || age >= 2) {
2994 +- *free_entry = conn_free(list, conn);
2995 ++ conn_free(list, conn);
2996 + return ERR_PTR(-ENOENT);
2997 + }
2998 +
2999 + return ERR_PTR(-EAGAIN);
3000 + }
3001 +
3002 +-void nf_conncount_lookup(struct net *net,
3003 +- struct nf_conncount_list *list,
3004 +- const struct nf_conntrack_tuple *tuple,
3005 +- const struct nf_conntrack_zone *zone,
3006 +- bool *addit)
3007 ++static int __nf_conncount_add(struct net *net,
3008 ++ struct nf_conncount_list *list,
3009 ++ const struct nf_conntrack_tuple *tuple,
3010 ++ const struct nf_conntrack_zone *zone)
3011 + {
3012 + const struct nf_conntrack_tuple_hash *found;
3013 + struct nf_conncount_tuple *conn, *conn_n;
3014 + struct nf_conn *found_ct;
3015 + unsigned int collect = 0;
3016 +- bool free_entry = false;
3017 +-
3018 +- /* best effort only */
3019 +- *addit = tuple ? true : false;
3020 +
3021 + /* check the saved connections */
3022 + list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3023 + if (collect > CONNCOUNT_GC_MAX_NODES)
3024 + break;
3025 +
3026 +- found = find_or_evict(net, list, conn, &free_entry);
3027 ++ found = find_or_evict(net, list, conn);
3028 + if (IS_ERR(found)) {
3029 + /* Not found, but might be about to be confirmed */
3030 + if (PTR_ERR(found) == -EAGAIN) {
3031 +- if (!tuple)
3032 +- continue;
3033 +-
3034 + if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
3035 + nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
3036 + nf_ct_zone_id(zone, zone->dir))
3037 +- *addit = false;
3038 +- } else if (PTR_ERR(found) == -ENOENT)
3039 ++ return 0; /* already exists */
3040 ++ } else {
3041 + collect++;
3042 ++ }
3043 + continue;
3044 + }
3045 +
3046 + found_ct = nf_ct_tuplehash_to_ctrack(found);
3047 +
3048 +- if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
3049 ++ if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
3050 + nf_ct_zone_equal(found_ct, zone, zone->dir)) {
3051 + /*
3052 + * We should not see tuples twice unless someone hooks
3053 +@@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
3054 + *
3055 + * Attempt to avoid a re-add in this case.
3056 + */
3057 +- *addit = false;
3058 ++ nf_ct_put(found_ct);
3059 ++ return 0;
3060 + } else if (already_closed(found_ct)) {
3061 + /*
3062 + * we do not care about connections which are
3063 +@@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
3064 +
3065 + nf_ct_put(found_ct);
3066 + }
3067 ++
3068 ++ if (WARN_ON_ONCE(list->count > INT_MAX))
3069 ++ return -EOVERFLOW;
3070 ++
3071 ++ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
3072 ++ if (conn == NULL)
3073 ++ return -ENOMEM;
3074 ++
3075 ++ conn->tuple = *tuple;
3076 ++ conn->zone = *zone;
3077 ++ conn->cpu = raw_smp_processor_id();
3078 ++ conn->jiffies32 = (u32)jiffies;
3079 ++ list_add_tail(&conn->node, &list->head);
3080 ++ list->count++;
3081 ++ return 0;
3082 + }
3083 +-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
3084 ++
3085 ++int nf_conncount_add(struct net *net,
3086 ++ struct nf_conncount_list *list,
3087 ++ const struct nf_conntrack_tuple *tuple,
3088 ++ const struct nf_conntrack_zone *zone)
3089 ++{
3090 ++ int ret;
3091 ++
3092 ++ /* check the saved connections */
3093 ++ spin_lock_bh(&list->list_lock);
3094 ++ ret = __nf_conncount_add(net, list, tuple, zone);
3095 ++ spin_unlock_bh(&list->list_lock);
3096 ++
3097 ++ return ret;
3098 ++}
3099 ++EXPORT_SYMBOL_GPL(nf_conncount_add);
3100 +
3101 + void nf_conncount_list_init(struct nf_conncount_list *list)
3102 + {
3103 + spin_lock_init(&list->list_lock);
3104 + INIT_LIST_HEAD(&list->head);
3105 + list->count = 0;
3106 +- list->dead = false;
3107 + }
3108 + EXPORT_SYMBOL_GPL(nf_conncount_list_init);
3109 +
3110 +-/* Return true if the list is empty */
3111 ++/* Return true if the list is empty. Must be called with BH disabled. */
3112 + bool nf_conncount_gc_list(struct net *net,
3113 + struct nf_conncount_list *list)
3114 + {
3115 +@@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
3116 + struct nf_conncount_tuple *conn, *conn_n;
3117 + struct nf_conn *found_ct;
3118 + unsigned int collected = 0;
3119 +- bool free_entry = false;
3120 + bool ret = false;
3121 +
3122 ++ /* don't bother if other cpu is already doing GC */
3123 ++ if (!spin_trylock(&list->list_lock))
3124 ++ return false;
3125 ++
3126 + list_for_each_entry_safe(conn, conn_n, &list->head, node) {
3127 +- found = find_or_evict(net, list, conn, &free_entry);
3128 ++ found = find_or_evict(net, list, conn);
3129 + if (IS_ERR(found)) {
3130 +- if (PTR_ERR(found) == -ENOENT) {
3131 +- if (free_entry)
3132 +- return true;
3133 ++ if (PTR_ERR(found) == -ENOENT)
3134 + collected++;
3135 +- }
3136 + continue;
3137 + }
3138 +
3139 +@@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
3140 + * closed already -> ditch it
3141 + */
3142 + nf_ct_put(found_ct);
3143 +- if (conn_free(list, conn))
3144 +- return true;
3145 ++ conn_free(list, conn);
3146 + collected++;
3147 + continue;
3148 + }
3149 +
3150 + nf_ct_put(found_ct);
3151 + if (collected > CONNCOUNT_GC_MAX_NODES)
3152 +- return false;
3153 ++ break;
3154 + }
3155 +
3156 +- spin_lock_bh(&list->list_lock);
3157 +- if (!list->count) {
3158 +- list->dead = true;
3159 ++ if (!list->count)
3160 + ret = true;
3161 +- }
3162 +- spin_unlock_bh(&list->list_lock);
3163 ++ spin_unlock(&list->list_lock);
3164 +
3165 + return ret;
3166 + }
3167 +@@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
3168 + kmem_cache_free(conncount_rb_cachep, rbconn);
3169 + }
3170 +
3171 ++/* caller must hold tree nf_conncount_locks[] lock */
3172 + static void tree_nodes_free(struct rb_root *root,
3173 + struct nf_conncount_rb *gc_nodes[],
3174 + unsigned int gc_count)
3175 +@@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
3176 + while (gc_count) {
3177 + rbconn = gc_nodes[--gc_count];
3178 + spin_lock(&rbconn->list.list_lock);
3179 +- rb_erase(&rbconn->node, root);
3180 +- call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3181 ++ if (!rbconn->list.count) {
3182 ++ rb_erase(&rbconn->node, root);
3183 ++ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
3184 ++ }
3185 + spin_unlock(&rbconn->list.list_lock);
3186 + }
3187 + }
3188 +@@ -341,20 +301,19 @@ insert_tree(struct net *net,
3189 + struct rb_root *root,
3190 + unsigned int hash,
3191 + const u32 *key,
3192 +- u8 keylen,
3193 + const struct nf_conntrack_tuple *tuple,
3194 + const struct nf_conntrack_zone *zone)
3195 + {
3196 +- enum nf_conncount_list_add ret;
3197 + struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
3198 + struct rb_node **rbnode, *parent;
3199 + struct nf_conncount_rb *rbconn;
3200 + struct nf_conncount_tuple *conn;
3201 + unsigned int count = 0, gc_count = 0;
3202 +- bool node_found = false;
3203 +-
3204 +- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3205 ++ u8 keylen = data->keylen;
3206 ++ bool do_gc = true;
3207 +
3208 ++ spin_lock_bh(&nf_conncount_locks[hash]);
3209 ++restart:
3210 + parent = NULL;
3211 + rbnode = &(root->rb_node);
3212 + while (*rbnode) {
3213 +@@ -368,45 +327,32 @@ insert_tree(struct net *net,
3214 + } else if (diff > 0) {
3215 + rbnode = &((*rbnode)->rb_right);
3216 + } else {
3217 +- /* unlikely: other cpu added node already */
3218 +- node_found = true;
3219 +- ret = nf_conncount_add(&rbconn->list, tuple, zone);
3220 +- if (ret == NF_CONNCOUNT_ERR) {
3221 ++ int ret;
3222 ++
3223 ++ ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
3224 ++ if (ret)
3225 + count = 0; /* hotdrop */
3226 +- } else if (ret == NF_CONNCOUNT_ADDED) {
3227 ++ else
3228 + count = rbconn->list.count;
3229 +- } else {
3230 +- /* NF_CONNCOUNT_SKIP, rbconn is already
3231 +- * reclaimed by gc, insert a new tree node
3232 +- */
3233 +- node_found = false;
3234 +- }
3235 +- break;
3236 ++ tree_nodes_free(root, gc_nodes, gc_count);
3237 ++ goto out_unlock;
3238 + }
3239 +
3240 + if (gc_count >= ARRAY_SIZE(gc_nodes))
3241 + continue;
3242 +
3243 +- if (nf_conncount_gc_list(net, &rbconn->list))
3244 ++ if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
3245 + gc_nodes[gc_count++] = rbconn;
3246 + }
3247 +
3248 + if (gc_count) {
3249 + tree_nodes_free(root, gc_nodes, gc_count);
3250 +- /* tree_node_free before new allocation permits
3251 +- * allocator to re-use newly free'd object.
3252 +- *
3253 +- * This is a rare event; in most cases we will find
3254 +- * existing node to re-use. (or gc_count is 0).
3255 +- */
3256 +-
3257 +- if (gc_count >= ARRAY_SIZE(gc_nodes))
3258 +- schedule_gc_worker(data, hash);
3259 ++ schedule_gc_worker(data, hash);
3260 ++ gc_count = 0;
3261 ++ do_gc = false;
3262 ++ goto restart;
3263 + }
3264 +
3265 +- if (node_found)
3266 +- goto out_unlock;
3267 +-
3268 + /* expected case: match, insert new node */
3269 + rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
3270 + if (rbconn == NULL)
3271 +@@ -430,7 +376,7 @@ insert_tree(struct net *net,
3272 + rb_link_node_rcu(&rbconn->node, parent, rbnode);
3273 + rb_insert_color(&rbconn->node, root);
3274 + out_unlock:
3275 +- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
3276 ++ spin_unlock_bh(&nf_conncount_locks[hash]);
3277 + return count;
3278 + }
3279 +
3280 +@@ -441,7 +387,6 @@ count_tree(struct net *net,
3281 + const struct nf_conntrack_tuple *tuple,
3282 + const struct nf_conntrack_zone *zone)
3283 + {
3284 +- enum nf_conncount_list_add ret;
3285 + struct rb_root *root;
3286 + struct rb_node *parent;
3287 + struct nf_conncount_rb *rbconn;
3288 +@@ -454,7 +399,6 @@ count_tree(struct net *net,
3289 + parent = rcu_dereference_raw(root->rb_node);
3290 + while (parent) {
3291 + int diff;
3292 +- bool addit;
3293 +
3294 + rbconn = rb_entry(parent, struct nf_conncount_rb, node);
3295 +
3296 +@@ -464,31 +408,36 @@ count_tree(struct net *net,
3297 + } else if (diff > 0) {
3298 + parent = rcu_dereference_raw(parent->rb_right);
3299 + } else {
3300 +- /* same source network -> be counted! */
3301 +- nf_conncount_lookup(net, &rbconn->list, tuple, zone,
3302 +- &addit);
3303 ++ int ret;
3304 +
3305 +- if (!addit)
3306 ++ if (!tuple) {
3307 ++ nf_conncount_gc_list(net, &rbconn->list);
3308 + return rbconn->list.count;
3309 ++ }
3310 +
3311 +- ret = nf_conncount_add(&rbconn->list, tuple, zone);
3312 +- if (ret == NF_CONNCOUNT_ERR) {
3313 +- return 0; /* hotdrop */
3314 +- } else if (ret == NF_CONNCOUNT_ADDED) {
3315 +- return rbconn->list.count;
3316 +- } else {
3317 +- /* NF_CONNCOUNT_SKIP, rbconn is already
3318 +- * reclaimed by gc, insert a new tree node
3319 +- */
3320 ++ spin_lock_bh(&rbconn->list.list_lock);
3321 ++ /* Node might be about to be free'd.
3322 ++ * We need to defer to insert_tree() in this case.
3323 ++ */
3324 ++ if (rbconn->list.count == 0) {
3325 ++ spin_unlock_bh(&rbconn->list.list_lock);
3326 + break;
3327 + }
3328 ++
3329 ++ /* same source network -> be counted! */
3330 ++ ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
3331 ++ spin_unlock_bh(&rbconn->list.list_lock);
3332 ++ if (ret)
3333 ++ return 0; /* hotdrop */
3334 ++ else
3335 ++ return rbconn->list.count;
3336 + }
3337 + }
3338 +
3339 + if (!tuple)
3340 + return 0;
3341 +
3342 +- return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
3343 ++ return insert_tree(net, data, root, hash, key, tuple, zone);
3344 + }
3345 +
3346 + static void tree_gc_worker(struct work_struct *work)
3347 +@@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
3348 + struct rb_node *node;
3349 + unsigned int tree, next_tree, gc_count = 0;
3350 +
3351 +- tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
3352 ++ tree = data->gc_tree % CONNCOUNT_SLOTS;
3353 + root = &data->root[tree];
3354 +
3355 ++ local_bh_disable();
3356 + rcu_read_lock();
3357 + for (node = rb_first(root); node != NULL; node = rb_next(node)) {
3358 + rbconn = rb_entry(node, struct nf_conncount_rb, node);
3359 + if (nf_conncount_gc_list(data->net, &rbconn->list))
3360 +- gc_nodes[gc_count++] = rbconn;
3361 ++ gc_count++;
3362 + }
3363 + rcu_read_unlock();
3364 ++ local_bh_enable();
3365 ++
3366 ++ cond_resched();
3367 +
3368 + spin_lock_bh(&nf_conncount_locks[tree]);
3369 ++ if (gc_count < ARRAY_SIZE(gc_nodes))
3370 ++ goto next; /* do not bother */
3371 +
3372 +- if (gc_count) {
3373 +- tree_nodes_free(root, gc_nodes, gc_count);
3374 ++ gc_count = 0;
3375 ++ node = rb_first(root);
3376 ++ while (node != NULL) {
3377 ++ rbconn = rb_entry(node, struct nf_conncount_rb, node);
3378 ++ node = rb_next(node);
3379 ++
3380 ++ if (rbconn->list.count > 0)
3381 ++ continue;
3382 ++
3383 ++ gc_nodes[gc_count++] = rbconn;
3384 ++ if (gc_count >= ARRAY_SIZE(gc_nodes)) {
3385 ++ tree_nodes_free(root, gc_nodes, gc_count);
3386 ++ gc_count = 0;
3387 ++ }
3388 + }
3389 +
3390 ++ tree_nodes_free(root, gc_nodes, gc_count);
3391 ++next:
3392 + clear_bit(tree, data->pending_trees);
3393 +
3394 + next_tree = (tree + 1) % CONNCOUNT_SLOTS;
3395 +- next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS);
3396 ++ next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
3397 +
3398 + if (next_tree < CONNCOUNT_SLOTS) {
3399 + data->gc_tree = next_tree;
3400 +@@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
3401 + {
3402 + int i;
3403 +
3404 +- BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
3405 +- BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
3406 +-
3407 +- for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
3408 ++ for (i = 0; i < CONNCOUNT_SLOTS; ++i)
3409 + spin_lock_init(&nf_conncount_locks[i]);
3410 +
3411 + conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
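The nf_conncount hunks above backport the upstream rework that gives every connection list its own spinlock and lets garbage collection bail out when another CPU already holds it (spin_trylock), replacing the old global CONNCOUNT_LOCK_SLOTS array and the dead/free_entry bookkeeping. A minimal userspace sketch of that trylock-and-skip pattern, using pthreads and invented names rather than the kernel's types:

/* Userspace analogue of the "skip GC if another thread is collecting"
 * pattern added to nf_conncount_gc_list(); all names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>

struct counted_list {
	pthread_mutex_t lock;
	unsigned int count;
};

/* Returns true only when we both won the lock and found the list empty;
 * a contended lock means another thread is already collecting, so we
 * report "nothing freed" instead of blocking.
 */
static bool gc_list(struct counted_list *list)
{
	bool empty;

	if (pthread_mutex_trylock(&list->lock) != 0)
		return false;	/* other CPU is already doing GC */

	/* ... walk entries, drop dead ones, decrement list->count ... */

	empty = (list->count == 0);
	pthread_mutex_unlock(&list->lock);
	return empty;
}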
3412 +diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
3413 +index b90d96ba4a12..af1497ab9464 100644
3414 +--- a/net/netfilter/nft_connlimit.c
3415 ++++ b/net/netfilter/nft_connlimit.c
3416 +@@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3417 + enum ip_conntrack_info ctinfo;
3418 + const struct nf_conn *ct;
3419 + unsigned int count;
3420 +- bool addit;
3421 +
3422 + tuple_ptr = &tuple;
3423 +
3424 +@@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
3425 + return;
3426 + }
3427 +
3428 +- nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
3429 +- &addit);
3430 +- count = priv->list.count;
3431 +-
3432 +- if (!addit)
3433 +- goto out;
3434 +-
3435 +- if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
3436 ++ if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
3437 + regs->verdict.code = NF_DROP;
3438 + return;
3439 + }
3440 +- count++;
3441 +-out:
3442 ++
3443 ++ count = priv->list.count;
3444 +
3445 + if ((count > priv->limit) ^ priv->invert) {
3446 + regs->verdict.code = NFT_BREAK;
3447 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3448 +index 0541cfc93440..b6ea0fadb34f 100644
3449 +--- a/net/packet/af_packet.c
3450 ++++ b/net/packet/af_packet.c
3451 +@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3452 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3453 + dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
3454 + if (addr && dev && saddr->sll_halen < dev->addr_len)
3455 +- goto out;
3456 ++ goto out_put;
3457 + }
3458 +
3459 + err = -ENXIO;
3460 +@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
3461 + addr = saddr->sll_halen ? saddr->sll_addr : NULL;
3462 + dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3463 + if (addr && dev && saddr->sll_halen < dev->addr_len)
3464 +- goto out;
3465 ++ goto out_unlock;
3466 + }
3467 +
3468 + err = -ENXIO;
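Both af_packet hunks fix the same reference leak: dev_get_by_index() has already taken a device reference by the time the sll_halen check fails, so erroring out through plain "out" skips the matching put. A hedged userspace sketch of the goto-unwind rule being restored, with hypothetical helpers standing in for the kernel calls:

/* Sketch of the leak the two af_packet hunks fix: once a lookup has
 * taken a reference, every later error path must jump past the matching
 * put, not around it. device_get()/device_put() are assumed helpers.
 */
#include <errno.h>
#include <stddef.h>

struct device { int refcnt; };

struct device *device_get(int index);	/* takes a reference; assumed */
void device_put(struct device *dev);	/* drops it; assumed */

static int send_via(int ifindex, size_t addrlen, size_t devlen)
{
	struct device *dev;
	int err = -EINVAL;

	dev = device_get(ifindex);
	if (!dev)
		goto out;	/* nothing acquired yet: plain exit is fine */

	if (addrlen < devlen)
		goto out_put;	/* the bug was "goto out" here, leaking dev */

	err = 0;
	/* ... transmit ... */

out_put:
	device_put(dev);
out:
	return err;
}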
3469 +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3470 +index 7f0539db5604..0bae07e9c9e7 100644
3471 +--- a/net/sctp/ipv6.c
3472 ++++ b/net/sctp/ipv6.c
3473 +@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
3474 +
3475 + switch (ev) {
3476 + case NETDEV_UP:
3477 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
3478 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3479 + if (addr) {
3480 + addr->a.v6.sin6_family = AF_INET6;
3481 +- addr->a.v6.sin6_port = 0;
3482 +- addr->a.v6.sin6_flowinfo = 0;
3483 + addr->a.v6.sin6_addr = ifa->addr;
3484 + addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
3485 + addr->valid = 1;
3486 +@@ -431,7 +429,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
3487 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3488 + if (addr) {
3489 + addr->a.v6.sin6_family = AF_INET6;
3490 +- addr->a.v6.sin6_port = 0;
3491 + addr->a.v6.sin6_addr = ifp->addr;
3492 + addr->a.v6.sin6_scope_id = dev->ifindex;
3493 + addr->valid = 1;
3494 +diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
3495 +index e948db29ab53..d4352111e69d 100644
3496 +--- a/net/sctp/protocol.c
3497 ++++ b/net/sctp/protocol.c
3498 +@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
3499 + addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3500 + if (addr) {
3501 + addr->a.v4.sin_family = AF_INET;
3502 +- addr->a.v4.sin_port = 0;
3503 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
3504 + addr->valid = 1;
3505 + INIT_LIST_HEAD(&addr->list);
3506 +@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
3507 +
3508 + switch (ev) {
3509 + case NETDEV_UP:
3510 +- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
3511 ++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
3512 + if (addr) {
3513 + addr->a.v4.sin_family = AF_INET;
3514 +- addr->a.v4.sin_port = 0;
3515 + addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
3516 + addr->valid = 1;
3517 + spin_lock_bh(&net->sctp.local_addr_lock);
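The sctp changes swap kmalloc() plus field-by-field zeroing for kzalloc(), so fields nobody remembered to clear (sin_port, sin6_flowinfo, struct padding) can no longer carry stale memory. The same idea in plain C with calloc() and a simplified stand-in struct:

/* Userspace analogue of the sctp change: allocate zeroed memory up front
 * (kzalloc/calloc) instead of malloc plus hand-zeroing individual fields.
 * The struct layout is a simplified stand-in, not the kernel's.
 */
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct sockaddr_entry {
	struct sockaddr_in a;
	int valid;
};

static struct sockaddr_entry *entry_new(in_addr_t local)
{
	struct sockaddr_entry *addr = calloc(1, sizeof(*addr));

	if (addr) {
		addr->a.sin_family = AF_INET;
		addr->a.sin_addr.s_addr = local;
		addr->valid = 1;
		/* sin_port and any padding are already zero */
	}
	return addr;
}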
3518 +diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
3519 +index 2b8f95290627..e6e506b2db99 100644
3520 +--- a/net/smc/af_smc.c
3521 ++++ b/net/smc/af_smc.c
3522 +@@ -144,6 +144,9 @@ static int smc_release(struct socket *sock)
3523 + sock_set_flag(sk, SOCK_DEAD);
3524 + sk->sk_shutdown |= SHUTDOWN_MASK;
3525 + }
3526 ++
3527 ++ sk->sk_prot->unhash(sk);
3528 ++
3529 + if (smc->clcsock) {
3530 + if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
3531 + /* wake up clcsock accept */
3532 +@@ -168,7 +171,6 @@ static int smc_release(struct socket *sock)
3533 + smc_conn_free(&smc->conn);
3534 + release_sock(sk);
3535 +
3536 +- sk->sk_prot->unhash(sk);
3537 + sock_put(sk); /* final sock_put */
3538 + out:
3539 + return rc;
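The smc hunk moves sk_prot->unhash() to the start of release, before the connection state is torn down, so a concurrent lookup can no longer find a socket that is mid-destruction. Roughly this ordering rule, sketched with hypothetical userspace stand-ins:

/* Sketch of the ordering the smc hunk enforces: make the object
 * unreachable (unhash) before tearing down its internals. The table
 * helpers below are assumed, not real kernel or libc APIs.
 */
struct sock_table;

void table_remove(struct sock_table *t, void *sk);	/* assumed */
void conn_free(void *sk);				/* assumed */

static void sock_release_ordered(struct sock_table *t, void *sk)
{
	/* 1. unhash first: no new lookups can reach sk */
	table_remove(t, sk);
	/* 2. only then free the connection state */
	conn_free(sk);
}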
3540 +diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
3541 +index c7872bc13860..08b5fa4a2852 100644
3542 +--- a/net/sunrpc/rpcb_clnt.c
3543 ++++ b/net/sunrpc/rpcb_clnt.c
3544 +@@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
3545 + case RPCBVERS_3:
3546 + map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
3547 + map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
3548 ++ if (!map->r_addr) {
3549 ++ status = -ENOMEM;
3550 ++ dprintk("RPC: %5u %s: no memory available\n",
3551 ++ task->tk_pid, __func__);
3552 ++ goto bailout_free_args;
3553 ++ }
3554 + map->r_owner = "";
3555 + break;
3556 + case RPCBVERS_2:
3557 +@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
3558 + rpc_put_task(child);
3559 + return;
3560 +
3561 ++bailout_free_args:
3562 ++ kfree(map);
3563 + bailout_release_client:
3564 + rpc_release_client(rpcb_clnt);
3565 + bailout_nofree:
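rpc_sockaddr2uaddr() allocates with GFP_ATOMIC and can fail; the rpcb hunk adds the missing check plus a new bailout_free_args label so the partially built map is freed on that path. The general unwinding-ladder shape, as a userspace sketch with invented names and an example universal address:

/* Sketch of the error ladder the rpcb hunk extends: each bailout label
 * frees exactly what has been acquired so far. Names and the address
 * string are illustrative only.
 */
#include <stdlib.h>
#include <string.h>

struct map { char *r_addr; };

static struct map *getport_setup(int *status)
{
	struct map *map = malloc(sizeof(*map));

	if (!map)
		goto bailout_nofree;

	map->r_addr = strdup("198.51.100.7.0.111");	/* example uaddr */
	if (!map->r_addr)
		goto bailout_free_args;			/* the added check */

	*status = 0;
	return map;	/* ownership passes to the caller (child task) */

bailout_free_args:
	free(map);
bailout_nofree:
	*status = -1;	/* -ENOMEM in the kernel */
	return NULL;
}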
3566 +diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3567 +index 6376467e78f8..0b21187d74df 100644
3568 +--- a/net/tipc/netlink_compat.c
3569 ++++ b/net/tipc/netlink_compat.c
3570 +@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
3571 + return limit;
3572 + }
3573 +
3574 ++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
3575 ++{
3576 ++ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
3577 ++}
3578 ++
3579 + static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
3580 + {
3581 + struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
3582 +@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
3583 + return buf;
3584 + }
3585 +
3586 ++static inline bool string_is_valid(char *s, int len)
3587 ++{
3588 ++ return memchr(s, '\0', len) ? true : false;
3589 ++}
3590 ++
3591 + static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
3592 + struct tipc_nl_compat_msg *msg,
3593 + struct sk_buff *arg)
3594 +@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3595 + struct nlattr *prop;
3596 + struct nlattr *bearer;
3597 + struct tipc_bearer_config *b;
3598 ++ int len;
3599 +
3600 + b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
3601 +
3602 +@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3603 + if (!bearer)
3604 + return -EMSGSIZE;
3605 +
3606 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3607 ++ if (!string_is_valid(b->name, len))
3608 ++ return -EINVAL;
3609 ++
3610 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
3611 + return -EMSGSIZE;
3612 +
3613 +@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
3614 + {
3615 + char *name;
3616 + struct nlattr *bearer;
3617 ++ int len;
3618 +
3619 + name = (char *)TLV_DATA(msg->req);
3620 +
3621 +@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
3622 + if (!bearer)
3623 + return -EMSGSIZE;
3624 +
3625 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3626 ++ if (!string_is_valid(name, len))
3627 ++ return -EINVAL;
3628 ++
3629 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
3630 + return -EMSGSIZE;
3631 +
3632 +@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
3633 + struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
3634 + struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
3635 + int err;
3636 ++ int len;
3637 +
3638 + if (!attrs[TIPC_NLA_LINK])
3639 + return -EINVAL;
3640 +@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
3641 + return err;
3642 +
3643 + name = (char *)TLV_DATA(msg->req);
3644 ++
3645 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3646 ++ if (!string_is_valid(name, len))
3647 ++ return -EINVAL;
3648 ++
3649 + if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
3650 + return 0;
3651 +
3652 +@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
3653 + struct nlattr *prop;
3654 + struct nlattr *media;
3655 + struct tipc_link_config *lc;
3656 ++ int len;
3657 +
3658 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3659 +
3660 +@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
3661 + if (!media)
3662 + return -EMSGSIZE;
3663 +
3664 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
3665 ++ if (!string_is_valid(lc->name, len))
3666 ++ return -EINVAL;
3667 ++
3668 + if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
3669 + return -EMSGSIZE;
3670 +
3671 +@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
3672 + struct nlattr *prop;
3673 + struct nlattr *bearer;
3674 + struct tipc_link_config *lc;
3675 ++ int len;
3676 +
3677 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3678 +
3679 +@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
3680 + if (!bearer)
3681 + return -EMSGSIZE;
3682 +
3683 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
3684 ++ if (!string_is_valid(lc->name, len))
3685 ++ return -EINVAL;
3686 ++
3687 + if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
3688 + return -EMSGSIZE;
3689 +
3690 +@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
3691 + struct tipc_link_config *lc;
3692 + struct tipc_bearer *bearer;
3693 + struct tipc_media *media;
3694 ++ int len;
3695 +
3696 + lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3697 +
3698 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3699 ++ if (!string_is_valid(lc->name, len))
3700 ++ return -EINVAL;
3701 ++
3702 + media = tipc_media_find(lc->name);
3703 + if (media) {
3704 + cmd->doit = &__tipc_nl_media_set;
3705 +@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
3706 + {
3707 + char *name;
3708 + struct nlattr *link;
3709 ++ int len;
3710 +
3711 + name = (char *)TLV_DATA(msg->req);
3712 +
3713 +@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
3714 + if (!link)
3715 + return -EMSGSIZE;
3716 +
3717 ++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3718 ++ if (!string_is_valid(name, len))
3719 ++ return -EINVAL;
3720 ++
3721 + if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
3722 + return -EMSGSIZE;
3723 +
3724 +@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
3725 + };
3726 +
3727 + ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
3728 ++ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
3729 ++ return -EINVAL;
3730 +
3731 + depth = ntohl(ntq->depth);
3732 +
3733 +@@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
3734 + }
3735 +
3736 + len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
3737 +- if (len && !TLV_OK(msg.req, len)) {
3738 ++ if (!len || !TLV_OK(msg.req, len)) {
3739 + msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
3740 + err = -EOPNOTSUPP;
3741 + goto send;
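Most of the tipc compat hunks add the same guard: a name copied out of a user-controlled TLV is passed to nla_put_string() only after memchr() proves a NUL terminator falls within the smaller of the TLV payload and the protocol's maximum name length. A self-contained version of that check:

/* Userspace version of the validation the tipc compat hunks add: a
 * string from an untrusted, length-prefixed buffer is safe to pass on
 * only if its terminator lies inside the claimed payload.
 */
#include <stdbool.h>
#include <string.h>

static bool string_is_valid(const char *s, size_t len)
{
	return memchr(s, '\0', len) != NULL;
}

/* cap the scan at both the payload size and the protocol maximum */
static bool bearer_name_ok(const char *name, size_t payload_len,
			   size_t max_name)
{
	size_t len = payload_len < max_name ? payload_len : max_name;

	return string_is_valid(name, len);
}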
3742 +diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
3743 +index b84c0059214f..d65eed88c495 100644
3744 +--- a/net/tipc/topsrv.c
3745 ++++ b/net/tipc/topsrv.c
3746 +@@ -404,7 +404,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
3747 + ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
3748 + if (ret == -EWOULDBLOCK)
3749 + return -EWOULDBLOCK;
3750 +- if (ret > 0) {
3751 ++ if (ret == sizeof(s)) {
3752 + read_lock_bh(&sk->sk_callback_lock);
3753 + ret = tipc_conn_rcv_sub(srv, con, &s);
3754 + read_unlock_bh(&sk->sk_callback_lock);
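The topsrv change tightens the read check from "ret > 0" to "ret == sizeof(s)", so a truncated subscription record is never interpreted as a complete one. The equivalent userspace pattern, with a hypothetical record type:

/* Sketch of the stricter check in tipc_conn_rcv_from_sock(): act on a
 * fixed-size control record only if the whole struct arrived, not on
 * any positive byte count. struct subscr_req is a stand-in.
 */
#include <stdbool.h>
#include <sys/types.h>
#include <sys/socket.h>

struct subscr_req { int type; int lower; int upper; };

static bool recv_whole_req(int fd, struct subscr_req *s)
{
	ssize_t ret = recv(fd, s, sizeof(*s), MSG_DONTWAIT);

	/* a short read would leave *s partially filled: reject it */
	return ret == (ssize_t)sizeof(*s);
}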
3755 +diff --git a/security/security.c b/security/security.c
3756 +index 736e78da1ab9..5ce2448f3a45 100644
3757 +--- a/security/security.c
3758 ++++ b/security/security.c
3759 +@@ -1003,6 +1003,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
3760 +
3761 + void security_cred_free(struct cred *cred)
3762 + {
3763 ++ /*
3764 ++ * There is a failure case in prepare_creds() that
3765 ++ * may result in a call here with ->security being NULL.
3766 ++ */
3767 ++ if (unlikely(cred->security == NULL))
3768 ++ return;
3769 ++
3770 + call_void_hook(cred_free, cred);
3771 + }
3772 +
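The security_cred_free() hunk simply tolerates a NULL ->security, which prepare_creds() can leave behind on its failure path. The shape of that guard, with illustrative types only:

/* Minimal shape of the added guard: a free hook must tolerate an object
 * whose setup path bailed out before ->security was assigned. Types and
 * the free call are stand-ins, not the LSM hook machinery.
 */
#include <stddef.h>
#include <stdlib.h>

struct cred { void *security; };

static void cred_free_hook(struct cred *cred)
{
	/* prepare_creds() can fail before allocating ->security */
	if (cred->security == NULL)
		return;

	free(cred->security);
	cred->security = NULL;
}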
3773 +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
3774 +index b63ef865ce1e..d31a52e56b9e 100644
3775 +--- a/security/selinux/ss/policydb.c
3776 ++++ b/security/selinux/ss/policydb.c
3777 +@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
3778 + kfree(key);
3779 + if (datum) {
3780 + levdatum = datum;
3781 +- ebitmap_destroy(&levdatum->level->cat);
3782 ++ if (levdatum->level)
3783 ++ ebitmap_destroy(&levdatum->level->cat);
3784 + kfree(levdatum->level);
3785 + }
3786 + kfree(datum);
3787 +diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
3788 +index ffda91a4a1aa..02514fe558b4 100644
3789 +--- a/security/yama/yama_lsm.c
3790 ++++ b/security/yama/yama_lsm.c
3791 +@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
3792 + break;
3793 + case YAMA_SCOPE_RELATIONAL:
3794 + rcu_read_lock();
3795 +- if (!task_is_descendant(current, child) &&
3796 ++ if (!pid_alive(child))
3797 ++ rc = -EPERM;
3798 ++ if (!rc && !task_is_descendant(current, child) &&
3799 + !ptracer_exception_found(current, child) &&
3800 + !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
3801 + rc = -EPERM;
3802 +diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
3803 +index d9a725478375..72c25a3cb658 100644
3804 +--- a/tools/testing/selftests/android/Makefile
3805 ++++ b/tools/testing/selftests/android/Makefile
3806 +@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
3807 +
3808 + include ../lib.mk
3809 +
3810 +-all: khdr
3811 ++all:
3812 + @for DIR in $(SUBDIRS); do \
3813 + BUILD_TARGET=$(OUTPUT)/$$DIR; \
3814 + mkdir $$BUILD_TARGET -p; \
3815 +diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
3816 +index ad1eeb14fda7..30996306cabc 100644
3817 +--- a/tools/testing/selftests/futex/functional/Makefile
3818 ++++ b/tools/testing/selftests/futex/functional/Makefile
3819 +@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
3820 + TEST_PROGS := run.sh
3821 +
3822 + top_srcdir = ../../../../..
3823 ++KSFT_KHDR_INSTALL := 1
3824 + include ../../lib.mk
3825 +
3826 + $(TEST_GEN_FILES): $(HEADERS)
3827 +diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
3828 +index 4665cdbf1a8d..59ea4c461978 100644
3829 +--- a/tools/testing/selftests/gpio/Makefile
3830 ++++ b/tools/testing/selftests/gpio/Makefile
3831 +@@ -9,6 +9,7 @@ EXTRA_OBJS := ../gpiogpio-event-mon-in.o ../gpiogpio-event-mon.o
3832 + EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
3833 + EXTRA_OBJS += ../gpiolsgpio.o
3834 +
3835 ++KSFT_KHDR_INSTALL := 1
3836 + include ../lib.mk
3837 +
3838 + all: $(BINARIES)
3839 +diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
3840 +index ec32dad3c3f0..cc83e2fd3787 100644
3841 +--- a/tools/testing/selftests/kvm/Makefile
3842 ++++ b/tools/testing/selftests/kvm/Makefile
3843 +@@ -1,6 +1,7 @@
3844 + all:
3845 +
3846 + top_srcdir = ../../../../
3847 ++KSFT_KHDR_INSTALL := 1
3848 + UNAME_M := $(shell uname -m)
3849 +
3850 + LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
3851 +@@ -40,4 +41,3 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
3852 +
3853 + all: $(STATIC_LIBS)
3854 + $(TEST_GEN_PROGS): $(STATIC_LIBS)
3855 +-$(STATIC_LIBS):| khdr
3856 +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
3857 +index 0a8e75886224..8b0f16409ed7 100644
3858 +--- a/tools/testing/selftests/lib.mk
3859 ++++ b/tools/testing/selftests/lib.mk
3860 +@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
3861 + TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
3862 + TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
3863 +
3864 ++ifdef KSFT_KHDR_INSTALL
3865 + top_srcdir ?= ../../../..
3866 + include $(top_srcdir)/scripts/subarch.include
3867 + ARCH ?= $(SUBARCH)
3868 +
3869 +-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3870 +-
3871 + .PHONY: khdr
3872 + khdr:
3873 + make ARCH=$(ARCH) -C $(top_srcdir) headers_install
3874 +
3875 +-ifdef KSFT_KHDR_INSTALL
3876 +-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
3877 ++all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3878 ++else
3879 ++all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
3880 + endif
3881 +
3882 + .ONESHELL:
3883 +diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
3884 +index 14cfcf006936..c46c0eefab9e 100644
3885 +--- a/tools/testing/selftests/networking/timestamping/Makefile
3886 ++++ b/tools/testing/selftests/networking/timestamping/Makefile
3887 +@@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
3888 + all: $(TEST_PROGS)
3889 +
3890 + top_srcdir = ../../../../..
3891 ++KSFT_KHDR_INSTALL := 1
3892 + include ../../lib.mk
3893 +
3894 + clean:
3895 +diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
3896 +index e94b7b14bcb2..dc68340a6a96 100644
3897 +--- a/tools/testing/selftests/vm/Makefile
3898 ++++ b/tools/testing/selftests/vm/Makefile
3899 +@@ -24,6 +24,7 @@ TEST_GEN_FILES += virtual_address_range
3900 +
3901 + TEST_PROGS := run_vmtests
3902 +
3903 ++KSFT_KHDR_INSTALL := 1
3904 + include ../lib.mk
3905 +
3906 + $(OUTPUT)/userfaultfd: LDLIBS += -lpthread