commit: 1d0b7141a46c176a558850740bde1516bee9e89d
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 22 23:07:29 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan 22 23:07:29 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1d0b7141

proj/linux-patches: Linux patch 4.20.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1003_linux-4.20.4.patch | 4396 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4400 insertions(+)

diff --git a/0000_README b/0000_README
index d6c119a..a9b0f09 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-4.20.3.patch
From: http://www.kernel.org
Desc: Linux 4.20.3

+Patch: 1003_linux-4.20.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.20.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.20.4.patch b/1003_linux-4.20.4.patch
new file mode 100644
index 0000000..1cb8b3d
--- /dev/null
+++ b/1003_linux-4.20.4.patch
@@ -0,0 +1,4396 @@
+diff --git a/Makefile b/Makefile
+index 3b9e4658d31f..a056dba5ede0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 20
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+ 
+@@ -967,6 +967,7 @@ ifdef CONFIG_STACK_VALIDATION
+ endif
+ endif
+ 
++PHONY += prepare0
+ 
+ ifeq ($(KBUILD_EXTMOD),)
+ core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+@@ -1075,8 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
+ # archprepare is used in arch Makefiles and when processed asm symlink,
+ # version.h and scripts_basic is processed / created.
+ 
+-# Listed in dependency order
+-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
++PHONY += prepare archprepare prepare1 prepare2 prepare3
+ 
+ # prepare3 is used to check if we are building in a separate output directory,
+ # and if so do:
+@@ -1545,9 +1545,6 @@ else # KBUILD_EXTMOD
+ 
+ # We are always building modules
+ KBUILD_MODULES := 1
+-PHONY += crmodverdir
+-crmodverdir:
+- $(cmd_crmodverdir)
+ 
+ PHONY += $(objtree)/Module.symvers
+ $(objtree)/Module.symvers:
+@@ -1559,7 +1556,7 @@ $(objtree)/Module.symvers:
+ 
+ module-dirs := $(addprefix _module_,$(KBUILD_EXTMOD))
+ PHONY += $(module-dirs) modules
+-$(module-dirs): crmodverdir $(objtree)/Module.symvers
++$(module-dirs): prepare $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+ 
+ modules: $(module-dirs)
+@@ -1600,7 +1597,8 @@ help:
+ 
+ # Dummies...
+ PHONY += prepare scripts
+-prepare: ;
++prepare:
++ $(cmd_crmodverdir)
+ scripts: ;
+ endif # KBUILD_EXTMOD
+ 
+@@ -1724,17 +1722,14 @@ endif
+ 
+ # Modules
+ /: prepare scripts FORCE
+- $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+ # Make sure the latest headers are built for Documentation
+ Documentation/ samples/: headers_install
+ %/: prepare scripts FORCE
+- $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+ %.ko: prepare scripts FORCE
+- $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+index 7d94c1fa592a..7f799cb5668e 100644
+--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+@@ -28,6 +28,23 @@
+ method = "smc";
+ };
+ 
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ /*
++ * This area matches the mapping done with a
++ * mainline U-Boot, and should be updated by the
++ * bootloader.
++ */
++
++ psci-area@4000000 {
++ reg = <0x0 0x4000000 0x0 0x200000>;
++ no-map;
++ };
++ };
++
+ ap806 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index 2dafd936d84d..bc2327d4a505 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -24,6 +24,8 @@
+ 
+ /* Hyp Configuration Register (HCR) bits */
+ #define HCR_FWB (UL(1) << 46)
++#define HCR_API (UL(1) << 41)
++#define HCR_APK (UL(1) << 40)
+ #define HCR_TEA (UL(1) << 37)
+ #define HCR_TERR (UL(1) << 36)
+ #define HCR_TLOR (UL(1) << 35)
+@@ -87,6 +89,7 @@
+ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
+ HCR_FMO | HCR_IMO)
+ #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
++#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
+ #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+ 
+ /* TCR_EL2 Registers bits */
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 4471f570a295..b207a2ce4bc6 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -496,10 +496,9 @@ ENTRY(el2_setup)
+ #endif
+ 
+ /* Hyp configuration. */
+- mov x0, #HCR_RW // 64-bit EL1
++ mov_q x0, HCR_HOST_NVHE_FLAGS
+ cbz x2, set_hcr
+- orr x0, x0, #HCR_TGE // Enable Host Extensions
+- orr x0, x0, #HCR_E2H
++ mov_q x0, HCR_HOST_VHE_FLAGS
+ set_hcr:
+ msr hcr_el2, x0
+ isb
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index f0e6ab8abe9c..ba6b41790fcd 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -14,6 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/types.h>
+ 
++#include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+ #include <asm/kernel-pgtable.h>
+ #include <asm/memory.h>
+@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
+ return ret;
+ }
+ 
+-static __init const u8 *get_cmdline(void *fdt)
++static __init const u8 *kaslr_get_cmdline(void *fdt)
+ {
+ static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
+ 
+@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ * Check if 'nokaslr' appears on the command line, and
+ * return 0 if that is the case.
+ */
+- cmdline = get_cmdline(fdt);
++ cmdline = kaslr_get_cmdline(fdt);
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ return 0;
+@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
+ module_alloc_base &= PAGE_MASK;
+ 
++ __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
++ __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
++
+ return offset;
+ }
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 7cc175c88a37..f6e02cc4d856 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
+ mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+ 
+ write_sysreg(mdcr_el2, mdcr_el2);
+- write_sysreg(HCR_RW, hcr_el2);
++ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
+ write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+ }
+ 
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 8272ea4c7264..6207b41473a0 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -3184,6 +3184,7 @@ config MIPS32_O32
+ config MIPS32_N32
+ bool "Kernel support for n32 binaries"
+ depends on 64BIT
++ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ select COMPAT
+ select MIPS32_COMPAT
+ select SYSVIPC_COMPAT if SYSVIPC
247 |
+index 6054d49e608e..fe3773539eff 100644 |
248 |
+--- a/arch/mips/bcm47xx/setup.c |
249 |
++++ b/arch/mips/bcm47xx/setup.c |
250 |
+@@ -173,6 +173,31 @@ void __init plat_mem_setup(void) |
251 |
+ pm_power_off = bcm47xx_machine_halt; |
252 |
+ } |
253 |
+ |
254 |
++#ifdef CONFIG_BCM47XX_BCMA |
255 |
++static struct device * __init bcm47xx_setup_device(void) |
256 |
++{ |
257 |
++ struct device *dev; |
258 |
++ int err; |
259 |
++ |
260 |
++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
261 |
++ if (!dev) |
262 |
++ return NULL; |
263 |
++ |
264 |
++ err = dev_set_name(dev, "bcm47xx_soc"); |
265 |
++ if (err) { |
266 |
++ pr_err("Failed to set SoC device name: %d\n", err); |
267 |
++ kfree(dev); |
268 |
++ return NULL; |
269 |
++ } |
270 |
++ |
271 |
++ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
272 |
++ if (err) |
273 |
++ pr_err("Failed to set SoC DMA mask: %d\n", err); |
274 |
++ |
275 |
++ return dev; |
276 |
++} |
277 |
++#endif |
278 |
++ |
279 |
+ /* |
280 |
+ * This finishes bus initialization doing things that were not possible without |
281 |
+ * kmalloc. Make sure to call it late enough (after mm_init). |
282 |
+@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void) |
283 |
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { |
284 |
+ int err; |
285 |
+ |
286 |
++ bcm47xx_bus.bcma.dev = bcm47xx_setup_device(); |
287 |
++ if (!bcm47xx_bus.bcma.dev) |
288 |
++ panic("Failed to setup SoC device\n"); |
289 |
++ |
290 |
+ err = bcma_host_soc_init(&bcm47xx_bus.bcma); |
291 |
+ if (err) |
292 |
+ panic("Failed to initialize BCMA bus (err %d)", err); |
293 |
+@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void) |
294 |
+ #endif |
295 |
+ #ifdef CONFIG_BCM47XX_BCMA |
296 |
+ case BCM47XX_BUS_TYPE_BCMA: |
297 |
++ if (device_register(bcm47xx_bus.bcma.dev)) |
298 |
++ pr_err("Failed to register SoC device\n"); |
299 |
+ bcma_bus_register(&bcm47xx_bus.bcma.bus); |
300 |
+ break; |
301 |
+ #endif |
302 |
+diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c |
303 |
+index dfb95cffef3e..a3cf68538f3d 100644 |
304 |
+--- a/arch/mips/cavium-octeon/setup.c |
305 |
++++ b/arch/mips/cavium-octeon/setup.c |
306 |
+@@ -96,7 +96,7 @@ static void octeon_kexec_smp_down(void *ignored) |
307 |
+ " sync \n" |
308 |
+ " synci ($0) \n"); |
309 |
+ |
310 |
+- relocated_kexec_smp_wait(NULL); |
311 |
++ kexec_reboot(); |
312 |
+ } |
313 |
+ #endif |
314 |
+ |
315 |
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c |
316 |
+index f0bc3312ed11..c4ef1c31e0c4 100644 |
317 |
+--- a/arch/mips/lantiq/irq.c |
318 |
++++ b/arch/mips/lantiq/irq.c |
319 |
+@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = { |
320 |
+ .irq_set_type = ltq_eiu_settype, |
321 |
+ }; |
322 |
+ |
323 |
+-static void ltq_hw_irqdispatch(int module) |
324 |
++static void ltq_hw_irq_handler(struct irq_desc *desc) |
325 |
+ { |
326 |
++ int module = irq_desc_get_irq(desc) - 2; |
327 |
+ u32 irq; |
328 |
++ int hwirq; |
329 |
+ |
330 |
+ irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); |
331 |
+ if (irq == 0) |
332 |
+@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module) |
333 |
+ * other bits might be bogus |
334 |
+ */ |
335 |
+ irq = __fls(irq); |
336 |
+- do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); |
337 |
++ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module); |
338 |
++ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq)); |
339 |
+ |
340 |
+ /* if this is a EBU irq, we need to ack it or get a deadlock */ |
341 |
+ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) |
342 |
+@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module) |
343 |
+ LTQ_EBU_PCC_ISTAT); |
344 |
+ } |
345 |
+ |
346 |
+-#define DEFINE_HWx_IRQDISPATCH(x) \ |
347 |
+- static void ltq_hw ## x ## _irqdispatch(void) \ |
348 |
+- { \ |
349 |
+- ltq_hw_irqdispatch(x); \ |
350 |
+- } |
351 |
+-DEFINE_HWx_IRQDISPATCH(0) |
352 |
+-DEFINE_HWx_IRQDISPATCH(1) |
353 |
+-DEFINE_HWx_IRQDISPATCH(2) |
354 |
+-DEFINE_HWx_IRQDISPATCH(3) |
355 |
+-DEFINE_HWx_IRQDISPATCH(4) |
356 |
+- |
357 |
+-#if MIPS_CPU_TIMER_IRQ == 7 |
358 |
+-static void ltq_hw5_irqdispatch(void) |
359 |
+-{ |
360 |
+- do_IRQ(MIPS_CPU_TIMER_IRQ); |
361 |
+-} |
362 |
+-#else |
363 |
+-DEFINE_HWx_IRQDISPATCH(5) |
364 |
+-#endif |
365 |
+- |
366 |
+-static void ltq_hw_irq_handler(struct irq_desc *desc) |
367 |
+-{ |
368 |
+- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2); |
369 |
+-} |
370 |
+- |
371 |
+-asmlinkage void plat_irq_dispatch(void) |
372 |
+-{ |
373 |
+- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; |
374 |
+- int irq; |
375 |
+- |
376 |
+- if (!pending) { |
377 |
+- spurious_interrupt(); |
378 |
+- return; |
379 |
+- } |
380 |
+- |
381 |
+- pending >>= CAUSEB_IP; |
382 |
+- while (pending) { |
383 |
+- irq = fls(pending) - 1; |
384 |
+- do_IRQ(MIPS_CPU_IRQ_BASE + irq); |
385 |
+- pending &= ~BIT(irq); |
386 |
+- } |
387 |
+-} |
388 |
+- |
389 |
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) |
390 |
+ { |
391 |
+ struct irq_chip *chip = <q_irq_type; |
392 |
+@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) |
393 |
+ for (i = 0; i < MAX_IM; i++) |
394 |
+ irq_set_chained_handler(i + 2, ltq_hw_irq_handler); |
395 |
+ |
396 |
+- if (cpu_has_vint) { |
397 |
+- pr_info("Setting up vectored interrupts\n"); |
398 |
+- set_vi_handler(2, ltq_hw0_irqdispatch); |
399 |
+- set_vi_handler(3, ltq_hw1_irqdispatch); |
400 |
+- set_vi_handler(4, ltq_hw2_irqdispatch); |
401 |
+- set_vi_handler(5, ltq_hw3_irqdispatch); |
402 |
+- set_vi_handler(6, ltq_hw4_irqdispatch); |
403 |
+- set_vi_handler(7, ltq_hw5_irqdispatch); |
404 |
+- } |
405 |
+- |
406 |
+ ltq_domain = irq_domain_add_linear(node, |
407 |
+ (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, |
408 |
+ &irq_domain_ops, 0); |
409 |
+ |
410 |
+-#ifndef CONFIG_MIPS_MT_SMP |
411 |
+- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | |
412 |
+- IE_IRQ3 | IE_IRQ4 | IE_IRQ5); |
413 |
+-#else |
414 |
+- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | |
415 |
+- IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); |
416 |
+-#endif |
417 |
+- |
418 |
+ /* tell oprofile which irq to use */ |
419 |
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); |
420 |
+ |
421 |
+diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c |
422 |
+index 2a5bb849b10e..288b58b00dc8 100644 |
423 |
+--- a/arch/mips/pci/msi-octeon.c |
424 |
++++ b/arch/mips/pci/msi-octeon.c |
425 |
+@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void) |
426 |
+ int irq; |
427 |
+ struct irq_chip *msi; |
428 |
+ |
429 |
+- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { |
430 |
++ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) { |
431 |
++ return 0; |
432 |
++ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { |
433 |
+ msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; |
434 |
+ msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; |
435 |
+ msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; |
436 |
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c |
437 |
+index 6f70d1b4bf36..14b0f5b6a373 100644 |
438 |
+--- a/arch/powerpc/kernel/signal_64.c |
439 |
++++ b/arch/powerpc/kernel/signal_64.c |
440 |
+@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn) |
441 |
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext, |
442 |
+ &uc_transact->uc_mcontext)) |
443 |
+ goto badframe; |
444 |
+- } |
445 |
++ } else |
446 |
+ #endif |
447 |
+- /* Fall through, for non-TM restore */ |
448 |
+- if (!MSR_TM_ACTIVE(msr)) { |
449 |
++ { |
450 |
+ /* |
451 |
++ * Fall through, for non-TM restore |
452 |
++ * |
453 |
+ * Unset MSR[TS] on the thread regs since MSR from user |
454 |
+ * context does not have MSR active, and recheckpoint was |
455 |
+ * not called since restore_tm_sigcontexts() was not called |
456 |
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c |
457 |
+index 72bf446c3fee..6e29794573b7 100644 |
458 |
+--- a/arch/x86/xen/time.c |
459 |
++++ b/arch/x86/xen/time.c |
460 |
+@@ -361,8 +361,6 @@ void xen_timer_resume(void) |
461 |
+ { |
462 |
+ int cpu; |
463 |
+ |
464 |
+- pvclock_resume(); |
465 |
+- |
466 |
+ if (xen_clockevent != &xen_vcpuop_clockevent) |
467 |
+ return; |
468 |
+ |
469 |
+@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = { |
470 |
+ }; |
471 |
+ |
472 |
+ static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; |
473 |
++static u64 xen_clock_value_saved; |
474 |
+ |
475 |
+ void xen_save_time_memory_area(void) |
476 |
+ { |
477 |
+ struct vcpu_register_time_memory_area t; |
478 |
+ int ret; |
479 |
+ |
480 |
++ xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; |
481 |
++ |
482 |
+ if (!xen_clock) |
483 |
+ return; |
484 |
+ |
485 |
+@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void) |
486 |
+ int ret; |
487 |
+ |
488 |
+ if (!xen_clock) |
489 |
+- return; |
490 |
++ goto out; |
491 |
+ |
492 |
+ t.addr.v = &xen_clock->pvti; |
493 |
+ |
494 |
+@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void) |
495 |
+ if (ret != 0) |
496 |
+ pr_notice("Cannot restore secondary vcpu_time_info (err %d)", |
497 |
+ ret); |
498 |
++ |
499 |
++out: |
500 |
++ /* Need pvclock_resume() before using xen_clocksource_read(). */ |
501 |
++ pvclock_resume(); |
502 |
++ xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; |
503 |
+ } |
504 |
+ |
505 |
+ static void xen_setup_vsyscall_time_info(void) |
506 |
+diff --git a/block/partition-generic.c b/block/partition-generic.c |
507 |
+index d3d14e81fb12..5f8db5c5140f 100644 |
508 |
+--- a/block/partition-generic.c |
509 |
++++ b/block/partition-generic.c |
510 |
+@@ -249,9 +249,10 @@ struct device_type part_type = { |
511 |
+ .uevent = part_uevent, |
512 |
+ }; |
513 |
+ |
514 |
+-static void delete_partition_rcu_cb(struct rcu_head *head) |
515 |
++static void delete_partition_work_fn(struct work_struct *work) |
516 |
+ { |
517 |
+- struct hd_struct *part = container_of(head, struct hd_struct, rcu_head); |
518 |
++ struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct, |
519 |
++ rcu_work); |
520 |
+ |
521 |
+ part->start_sect = 0; |
522 |
+ part->nr_sects = 0; |
523 |
+@@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head) |
524 |
+ void __delete_partition(struct percpu_ref *ref) |
525 |
+ { |
526 |
+ struct hd_struct *part = container_of(ref, struct hd_struct, ref); |
527 |
+- call_rcu(&part->rcu_head, delete_partition_rcu_cb); |
528 |
++ INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn); |
529 |
++ queue_rcu_work(system_wq, &part->rcu_work); |
530 |
+ } |
531 |
+ |
532 |
+ /* |
533 |
+diff --git a/crypto/authenc.c b/crypto/authenc.c |
534 |
+index 37f54d1b2f66..4be293a4b5f0 100644 |
535 |
+--- a/crypto/authenc.c |
536 |
++++ b/crypto/authenc.c |
537 |
+@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, |
538 |
+ return -EINVAL; |
539 |
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
540 |
+ return -EINVAL; |
541 |
+- if (RTA_PAYLOAD(rta) < sizeof(*param)) |
542 |
++ |
543 |
++ /* |
544 |
++ * RTA_OK() didn't align the rtattr's payload when validating that it |
545 |
++ * fits in the buffer. Yet, the keys should start on the next 4-byte |
546 |
++ * aligned boundary. To avoid confusion, require that the rtattr |
547 |
++ * payload be exactly the param struct, which has a 4-byte aligned size. |
548 |
++ */ |
549 |
++ if (RTA_PAYLOAD(rta) != sizeof(*param)) |
550 |
+ return -EINVAL; |
551 |
++ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); |
552 |
+ |
553 |
+ param = RTA_DATA(rta); |
554 |
+ keys->enckeylen = be32_to_cpu(param->enckeylen); |
555 |
+ |
556 |
+- key += RTA_ALIGN(rta->rta_len); |
557 |
+- keylen -= RTA_ALIGN(rta->rta_len); |
558 |
++ key += rta->rta_len; |
559 |
++ keylen -= rta->rta_len; |
560 |
+ |
561 |
+ if (keylen < keys->enckeylen) |
562 |
+ return -EINVAL; |
563 |
+diff --git a/crypto/authencesn.c b/crypto/authencesn.c |
564 |
+index 80a25cc04aec..4741fe89ba2c 100644 |
565 |
+--- a/crypto/authencesn.c |
566 |
++++ b/crypto/authencesn.c |
567 |
+@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq, |
568 |
+ struct aead_request *req = areq->data; |
569 |
+ |
570 |
+ err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); |
571 |
+- aead_request_complete(req, err); |
572 |
++ authenc_esn_request_complete(req, err); |
573 |
+ } |
574 |
+ |
575 |
+ static int crypto_authenc_esn_decrypt(struct aead_request *req) |
576 |
+diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c |
577 |
+index 9a5c60f08aad..c0cf87ae7ef6 100644 |
578 |
+--- a/crypto/sm3_generic.c |
579 |
++++ b/crypto/sm3_generic.c |
580 |
+@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m) |
581 |
+ |
582 |
+ for (i = 0; i <= 63; i++) { |
583 |
+ |
584 |
+- ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); |
585 |
++ ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); |
586 |
+ |
587 |
+ ss2 = ss1 ^ rol32(a, 12); |
588 |
+ |
589 |
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c |
590 |
+index cb0cc8685076..84b055aa81ba 100644 |
591 |
+--- a/drivers/block/loop.c |
592 |
++++ b/drivers/block/loop.c |
593 |
+@@ -83,7 +83,7 @@ |
594 |
+ #include <linux/uaccess.h> |
595 |
+ |
596 |
+ static DEFINE_IDR(loop_index_idr); |
597 |
+-static DEFINE_MUTEX(loop_index_mutex); |
598 |
++static DEFINE_MUTEX(loop_ctl_mutex); |
599 |
+ |
600 |
+ static int max_part; |
601 |
+ static int part_shift; |
602 |
+@@ -630,18 +630,7 @@ static void loop_reread_partitions(struct loop_device *lo, |
603 |
+ { |
604 |
+ int rc; |
605 |
+ |
606 |
+- /* |
607 |
+- * bd_mutex has been held already in release path, so don't |
608 |
+- * acquire it if this function is called in such case. |
609 |
+- * |
610 |
+- * If the reread partition isn't from release path, lo_refcnt |
611 |
+- * must be at least one and it can only become zero when the |
612 |
+- * current holder is released. |
613 |
+- */ |
614 |
+- if (!atomic_read(&lo->lo_refcnt)) |
615 |
+- rc = __blkdev_reread_part(bdev); |
616 |
+- else |
617 |
+- rc = blkdev_reread_part(bdev); |
618 |
++ rc = blkdev_reread_part(bdev); |
619 |
+ if (rc) |
620 |
+ pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", |
621 |
+ __func__, lo->lo_number, lo->lo_file_name, rc); |
622 |
+@@ -688,26 +677,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev) |
623 |
+ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, |
624 |
+ unsigned int arg) |
625 |
+ { |
626 |
+- struct file *file, *old_file; |
627 |
++ struct file *file = NULL, *old_file; |
628 |
+ int error; |
629 |
++ bool partscan; |
630 |
+ |
631 |
++ error = mutex_lock_killable(&loop_ctl_mutex); |
632 |
++ if (error) |
633 |
++ return error; |
634 |
+ error = -ENXIO; |
635 |
+ if (lo->lo_state != Lo_bound) |
636 |
+- goto out; |
637 |
++ goto out_err; |
638 |
+ |
639 |
+ /* the loop device has to be read-only */ |
640 |
+ error = -EINVAL; |
641 |
+ if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) |
642 |
+- goto out; |
643 |
++ goto out_err; |
644 |
+ |
645 |
+ error = -EBADF; |
646 |
+ file = fget(arg); |
647 |
+ if (!file) |
648 |
+- goto out; |
649 |
++ goto out_err; |
650 |
+ |
651 |
+ error = loop_validate_file(file, bdev); |
652 |
+ if (error) |
653 |
+- goto out_putf; |
654 |
++ goto out_err; |
655 |
+ |
656 |
+ old_file = lo->lo_backing_file; |
657 |
+ |
658 |
+@@ -715,7 +708,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, |
659 |
+ |
660 |
+ /* size of the new backing store needs to be the same */ |
661 |
+ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) |
662 |
+- goto out_putf; |
663 |
++ goto out_err; |
664 |
+ |
665 |
+ /* and ... switch */ |
666 |
+ blk_mq_freeze_queue(lo->lo_queue); |
667 |
+@@ -726,15 +719,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, |
668 |
+ lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); |
669 |
+ loop_update_dio(lo); |
670 |
+ blk_mq_unfreeze_queue(lo->lo_queue); |
671 |
+- |
672 |
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; |
673 |
++ mutex_unlock(&loop_ctl_mutex); |
674 |
++ /* |
675 |
++ * We must drop file reference outside of loop_ctl_mutex as dropping |
676 |
++ * the file ref can take bd_mutex which creates circular locking |
677 |
++ * dependency. |
678 |
++ */ |
679 |
+ fput(old_file); |
680 |
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN) |
681 |
++ if (partscan) |
682 |
+ loop_reread_partitions(lo, bdev); |
683 |
+ return 0; |
684 |
+ |
685 |
+- out_putf: |
686 |
+- fput(file); |
687 |
+- out: |
688 |
++out_err: |
689 |
++ mutex_unlock(&loop_ctl_mutex); |
690 |
++ if (file) |
691 |
++ fput(file); |
692 |
+ return error; |
693 |
+ } |
694 |
+ |
695 |
+@@ -909,6 +909,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, |
696 |
+ int lo_flags = 0; |
697 |
+ int error; |
698 |
+ loff_t size; |
699 |
++ bool partscan; |
700 |
+ |
701 |
+ /* This is safe, since we have a reference from open(). */ |
702 |
+ __module_get(THIS_MODULE); |
703 |
+@@ -918,13 +919,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, |
704 |
+ if (!file) |
705 |
+ goto out; |
706 |
+ |
707 |
++ error = mutex_lock_killable(&loop_ctl_mutex); |
708 |
++ if (error) |
709 |
++ goto out_putf; |
710 |
++ |
711 |
+ error = -EBUSY; |
712 |
+ if (lo->lo_state != Lo_unbound) |
713 |
+- goto out_putf; |
714 |
++ goto out_unlock; |
715 |
+ |
716 |
+ error = loop_validate_file(file, bdev); |
717 |
+ if (error) |
718 |
+- goto out_putf; |
719 |
++ goto out_unlock; |
720 |
+ |
721 |
+ mapping = file->f_mapping; |
722 |
+ inode = mapping->host; |
723 |
+@@ -936,10 +941,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, |
724 |
+ error = -EFBIG; |
725 |
+ size = get_loop_size(lo, file); |
726 |
+ if ((loff_t)(sector_t)size != size) |
727 |
+- goto out_putf; |
728 |
++ goto out_unlock; |
729 |
+ error = loop_prepare_queue(lo); |
730 |
+ if (error) |
731 |
+- goto out_putf; |
732 |
++ goto out_unlock; |
733 |
+ |
734 |
+ error = 0; |
735 |
+ |
736 |
+@@ -971,18 +976,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, |
737 |
+ lo->lo_state = Lo_bound; |
738 |
+ if (part_shift) |
739 |
+ lo->lo_flags |= LO_FLAGS_PARTSCAN; |
740 |
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN) |
741 |
+- loop_reread_partitions(lo, bdev); |
742 |
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; |
743 |
+ |
744 |
+ /* Grab the block_device to prevent its destruction after we |
745 |
+- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). |
746 |
++ * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev). |
747 |
+ */ |
748 |
+ bdgrab(bdev); |
749 |
++ mutex_unlock(&loop_ctl_mutex); |
750 |
++ if (partscan) |
751 |
++ loop_reread_partitions(lo, bdev); |
752 |
+ return 0; |
753 |
+ |
754 |
+- out_putf: |
755 |
++out_unlock: |
756 |
++ mutex_unlock(&loop_ctl_mutex); |
757 |
++out_putf: |
758 |
+ fput(file); |
759 |
+- out: |
760 |
++out: |
761 |
+ /* This is safe: open() is still holding a reference. */ |
762 |
+ module_put(THIS_MODULE); |
763 |
+ return error; |
764 |
+@@ -1025,39 +1034,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, |
765 |
+ return err; |
766 |
+ } |
767 |
+ |
768 |
+-static int loop_clr_fd(struct loop_device *lo) |
769 |
++static int __loop_clr_fd(struct loop_device *lo, bool release) |
770 |
+ { |
771 |
+- struct file *filp = lo->lo_backing_file; |
772 |
++ struct file *filp = NULL; |
773 |
+ gfp_t gfp = lo->old_gfp_mask; |
774 |
+ struct block_device *bdev = lo->lo_device; |
775 |
++ int err = 0; |
776 |
++ bool partscan = false; |
777 |
++ int lo_number; |
778 |
+ |
779 |
+- if (lo->lo_state != Lo_bound) |
780 |
+- return -ENXIO; |
781 |
+- |
782 |
+- /* |
783 |
+- * If we've explicitly asked to tear down the loop device, |
784 |
+- * and it has an elevated reference count, set it for auto-teardown when |
785 |
+- * the last reference goes away. This stops $!~#$@ udev from |
786 |
+- * preventing teardown because it decided that it needs to run blkid on |
787 |
+- * the loopback device whenever they appear. xfstests is notorious for |
788 |
+- * failing tests because blkid via udev races with a losetup |
789 |
+- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d |
790 |
+- * command to fail with EBUSY. |
791 |
+- */ |
792 |
+- if (atomic_read(&lo->lo_refcnt) > 1) { |
793 |
+- lo->lo_flags |= LO_FLAGS_AUTOCLEAR; |
794 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
795 |
+- return 0; |
796 |
++ mutex_lock(&loop_ctl_mutex); |
797 |
++ if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) { |
798 |
++ err = -ENXIO; |
799 |
++ goto out_unlock; |
800 |
+ } |
801 |
+ |
802 |
+- if (filp == NULL) |
803 |
+- return -EINVAL; |
804 |
++ filp = lo->lo_backing_file; |
805 |
++ if (filp == NULL) { |
806 |
++ err = -EINVAL; |
807 |
++ goto out_unlock; |
808 |
++ } |
809 |
+ |
810 |
+ /* freeze request queue during the transition */ |
811 |
+ blk_mq_freeze_queue(lo->lo_queue); |
812 |
+ |
813 |
+ spin_lock_irq(&lo->lo_lock); |
814 |
+- lo->lo_state = Lo_rundown; |
815 |
+ lo->lo_backing_file = NULL; |
816 |
+ spin_unlock_irq(&lo->lo_lock); |
817 |
+ |
818 |
+@@ -1093,21 +1094,73 @@ static int loop_clr_fd(struct loop_device *lo) |
819 |
+ module_put(THIS_MODULE); |
820 |
+ blk_mq_unfreeze_queue(lo->lo_queue); |
821 |
+ |
822 |
+- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) |
823 |
+- loop_reread_partitions(lo, bdev); |
824 |
++ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; |
825 |
++ lo_number = lo->lo_number; |
826 |
+ lo->lo_flags = 0; |
827 |
+ if (!part_shift) |
828 |
+ lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; |
829 |
+ loop_unprepare_queue(lo); |
830 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
831 |
++out_unlock: |
832 |
++ mutex_unlock(&loop_ctl_mutex); |
833 |
++ if (partscan) { |
834 |
++ /* |
835 |
++ * bd_mutex has been held already in release path, so don't |
836 |
++ * acquire it if this function is called in such case. |
837 |
++ * |
838 |
++ * If the reread partition isn't from release path, lo_refcnt |
839 |
++ * must be at least one and it can only become zero when the |
840 |
++ * current holder is released. |
841 |
++ */ |
842 |
++ if (release) |
843 |
++ err = __blkdev_reread_part(bdev); |
844 |
++ else |
845 |
++ err = blkdev_reread_part(bdev); |
846 |
++ pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", |
847 |
++ __func__, lo_number, err); |
848 |
++ /* Device is gone, no point in returning error */ |
849 |
++ err = 0; |
850 |
++ } |
851 |
+ /* |
852 |
+- * Need not hold lo_ctl_mutex to fput backing file. |
853 |
+- * Calling fput holding lo_ctl_mutex triggers a circular |
854 |
++ * Need not hold loop_ctl_mutex to fput backing file. |
855 |
++ * Calling fput holding loop_ctl_mutex triggers a circular |
856 |
+ * lock dependency possibility warning as fput can take |
857 |
+- * bd_mutex which is usually taken before lo_ctl_mutex. |
858 |
++ * bd_mutex which is usually taken before loop_ctl_mutex. |
859 |
+ */ |
860 |
+- fput(filp); |
861 |
+- return 0; |
862 |
++ if (filp) |
863 |
++ fput(filp); |
864 |
++ return err; |
865 |
++} |
866 |
++ |
867 |
++static int loop_clr_fd(struct loop_device *lo) |
868 |
++{ |
869 |
++ int err; |
870 |
++ |
871 |
++ err = mutex_lock_killable(&loop_ctl_mutex); |
872 |
++ if (err) |
873 |
++ return err; |
874 |
++ if (lo->lo_state != Lo_bound) { |
875 |
++ mutex_unlock(&loop_ctl_mutex); |
876 |
++ return -ENXIO; |
877 |
++ } |
878 |
++ /* |
879 |
++ * If we've explicitly asked to tear down the loop device, |
880 |
++ * and it has an elevated reference count, set it for auto-teardown when |
881 |
++ * the last reference goes away. This stops $!~#$@ udev from |
882 |
++ * preventing teardown because it decided that it needs to run blkid on |
883 |
++ * the loopback device whenever they appear. xfstests is notorious for |
884 |
++ * failing tests because blkid via udev races with a losetup |
885 |
++ * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d |
886 |
++ * command to fail with EBUSY. |
887 |
++ */ |
888 |
++ if (atomic_read(&lo->lo_refcnt) > 1) { |
889 |
++ lo->lo_flags |= LO_FLAGS_AUTOCLEAR; |
890 |
++ mutex_unlock(&loop_ctl_mutex); |
891 |
++ return 0; |
892 |
++ } |
893 |
++ lo->lo_state = Lo_rundown; |
894 |
++ mutex_unlock(&loop_ctl_mutex); |
895 |
++ |
896 |
++ return __loop_clr_fd(lo, false); |
897 |
+ } |
898 |
+ |
899 |
+ static int |
900 |
+@@ -1116,47 +1169,72 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) |
901 |
+ int err; |
902 |
+ struct loop_func_table *xfer; |
903 |
+ kuid_t uid = current_uid(); |
904 |
++ struct block_device *bdev; |
905 |
++ bool partscan = false; |
906 |
+ |
907 |
++ err = mutex_lock_killable(&loop_ctl_mutex); |
908 |
++ if (err) |
909 |
++ return err; |
910 |
+ if (lo->lo_encrypt_key_size && |
911 |
+ !uid_eq(lo->lo_key_owner, uid) && |
912 |
+- !capable(CAP_SYS_ADMIN)) |
913 |
+- return -EPERM; |
914 |
+- if (lo->lo_state != Lo_bound) |
915 |
+- return -ENXIO; |
916 |
+- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) |
917 |
+- return -EINVAL; |
918 |
++ !capable(CAP_SYS_ADMIN)) { |
919 |
++ err = -EPERM; |
920 |
++ goto out_unlock; |
921 |
++ } |
922 |
++ if (lo->lo_state != Lo_bound) { |
923 |
++ err = -ENXIO; |
924 |
++ goto out_unlock; |
925 |
++ } |
926 |
++ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) { |
927 |
++ err = -EINVAL; |
928 |
++ goto out_unlock; |
929 |
++ } |
930 |
++ |
931 |
++ if (lo->lo_offset != info->lo_offset || |
932 |
++ lo->lo_sizelimit != info->lo_sizelimit) { |
933 |
++ sync_blockdev(lo->lo_device); |
934 |
++ kill_bdev(lo->lo_device); |
935 |
++ } |
936 |
+ |
937 |
+ /* I/O need to be drained during transfer transition */ |
938 |
+ blk_mq_freeze_queue(lo->lo_queue); |
939 |
+ |
940 |
+ err = loop_release_xfer(lo); |
941 |
+ if (err) |
942 |
+- goto exit; |
943 |
++ goto out_unfreeze; |
944 |
+ |
945 |
+ if (info->lo_encrypt_type) { |
946 |
+ unsigned int type = info->lo_encrypt_type; |
947 |
+ |
948 |
+ if (type >= MAX_LO_CRYPT) { |
949 |
+ err = -EINVAL; |
950 |
+- goto exit; |
951 |
++ goto out_unfreeze; |
952 |
+ } |
953 |
+ xfer = xfer_funcs[type]; |
954 |
+ if (xfer == NULL) { |
955 |
+ err = -EINVAL; |
956 |
+- goto exit; |
957 |
++ goto out_unfreeze; |
958 |
+ } |
959 |
+ } else |
960 |
+ xfer = NULL; |
961 |
+ |
962 |
+ err = loop_init_xfer(lo, xfer, info); |
963 |
+ if (err) |
964 |
+- goto exit; |
965 |
++ goto out_unfreeze; |
966 |
+ |
967 |
+ if (lo->lo_offset != info->lo_offset || |
968 |
+ lo->lo_sizelimit != info->lo_sizelimit) { |
969 |
++ /* kill_bdev should have truncated all the pages */ |
970 |
++ if (lo->lo_device->bd_inode->i_mapping->nrpages) { |
971 |
++ err = -EAGAIN; |
972 |
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", |
973 |
++ __func__, lo->lo_number, lo->lo_file_name, |
974 |
++ lo->lo_device->bd_inode->i_mapping->nrpages); |
975 |
++ goto out_unfreeze; |
976 |
++ } |
977 |
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { |
978 |
+ err = -EFBIG; |
979 |
+- goto exit; |
980 |
++ goto out_unfreeze; |
981 |
+ } |
982 |
+ } |
983 |
+ |
984 |
+@@ -1188,15 +1266,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) |
985 |
+ /* update dio if lo_offset or transfer is changed */ |
986 |
+ __loop_update_dio(lo, lo->use_dio); |
987 |
+ |
988 |
+- exit: |
989 |
++out_unfreeze: |
990 |
+ blk_mq_unfreeze_queue(lo->lo_queue); |
991 |
+ |
992 |
+ if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && |
993 |
+ !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { |
994 |
+ lo->lo_flags |= LO_FLAGS_PARTSCAN; |
995 |
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; |
996 |
+- loop_reread_partitions(lo, lo->lo_device); |
997 |
++ bdev = lo->lo_device; |
998 |
++ partscan = true; |
999 |
+ } |
1000 |
++out_unlock: |
1001 |
++ mutex_unlock(&loop_ctl_mutex); |
1002 |
++ if (partscan) |
1003 |
++ loop_reread_partitions(lo, bdev); |
1004 |
+ |
1005 |
+ return err; |
1006 |
+ } |
1007 |
+@@ -1204,12 +1287,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) |
1008 |
+ static int |
1009 |
+ loop_get_status(struct loop_device *lo, struct loop_info64 *info) |
1010 |
+ { |
1011 |
+- struct file *file; |
1012 |
++ struct path path; |
1013 |
+ struct kstat stat; |
1014 |
+ int ret; |
1015 |
+ |
1016 |
++ ret = mutex_lock_killable(&loop_ctl_mutex); |
1017 |
++ if (ret) |
1018 |
++ return ret; |
1019 |
+ if (lo->lo_state != Lo_bound) { |
1020 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1021 |
++ mutex_unlock(&loop_ctl_mutex); |
1022 |
+ return -ENXIO; |
1023 |
+ } |
1024 |
+ |
1025 |
+@@ -1228,17 +1314,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info) |
1026 |
+ lo->lo_encrypt_key_size); |
1027 |
+ } |
1028 |
+ |
1029 |
+- /* Drop lo_ctl_mutex while we call into the filesystem. */ |
1030 |
+- file = get_file(lo->lo_backing_file); |
1031 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1032 |
+- ret = vfs_getattr(&file->f_path, &stat, STATX_INO, |
1033 |
+- AT_STATX_SYNC_AS_STAT); |
1034 |
++ /* Drop loop_ctl_mutex while we call into the filesystem. */ |
1035 |
++ path = lo->lo_backing_file->f_path; |
1036 |
++ path_get(&path); |
1037 |
++ mutex_unlock(&loop_ctl_mutex); |
1038 |
++ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); |
1039 |
+ if (!ret) { |
1040 |
+ info->lo_device = huge_encode_dev(stat.dev); |
1041 |
+ info->lo_inode = stat.ino; |
1042 |
+ info->lo_rdevice = huge_encode_dev(stat.rdev); |
1043 |
+ } |
1044 |
+- fput(file); |
1045 |
++ path_put(&path); |
1046 |
+ return ret; |
1047 |
+ } |
1048 |
+ |
1049 |
+@@ -1322,10 +1408,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { |
1050 |
+ struct loop_info64 info64; |
1051 |
+ int err; |
1052 |
+ |
1053 |
+- if (!arg) { |
1054 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1055 |
++ if (!arg) |
1056 |
+ return -EINVAL; |
1057 |
+- } |
1058 |
+ err = loop_get_status(lo, &info64); |
1059 |
+ if (!err) |
1060 |
+ err = loop_info64_to_old(&info64, &info); |
1061 |
+@@ -1340,10 +1424,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { |
1062 |
+ struct loop_info64 info64; |
1063 |
+ int err; |
1064 |
+ |
1065 |
+- if (!arg) { |
1066 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1067 |
++ if (!arg) |
1068 |
+ return -EINVAL; |
1069 |
+- } |
1070 |
+ err = loop_get_status(lo, &info64); |
1071 |
+ if (!err && copy_to_user(arg, &info64, sizeof(info64))) |
1072 |
+ err = -EFAULT; |
1073 |
+@@ -1375,22 +1457,64 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg) |
1074 |
+ |
1075 |
+ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) |
1076 |
+ { |
1077 |
++ int err = 0; |
1078 |
++ |
1079 |
+ if (lo->lo_state != Lo_bound) |
1080 |
+ return -ENXIO; |
1081 |
+ |
1082 |
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) |
1083 |
+ return -EINVAL; |
1084 |
+ |
1085 |
++ if (lo->lo_queue->limits.logical_block_size != arg) { |
1086 |
++ sync_blockdev(lo->lo_device); |
1087 |
++ kill_bdev(lo->lo_device); |
1088 |
++ } |
1089 |
++ |
1090 |
+ blk_mq_freeze_queue(lo->lo_queue); |
1091 |
+ |
1092 |
++ /* kill_bdev should have truncated all the pages */ |
1093 |
++ if (lo->lo_queue->limits.logical_block_size != arg && |
1094 |
++ lo->lo_device->bd_inode->i_mapping->nrpages) { |
1095 |
++ err = -EAGAIN; |
1096 |
++ pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", |
1097 |
++ __func__, lo->lo_number, lo->lo_file_name, |
1098 |
++ lo->lo_device->bd_inode->i_mapping->nrpages); |
1099 |
++ goto out_unfreeze; |
1100 |
++ } |
1101 |
++ |
1102 |
+ blk_queue_logical_block_size(lo->lo_queue, arg); |
1103 |
+ blk_queue_physical_block_size(lo->lo_queue, arg); |
1104 |
+ blk_queue_io_min(lo->lo_queue, arg); |
1105 |
+ loop_update_dio(lo); |
1106 |
+- |
1107 |
++out_unfreeze: |
1108 |
+ blk_mq_unfreeze_queue(lo->lo_queue); |
1109 |
+ |
1110 |
+- return 0; |
1111 |
++ return err; |
1112 |
++} |
1113 |
++ |
1114 |
++static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, |
1115 |
++ unsigned long arg) |
1116 |
++{ |
1117 |
++ int err; |
1118 |
++ |
1119 |
++ err = mutex_lock_killable(&loop_ctl_mutex); |
1120 |
++ if (err) |
1121 |
++ return err; |
1122 |
++ switch (cmd) { |
1123 |
++ case LOOP_SET_CAPACITY: |
1124 |
++ err = loop_set_capacity(lo); |
1125 |
++ break; |
1126 |
++ case LOOP_SET_DIRECT_IO: |
1127 |
++ err = loop_set_dio(lo, arg); |
1128 |
++ break; |
1129 |
++ case LOOP_SET_BLOCK_SIZE: |
1130 |
++ err = loop_set_block_size(lo, arg); |
1131 |
++ break; |
1132 |
++ default: |
1133 |
++ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; |
1134 |
++ } |
1135 |
++ mutex_unlock(&loop_ctl_mutex); |
1136 |
++ return err; |
1137 |
+ } |
1138 |
+ |
1139 |
+ static int lo_ioctl(struct block_device *bdev, fmode_t mode, |
1140 |
+@@ -1399,64 +1523,42 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, |
1141 |
+ struct loop_device *lo = bdev->bd_disk->private_data; |
1142 |
+ int err; |
1143 |
+ |
1144 |
+- err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1); |
1145 |
+- if (err) |
1146 |
+- goto out_unlocked; |
1147 |
+- |
1148 |
+ switch (cmd) { |
1149 |
+ case LOOP_SET_FD: |
1150 |
+- err = loop_set_fd(lo, mode, bdev, arg); |
1151 |
+- break; |
1152 |
++ return loop_set_fd(lo, mode, bdev, arg); |
1153 |
+ case LOOP_CHANGE_FD: |
1154 |
+- err = loop_change_fd(lo, bdev, arg); |
1155 |
+- break; |
1156 |
++ return loop_change_fd(lo, bdev, arg); |
1157 |
+ case LOOP_CLR_FD: |
1158 |
+- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ |
1159 |
+- err = loop_clr_fd(lo); |
1160 |
+- if (!err) |
1161 |
+- goto out_unlocked; |
1162 |
+- break; |
1163 |
++ return loop_clr_fd(lo); |
1164 |
+ case LOOP_SET_STATUS: |
1165 |
+ err = -EPERM; |
1166 |
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) |
1167 |
++ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { |
1168 |
+ err = loop_set_status_old(lo, |
1169 |
+ (struct loop_info __user *)arg); |
1170 |
++ } |
1171 |
+ break; |
1172 |
+ case LOOP_GET_STATUS: |
1173 |
+- err = loop_get_status_old(lo, (struct loop_info __user *) arg); |
1174 |
+- /* loop_get_status() unlocks lo_ctl_mutex */ |
1175 |
+- goto out_unlocked; |
1176 |
++ return loop_get_status_old(lo, (struct loop_info __user *) arg); |
1177 |
+ case LOOP_SET_STATUS64: |
1178 |
+ err = -EPERM; |
1179 |
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) |
1180 |
++ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { |
1181 |
+ err = loop_set_status64(lo, |
1182 |
+ (struct loop_info64 __user *) arg); |
1183 |
++ } |
1184 |
+ break; |
1185 |
+ case LOOP_GET_STATUS64: |
1186 |
+- err = loop_get_status64(lo, (struct loop_info64 __user *) arg); |
1187 |
+- /* loop_get_status() unlocks lo_ctl_mutex */ |
1188 |
+- goto out_unlocked; |
1189 |
++ return loop_get_status64(lo, (struct loop_info64 __user *) arg); |
1190 |
+ case LOOP_SET_CAPACITY: |
1191 |
+- err = -EPERM; |
1192 |
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) |
1193 |
+- err = loop_set_capacity(lo); |
1194 |
+- break; |
1195 |
+ case LOOP_SET_DIRECT_IO: |
1196 |
+- err = -EPERM; |
1197 |
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) |
1198 |
+- err = loop_set_dio(lo, arg); |
1199 |
+- break; |
1200 |
+ case LOOP_SET_BLOCK_SIZE: |
1201 |
+- err = -EPERM; |
1202 |
+- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) |
1203 |
+- err = loop_set_block_size(lo, arg); |
1204 |
+- break; |
1205 |
++ if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN)) |
1206 |
++ return -EPERM; |
1207 |
++ /* Fall through */ |
1208 |
+ default: |
1209 |
+- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; |
1210 |
++ err = lo_simple_ioctl(lo, cmd, arg); |
1211 |
++ break; |
1212 |
+ } |
1213 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1214 |
+ |
1215 |
+-out_unlocked: |
1216 |
+ return err; |
1217 |
+ } |
1218 |
+ |
1219 |
+@@ -1570,10 +1672,8 @@ loop_get_status_compat(struct loop_device *lo, |
1220 |
+ struct loop_info64 info64; |
1221 |
+ int err; |
1222 |
+ |
1223 |
+- if (!arg) { |
1224 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1225 |
++ if (!arg) |
1226 |
+ return -EINVAL; |
1227 |
+- } |
1228 |
+ err = loop_get_status(lo, &info64); |
1229 |
+ if (!err) |
1230 |
+ err = loop_info64_to_compat(&info64, arg); |
1231 |
+@@ -1588,20 +1688,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, |
1232 |
+ |
1233 |
+ switch(cmd) { |
1234 |
+ case LOOP_SET_STATUS: |
1235 |
+- err = mutex_lock_killable(&lo->lo_ctl_mutex); |
1236 |
+- if (!err) { |
1237 |
+- err = loop_set_status_compat(lo, |
1238 |
+- (const struct compat_loop_info __user *)arg); |
1239 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1240 |
+- } |
1241 |
++ err = loop_set_status_compat(lo, |
1242 |
++ (const struct compat_loop_info __user *)arg); |
1243 |
+ break; |
1244 |
+ case LOOP_GET_STATUS: |
1245 |
+- err = mutex_lock_killable(&lo->lo_ctl_mutex); |
1246 |
+- if (!err) { |
1247 |
+- err = loop_get_status_compat(lo, |
1248 |
+- (struct compat_loop_info __user *)arg); |
1249 |
+- /* loop_get_status() unlocks lo_ctl_mutex */ |
1250 |
+- } |
1251 |
++ err = loop_get_status_compat(lo, |
1252 |
++ (struct compat_loop_info __user *)arg); |
1253 |
+ break; |
1254 |
+ case LOOP_SET_CAPACITY: |
1255 |
+ case LOOP_CLR_FD: |
1256 |
+@@ -1625,9 +1717,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, |
1257 |
+ static int lo_open(struct block_device *bdev, fmode_t mode) |
1258 |
+ { |
1259 |
+ struct loop_device *lo; |
1260 |
+- int err = 0; |
1261 |
++ int err; |
1262 |
+ |
1263 |
+- mutex_lock(&loop_index_mutex); |
1264 |
++ err = mutex_lock_killable(&loop_ctl_mutex); |
1265 |
++ if (err) |
1266 |
++ return err; |
1267 |
+ lo = bdev->bd_disk->private_data; |
1268 |
+ if (!lo) { |
1269 |
+ err = -ENXIO; |
1270 |
+@@ -1636,26 +1730,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode) |
1271 |
+ |
1272 |
+ atomic_inc(&lo->lo_refcnt); |
1273 |
+ out: |
1274 |
+- mutex_unlock(&loop_index_mutex); |
1275 |
++ mutex_unlock(&loop_ctl_mutex); |
1276 |
+ return err; |
1277 |
+ } |
1278 |
+ |
1279 |
+-static void __lo_release(struct loop_device *lo) |
1280 |
++static void lo_release(struct gendisk *disk, fmode_t mode) |
1281 |
+ { |
1282 |
+- int err; |
1283 |
++ struct loop_device *lo; |
1284 |
+ |
1285 |
++ mutex_lock(&loop_ctl_mutex); |
1286 |
++ lo = disk->private_data; |
1287 |
+ if (atomic_dec_return(&lo->lo_refcnt)) |
1288 |
+- return; |
1289 |
++ goto out_unlock; |
1290 |
+ |
1291 |
+- mutex_lock(&lo->lo_ctl_mutex); |
1292 |
+ if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { |
1293 |
++ if (lo->lo_state != Lo_bound) |
1294 |
++ goto out_unlock; |
1295 |
++ lo->lo_state = Lo_rundown; |
1296 |
++ mutex_unlock(&loop_ctl_mutex); |
1297 |
+ /* |
1298 |
+ * In autoclear mode, stop the loop thread |
1299 |
+ * and remove configuration after last close. |
1300 |
+ */ |
1301 |
+- err = loop_clr_fd(lo); |
1302 |
+- if (!err) |
1303 |
+- return; |
1304 |
++ __loop_clr_fd(lo, true); |
1305 |
++ return; |
1306 |
+ } else if (lo->lo_state == Lo_bound) { |
1307 |
+ /* |
1308 |
+ * Otherwise keep thread (if running) and config, |
1309 |
+@@ -1665,14 +1763,8 @@ static void __lo_release(struct loop_device *lo) |
1310 |
+ blk_mq_unfreeze_queue(lo->lo_queue); |
1311 |
+ } |
1312 |
+ |
1313 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1314 |
+-} |
1315 |
+- |
1316 |
+-static void lo_release(struct gendisk *disk, fmode_t mode) |
1317 |
+-{ |
1318 |
+- mutex_lock(&loop_index_mutex); |
1319 |
+- __lo_release(disk->private_data); |
1320 |
+- mutex_unlock(&loop_index_mutex); |
1321 |
++out_unlock: |
1322 |
++ mutex_unlock(&loop_ctl_mutex); |
1323 |
+ } |
1324 |
+ |
1325 |
+ static const struct block_device_operations lo_fops = { |
1326 |
+@@ -1711,10 +1803,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data) |
1327 |
+ struct loop_device *lo = ptr; |
1328 |
+ struct loop_func_table *xfer = data; |
1329 |
+ |
1330 |
+- mutex_lock(&lo->lo_ctl_mutex); |
1331 |
++ mutex_lock(&loop_ctl_mutex); |
1332 |
+ if (lo->lo_encryption == xfer) |
1333 |
+ loop_release_xfer(lo); |
1334 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1335 |
++ mutex_unlock(&loop_ctl_mutex); |
1336 |
+ return 0; |
1337 |
+ } |
1338 |
+ |
1339 |
+@@ -1895,7 +1987,6 @@ static int loop_add(struct loop_device **l, int i) |
1340 |
+ if (!part_shift) |
1341 |
+ disk->flags |= GENHD_FL_NO_PART_SCAN; |
1342 |
+ disk->flags |= GENHD_FL_EXT_DEVT; |
1343 |
+- mutex_init(&lo->lo_ctl_mutex); |
1344 |
+ atomic_set(&lo->lo_refcnt, 0); |
1345 |
+ lo->lo_number = i; |
1346 |
+ spin_lock_init(&lo->lo_lock); |
1347 |
+@@ -1974,7 +2065,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) |
1348 |
+ struct kobject *kobj; |
1349 |
+ int err; |
1350 |
+ |
1351 |
+- mutex_lock(&loop_index_mutex); |
1352 |
++ mutex_lock(&loop_ctl_mutex); |
1353 |
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift); |
1354 |
+ if (err < 0) |
1355 |
+ err = loop_add(&lo, MINOR(dev) >> part_shift); |
1356 |
+@@ -1982,7 +2073,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) |
1357 |
+ kobj = NULL; |
1358 |
+ else |
1359 |
+ kobj = get_disk_and_module(lo->lo_disk); |
1360 |
+- mutex_unlock(&loop_index_mutex); |
1361 |
++ mutex_unlock(&loop_ctl_mutex); |
1362 |
+ |
1363 |
+ *part = 0; |
1364 |
+ return kobj; |
1365 |
+@@ -1992,9 +2083,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, |
1366 |
+ unsigned long parm) |
1367 |
+ { |
1368 |
+ struct loop_device *lo; |
1369 |
+- int ret = -ENOSYS; |
1370 |
++ int ret; |
1371 |
++ |
1372 |
++ ret = mutex_lock_killable(&loop_ctl_mutex); |
1373 |
++ if (ret) |
1374 |
++ return ret; |
1375 |
+ |
1376 |
+- mutex_lock(&loop_index_mutex); |
1377 |
++ ret = -ENOSYS; |
1378 |
+ switch (cmd) { |
1379 |
+ case LOOP_CTL_ADD: |
1380 |
+ ret = loop_lookup(&lo, parm); |
1381 |
+@@ -2008,21 +2103,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, |
1382 |
+ ret = loop_lookup(&lo, parm); |
1383 |
+ if (ret < 0) |
1384 |
+ break; |
1385 |
+- ret = mutex_lock_killable(&lo->lo_ctl_mutex); |
1386 |
+- if (ret) |
1387 |
+- break; |
1388 |
+ if (lo->lo_state != Lo_unbound) { |
1389 |
+ ret = -EBUSY; |
1390 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1391 |
+ break; |
1392 |
+ } |
1393 |
+ if (atomic_read(&lo->lo_refcnt) > 0) { |
1394 |
+ ret = -EBUSY; |
1395 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1396 |
+ break; |
1397 |
+ } |
1398 |
+ lo->lo_disk->private_data = NULL; |
1399 |
+- mutex_unlock(&lo->lo_ctl_mutex); |
1400 |
+ idr_remove(&loop_index_idr, lo->lo_number); |
1401 |
+ loop_remove(lo); |
1402 |
+ break; |
1403 |
+@@ -2032,7 +2121,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, |
1404 |
+ break; |
1405 |
+ ret = loop_add(&lo, -1); |
1406 |
+ } |
1407 |
+- mutex_unlock(&loop_index_mutex); |
1408 |
++ mutex_unlock(&loop_ctl_mutex); |
1409 |
+ |
1410 |
+ return ret; |
1411 |
+ } |
1412 |
+@@ -2116,10 +2205,10 @@ static int __init loop_init(void) |
1413 |
+ THIS_MODULE, loop_probe, NULL, NULL); |
1414 |
+ |
1415 |
+ /* pre-create number of devices given by config or max_loop */ |
1416 |
+- mutex_lock(&loop_index_mutex); |
1417 |
++ mutex_lock(&loop_ctl_mutex); |
1418 |
+ for (i = 0; i < nr; i++) |
1419 |
+ loop_add(&lo, i); |
1420 |
+- mutex_unlock(&loop_index_mutex); |
1421 |
++ mutex_unlock(&loop_ctl_mutex); |
1422 |
+ |
1423 |
+ printk(KERN_INFO "loop: module loaded\n"); |
1424 |
+ return 0; |
1425 |
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h |
1426 |
+index 4d42c7af7de7..af75a5ee4094 100644 |
1427 |
+--- a/drivers/block/loop.h |
1428 |
++++ b/drivers/block/loop.h |
1429 |
+@@ -54,7 +54,6 @@ struct loop_device { |
1430 |
+ |
1431 |
+ spinlock_t lo_lock; |
1432 |
+ int lo_state; |
1433 |
+- struct mutex lo_ctl_mutex; |
1434 |
+ struct kthread_worker worker; |
1435 |
+ struct task_struct *worker_task; |
1436 |
+ bool use_dio; |
1437 |
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c |
1438 |
+index 4d4d6129ff66..c964315c7b0b 100644 |
1439 |
+--- a/drivers/block/nbd.c |
1440 |
++++ b/drivers/block/nbd.c |
1441 |
+@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd) |
1442 |
+ blk_queue_physical_block_size(nbd->disk->queue, config->blksize); |
1443 |
+ set_capacity(nbd->disk, config->bytesize >> 9); |
1444 |
+ if (bdev) { |
1445 |
+- if (bdev->bd_disk) |
1446 |
++ if (bdev->bd_disk) { |
1447 |
+ bd_set_size(bdev, config->bytesize); |
1448 |
+- else |
1449 |
++ set_blocksize(bdev, config->blksize); |
1450 |
++ } else |
1451 |
+ bdev->bd_invalidated = 1; |
1452 |
+ bdput(bdev); |
1453 |
+ } |
1454 |
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig |
1455 |
+index caa98a7fe392..db330a0106b2 100644 |
1456 |
+--- a/drivers/crypto/Kconfig |
1457 |
++++ b/drivers/crypto/Kconfig |
1458 |
+@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU |
1459 |
+ depends on ARCH_BCM_IPROC |
1460 |
+ depends on MAILBOX |
1461 |
+ default m |
1462 |
++ select CRYPTO_AUTHENC |
1463 |
+ select CRYPTO_DES |
1464 |
+ select CRYPTO_MD5 |
1465 |
+ select CRYPTO_SHA1 |
1466 |
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c |
1467 |
+index 2d1f1db9f807..cd464637b0cb 100644 |
1468 |
+--- a/drivers/crypto/bcm/cipher.c |
1469 |
++++ b/drivers/crypto/bcm/cipher.c |
1470 |
+@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, |
1471 |
+ struct spu_hw *spu = &iproc_priv.spu; |
1472 |
+ struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); |
1473 |
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher); |
1474 |
+- struct rtattr *rta = (void *)key; |
1475 |
+- struct crypto_authenc_key_param *param; |
1476 |
+- const u8 *origkey = key; |
1477 |
+- const unsigned int origkeylen = keylen; |
1478 |
+- |
1479 |
+- int ret = 0; |
1480 |
++ struct crypto_authenc_keys keys; |
1481 |
++ int ret; |
1482 |
+ |
1483 |
+ flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, |
1484 |
+ keylen); |
1485 |
+ flow_dump(" key: ", key, keylen); |
1486 |
+ |
1487 |
+- if (!RTA_OK(rta, keylen)) |
1488 |
+- goto badkey; |
1489 |
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
1490 |
+- goto badkey; |
1491 |
+- if (RTA_PAYLOAD(rta) < sizeof(*param)) |
1492 |
++ ret = crypto_authenc_extractkeys(&keys, key, keylen); |
1493 |
++ if (ret) |
1494 |
+ goto badkey; |
1495 |
+ |
1496 |
+- param = RTA_DATA(rta); |
1497 |
+- ctx->enckeylen = be32_to_cpu(param->enckeylen); |
1498 |
+- |
1499 |
+- key += RTA_ALIGN(rta->rta_len); |
1500 |
+- keylen -= RTA_ALIGN(rta->rta_len); |
1501 |
+- |
1502 |
+- if (keylen < ctx->enckeylen) |
1503 |
+- goto badkey; |
1504 |
+- if (ctx->enckeylen > MAX_KEY_SIZE) |
1505 |
++ if (keys.enckeylen > MAX_KEY_SIZE || |
1506 |
++ keys.authkeylen > MAX_KEY_SIZE) |
1507 |
+ goto badkey; |
1508 |
+ |
1509 |
+- ctx->authkeylen = keylen - ctx->enckeylen; |
1510 |
+- |
1511 |
+- if (ctx->authkeylen > MAX_KEY_SIZE) |
1512 |
+- goto badkey; |
1513 |
++ ctx->enckeylen = keys.enckeylen; |
1514 |
++ ctx->authkeylen = keys.authkeylen; |
1515 |
+ |
1516 |
+- memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); |
1517 |
++ memcpy(ctx->enckey, keys.enckey, keys.enckeylen); |
1518 |
+ /* May end up padding auth key. So make sure it's zeroed. */ |
1519 |
+ memset(ctx->authkey, 0, sizeof(ctx->authkey)); |
1520 |
+- memcpy(ctx->authkey, key, ctx->authkeylen); |
1521 |
++ memcpy(ctx->authkey, keys.authkey, keys.authkeylen); |
1522 |
+ |
1523 |
+ switch (ctx->alg->cipher_info.alg) { |
1524 |
+ case CIPHER_ALG_DES: |
1525 |
+@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, |
1526 |
+ u32 tmp[DES_EXPKEY_WORDS]; |
1527 |
+ u32 flags = CRYPTO_TFM_RES_WEAK_KEY; |
1528 |
+ |
1529 |
+- if (des_ekey(tmp, key) == 0) { |
1530 |
++ if (des_ekey(tmp, keys.enckey) == 0) { |
1531 |
+ if (crypto_aead_get_flags(cipher) & |
1532 |
+ CRYPTO_TFM_REQ_WEAK_KEY) { |
1533 |
+ crypto_aead_set_flags(cipher, flags); |
1534 |
+@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, |
1535 |
+ break; |
1536 |
+ case CIPHER_ALG_3DES: |
1537 |
+ if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { |
1538 |
+- const u32 *K = (const u32 *)key; |
1539 |
++ const u32 *K = (const u32 *)keys.enckey; |
1540 |
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; |
1541 |
+ |
1542 |
+ if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || |
1543 |
+@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, |
1544 |
+ ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
1545 |
+ ctx->fallback_cipher->base.crt_flags |= |
1546 |
+ tfm->crt_flags & CRYPTO_TFM_REQ_MASK; |
1547 |
+- ret = |
1548 |
+- crypto_aead_setkey(ctx->fallback_cipher, origkey, |
1549 |
+- origkeylen); |
1550 |
++ ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); |
1551 |
+ if (ret) { |
1552 |
+ flow_log(" fallback setkey() returned:%d\n", ret); |
1553 |
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
1554 |
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c |
1555 |
+index 46924affa0bd..212fd0b3b8dd 100644 |
1556 |
+--- a/drivers/crypto/caam/caamhash.c |
1557 |
++++ b/drivers/crypto/caam/caamhash.c |
1558 |
+@@ -1071,13 +1071,16 @@ static int ahash_final_no_ctx(struct ahash_request *req) |
1559 |
+ |
1560 |
+ desc = edesc->hw_desc; |
1561 |
+ |
1562 |
+- state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); |
1563 |
+- if (dma_mapping_error(jrdev, state->buf_dma)) { |
1564 |
+- dev_err(jrdev, "unable to map src\n"); |
1565 |
+- goto unmap; |
1566 |
+- } |
1567 |
++ if (buflen) { |
1568 |
++ state->buf_dma = dma_map_single(jrdev, buf, buflen, |
1569 |
++ DMA_TO_DEVICE); |
1570 |
++ if (dma_mapping_error(jrdev, state->buf_dma)) { |
1571 |
++ dev_err(jrdev, "unable to map src\n"); |
1572 |
++ goto unmap; |
1573 |
++ } |
1574 |
+ |
1575 |
+- append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
1576 |
++ append_seq_in_ptr(desc, state->buf_dma, buflen, 0); |
1577 |
++ } |
1578 |
+ |
1579 |
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, |
1580 |
+ digestsize); |
1581 |
+diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c |
1582 |
+index 01b82b82f8b8..5852d29ae2da 100644 |
1583 |
+--- a/drivers/crypto/ccree/cc_aead.c |
1584 |
++++ b/drivers/crypto/ccree/cc_aead.c |
1585 |
+@@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
1586 |
+ unsigned int keylen) |
1587 |
+ { |
1588 |
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); |
1589 |
+- struct rtattr *rta = (struct rtattr *)key; |
1590 |
+ struct cc_crypto_req cc_req = {}; |
1591 |
+- struct crypto_authenc_key_param *param; |
1592 |
+ struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; |
1593 |
+- int rc = -EINVAL; |
1594 |
+ unsigned int seq_len = 0; |
1595 |
+ struct device *dev = drvdata_to_dev(ctx->drvdata); |
1596 |
++ const u8 *enckey, *authkey; |
1597 |
++ int rc; |
1598 |
+ |
1599 |
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", |
1600 |
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); |
1601 |
+@@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
1602 |
+ /* STAT_PHASE_0: Init and sanity checks */ |
1603 |
+ |
1604 |
+ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ |
1605 |
+- if (!RTA_OK(rta, keylen)) |
1606 |
+- goto badkey; |
1607 |
+- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) |
1608 |
+- goto badkey; |
1609 |
+- if (RTA_PAYLOAD(rta) < sizeof(*param)) |
1610 |
+- goto badkey; |
1611 |
+- param = RTA_DATA(rta); |
1612 |
+- ctx->enc_keylen = be32_to_cpu(param->enckeylen); |
1613 |
+- key += RTA_ALIGN(rta->rta_len); |
1614 |
+- keylen -= RTA_ALIGN(rta->rta_len); |
1615 |
+- if (keylen < ctx->enc_keylen) |
1616 |
++ struct crypto_authenc_keys keys; |
1617 |
++ |
1618 |
++ rc = crypto_authenc_extractkeys(&keys, key, keylen); |
1619 |
++ if (rc) |
1620 |
+ goto badkey; |
1621 |
+- ctx->auth_keylen = keylen - ctx->enc_keylen; |
1622 |
++ enckey = keys.enckey; |
1623 |
++ authkey = keys.authkey; |
1624 |
++ ctx->enc_keylen = keys.enckeylen; |
1625 |
++ ctx->auth_keylen = keys.authkeylen; |
1626 |
+ |
1627 |
+ if (ctx->cipher_mode == DRV_CIPHER_CTR) { |
1628 |
+ /* the nonce is stored in bytes at end of key */ |
1629 |
++ rc = -EINVAL; |
1630 |
+ if (ctx->enc_keylen < |
1631 |
+ (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) |
1632 |
+ goto badkey; |
1633 |
+ /* Copy nonce from last 4 bytes in CTR key to |
1634 |
+ * first 4 bytes in CTR IV |
1635 |
+ */ |
1636 |
+- memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + |
1637 |
+- ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, |
1638 |
+- CTR_RFC3686_NONCE_SIZE); |
1639 |
++ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - |
1640 |
++ CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); |
1641 |
+ /* Set CTR key size */ |
1642 |
+ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; |
1643 |
+ } |
1644 |
+ } else { /* non-authenc - has just one key */ |
1645 |
++ enckey = key; |
1646 |
++ authkey = NULL; |
1647 |
+ ctx->enc_keylen = keylen; |
1648 |
+ ctx->auth_keylen = 0; |
1649 |
+ } |
1650 |
+@@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, |
1651 |
+ /* STAT_PHASE_1: Copy key to ctx */ |
1652 |
+ |
1653 |
+ /* Get key material */ |
1654 |
+- memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); |
1655 |
++ memcpy(ctx->enckey, enckey, ctx->enc_keylen); |
1656 |
+ if (ctx->enc_keylen == 24) |
1657 |
+ memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); |
1658 |
+ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { |
1659 |
+- memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); |
1660 |
++ memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, |
1661 |
++ ctx->auth_keylen); |
1662 |
+ } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ |
1663 |
+- rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); |
1664 |
++ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); |
1665 |
+ if (rc) |
1666 |
+ goto badkey; |
1667 |
+ } |
1668 |
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c |
1669 |
+index 6988012deca4..f4f3e9a5851e 100644 |
1670 |
+--- a/drivers/crypto/talitos.c |
1671 |
++++ b/drivers/crypto/talitos.c |
1672 |
+@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1673 |
+ struct talitos_private *priv = dev_get_drvdata(dev); |
1674 |
+ bool is_sec1 = has_ftr_sec1(priv); |
1675 |
+ int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; |
1676 |
+- void *err; |
1677 |
+ |
1678 |
+ if (cryptlen + authsize > max_len) { |
1679 |
+ dev_err(dev, "length exceeds h/w max limit\n"); |
1680 |
+ return ERR_PTR(-EINVAL); |
1681 |
+ } |
1682 |
+ |
1683 |
+- if (ivsize) |
1684 |
+- iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
1685 |
+- |
1686 |
+ if (!dst || dst == src) { |
1687 |
+ src_len = assoclen + cryptlen + authsize; |
1688 |
+ src_nents = sg_nents_for_len(src, src_len); |
1689 |
+ if (src_nents < 0) { |
1690 |
+ dev_err(dev, "Invalid number of src SG.\n"); |
1691 |
+- err = ERR_PTR(-EINVAL); |
1692 |
+- goto error_sg; |
1693 |
++ return ERR_PTR(-EINVAL); |
1694 |
+ } |
1695 |
+ src_nents = (src_nents == 1) ? 0 : src_nents; |
1696 |
+ dst_nents = dst ? src_nents : 0; |
1697 |
+@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1698 |
+ src_nents = sg_nents_for_len(src, src_len); |
1699 |
+ if (src_nents < 0) { |
1700 |
+ dev_err(dev, "Invalid number of src SG.\n"); |
1701 |
+- err = ERR_PTR(-EINVAL); |
1702 |
+- goto error_sg; |
1703 |
++ return ERR_PTR(-EINVAL); |
1704 |
+ } |
1705 |
+ src_nents = (src_nents == 1) ? 0 : src_nents; |
1706 |
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); |
1707 |
+ dst_nents = sg_nents_for_len(dst, dst_len); |
1708 |
+ if (dst_nents < 0) { |
1709 |
+ dev_err(dev, "Invalid number of dst SG.\n"); |
1710 |
+- err = ERR_PTR(-EINVAL); |
1711 |
+- goto error_sg; |
1712 |
++ return ERR_PTR(-EINVAL); |
1713 |
+ } |
1714 |
+ dst_nents = (dst_nents == 1) ? 0 : dst_nents; |
1715 |
+ } |
1716 |
+@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1717 |
+ /* if its a ahash, add space for a second desc next to the first one */ |
1718 |
+ if (is_sec1 && !dst) |
1719 |
+ alloc_len += sizeof(struct talitos_desc); |
1720 |
++ alloc_len += ivsize; |
1721 |
+ |
1722 |
+ edesc = kmalloc(alloc_len, GFP_DMA | flags); |
1723 |
+- if (!edesc) { |
1724 |
+- err = ERR_PTR(-ENOMEM); |
1725 |
+- goto error_sg; |
1726 |
++ if (!edesc) |
1727 |
++ return ERR_PTR(-ENOMEM); |
1728 |
++ if (ivsize) { |
1729 |
++ iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); |
1730 |
++ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); |
1731 |
+ } |
1732 |
+ memset(&edesc->desc, 0, sizeof(edesc->desc)); |
1733 |
+ |
1734 |
+@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, |
1735 |
+ DMA_BIDIRECTIONAL); |
1736 |
+ } |
1737 |
+ return edesc; |
1738 |
+-error_sg: |
1739 |
+- if (iv_dma) |
1740 |
+- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); |
1741 |
+- return err; |
1742 |
+ } |
1743 |
+ |
1744 |
+ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, |
1745 |
+diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c |
1746 |
+index d5b7f315098c..087470ad6436 100644 |
1747 |
+--- a/drivers/gpu/drm/drm_atomic_uapi.c |
1748 |
++++ b/drivers/gpu/drm/drm_atomic_uapi.c |
1749 |
+@@ -1275,12 +1275,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, |
1750 |
+ (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) |
1751 |
+ return -EINVAL; |
1752 |
+ |
1753 |
+- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); |
1754 |
+- |
1755 |
+ state = drm_atomic_state_alloc(dev); |
1756 |
+ if (!state) |
1757 |
+ return -ENOMEM; |
1758 |
+ |
1759 |
++ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); |
1760 |
+ state->acquire_ctx = &ctx; |
1761 |
+ state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); |
1762 |
+ |
1763 |
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c |
1764 |
+index b10ed61526a5..6950e365135c 100644 |
1765 |
+--- a/drivers/gpu/drm/drm_fb_helper.c |
1766 |
++++ b/drivers/gpu/drm/drm_fb_helper.c |
1767 |
+@@ -1690,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, |
1768 |
+ struct drm_fb_helper *fb_helper = info->par; |
1769 |
+ struct drm_framebuffer *fb = fb_helper->fb; |
1770 |
+ |
1771 |
+- if (var->pixclock != 0 || in_dbg_master()) |
1772 |
++ if (in_dbg_master()) |
1773 |
+ return -EINVAL; |
1774 |
+ |
1775 |
++ if (var->pixclock != 0) { |
1776 |
++ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); |
1777 |
++ var->pixclock = 0; |
1778 |
++ } |
1779 |
++ |
1780 |
+ /* |
1781 |
+ * Changes struct fb_var_screeninfo are currently not pushed back |
1782 |
+ * to KMS, hence fail if different settings are requested. |
1783 |
+diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c |
1784 |
+index be8b754eaf60..9bc3654c1c7f 100644 |
1785 |
+--- a/drivers/gpu/drm/drm_mode_object.c |
1786 |
++++ b/drivers/gpu/drm/drm_mode_object.c |
1787 |
+@@ -458,11 +458,11 @@ static int set_property_atomic(struct drm_mode_object *obj, |
1788 |
+ struct drm_modeset_acquire_ctx ctx; |
1789 |
+ int ret; |
1790 |
+ |
1791 |
+- drm_modeset_acquire_init(&ctx, 0); |
1792 |
+- |
1793 |
+ state = drm_atomic_state_alloc(dev); |
1794 |
+ if (!state) |
1795 |
+ return -ENOMEM; |
1796 |
++ |
1797 |
++ drm_modeset_acquire_init(&ctx, 0); |
1798 |
+ state->acquire_ctx = &ctx; |
1799 |
+ retry: |
1800 |
+ if (prop == state->dev->mode_config.dpms_property) { |
1801 |
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c |
1802 |
+index c1072143da1d..e70c450427dc 100644 |
1803 |
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c |
1804 |
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c |
1805 |
+@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) |
1806 |
+ { |
1807 |
+ unsigned int index; |
1808 |
+ u64 virtaddr; |
1809 |
+- unsigned long req_size, pgoff = 0; |
1810 |
++ unsigned long req_size, pgoff, req_start; |
1811 |
+ pgprot_t pg_prot; |
1812 |
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); |
1813 |
+ |
1814 |
+@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma) |
1815 |
+ pg_prot = vma->vm_page_prot; |
1816 |
+ virtaddr = vma->vm_start; |
1817 |
+ req_size = vma->vm_end - vma->vm_start; |
1818 |
+- pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT; |
1819 |
++ pgoff = vma->vm_pgoff & |
1820 |
++ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); |
1821 |
++ req_start = pgoff << PAGE_SHIFT; |
1822 |
++ |
1823 |
++ if (!intel_vgpu_in_aperture(vgpu, req_start)) |
1824 |
++ return -EINVAL; |
1825 |
++ if (req_start + req_size > |
1826 |
++ vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu)) |
1827 |
++ return -EINVAL; |
1828 |
++ |
1829 |
++ pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff; |
1830 |
+ |
1831 |
+ return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot); |
1832 |
+ } |
1833 |
+diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c |
1834 |
+index 96ac1458a59c..37f93022a106 100644 |
1835 |
+--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c |
1836 |
++++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c |
1837 |
+@@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, |
1838 |
+ child_count++; |
1839 |
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, |
1840 |
+ &panel, &bridge); |
1841 |
+- if (!ret) |
1842 |
++ if (!ret) { |
1843 |
++ of_node_put(endpoint); |
1844 |
+ break; |
1845 |
++ } |
1846 |
+ } |
1847 |
+ |
1848 |
+ of_node_put(port); |
1849 |
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c |
1850 |
+index 7041007396ae..e3bcea4b4891 100644 |
1851 |
+--- a/drivers/gpu/drm/vkms/vkms_plane.c |
1852 |
++++ b/drivers/gpu/drm/vkms/vkms_plane.c |
1853 |
+@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane) |
1854 |
+ return NULL; |
1855 |
+ |
1856 |
+ crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL); |
1857 |
+- if (WARN_ON(!crc_data)) |
1858 |
+- DRM_INFO("Couldn't allocate crc_data"); |
1859 |
++ if (!crc_data) { |
1860 |
++ DRM_DEBUG_KMS("Couldn't allocate crc_data\n"); |
1861 |
++ kfree(vkms_state); |
1862 |
++ return NULL; |
1863 |
++ } |
1864 |
+ |
1865 |
+ vkms_state->crc_data = crc_data; |
1866 |
+ |
1867 |
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c |
1868 |
+index 573399e3ccc1..ff6468e7fe79 100644 |
1869 |
+--- a/drivers/infiniband/core/nldev.c |
1870 |
++++ b/drivers/infiniband/core/nldev.c |
1871 |
+@@ -580,10 +580,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb, |
1872 |
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, |
1873 |
+ atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) |
1874 |
+ goto err; |
1875 |
+- if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && |
1876 |
+- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, |
1877 |
+- pd->unsafe_global_rkey)) |
1878 |
+- goto err; |
1879 |
+ |
1880 |
+ if (fill_res_name_pid(msg, res)) |
1881 |
+ goto err; |
1882 |
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h |
1883 |
+index 42b8685c997e..3c633ab58052 100644 |
1884 |
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h |
1885 |
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h |
1886 |
+@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state) |
1887 |
+ |
1888 |
+ static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) |
1889 |
+ { |
1890 |
+- return (enum pvrdma_wr_opcode)op; |
1891 |
++ switch (op) { |
1892 |
++ case IB_WR_RDMA_WRITE: |
1893 |
++ return PVRDMA_WR_RDMA_WRITE; |
1894 |
++ case IB_WR_RDMA_WRITE_WITH_IMM: |
1895 |
++ return PVRDMA_WR_RDMA_WRITE_WITH_IMM; |
1896 |
++ case IB_WR_SEND: |
1897 |
++ return PVRDMA_WR_SEND; |
1898 |
++ case IB_WR_SEND_WITH_IMM: |
1899 |
++ return PVRDMA_WR_SEND_WITH_IMM; |
1900 |
++ case IB_WR_RDMA_READ: |
1901 |
++ return PVRDMA_WR_RDMA_READ; |
1902 |
++ case IB_WR_ATOMIC_CMP_AND_SWP: |
1903 |
++ return PVRDMA_WR_ATOMIC_CMP_AND_SWP; |
1904 |
++ case IB_WR_ATOMIC_FETCH_AND_ADD: |
1905 |
++ return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; |
1906 |
++ case IB_WR_LSO: |
1907 |
++ return PVRDMA_WR_LSO; |
1908 |
++ case IB_WR_SEND_WITH_INV: |
1909 |
++ return PVRDMA_WR_SEND_WITH_INV; |
1910 |
++ case IB_WR_RDMA_READ_WITH_INV: |
1911 |
++ return PVRDMA_WR_RDMA_READ_WITH_INV; |
1912 |
++ case IB_WR_LOCAL_INV: |
1913 |
++ return PVRDMA_WR_LOCAL_INV; |
1914 |
++ case IB_WR_REG_MR: |
1915 |
++ return PVRDMA_WR_FAST_REG_MR; |
1916 |
++ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: |
1917 |
++ return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; |
1918 |
++ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: |
1919 |
++ return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; |
1920 |
++ case IB_WR_REG_SIG_MR: |
1921 |
++ return PVRDMA_WR_REG_SIG_MR; |
1922 |
++ default: |
1923 |
++ return PVRDMA_WR_ERROR; |
1924 |
++ } |
1925 |
+ } |
1926 |
+ |
1927 |
+ static inline enum ib_wc_status pvrdma_wc_status_to_ib( |
1928 |
+diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1929 |
+index cf22f57a9f0d..418d9ab4ea7f 100644 |
1930 |
+--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1931 |
++++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c |
1932 |
+@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
1933 |
+ wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) |
1934 |
+ wqe_hdr->ex.imm_data = wr->ex.imm_data; |
1935 |
+ |
1936 |
++ if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { |
1937 |
++ *bad_wr = wr; |
1938 |
++ ret = -EINVAL; |
1939 |
++ goto out; |
1940 |
++ } |
1941 |
++ |
1942 |
+ switch (qp->ibqp.qp_type) { |
1943 |
+ case IB_QPT_GSI: |
1944 |
+ case IB_QPT_UD: |
1945 |
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c |
1946 |
+index 99f736c81286..fa77e2ae4ec4 100644 |
1947 |
+--- a/drivers/media/common/videobuf2/videobuf2-core.c |
1948 |
++++ b/drivers/media/common/videobuf2/videobuf2-core.c |
1949 |
+@@ -2146,9 +2146,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) |
1950 |
+ return -EINVAL; |
1951 |
+ } |
1952 |
+ } |
1953 |
++ |
1954 |
++ mutex_lock(&q->mmap_lock); |
1955 |
++ |
1956 |
+ if (vb2_fileio_is_active(q)) { |
1957 |
+ dprintk(1, "mmap: file io in progress\n"); |
1958 |
+- return -EBUSY; |
1959 |
++ ret = -EBUSY; |
1960 |
++ goto unlock; |
1961 |
+ } |
1962 |
+ |
1963 |
+ /* |
1964 |
+@@ -2156,7 +2160,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) |
1965 |
+ */ |
1966 |
+ ret = __find_plane_by_offset(q, off, &buffer, &plane); |
1967 |
+ if (ret) |
1968 |
+- return ret; |
1969 |
++ goto unlock; |
1970 |
+ |
1971 |
+ vb = q->bufs[buffer]; |
1972 |
+ |
1973 |
+@@ -2169,11 +2173,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma) |
1974 |
+ if (length < (vma->vm_end - vma->vm_start)) { |
1975 |
+ dprintk(1, |
1976 |
+ "MMAP invalid, as it would overflow buffer length\n"); |
1977 |
+- return -EINVAL; |
1978 |
++ ret = -EINVAL; |
1979 |
++ goto unlock; |
1980 |
+ } |
1981 |
+ |
1982 |
+- mutex_lock(&q->mmap_lock); |
1983 |
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma); |
1984 |
++ |
1985 |
++unlock: |
1986 |
+ mutex_unlock(&q->mmap_lock); |
1987 |
+ if (ret) |
1988 |
+ return ret; |
1989 |
+diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c |
1990 |
+index f938a2c54314..2d1ae83e2fde 100644 |
1991 |
+--- a/drivers/media/platform/vim2m.c |
1992 |
++++ b/drivers/media/platform/vim2m.c |
1993 |
+@@ -809,7 +809,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q) |
1994 |
+ struct vb2_v4l2_buffer *vbuf; |
1995 |
+ unsigned long flags; |
1996 |
+ |
1997 |
+- cancel_delayed_work_sync(&dev->work_run); |
1998 |
++ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) |
1999 |
++ cancel_delayed_work_sync(&dev->work_run); |
2000 |
++ |
2001 |
+ for (;;) { |
2002 |
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) |
2003 |
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); |
2004 |
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c |
2005 |
+index eebfff2126be..46e46e34a9e5 100644 |
2006 |
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c |
2007 |
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c |
2008 |
+@@ -873,8 +873,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) |
2009 |
+ "%s-vid-cap", dev->v4l2_dev.name); |
2010 |
+ |
2011 |
+ if (IS_ERR(dev->kthread_vid_cap)) { |
2012 |
++ int err = PTR_ERR(dev->kthread_vid_cap); |
2013 |
++ |
2014 |
++ dev->kthread_vid_cap = NULL; |
2015 |
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); |
2016 |
+- return PTR_ERR(dev->kthread_vid_cap); |
2017 |
++ return err; |
2018 |
+ } |
2019 |
+ *pstreaming = true; |
2020 |
+ vivid_grab_controls(dev, true); |
2021 |
+diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c |
2022 |
+index 5a14810eeb69..ce5bcda2348c 100644 |
2023 |
+--- a/drivers/media/platform/vivid/vivid-kthread-out.c |
2024 |
++++ b/drivers/media/platform/vivid/vivid-kthread-out.c |
2025 |
+@@ -244,8 +244,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming) |
2026 |
+ "%s-vid-out", dev->v4l2_dev.name); |
2027 |
+ |
2028 |
+ if (IS_ERR(dev->kthread_vid_out)) { |
2029 |
++ int err = PTR_ERR(dev->kthread_vid_out); |
2030 |
++ |
2031 |
++ dev->kthread_vid_out = NULL; |
2032 |
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n"); |
2033 |
+- return PTR_ERR(dev->kthread_vid_out); |
2034 |
++ return err; |
2035 |
+ } |
2036 |
+ *pstreaming = true; |
2037 |
+ vivid_grab_controls(dev, true); |
2038 |
+diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c |
2039 |
+index 9645a91b8782..661f4015fba1 100644 |
2040 |
+--- a/drivers/media/platform/vivid/vivid-vid-common.c |
2041 |
++++ b/drivers/media/platform/vivid/vivid-vid-common.c |
2042 |
+@@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = { |
2043 |
+ .type = V4L2_DV_BT_656_1120, |
2044 |
+ /* keep this initialization for compatibility with GCC < 4.4.6 */ |
2045 |
+ .reserved = { 0 }, |
2046 |
+- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000, |
2047 |
++ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000, |
2048 |
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | |
2049 |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, |
2050 |
+ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED) |
2051 |
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c |
2052 |
+index c63746968fa3..3cdd09e4dd6b 100644 |
2053 |
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c |
2054 |
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c |
2055 |
+@@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only) |
2056 |
+ const struct v4l2_window *win; |
2057 |
+ const struct v4l2_sdr_format *sdr; |
2058 |
+ const struct v4l2_meta_format *meta; |
2059 |
++ u32 planes; |
2060 |
+ unsigned i; |
2061 |
+ |
2062 |
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); |
2063 |
+@@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only) |
2064 |
+ prt_names(mp->field, v4l2_field_names), |
2065 |
+ mp->colorspace, mp->num_planes, mp->flags, |
2066 |
+ mp->ycbcr_enc, mp->quantization, mp->xfer_func); |
2067 |
+- for (i = 0; i < mp->num_planes; i++) |
2068 |
++ planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES); |
2069 |
++ for (i = 0; i < planes; i++) |
2070 |
+ printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, |
2071 |
+ mp->plane_fmt[i].bytesperline, |
2072 |
+ mp->plane_fmt[i].sizeimage); |
2073 |
+diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c |
2074 |
+index b89379782741..9c7925ca13cf 100644 |
2075 |
+--- a/drivers/mfd/tps6586x.c |
2076 |
++++ b/drivers/mfd/tps6586x.c |
2077 |
+@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client) |
2078 |
+ return 0; |
2079 |
+ } |
2080 |
+ |
2081 |
++static int __maybe_unused tps6586x_i2c_suspend(struct device *dev) |
2082 |
++{ |
2083 |
++ struct tps6586x *tps6586x = dev_get_drvdata(dev); |
2084 |
++ |
2085 |
++ if (tps6586x->client->irq) |
2086 |
++ disable_irq(tps6586x->client->irq); |
2087 |
++ |
2088 |
++ return 0; |
2089 |
++} |
2090 |
++ |
2091 |
++static int __maybe_unused tps6586x_i2c_resume(struct device *dev) |
2092 |
++{ |
2093 |
++ struct tps6586x *tps6586x = dev_get_drvdata(dev); |
2094 |
++ |
2095 |
++ if (tps6586x->client->irq) |
2096 |
++ enable_irq(tps6586x->client->irq); |
2097 |
++ |
2098 |
++ return 0; |
2099 |
++} |
2100 |
++ |
2101 |
++static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend, |
2102 |
++ tps6586x_i2c_resume); |
2103 |
++ |
2104 |
+ static const struct i2c_device_id tps6586x_id_table[] = { |
2105 |
+ { "tps6586x", 0 }, |
2106 |
+ { }, |
2107 |
+@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = { |
2108 |
+ .driver = { |
2109 |
+ .name = "tps6586x", |
2110 |
+ .of_match_table = of_match_ptr(tps6586x_of_match), |
2111 |
++ .pm = &tps6586x_pm_ops, |
2112 |
+ }, |
2113 |
+ .probe = tps6586x_i2c_probe, |
2114 |
+ .remove = tps6586x_i2c_remove, |
2115 |
+diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c |
2116 |
+index 3633202e18f4..02a9aba85368 100644 |
2117 |
+--- a/drivers/misc/mic/vop/vop_main.c |
2118 |
++++ b/drivers/misc/mic/vop/vop_main.c |
2119 |
+@@ -381,16 +381,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, |
2120 |
+ struct _vop_vdev *vdev = to_vopvdev(dev); |
2121 |
+ struct vop_device *vpdev = vdev->vpdev; |
2122 |
+ struct mic_device_ctrl __iomem *dc = vdev->dc; |
2123 |
+- int i, err, retry; |
2124 |
++ int i, err, retry, queue_idx = 0; |
2125 |
+ |
2126 |
+ /* We must have this many virtqueues. */ |
2127 |
+ if (nvqs > ioread8(&vdev->desc->num_vq)) |
2128 |
+ return -ENOENT; |
2129 |
+ |
2130 |
+ for (i = 0; i < nvqs; ++i) { |
2131 |
++ if (!names[i]) { |
2132 |
++ vqs[i] = NULL; |
2133 |
++ continue; |
2134 |
++ } |
2135 |
++ |
2136 |
+ dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", |
2137 |
+ __func__, i, names[i]); |
2138 |
+- vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], |
2139 |
++ vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i], |
2140 |
+ ctx ? ctx[i] : false); |
2141 |
+ if (IS_ERR(vqs[i])) { |
2142 |
+ err = PTR_ERR(vqs[i]); |
2143 |
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c |
2144 |
+index 3cc8bfee6c18..8594659cb592 100644 |
2145 |
+--- a/drivers/mmc/host/sdhci-msm.c |
2146 |
++++ b/drivers/mmc/host/sdhci-msm.c |
2147 |
+@@ -258,6 +258,8 @@ struct sdhci_msm_host { |
2148 |
+ bool mci_removed; |
2149 |
+ const struct sdhci_msm_variant_ops *var_ops; |
2150 |
+ const struct sdhci_msm_offset *offset; |
2151 |
++ bool use_cdr; |
2152 |
++ u32 transfer_mode; |
2153 |
+ }; |
2154 |
+ |
2155 |
+ static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host) |
2156 |
+@@ -1025,6 +1027,26 @@ out: |
2157 |
+ return ret; |
2158 |
+ } |
2159 |
+ |
2160 |
++static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) |
2161 |
++{ |
2162 |
++ const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host); |
2163 |
++ u32 config, oldconfig = readl_relaxed(host->ioaddr + |
2164 |
++ msm_offset->core_dll_config); |
2165 |
++ |
2166 |
++ config = oldconfig; |
2167 |
++ if (enable) { |
2168 |
++ config |= CORE_CDR_EN; |
2169 |
++ config &= ~CORE_CDR_EXT_EN; |
2170 |
++ } else { |
2171 |
++ config &= ~CORE_CDR_EN; |
2172 |
++ config |= CORE_CDR_EXT_EN; |
2173 |
++ } |
2174 |
++ |
2175 |
++ if (config != oldconfig) |
2176 |
++ writel_relaxed(config, host->ioaddr + |
2177 |
++ msm_offset->core_dll_config); |
2178 |
++} |
2179 |
++ |
2180 |
+ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) |
2181 |
+ { |
2182 |
+ struct sdhci_host *host = mmc_priv(mmc); |
2183 |
+@@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) |
2184 |
+ if (host->clock <= CORE_FREQ_100MHZ || |
2185 |
+ !(ios.timing == MMC_TIMING_MMC_HS400 || |
2186 |
+ ios.timing == MMC_TIMING_MMC_HS200 || |
2187 |
+- ios.timing == MMC_TIMING_UHS_SDR104)) |
2188 |
++ ios.timing == MMC_TIMING_UHS_SDR104)) { |
2189 |
++ msm_host->use_cdr = false; |
2190 |
++ sdhci_msm_set_cdr(host, false); |
2191 |
+ return 0; |
2192 |
++ } |
2193 |
++ |
2194 |
++ /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ |
2195 |
++ msm_host->use_cdr = true; |
2196 |
+ |
2197 |
+ /* |
2198 |
+ * For HS400 tuning in HS200 timing requires: |
2199 |
+@@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) |
2200 |
+ case SDHCI_POWER_CONTROL: |
2201 |
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; |
2202 |
+ break; |
2203 |
++ case SDHCI_TRANSFER_MODE: |
2204 |
++ msm_host->transfer_mode = val; |
2205 |
++ break; |
2206 |
++ case SDHCI_COMMAND: |
2207 |
++ if (!msm_host->use_cdr) |
2208 |
++ break; |
2209 |
++ if ((msm_host->transfer_mode & SDHCI_TRNS_READ) && |
2210 |
++ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 && |
2211 |
++ SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK) |
2212 |
++ sdhci_msm_set_cdr(host, true); |
2213 |
++ else |
2214 |
++ sdhci_msm_set_cdr(host, false); |
2215 |
++ break; |
2216 |
+ } |
2217 |
+ |
2218 |
+ if (req_type) { |
2219 |
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
2220 |
+index 333387f1f1fe..62659abf73cd 100644 |
2221 |
+--- a/drivers/net/bonding/bond_main.c |
2222 |
++++ b/drivers/net/bonding/bond_main.c |
2223 |
+@@ -1948,6 +1948,9 @@ static int __bond_release_one(struct net_device *bond_dev, |
2224 |
+ if (!bond_has_slaves(bond)) { |
2225 |
+ bond_set_carrier(bond); |
2226 |
+ eth_hw_addr_random(bond_dev); |
2227 |
++ bond->nest_level = SINGLE_DEPTH_NESTING; |
2228 |
++ } else { |
2229 |
++ bond->nest_level = dev_get_nest_level(bond_dev) + 1; |
2230 |
+ } |
2231 |
+ |
2232 |
+ unblock_netpoll_tx(); |
2233 |
+diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c |
2234 |
+index b4b839a1d095..ad41ec63cc9f 100644 |
2235 |
+--- a/drivers/net/dsa/realtek-smi.c |
2236 |
++++ b/drivers/net/dsa/realtek-smi.c |
2237 |
+@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) |
2238 |
+ struct device_node *mdio_np; |
2239 |
+ int ret; |
2240 |
+ |
2241 |
+- mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, |
2242 |
+- "realtek,smi-mdio"); |
2243 |
++ mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio"); |
2244 |
+ if (!mdio_np) { |
2245 |
+ dev_err(smi->dev, "no MDIO bus node\n"); |
2246 |
+ return -ENODEV; |
2247 |
+ } |
2248 |
+ |
2249 |
+ smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); |
2250 |
+- if (!smi->slave_mii_bus) |
2251 |
+- return -ENOMEM; |
2252 |
++ if (!smi->slave_mii_bus) { |
2253 |
++ ret = -ENOMEM; |
2254 |
++ goto err_put_node; |
2255 |
++ } |
2256 |
+ smi->slave_mii_bus->priv = smi; |
2257 |
+ smi->slave_mii_bus->name = "SMI slave MII"; |
2258 |
+ smi->slave_mii_bus->read = realtek_smi_mdio_read; |
2259 |
+@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi) |
2260 |
+ if (ret) { |
2261 |
+ dev_err(smi->dev, "unable to register MDIO bus %s\n", |
2262 |
+ smi->slave_mii_bus->id); |
2263 |
+- of_node_put(mdio_np); |
2264 |
++ goto err_put_node; |
2265 |
+ } |
2266 |
+ |
2267 |
+ return 0; |
2268 |
++ |
2269 |
++err_put_node: |
2270 |
++ of_node_put(mdio_np); |
2271 |
++ |
2272 |
++ return ret; |
2273 |
+ } |
2274 |
+ |
2275 |
+ static int realtek_smi_probe(struct platform_device *pdev) |
2276 |
+@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev) |
2277 |
+ struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); |
2278 |
+ |
2279 |
+ dsa_unregister_switch(smi->ds); |
2280 |
++ if (smi->slave_mii_bus) |
2281 |
++ of_node_put(smi->slave_mii_bus->dev.of_node); |
2282 |
+ gpiod_set_value(smi->reset, 1); |
2283 |
+ |
2284 |
+ return 0; |
2285 |
+diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c |
2286 |
+index 20c9377e99cb..1ce8b729929f 100644 |
2287 |
+--- a/drivers/net/ethernet/microchip/lan743x_main.c |
2288 |
++++ b/drivers/net/ethernet/microchip/lan743x_main.c |
2289 |
+@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) |
2290 |
+ |
2291 |
+ memset(&ksettings, 0, sizeof(ksettings)); |
2292 |
+ phy_ethtool_get_link_ksettings(netdev, &ksettings); |
2293 |
+- local_advertisement = phy_read(phydev, MII_ADVERTISE); |
2294 |
+- if (local_advertisement < 0) |
2295 |
+- return; |
2296 |
+- |
2297 |
+- remote_advertisement = phy_read(phydev, MII_LPA); |
2298 |
+- if (remote_advertisement < 0) |
2299 |
+- return; |
2300 |
++ local_advertisement = |
2301 |
++ ethtool_adv_to_mii_adv_t(phydev->advertising); |
2302 |
++ remote_advertisement = |
2303 |
++ ethtool_adv_to_mii_adv_t(phydev->lp_advertising); |
2304 |
+ |
2305 |
+ lan743x_phy_update_flowcontrol(adapter, |
2306 |
+ ksettings.base.duplex, |
2307 |
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
2308 |
+index 209566f8097b..78ea9639b622 100644 |
2309 |
+--- a/drivers/net/ethernet/realtek/r8169.c |
2310 |
++++ b/drivers/net/ethernet/realtek/r8169.c |
2311 |
+@@ -714,6 +714,7 @@ module_param(use_dac, int, 0); |
2312 |
+ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); |
2313 |
+ module_param_named(debug, debug.msg_enable, int, 0); |
2314 |
+ MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); |
2315 |
++MODULE_SOFTDEP("pre: realtek"); |
2316 |
+ MODULE_LICENSE("GPL"); |
2317 |
+ MODULE_FIRMWARE(FIRMWARE_8168D_1); |
2318 |
+ MODULE_FIRMWARE(FIRMWARE_8168D_2); |
2319 |
+@@ -1728,11 +1729,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp) |
2320 |
+ |
2321 |
+ static bool rtl8169_update_counters(struct rtl8169_private *tp) |
2322 |
+ { |
2323 |
++ u8 val = RTL_R8(tp, ChipCmd); |
2324 |
++ |
2325 |
+ /* |
2326 |
+ * Some chips are unable to dump tally counters when the receiver |
2327 |
+- * is disabled. |
2328 |
++ * is disabled. If 0xff chip may be in a PCI power-save state. |
2329 |
+ */ |
2330 |
+- if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) |
2331 |
++ if (!(val & CmdRxEnb) || val == 0xff) |
2332 |
+ return true; |
2333 |
+ |
2334 |
+ return rtl8169_do_counters(tp, CounterDump); |
2335 |
+diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c |
2336 |
+index f7ebdcff53e4..4d66e4bb904a 100644 |
2337 |
+--- a/drivers/net/phy/bcm87xx.c |
2338 |
++++ b/drivers/net/phy/bcm87xx.c |
2339 |
+@@ -193,6 +193,7 @@ static struct phy_driver bcm87xx_driver[] = { |
2340 |
+ .phy_id = PHY_ID_BCM8706, |
2341 |
+ .phy_id_mask = 0xffffffff, |
2342 |
+ .name = "Broadcom BCM8706", |
2343 |
++ .features = PHY_10GBIT_FEC_FEATURES, |
2344 |
+ .flags = PHY_HAS_INTERRUPT, |
2345 |
+ .config_init = bcm87xx_config_init, |
2346 |
+ .config_aneg = bcm87xx_config_aneg, |
2347 |
+@@ -205,6 +206,7 @@ static struct phy_driver bcm87xx_driver[] = { |
2348 |
+ .phy_id = PHY_ID_BCM8727, |
2349 |
+ .phy_id_mask = 0xffffffff, |
2350 |
+ .name = "Broadcom BCM8727", |
2351 |
++ .features = PHY_10GBIT_FEC_FEATURES, |
2352 |
+ .flags = PHY_HAS_INTERRUPT, |
2353 |
+ .config_init = bcm87xx_config_init, |
2354 |
+ .config_aneg = bcm87xx_config_aneg, |
2355 |
+diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c |
2356 |
+index 8022cd317f62..1a4d04afb7f0 100644 |
2357 |
+--- a/drivers/net/phy/cortina.c |
2358 |
++++ b/drivers/net/phy/cortina.c |
2359 |
+@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = { |
2360 |
+ .phy_id = PHY_ID_CS4340, |
2361 |
+ .phy_id_mask = 0xffffffff, |
2362 |
+ .name = "Cortina CS4340", |
2363 |
++ .features = PHY_10GBIT_FEATURES, |
2364 |
+ .config_init = gen10g_config_init, |
2365 |
+ .config_aneg = gen10g_config_aneg, |
2366 |
+ .read_status = cortina_read_status, |
2367 |
+diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c |
2368 |
+index ddc2c5ea3787..6ace118502b9 100644 |
2369 |
+--- a/drivers/net/phy/meson-gxl.c |
2370 |
++++ b/drivers/net/phy/meson-gxl.c |
2371 |
+@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = { |
2372 |
+ .name = "Meson GXL Internal PHY", |
2373 |
+ .features = PHY_BASIC_FEATURES, |
2374 |
+ .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT, |
2375 |
++ .soft_reset = genphy_soft_reset, |
2376 |
+ .config_init = meson_gxl_config_init, |
2377 |
+ .aneg_done = genphy_aneg_done, |
2378 |
+ .read_status = meson_gxl_read_status, |
2379 |
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c |
2380 |
+index 9265dea79412..51611c7a23d1 100644 |
2381 |
+--- a/drivers/net/phy/micrel.c |
2382 |
++++ b/drivers/net/phy/micrel.c |
2383 |
+@@ -1105,6 +1105,7 @@ static struct phy_driver ksphy_driver[] = { |
2384 |
+ .phy_id = PHY_ID_KSZ8873MLL, |
2385 |
+ .phy_id_mask = MICREL_PHY_ID_MASK, |
2386 |
+ .name = "Micrel KSZ8873MLL Switch", |
2387 |
++ .features = PHY_BASIC_FEATURES, |
2388 |
+ .config_init = kszphy_config_init, |
2389 |
+ .config_aneg = ksz8873mll_config_aneg, |
2390 |
+ .read_status = ksz8873mll_read_status, |
2391 |
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c |
2392 |
+index 26c41ede54a4..fd051ae787cb 100644 |
2393 |
+--- a/drivers/net/phy/phy_device.c |
2394 |
++++ b/drivers/net/phy/phy_device.c |
2395 |
+@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features); |
2396 |
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
2397 |
+ EXPORT_SYMBOL_GPL(phy_10gbit_features); |
2398 |
+ |
2399 |
++__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; |
2400 |
++EXPORT_SYMBOL_GPL(phy_10gbit_fec_features); |
2401 |
++ |
2402 |
+ static const int phy_basic_ports_array[] = { |
2403 |
+ ETHTOOL_LINK_MODE_Autoneg_BIT, |
2404 |
+ ETHTOOL_LINK_MODE_TP_BIT, |
2405 |
+@@ -102,6 +105,11 @@ static const int phy_10gbit_features_array[] = { |
2406 |
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT, |
2407 |
+ }; |
2408 |
+ |
2409 |
++const int phy_10gbit_fec_features_array[1] = { |
2410 |
++ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, |
2411 |
++}; |
2412 |
++EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); |
2413 |
++ |
2414 |
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
2415 |
+ EXPORT_SYMBOL_GPL(phy_10gbit_full_features); |
2416 |
+ |
2417 |
+@@ -184,6 +192,10 @@ static void features_init(void) |
2418 |
+ linkmode_set_bit_array(phy_10gbit_full_features_array, |
2419 |
+ ARRAY_SIZE(phy_10gbit_full_features_array), |
2420 |
+ phy_10gbit_full_features); |
2421 |
++ /* 10G FEC only */ |
2422 |
++ linkmode_set_bit_array(phy_10gbit_fec_features_array, |
2423 |
++ ARRAY_SIZE(phy_10gbit_fec_features_array), |
2424 |
++ phy_10gbit_fec_features); |
2425 |
+ } |
2426 |
+ |
2427 |
+ void phy_device_free(struct phy_device *phydev) |
2428 |
+diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c |
2429 |
+index 22f3bdd8206c..91247182bc52 100644 |
2430 |
+--- a/drivers/net/phy/teranetics.c |
2431 |
++++ b/drivers/net/phy/teranetics.c |
2432 |
+@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = { |
2433 |
+ .phy_id = PHY_ID_TN2020, |
2434 |
+ .phy_id_mask = 0xffffffff, |
2435 |
+ .name = "Teranetics TN2020", |
2436 |
++ .features = PHY_10GBIT_FEATURES, |
2437 |
+ .soft_reset = gen10g_no_soft_reset, |
2438 |
+ .aneg_done = teranetics_aneg_done, |
2439 |
+ .config_init = gen10g_config_init, |
2440 |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
2441 |
+index 005020042be9..6658658246d2 100644 |
2442 |
+--- a/drivers/net/tun.c |
2443 |
++++ b/drivers/net/tun.c |
2444 |
+@@ -852,10 +852,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, |
2445 |
+ err = 0; |
2446 |
+ } |
2447 |
+ |
2448 |
+- rcu_assign_pointer(tfile->tun, tun); |
2449 |
+- rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
2450 |
+- tun->numqueues++; |
2451 |
+- |
2452 |
+ if (tfile->detached) { |
2453 |
+ tun_enable_queue(tfile); |
2454 |
+ } else { |
2455 |
+@@ -872,6 +868,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, |
2456 |
+ * refcnt. |
2457 |
+ */ |
2458 |
+ |
2459 |
++ /* Publish tfile->tun and tun->tfiles only after we've fully |
2460 |
++ * initialized tfile; otherwise we risk using half-initialized |
2461 |
++ * object. |
2462 |
++ */ |
2463 |
++ rcu_assign_pointer(tfile->tun, tun); |
2464 |
++ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
2465 |
++ tun->numqueues++; |
2466 |
+ out: |
2467 |
+ return err; |
2468 |
+ } |
2469 |
+diff --git a/drivers/of/property.c b/drivers/of/property.c |
2470 |
+index f46828e3b082..43720c2de138 100644 |
2471 |
+--- a/drivers/of/property.c |
2472 |
++++ b/drivers/of/property.c |
2473 |
+@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, |
2474 |
+ |
2475 |
+ if (!of_device_is_available(remote)) { |
2476 |
+ pr_debug("not available for remote node\n"); |
2477 |
++ of_node_put(remote); |
2478 |
+ return NULL; |
2479 |
+ } |
2480 |
+ |
2481 |
+diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c |
2482 |
+index de21f620b882..21b22a150930 100644 |
2483 |
+--- a/drivers/remoteproc/remoteproc_virtio.c |
2484 |
++++ b/drivers/remoteproc/remoteproc_virtio.c |
2485 |
+@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs, |
2486 |
+ const bool * ctx, |
2487 |
+ struct irq_affinity *desc) |
2488 |
+ { |
2489 |
+- int i, ret; |
2490 |
++ int i, ret, queue_idx = 0; |
2491 |
+ |
2492 |
+ for (i = 0; i < nvqs; ++i) { |
2493 |
+- vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], |
2494 |
++ if (!names[i]) { |
2495 |
++ vqs[i] = NULL; |
2496 |
++ continue; |
2497 |
++ } |
2498 |
++ |
2499 |
++ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i], |
2500 |
+ ctx ? ctx[i] : false); |
2501 |
+ if (IS_ERR(vqs[i])) { |
2502 |
+ ret = PTR_ERR(vqs[i]); |
2503 |
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c |
2504 |
+index c9c57b4a0b71..4e1bdd03d2aa 100644 |
2505 |
+--- a/drivers/s390/virtio/virtio_ccw.c |
2506 |
++++ b/drivers/s390/virtio/virtio_ccw.c |
2507 |
+@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
2508 |
+ { |
2509 |
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev); |
2510 |
+ unsigned long *indicatorp = NULL; |
2511 |
+- int ret, i; |
2512 |
++ int ret, i, queue_idx = 0; |
2513 |
+ struct ccw1 *ccw; |
2514 |
+ |
2515 |
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); |
2516 |
+@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
2517 |
+ return -ENOMEM; |
2518 |
+ |
2519 |
+ for (i = 0; i < nvqs; ++i) { |
2520 |
+- vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], |
2521 |
+- ctx ? ctx[i] : false, ccw); |
2522 |
++ if (!names[i]) { |
2523 |
++ vqs[i] = NULL; |
2524 |
++ continue; |
2525 |
++ } |
2526 |
++ |
2527 |
++ vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i], |
2528 |
++ names[i], ctx ? ctx[i] : false, |
2529 |
++ ccw); |
2530 |
+ if (IS_ERR(vqs[i])) { |
2531 |
+ ret = PTR_ERR(vqs[i]); |
2532 |
+ vqs[i] = NULL; |
2533 |
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c |
2534 |
+index a2b4179bfdf7..7639df91b110 100644 |
2535 |
+--- a/drivers/scsi/scsi_pm.c |
2536 |
++++ b/drivers/scsi/scsi_pm.c |
2537 |
+@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev, |
2538 |
+ |
2539 |
+ if (err == 0) { |
2540 |
+ pm_runtime_disable(dev); |
2541 |
+- pm_runtime_set_active(dev); |
2542 |
++ err = pm_runtime_set_active(dev); |
2543 |
+ pm_runtime_enable(dev); |
2544 |
++ |
2545 |
++ /* |
2546 |
++ * Forcibly set runtime PM status of request queue to "active" |
2547 |
++ * to make sure we can again get requests from the queue |
2548 |
++ * (see also blk_pm_peek_request()). |
2549 |
++ * |
2550 |
++ * The resume hook will correct runtime PM status of the disk. |
2551 |
++ */ |
2552 |
++ if (!err && scsi_is_sdev_device(dev)) { |
2553 |
++ struct scsi_device *sdev = to_scsi_device(dev); |
2554 |
++ |
2555 |
++ if (sdev->request_queue->dev) |
2556 |
++ blk_set_runtime_active(sdev->request_queue); |
2557 |
++ } |
2558 |
+ } |
2559 |
+ |
2560 |
+ return err; |
2561 |
+@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev, |
2562 |
+ else |
2563 |
+ fn = NULL; |
2564 |
+ |
2565 |
+- /* |
2566 |
+- * Forcibly set runtime PM status of request queue to "active" to |
2567 |
+- * make sure we can again get requests from the queue (see also |
2568 |
+- * blk_pm_peek_request()). |
2569 |
+- * |
2570 |
+- * The resume hook will correct runtime PM status of the disk. |
2571 |
+- */ |
2572 |
+- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev)) |
2573 |
+- blk_set_runtime_active(to_scsi_device(dev)->request_queue); |
2574 |
+- |
2575 |
+ if (fn) { |
2576 |
+ async_schedule_domain(fn, dev, &scsi_sd_pm_domain); |
2577 |
+ |
2578 |
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
2579 |
+index bd0a5c694a97..ba4b8b3ce8cf 100644 |
2580 |
+--- a/drivers/scsi/sd.c |
2581 |
++++ b/drivers/scsi/sd.c |
2582 |
+@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr, |
2583 |
+ sp = buffer_data[0] & 0x80 ? 1 : 0; |
2584 |
+ buffer_data[0] &= ~0x80; |
2585 |
+ |
2586 |
++ /* |
2587 |
++ * Ensure WP, DPOFUA, and RESERVED fields are cleared in |
2588 |
++ * received mode parameter buffer before doing MODE SELECT. |
2589 |
++ */ |
2590 |
++ data.device_specific = 0; |
2591 |
++ |
2592 |
+ if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, |
2593 |
+ SD_MAX_RETRIES, &data, &sshdr)) { |
2594 |
+ if (scsi_sense_valid(&sshdr)) |
2595 |
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c |
2596 |
+index 687250ec8032..23c6fd238422 100644 |
2597 |
+--- a/drivers/tty/tty_io.c |
2598 |
++++ b/drivers/tty/tty_io.c |
2599 |
+@@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * |
2600 |
+ static int tty_reopen(struct tty_struct *tty) |
2601 |
+ { |
2602 |
+ struct tty_driver *driver = tty->driver; |
2603 |
+- int retval; |
2604 |
++ struct tty_ldisc *ld; |
2605 |
++ int retval = 0; |
2606 |
+ |
2607 |
+ if (driver->type == TTY_DRIVER_TYPE_PTY && |
2608 |
+ driver->subtype == PTY_TYPE_MASTER) |
2609 |
+@@ -1268,14 +1269,21 @@ static int tty_reopen(struct tty_struct *tty) |
2610 |
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) |
2611 |
+ return -EBUSY; |
2612 |
+ |
2613 |
+- tty->count++; |
2614 |
++ ld = tty_ldisc_ref_wait(tty); |
2615 |
++ if (ld) { |
2616 |
++ tty_ldisc_deref(ld); |
2617 |
++ } else { |
2618 |
++ retval = tty_ldisc_lock(tty, 5 * HZ); |
2619 |
++ if (retval) |
2620 |
++ return retval; |
2621 |
+ |
2622 |
+- if (tty->ldisc) |
2623 |
+- return 0; |
2624 |
++ if (!tty->ldisc) |
2625 |
++ retval = tty_ldisc_reinit(tty, tty->termios.c_line); |
2626 |
++ tty_ldisc_unlock(tty); |
2627 |
++ } |
2628 |
+ |
2629 |
+- retval = tty_ldisc_reinit(tty, tty->termios.c_line); |
2630 |
+- if (retval) |
2631 |
+- tty->count--; |
2632 |
++ if (retval == 0) |
2633 |
++ tty->count++; |
2634 |
+ |
2635 |
+ return retval; |
2636 |
+ } |
2637 |
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c |
2638 |
+index 0c98d88f795a..b989ca26fc78 100644 |
2639 |
+--- a/drivers/tty/tty_ldsem.c |
2640 |
++++ b/drivers/tty/tty_ldsem.c |
2641 |
+@@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout) |
2642 |
+ if (!locked) |
2643 |
+ atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count); |
2644 |
+ list_del(&waiter.list); |
2645 |
++ |
2646 |
++ /* |
2647 |
++ * In case of timeout, wake up every reader who gave the right of way |
2648 |
++ * to writer. Prevent separation readers into two groups: |
2649 |
++ * one that helds semaphore and another that sleeps. |
2650 |
++ * (in case of no contention with a writer) |
2651 |
++ */ |
2652 |
++ if (!locked && list_empty(&sem->write_wait)) |
2653 |
++ __ldsem_wake_readers(sem); |
2654 |
++ |
2655 |
+ raw_spin_unlock_irq(&sem->wait_lock); |
2656 |
+ |
2657 |
+ __set_current_state(TASK_RUNNING); |
2658 |
+diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c |
2659 |
+index 31f769d67195..057d3cdef92e 100644 |
2660 |
+--- a/drivers/video/fbdev/offb.c |
2661 |
++++ b/drivers/video/fbdev/offb.c |
2662 |
+@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index, |
2663 |
+ } |
2664 |
+ |
2665 |
+ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, |
2666 |
+- const char *name, unsigned long address) |
2667 |
++ unsigned long address) |
2668 |
+ { |
2669 |
+ struct offb_par *par = (struct offb_par *) info->par; |
2670 |
+ |
2671 |
+- if (dp && !strncmp(name, "ATY,Rage128", 11)) { |
2672 |
++ if (of_node_name_prefix(dp, "ATY,Rage128")) { |
2673 |
+ par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
2674 |
+ if (par->cmap_adr) |
2675 |
+ par->cmap_type = cmap_r128; |
2676 |
+- } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) |
2677 |
+- || !strncmp(name, "ATY,RageM3p12A", 14))) { |
2678 |
++ } else if (of_node_name_prefix(dp, "ATY,RageM3pA") || |
2679 |
++ of_node_name_prefix(dp, "ATY,RageM3p12A")) { |
2680 |
+ par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
2681 |
+ if (par->cmap_adr) |
2682 |
+ par->cmap_type = cmap_M3A; |
2683 |
+- } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { |
2684 |
++ } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) { |
2685 |
+ par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); |
2686 |
+ if (par->cmap_adr) |
2687 |
+ par->cmap_type = cmap_M3B; |
2688 |
+- } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { |
2689 |
++ } else if (of_node_name_prefix(dp, "ATY,Rage6")) { |
2690 |
+ par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); |
2691 |
+ if (par->cmap_adr) |
2692 |
+ par->cmap_type = cmap_radeon; |
2693 |
+- } else if (!strncmp(name, "ATY,", 4)) { |
2694 |
++ } else if (of_node_name_prefix(dp, "ATY,")) { |
2695 |
+ unsigned long base = address & 0xff000000UL; |
2696 |
+ par->cmap_adr = |
2697 |
+ ioremap(base + 0x7ff000, 0x1000) + 0xcc0; |
2698 |
+@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp |
2699 |
+ par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); |
2700 |
+ if (par->cmap_adr) |
2701 |
+ par->cmap_type = cmap_gxt2000; |
2702 |
+- } else if (dp && !strncmp(name, "vga,Display-", 12)) { |
2703 |
++ } else if (of_node_name_prefix(dp, "vga,Display-")) { |
2704 |
+ /* Look for AVIVO initialized by SLOF */ |
2705 |
+ struct device_node *pciparent = of_get_parent(dp); |
2706 |
+ const u32 *vid, *did; |
2707 |
+@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name, |
2708 |
+ |
2709 |
+ par->cmap_type = cmap_unknown; |
2710 |
+ if (depth == 8) |
2711 |
+- offb_init_palette_hacks(info, dp, name, address); |
2712 |
++ offb_init_palette_hacks(info, dp, address); |
2713 |
+ else |
2714 |
+ fix->visual = FB_VISUAL_TRUECOLOR; |
2715 |
+ |
2716 |
+diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c |
2717 |
+index a3edb20ea4c3..a846d32ee653 100644 |
2718 |
+--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c |
2719 |
++++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c |
2720 |
+@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) |
2721 |
+ |
2722 |
+ int r = 0; |
2723 |
+ |
2724 |
++ memset(&p, 0, sizeof(p)); |
2725 |
++ |
2726 |
+ switch (cmd) { |
2727 |
+ case OMAPFB_SYNC_GFX: |
2728 |
+ DBG("ioctl SYNC_GFX\n"); |
2729 |
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
2730 |
+index 728ecd1eea30..fb12fe205f86 100644 |
2731 |
+--- a/drivers/virtio/virtio_balloon.c |
2732 |
++++ b/drivers/virtio/virtio_balloon.c |
2733 |
+@@ -61,6 +61,10 @@ enum virtio_balloon_vq { |
2734 |
+ VIRTIO_BALLOON_VQ_MAX |
2735 |
+ }; |
2736 |
+ |
2737 |
++enum virtio_balloon_config_read { |
2738 |
++ VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0, |
2739 |
++}; |
2740 |
++ |
2741 |
+ struct virtio_balloon { |
2742 |
+ struct virtio_device *vdev; |
2743 |
+ struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; |
2744 |
+@@ -77,14 +81,20 @@ struct virtio_balloon { |
2745 |
+ /* Prevent updating balloon when it is being canceled. */ |
2746 |
+ spinlock_t stop_update_lock; |
2747 |
+ bool stop_update; |
2748 |
++ /* Bitmap to indicate if reading the related config fields are needed */ |
2749 |
++ unsigned long config_read_bitmap; |
2750 |
+ |
2751 |
+ /* The list of allocated free pages, waiting to be given back to mm */ |
2752 |
+ struct list_head free_page_list; |
2753 |
+ spinlock_t free_page_list_lock; |
2754 |
+ /* The number of free page blocks on the above list */ |
2755 |
+ unsigned long num_free_page_blocks; |
2756 |
+- /* The cmd id received from host */ |
2757 |
+- u32 cmd_id_received; |
2758 |
++ /* |
2759 |
++ * The cmd id received from host. |
2760 |
++ * Read it via virtio_balloon_cmd_id_received to get the latest value |
2761 |
++ * sent from host. |
2762 |
++ */ |
2763 |
++ u32 cmd_id_received_cache; |
2764 |
+ /* The cmd id that is actively in use */ |
2765 |
+ __virtio32 cmd_id_active; |
2766 |
+ /* Buffer to store the stop sign */ |
2767 |
+@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb, |
2768 |
+ return num_returned; |
2769 |
+ } |
2770 |
+ |
2771 |
++static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb) |
2772 |
++{ |
2773 |
++ if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) |
2774 |
++ return; |
2775 |
++ |
2776 |
++ /* No need to queue the work if the bit was already set. */ |
2777 |
++ if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, |
2778 |
++ &vb->config_read_bitmap)) |
2779 |
++ return; |
2780 |
++ |
2781 |
++ queue_work(vb->balloon_wq, &vb->report_free_page_work); |
2782 |
++} |
2783 |
++ |
2784 |
+ static void virtballoon_changed(struct virtio_device *vdev) |
2785 |
+ { |
2786 |
+ struct virtio_balloon *vb = vdev->priv; |
2787 |
+ unsigned long flags; |
2788 |
+- s64 diff = towards_target(vb); |
2789 |
+- |
2790 |
+- if (diff) { |
2791 |
+- spin_lock_irqsave(&vb->stop_update_lock, flags); |
2792 |
+- if (!vb->stop_update) |
2793 |
+- queue_work(system_freezable_wq, |
2794 |
+- &vb->update_balloon_size_work); |
2795 |
+- spin_unlock_irqrestore(&vb->stop_update_lock, flags); |
2796 |
+- } |
2797 |
+ |
2798 |
+- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { |
2799 |
+- virtio_cread(vdev, struct virtio_balloon_config, |
2800 |
+- free_page_report_cmd_id, &vb->cmd_id_received); |
2801 |
+- if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { |
2802 |
+- /* Pass ULONG_MAX to give back all the free pages */ |
2803 |
+- return_free_pages_to_mm(vb, ULONG_MAX); |
2804 |
+- } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && |
2805 |
+- vb->cmd_id_received != |
2806 |
+- virtio32_to_cpu(vdev, vb->cmd_id_active)) { |
2807 |
+- spin_lock_irqsave(&vb->stop_update_lock, flags); |
2808 |
+- if (!vb->stop_update) { |
2809 |
+- queue_work(vb->balloon_wq, |
2810 |
+- &vb->report_free_page_work); |
2811 |
+- } |
2812 |
+- spin_unlock_irqrestore(&vb->stop_update_lock, flags); |
2813 |
+- } |
2814 |
++ spin_lock_irqsave(&vb->stop_update_lock, flags); |
2815 |
++ if (!vb->stop_update) { |
2816 |
++ queue_work(system_freezable_wq, |
2817 |
++ &vb->update_balloon_size_work); |
2818 |
++ virtio_balloon_queue_free_page_work(vb); |
2819 |
+ } |
2820 |
++ spin_unlock_irqrestore(&vb->stop_update_lock, flags); |
2821 |
+ } |
2822 |
+ |
2823 |
+ static void update_balloon_size(struct virtio_balloon *vb) |
2824 |
+@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb) |
2825 |
+ return 0; |
2826 |
+ } |
2827 |
+ |
2828 |
++static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) |
2829 |
++{ |
2830 |
++ if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, |
2831 |
++ &vb->config_read_bitmap)) |
2832 |
++ virtio_cread(vb->vdev, struct virtio_balloon_config, |
2833 |
++ free_page_report_cmd_id, |
2834 |
++ &vb->cmd_id_received_cache); |
2835 |
++ |
2836 |
++ return vb->cmd_id_received_cache; |
2837 |
++} |
2838 |
++ |
2839 |
+ static int send_cmd_id_start(struct virtio_balloon *vb) |
2840 |
+ { |
2841 |
+ struct scatterlist sg; |
2842 |
+@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb) |
2843 |
+ while (virtqueue_get_buf(vq, &unused)) |
2844 |
+ ; |
2845 |
+ |
2846 |
+- vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); |
2847 |
++ vb->cmd_id_active = virtio32_to_cpu(vb->vdev, |
2848 |
++ virtio_balloon_cmd_id_received(vb)); |
2849 |
+ sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); |
2850 |
+ err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); |
2851 |
+ if (!err) |
2852 |
+@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb) |
2853 |
+ * stop the reporting. |
2854 |
+ */ |
2855 |
+ cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); |
2856 |
+- if (cmd_id_active != vb->cmd_id_received) |
2857 |
++ if (unlikely(cmd_id_active != |
2858 |
++ virtio_balloon_cmd_id_received(vb))) |
2859 |
+ break; |
2860 |
+ |
2861 |
+ /* |
2862 |
+@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb) |
2863 |
+ return 0; |
2864 |
+ } |
2865 |
+ |
2866 |
+-static void report_free_page_func(struct work_struct *work) |
2867 |
++static void virtio_balloon_report_free_page(struct virtio_balloon *vb) |
2868 |
+ { |
2869 |
+ int err; |
2870 |
+- struct virtio_balloon *vb = container_of(work, struct virtio_balloon, |
2871 |
+- report_free_page_work); |
2872 |
+ struct device *dev = &vb->vdev->dev; |
2873 |
+ |
2874 |
+ /* Start by sending the received cmd id to host with an outbuf. */ |
2875 |
+@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work) |
2876 |
+ dev_err(dev, "Failed to send a stop id, err = %d\n", err); |
2877 |
+ } |
2878 |
+ |
2879 |
++static void report_free_page_func(struct work_struct *work) |
2880 |
++{ |
2881 |
++ struct virtio_balloon *vb = container_of(work, struct virtio_balloon, |
2882 |
++ report_free_page_work); |
2883 |
++ u32 cmd_id_received; |
2884 |
++ |
2885 |
++ cmd_id_received = virtio_balloon_cmd_id_received(vb); |
2886 |
++ if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { |
2887 |
++ /* Pass ULONG_MAX to give back all the free pages */ |
2888 |
++ return_free_pages_to_mm(vb, ULONG_MAX); |
2889 |
++ } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP && |
2890 |
++ cmd_id_received != |
2891 |
++ virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) { |
2892 |
++ virtio_balloon_report_free_page(vb); |
2893 |
++ } |
2894 |
++} |
2895 |
++ |
2896 |
+ #ifdef CONFIG_BALLOON_COMPACTION |
2897 |
+ /* |
2898 |
+ * virtballoon_migratepage - perform the balloon page migration on behalf of |
2899 |
+@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev) |
2900 |
+ goto out_del_vqs; |
2901 |
+ } |
2902 |
+ INIT_WORK(&vb->report_free_page_work, report_free_page_func); |
2903 |
+- vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; |
2904 |
++ vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP; |
2905 |
+ vb->cmd_id_active = cpu_to_virtio32(vb->vdev, |
2906 |
+ VIRTIO_BALLOON_CMD_ID_STOP); |
2907 |
+ vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, |
2908 |
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c |
2909 |
+index 4cd9ea5c75be..d9dd0f789279 100644 |
2910 |
+--- a/drivers/virtio/virtio_mmio.c |
2911 |
++++ b/drivers/virtio/virtio_mmio.c |
2912 |
+@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
2913 |
+ { |
2914 |
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); |
2915 |
+ unsigned int irq = platform_get_irq(vm_dev->pdev, 0); |
2916 |
+- int i, err; |
2917 |
++ int i, err, queue_idx = 0; |
2918 |
+ |
2919 |
+ err = request_irq(irq, vm_interrupt, IRQF_SHARED, |
2920 |
+ dev_name(&vdev->dev), vm_dev); |
2921 |
+@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
2922 |
+ return err; |
2923 |
+ |
2924 |
+ for (i = 0; i < nvqs; ++i) { |
2925 |
+- vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], |
2926 |
++ if (!names[i]) { |
2927 |
++ vqs[i] = NULL; |
2928 |
++ continue; |
2929 |
++ } |
2930 |
++ |
2931 |
++ vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i], |
2932 |
+ ctx ? ctx[i] : false); |
2933 |
+ if (IS_ERR(vqs[i])) { |
2934 |
+ vm_del_vqs(vdev); |
2935 |
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
2936 |
+index 93194f3e7540..117e76b2f939 100644 |
2937 |
+--- a/drivers/xen/events/events_base.c |
2938 |
++++ b/drivers/xen/events/events_base.c |
2939 |
+@@ -1650,7 +1650,7 @@ void xen_callback_vector(void) |
2940 |
+ xen_have_vector_callback = 0; |
2941 |
+ return; |
2942 |
+ } |
2943 |
+- pr_info("Xen HVM callback vector for event delivery is enabled\n"); |
2944 |
++ pr_info_once("Xen HVM callback vector for event delivery is enabled\n"); |
2945 |
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, |
2946 |
+ xen_hvm_callback_vector); |
2947 |
+ } |
2948 |
+diff --git a/fs/block_dev.c b/fs/block_dev.c |
2949 |
+index a80b4f0ee7c4..5a35ed922c95 100644 |
2950 |
+--- a/fs/block_dev.c |
2951 |
++++ b/fs/block_dev.c |
2952 |
+@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev) |
2953 |
+ } |
2954 |
+ EXPORT_SYMBOL(invalidate_bdev); |
2955 |
+ |
2956 |
++static void set_init_blocksize(struct block_device *bdev) |
2957 |
++{ |
2958 |
++ unsigned bsize = bdev_logical_block_size(bdev); |
2959 |
++ loff_t size = i_size_read(bdev->bd_inode); |
2960 |
++ |
2961 |
++ while (bsize < PAGE_SIZE) { |
2962 |
++ if (size & bsize) |
2963 |
++ break; |
2964 |
++ bsize <<= 1; |
2965 |
++ } |
2966 |
++ bdev->bd_block_size = bsize; |
2967 |
++ bdev->bd_inode->i_blkbits = blksize_bits(bsize); |
2968 |
++} |
2969 |
++ |
2970 |
+ int set_blocksize(struct block_device *bdev, int size) |
2971 |
+ { |
2972 |
+ /* Size must be a power of two, and between 512 and PAGE_SIZE */ |
2973 |
+@@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change); |
2974 |
+ |
2975 |
+ void bd_set_size(struct block_device *bdev, loff_t size) |
2976 |
+ { |
2977 |
+- unsigned bsize = bdev_logical_block_size(bdev); |
2978 |
+- |
2979 |
+ inode_lock(bdev->bd_inode); |
2980 |
+ i_size_write(bdev->bd_inode, size); |
2981 |
+ inode_unlock(bdev->bd_inode); |
2982 |
+- while (bsize < PAGE_SIZE) { |
2983 |
+- if (size & bsize) |
2984 |
+- break; |
2985 |
+- bsize <<= 1; |
2986 |
+- } |
2987 |
+- bdev->bd_block_size = bsize; |
2988 |
+- bdev->bd_inode->i_blkbits = blksize_bits(bsize); |
2989 |
+ } |
2990 |
+ EXPORT_SYMBOL(bd_set_size); |
2991 |
+ |
2992 |
+@@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) |
2993 |
+ } |
2994 |
+ } |
2995 |
+ |
2996 |
+- if (!ret) |
2997 |
++ if (!ret) { |
2998 |
+ bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); |
2999 |
++ set_init_blocksize(bdev); |
3000 |
++ } |
3001 |
+ |
3002 |
+ /* |
3003 |
+ * If the device is invalidated, rescan partition |
3004 |
+@@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) |
3005 |
+ goto out_clear; |
3006 |
+ } |
3007 |
+ bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); |
3008 |
++ set_init_blocksize(bdev); |
3009 |
+ } |
3010 |
+ |
3011 |
+ if (bdev->bd_bdi == &noop_backing_dev_info) |
3012 |
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
3013 |
+index 6d776717d8b3..f74c9e6b84ce 100644 |
3014 |
+--- a/fs/btrfs/disk-io.c |
3015 |
++++ b/fs/btrfs/disk-io.c |
3016 |
+@@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) |
3017 |
+ spin_lock(&fs_info->ordered_root_lock); |
3018 |
+ } |
3019 |
+ spin_unlock(&fs_info->ordered_root_lock); |
3020 |
++ |
3021 |
++ /* |
3022 |
++ * We need this here because if we've been flipped read-only we won't |
3023 |
++ * get sync() from the umount, so we need to make sure any ordered |
3024 |
++ * extents that haven't had their dirty pages IO start writeout yet |
3025 |
++ * actually get run and error out properly. |
3026 |
++ */ |
3027 |
++ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); |
3028 |
+ } |
3029 |
+ |
3030 |
+ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
3031 |
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
3032 |
+index 423281c19fad..02772f8823cf 100644 |
3033 |
+--- a/fs/btrfs/inode.c |
3034 |
++++ b/fs/btrfs/inode.c |
3035 |
+@@ -3147,9 +3147,6 @@ out: |
3036 |
+ /* once for the tree */ |
3037 |
+ btrfs_put_ordered_extent(ordered_extent); |
3038 |
+ |
3039 |
+- /* Try to release some metadata so we don't get an OOM but don't wait */ |
3040 |
+- btrfs_btree_balance_dirty_nodelay(fs_info); |
3041 |
+- |
3042 |
+ return ret; |
3043 |
+ } |
3044 |
+ |
3045 |
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c |
3046 |
+index e02a9039b5ea..67bdbd3da52e 100644 |
3047 |
+--- a/fs/pstore/ram.c |
3048 |
++++ b/fs/pstore/ram.c |
3049 |
+@@ -723,18 +723,15 @@ static int ramoops_probe(struct platform_device *pdev) |
3050 |
+ { |
3051 |
+ struct device *dev = &pdev->dev; |
3052 |
+ struct ramoops_platform_data *pdata = dev->platform_data; |
3053 |
++ struct ramoops_platform_data pdata_local; |
3054 |
+ struct ramoops_context *cxt = &oops_cxt; |
3055 |
+ size_t dump_mem_sz; |
3056 |
+ phys_addr_t paddr; |
3057 |
+ int err = -EINVAL; |
3058 |
+ |
3059 |
+ if (dev_of_node(dev) && !pdata) { |
3060 |
+- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
3061 |
+- if (!pdata) { |
3062 |
+- pr_err("cannot allocate platform data buffer\n"); |
3063 |
+- err = -ENOMEM; |
3064 |
+- goto fail_out; |
3065 |
+- } |
3066 |
++ pdata = &pdata_local; |
3067 |
++ memset(pdata, 0, sizeof(*pdata)); |
3068 |
+ |
3069 |
+ err = ramoops_parse_dt(pdev, pdata); |
3070 |
+ if (err < 0) |
3071 |
+diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h |
3072 |
+index 7cca5f859a90..f3c43519baa7 100644 |
3073 |
+--- a/include/linux/bcma/bcma_soc.h |
3074 |
++++ b/include/linux/bcma/bcma_soc.h |
3075 |
+@@ -6,6 +6,7 @@ |
3076 |
+ |
3077 |
+ struct bcma_soc { |
3078 |
+ struct bcma_bus bus; |
3079 |
++ struct device *dev; |
3080 |
+ }; |
3081 |
+ |
3082 |
+ int __init bcma_host_soc_register(struct bcma_soc *soc); |
3083 |
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h |
3084 |
+index 70fc838e6773..0c5ee17b4d88 100644 |
3085 |
+--- a/include/linux/genhd.h |
3086 |
++++ b/include/linux/genhd.h |
3087 |
+@@ -129,7 +129,7 @@ struct hd_struct { |
3088 |
+ struct disk_stats dkstats; |
3089 |
+ #endif |
3090 |
+ struct percpu_ref ref; |
3091 |
+- struct rcu_head rcu_head; |
3092 |
++ struct rcu_work rcu_work; |
3093 |
+ }; |
3094 |
+ |
3095 |
+ #define GENHD_FL_REMOVABLE 1 |
3096 |
+diff --git a/include/linux/phy.h b/include/linux/phy.h |
3097 |
+index 3ea87f774a76..306630d13523 100644 |
3098 |
+--- a/include/linux/phy.h |
3099 |
++++ b/include/linux/phy.h |
3100 |
+@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; |
3101 |
+ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; |
3102 |
+ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; |
3103 |
+ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
3104 |
++extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; |
3105 |
+ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
3106 |
+ |
3107 |
+ #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) |
3108 |
+@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini |
3109 |
+ #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) |
3110 |
+ #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) |
3111 |
+ #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) |
3112 |
++#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) |
3113 |
+ #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) |
3114 |
+ |
3115 |
+ /* |
3116 |
+diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h |
3117 |
+index 4b2b2baf8ab4..f32fc8289473 100644 |
3118 |
+--- a/include/net/netfilter/nf_conntrack_count.h |
3119 |
++++ b/include/net/netfilter/nf_conntrack_count.h |
3120 |
+@@ -5,17 +5,10 @@ |
3121 |
+ |
3122 |
+ struct nf_conncount_data; |
3123 |
+ |
3124 |
+-enum nf_conncount_list_add { |
3125 |
+- NF_CONNCOUNT_ADDED, /* list add was ok */ |
3126 |
+- NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */ |
3127 |
+- NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */ |
3128 |
+-}; |
3129 |
+- |
3130 |
+ struct nf_conncount_list { |
3131 |
+ spinlock_t list_lock; |
3132 |
+ struct list_head head; /* connections with the same filtering key */ |
3133 |
+ unsigned int count; /* length of list */ |
3134 |
+- bool dead; |
3135 |
+ }; |
3136 |
+ |
3137 |
+ struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, |
3138 |
+@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net, |
3139 |
+ const struct nf_conntrack_tuple *tuple, |
3140 |
+ const struct nf_conntrack_zone *zone); |
3141 |
+ |
3142 |
+-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, |
3143 |
+- const struct nf_conntrack_tuple *tuple, |
3144 |
+- const struct nf_conntrack_zone *zone, |
3145 |
+- bool *addit); |
3146 |
++int nf_conncount_add(struct net *net, struct nf_conncount_list *list, |
3147 |
++ const struct nf_conntrack_tuple *tuple, |
3148 |
++ const struct nf_conntrack_zone *zone); |
3149 |
+ |
3150 |
+ void nf_conncount_list_init(struct nf_conncount_list *list); |
3151 |
+ |
3152 |
+-enum nf_conncount_list_add |
3153 |
+-nf_conncount_add(struct nf_conncount_list *list, |
3154 |
+- const struct nf_conntrack_tuple *tuple, |
3155 |
+- const struct nf_conntrack_zone *zone); |
3156 |
+- |
3157 |
+ bool nf_conncount_gc_list(struct net *net, |
3158 |
+ struct nf_conncount_list *list); |
3159 |
+ |
3160 |
+diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h |
3161 |
+index f6052e70bf40..a55cb8b10165 100644 |
3162 |
+--- a/include/uapi/linux/in.h |
3163 |
++++ b/include/uapi/linux/in.h |
3164 |
+@@ -268,7 +268,7 @@ struct sockaddr_in { |
3165 |
+ #define IN_MULTICAST(a) IN_CLASSD(a) |
3166 |
+ #define IN_MULTICAST_NET 0xe0000000 |
3167 |
+ |
3168 |
+-#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) |
3169 |
++#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) |
3170 |
+ #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) |
3171 |
+ |
3172 |
+ #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) |
3173 |
+diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h |
3174 |
+index d13fd490b66d..6e73f0274e41 100644 |
3175 |
+--- a/include/uapi/rdma/vmw_pvrdma-abi.h |
3176 |
++++ b/include/uapi/rdma/vmw_pvrdma-abi.h |
3177 |
+@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode { |
3178 |
+ PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, |
3179 |
+ PVRDMA_WR_BIND_MW, |
3180 |
+ PVRDMA_WR_REG_SIG_MR, |
3181 |
++ PVRDMA_WR_ERROR, |
3182 |
+ }; |
3183 |
+ |
3184 |
+ enum pvrdma_wc_status { |
3185 |
+diff --git a/init/Kconfig b/init/Kconfig |
3186 |
+index ed9352513c32..b902f9c89800 100644 |
3187 |
+--- a/init/Kconfig |
3188 |
++++ b/init/Kconfig |
3189 |
+@@ -1130,6 +1130,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION |
3190 |
+ bool "Dead code and data elimination (EXPERIMENTAL)" |
3191 |
+ depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION |
3192 |
+ depends on EXPERT |
3193 |
++ depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800) |
3194 |
+ depends on $(cc-option,-ffunction-sections -fdata-sections) |
3195 |
+ depends on $(ld-option,--gc-sections) |
3196 |
+ help |
3197 |
+diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c |
3198 |
+index 14436f4ca6bd..30e0f9770f88 100644 |
3199 |
+--- a/lib/int_sqrt.c |
3200 |
++++ b/lib/int_sqrt.c |
3201 |
+@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x) |
3202 |
+ if (x <= ULONG_MAX) |
3203 |
+ return int_sqrt((unsigned long) x); |
3204 |
+ |
3205 |
+- m = 1ULL << (fls64(x) & ~1ULL); |
3206 |
++ m = 1ULL << ((fls64(x) - 1) & ~1ULL); |
3207 |
+ while (m != 0) { |
3208 |
+ b = y + m; |
3209 |
+ y >>= 1; |
3210 |
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c |
3211 |
+index b1b5e8516724..ed683e5b73ba 100644 |
3212 |
+--- a/net/bridge/br_netfilter_hooks.c |
3213 |
++++ b/net/bridge/br_netfilter_hooks.c |
3214 |
+@@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_ |
3215 |
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
3216 |
+ int ret; |
3217 |
+ |
3218 |
+- if (neigh->hh.hh_len) { |
3219 |
++ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) { |
3220 |
+ neigh_hh_bridge(&neigh->hh, skb); |
3221 |
+ skb->dev = nf_bridge->physindev; |
3222 |
+ ret = br_handle_frame_finish(net, sk, skb); |
3223 |
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c |
3224 |
+index 491828713e0b..5e55cef0cec3 100644 |
3225 |
+--- a/net/bridge/netfilter/ebtables.c |
3226 |
++++ b/net/bridge/netfilter/ebtables.c |
3227 |
+@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user, |
3228 |
+ tmp.name[sizeof(tmp.name) - 1] = 0; |
3229 |
+ |
3230 |
+ countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; |
3231 |
+- newinfo = vmalloc(sizeof(*newinfo) + countersize); |
3232 |
++ newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT, |
3233 |
++ PAGE_KERNEL); |
3234 |
+ if (!newinfo) |
3235 |
+ return -ENOMEM; |
3236 |
+ |
3237 |
+ if (countersize) |
3238 |
+ memset(newinfo->counters, 0, countersize); |
3239 |
+ |
3240 |
+- newinfo->entries = vmalloc(tmp.entries_size); |
3241 |
++ newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, |
3242 |
++ PAGE_KERNEL); |
3243 |
+ if (!newinfo->entries) { |
3244 |
+ ret = -ENOMEM; |
3245 |
+ goto free_newinfo; |
3246 |
+diff --git a/net/can/gw.c b/net/can/gw.c |
3247 |
+index faa3da88a127..53859346dc9a 100644 |
3248 |
+--- a/net/can/gw.c |
3249 |
++++ b/net/can/gw.c |
3250 |
+@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) |
3251 |
+ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) |
3252 |
+ (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); |
3253 |
+ |
3254 |
+- /* check for checksum updates when the CAN frame has been modified */ |
3255 |
++ /* Has the CAN frame been modified? */ |
3256 |
+ if (modidx) { |
3257 |
+- if (gwj->mod.csumfunc.crc8) |
3258 |
++ /* get available space for the processed CAN frame type */ |
3259 |
++ int max_len = nskb->len - offsetof(struct can_frame, data); |
3260 |
++ |
3261 |
++ /* dlc may have changed, make sure it fits to the CAN frame */ |
3262 |
++ if (cf->can_dlc > max_len) |
3263 |
++ goto out_delete; |
3264 |
++ |
3265 |
++ /* check for checksum updates in classic CAN length only */ |
3266 |
++ if (gwj->mod.csumfunc.crc8) { |
3267 |
++ if (cf->can_dlc > 8) |
3268 |
++ goto out_delete; |
3269 |
++ |
3270 |
+ (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); |
3271 |
++ } |
3272 |
++ |
3273 |
++ if (gwj->mod.csumfunc.xor) { |
3274 |
++ if (cf->can_dlc > 8) |
3275 |
++ goto out_delete; |
3276 |
+ |
3277 |
+- if (gwj->mod.csumfunc.xor) |
3278 |
+ (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); |
3279 |
++ } |
3280 |
+ } |
3281 |
+ |
3282 |
+ /* clear the skb timestamp if not configured the other way */ |
3283 |
+@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) |
3284 |
+ gwj->dropped_frames++; |
3285 |
+ else |
3286 |
+ gwj->handled_frames++; |
3287 |
++ |
3288 |
++ return; |
3289 |
++ |
3290 |
++ out_delete: |
3291 |
++ /* delete frame due to misconfiguration */ |
3292 |
++ gwj->deleted_frames++; |
3293 |
++ kfree_skb(nskb); |
3294 |
++ return; |
3295 |
+ } |
3296 |
+ |
3297 |
+ static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) |
3298 |
+diff --git a/net/core/filter.c b/net/core/filter.c |
3299 |
+index 8d2c629501e2..eb0007f30142 100644 |
3300 |
+--- a/net/core/filter.c |
3301 |
++++ b/net/core/filter.c |
3302 |
+@@ -2023,18 +2023,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
3303 |
+ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
3304 |
+ u32 flags) |
3305 |
+ { |
3306 |
+- /* skb->mac_len is not set on normal egress */ |
3307 |
+- unsigned int mlen = skb->network_header - skb->mac_header; |
3308 |
++ unsigned int mlen = skb_network_offset(skb); |
3309 |
+ |
3310 |
+- __skb_pull(skb, mlen); |
3311 |
++ if (mlen) { |
3312 |
++ __skb_pull(skb, mlen); |
3313 |
+ |
3314 |
+- /* At ingress, the mac header has already been pulled once. |
3315 |
+- * At egress, skb_pospull_rcsum has to be done in case that |
3316 |
+- * the skb is originated from ingress (i.e. a forwarded skb) |
3317 |
+- * to ensure that rcsum starts at net header. |
3318 |
+- */ |
3319 |
+- if (!skb_at_tc_ingress(skb)) |
3320 |
+- skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
3321 |
++ /* At ingress, the mac header has already been pulled once. |
3322 |
++ * At egress, skb_pospull_rcsum has to be done in case that |
3323 |
++ * the skb is originated from ingress (i.e. a forwarded skb) |
3324 |
++ * to ensure that rcsum starts at net header. |
3325 |
++ */ |
3326 |
++ if (!skb_at_tc_ingress(skb)) |
3327 |
++ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); |
3328 |
++ } |
3329 |
+ skb_pop_mac_header(skb); |
3330 |
+ skb_reset_mac_len(skb); |
3331 |
+ return flags & BPF_F_INGRESS ? |
3332 |
+diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c |
3333 |
+index 3e85437f7106..a648568c5e8f 100644 |
3334 |
+--- a/net/core/lwt_bpf.c |
3335 |
++++ b/net/core/lwt_bpf.c |
3336 |
+@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, |
3337 |
+ lwt->name ? : "<unknown>"); |
3338 |
+ ret = BPF_OK; |
3339 |
+ } else { |
3340 |
++ skb_reset_mac_header(skb); |
3341 |
+ ret = skb_do_redirect(skb); |
3342 |
+ if (ret == 0) |
3343 |
+ ret = BPF_REDIRECT; |
3344 |
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
3345 |
+index fffcc130900e..82f341e84fae 100644 |
3346 |
+--- a/net/ipv4/ip_sockglue.c |
3347 |
++++ b/net/ipv4/ip_sockglue.c |
3348 |
+@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) |
3349 |
+ |
3350 |
+ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) |
3351 |
+ { |
3352 |
++ __be16 _ports[2], *ports; |
3353 |
+ struct sockaddr_in sin; |
3354 |
+- __be16 *ports; |
3355 |
+- int end; |
3356 |
+- |
3357 |
+- end = skb_transport_offset(skb) + 4; |
3358 |
+- if (end > 0 && !pskb_may_pull(skb, end)) |
3359 |
+- return; |
3360 |
+ |
3361 |
+ /* All current transport protocols have the port numbers in the |
3362 |
+ * first four bytes of the transport header and this function is |
3363 |
+ * written with this assumption in mind. |
3364 |
+ */ |
3365 |
+- ports = (__be16 *)skb_transport_header(skb); |
3366 |
++ ports = skb_header_pointer(skb, skb_transport_offset(skb), |
3367 |
++ sizeof(_ports), &_ports); |
3368 |
++ if (!ports) |
3369 |
++ return; |
3370 |
+ |
3371 |
+ sin.sin_family = AF_INET; |
3372 |
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr; |
3373 |
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c |
3374 |
+index f87dbc78b6bc..71a29e9c0620 100644 |
3375 |
+--- a/net/ipv4/tcp_timer.c |
3376 |
++++ b/net/ipv4/tcp_timer.c |
3377 |
+@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk) |
3378 |
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
3379 |
+ if (icsk->icsk_retransmits) { |
3380 |
+ dst_negative_advice(sk); |
3381 |
+- } else if (!tp->syn_data && !tp->syn_fastopen) { |
3382 |
++ } else { |
3383 |
+ sk_rethink_txhash(sk); |
3384 |
+ } |
3385 |
+ retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; |
3386 |
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
3387 |
+index 1ede7a16a0be..cb24850d2c7f 100644 |
3388 |
+--- a/net/ipv6/datagram.c |
3389 |
++++ b/net/ipv6/datagram.c |
3390 |
+@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) |
3391 |
+ skb_reset_network_header(skb); |
3392 |
+ iph = ipv6_hdr(skb); |
3393 |
+ iph->daddr = fl6->daddr; |
3394 |
++ ip6_flow_hdr(iph, 0, 0); |
3395 |
+ |
3396 |
+ serr = SKB_EXT_ERR(skb); |
3397 |
+ serr->ee.ee_errno = err; |
3398 |
+@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, |
3399 |
+ } |
3400 |
+ if (np->rxopt.bits.rxorigdstaddr) { |
3401 |
+ struct sockaddr_in6 sin6; |
3402 |
+- __be16 *ports; |
3403 |
+- int end; |
3404 |
++ __be16 _ports[2], *ports; |
3405 |
+ |
3406 |
+- end = skb_transport_offset(skb) + 4; |
3407 |
+- if (end <= 0 || pskb_may_pull(skb, end)) { |
3408 |
++ ports = skb_header_pointer(skb, skb_transport_offset(skb), |
3409 |
++ sizeof(_ports), &_ports); |
3410 |
++ if (ports) { |
3411 |
+ /* All current transport protocols have the port numbers in the |
3412 |
+ * first four bytes of the transport header and this function is |
3413 |
+ * written with this assumption in mind. |
3414 |
+ */ |
3415 |
+- ports = (__be16 *)skb_transport_header(skb); |
3416 |
+- |
3417 |
+ sin6.sin6_family = AF_INET6; |
3418 |
+ sin6.sin6_addr = ipv6_hdr(skb)->daddr; |
3419 |
+ sin6.sin6_port = ports[1]; |
3420 |
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c |
3421 |
+index c9c53ade55c3..6d14cbe443f8 100644 |
3422 |
+--- a/net/ipv6/icmp.c |
3423 |
++++ b/net/ipv6/icmp.c |
3424 |
+@@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb) |
3425 |
+ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, |
3426 |
+ const struct in6_addr *force_saddr) |
3427 |
+ { |
3428 |
+- struct net *net = dev_net(skb->dev); |
3429 |
+ struct inet6_dev *idev = NULL; |
3430 |
+ struct ipv6hdr *hdr = ipv6_hdr(skb); |
3431 |
+ struct sock *sk; |
3432 |
++ struct net *net; |
3433 |
+ struct ipv6_pinfo *np; |
3434 |
+ const struct in6_addr *saddr = NULL; |
3435 |
+ struct dst_entry *dst; |
3436 |
+@@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, |
3437 |
+ int iif = 0; |
3438 |
+ int addr_type = 0; |
3439 |
+ int len; |
3440 |
+- u32 mark = IP6_REPLY_MARK(net, skb->mark); |
3441 |
++ u32 mark; |
3442 |
+ |
3443 |
+ if ((u8 *)hdr < skb->head || |
3444 |
+ (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) |
3445 |
+ return; |
3446 |
+ |
3447 |
++ if (!skb->dev) |
3448 |
++ return; |
3449 |
++ net = dev_net(skb->dev); |
3450 |
++ mark = IP6_REPLY_MARK(net, skb->mark); |
3451 |
+ /* |
3452 |
+ * Make sure we respect the rules |
3453 |
+ * i.e. RFC 1885 2.4(e) |
3454 |
+diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c |
3455 |
+index 9cd180bda092..7554c56b2e63 100644 |
3456 |
+--- a/net/netfilter/nf_conncount.c |
3457 |
++++ b/net/netfilter/nf_conncount.c |
3458 |
+@@ -33,12 +33,6 @@ |
3459 |
+ |
3460 |
+ #define CONNCOUNT_SLOTS 256U |
3461 |
+ |
3462 |
+-#ifdef CONFIG_LOCKDEP |
3463 |
+-#define CONNCOUNT_LOCK_SLOTS 8U |
3464 |
+-#else |
3465 |
+-#define CONNCOUNT_LOCK_SLOTS 256U |
3466 |
+-#endif |
3467 |
+- |
3468 |
+ #define CONNCOUNT_GC_MAX_NODES 8 |
3469 |
+ #define MAX_KEYLEN 5 |
3470 |
+ |
3471 |
+@@ -49,8 +43,6 @@ struct nf_conncount_tuple { |
3472 |
+ struct nf_conntrack_zone zone; |
3473 |
+ int cpu; |
3474 |
+ u32 jiffies32; |
3475 |
+- bool dead; |
3476 |
+- struct rcu_head rcu_head; |
3477 |
+ }; |
3478 |
+ |
3479 |
+ struct nf_conncount_rb { |
3480 |
+@@ -60,7 +52,7 @@ struct nf_conncount_rb { |
3481 |
+ struct rcu_head rcu_head; |
3482 |
+ }; |
3483 |
+ |
3484 |
+-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp; |
3485 |
++static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp; |
3486 |
+ |
3487 |
+ struct nf_conncount_data { |
3488 |
+ unsigned int keylen; |
3489 |
+@@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen) |
3490 |
+ return memcmp(a, b, klen * sizeof(u32)); |
3491 |
+ } |
3492 |
+ |
3493 |
+-enum nf_conncount_list_add |
3494 |
+-nf_conncount_add(struct nf_conncount_list *list, |
3495 |
+- const struct nf_conntrack_tuple *tuple, |
3496 |
+- const struct nf_conntrack_zone *zone) |
3497 |
+-{ |
3498 |
+- struct nf_conncount_tuple *conn; |
3499 |
+- |
3500 |
+- if (WARN_ON_ONCE(list->count > INT_MAX)) |
3501 |
+- return NF_CONNCOUNT_ERR; |
3502 |
+- |
3503 |
+- conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); |
3504 |
+- if (conn == NULL) |
3505 |
+- return NF_CONNCOUNT_ERR; |
3506 |
+- |
3507 |
+- conn->tuple = *tuple; |
3508 |
+- conn->zone = *zone; |
3509 |
+- conn->cpu = raw_smp_processor_id(); |
3510 |
+- conn->jiffies32 = (u32)jiffies; |
3511 |
+- conn->dead = false; |
3512 |
+- spin_lock_bh(&list->list_lock); |
3513 |
+- if (list->dead == true) { |
3514 |
+- kmem_cache_free(conncount_conn_cachep, conn); |
3515 |
+- spin_unlock_bh(&list->list_lock); |
3516 |
+- return NF_CONNCOUNT_SKIP; |
3517 |
+- } |
3518 |
+- list_add_tail(&conn->node, &list->head); |
3519 |
+- list->count++; |
3520 |
+- spin_unlock_bh(&list->list_lock); |
3521 |
+- return NF_CONNCOUNT_ADDED; |
3522 |
+-} |
3523 |
+-EXPORT_SYMBOL_GPL(nf_conncount_add); |
3524 |
+- |
3525 |
+-static void __conn_free(struct rcu_head *h) |
3526 |
+-{ |
3527 |
+- struct nf_conncount_tuple *conn; |
3528 |
+- |
3529 |
+- conn = container_of(h, struct nf_conncount_tuple, rcu_head); |
3530 |
+- kmem_cache_free(conncount_conn_cachep, conn); |
3531 |
+-} |
3532 |
+- |
3533 |
+-static bool conn_free(struct nf_conncount_list *list, |
3534 |
++static void conn_free(struct nf_conncount_list *list, |
3535 |
+ struct nf_conncount_tuple *conn) |
3536 |
+ { |
3537 |
+- bool free_entry = false; |
3538 |
+- |
3539 |
+- spin_lock_bh(&list->list_lock); |
3540 |
+- |
3541 |
+- if (conn->dead) { |
3542 |
+- spin_unlock_bh(&list->list_lock); |
3543 |
+- return free_entry; |
3544 |
+- } |
3545 |
++ lockdep_assert_held(&list->list_lock); |
3546 |
+ |
3547 |
+ list->count--; |
3548 |
+- conn->dead = true; |
3549 |
+- list_del_rcu(&conn->node); |
3550 |
+- if (list->count == 0) { |
3551 |
+- list->dead = true; |
3552 |
+- free_entry = true; |
3553 |
+- } |
3554 |
++ list_del(&conn->node); |
3555 |
+ |
3556 |
+- spin_unlock_bh(&list->list_lock); |
3557 |
+- call_rcu(&conn->rcu_head, __conn_free); |
3558 |
+- return free_entry; |
3559 |
++ kmem_cache_free(conncount_conn_cachep, conn); |
3560 |
+ } |
3561 |
+ |
3562 |
+ static const struct nf_conntrack_tuple_hash * |
3563 |
+ find_or_evict(struct net *net, struct nf_conncount_list *list, |
3564 |
+- struct nf_conncount_tuple *conn, bool *free_entry) |
3565 |
++ struct nf_conncount_tuple *conn) |
3566 |
+ { |
3567 |
+ const struct nf_conntrack_tuple_hash *found; |
3568 |
+ unsigned long a, b; |
3569 |
+ int cpu = raw_smp_processor_id(); |
3570 |
+- __s32 age; |
3571 |
++ u32 age; |
3572 |
+ |
3573 |
+ found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple); |
3574 |
+ if (found) |
3575 |
+@@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list, |
3576 |
+ */ |
3577 |
+ age = a - b; |
3578 |
+ if (conn->cpu == cpu || age >= 2) { |
3579 |
+- *free_entry = conn_free(list, conn); |
3580 |
++ conn_free(list, conn); |
3581 |
+ return ERR_PTR(-ENOENT); |
3582 |
+ } |
3583 |
+ |
3584 |
+ return ERR_PTR(-EAGAIN); |
3585 |
+ } |
3586 |
+ |
3587 |
+-void nf_conncount_lookup(struct net *net, |
3588 |
+- struct nf_conncount_list *list, |
3589 |
+- const struct nf_conntrack_tuple *tuple, |
3590 |
+- const struct nf_conntrack_zone *zone, |
3591 |
+- bool *addit) |
3592 |
++static int __nf_conncount_add(struct net *net, |
3593 |
++ struct nf_conncount_list *list, |
3594 |
++ const struct nf_conntrack_tuple *tuple, |
3595 |
++ const struct nf_conntrack_zone *zone) |
3596 |
+ { |
3597 |
+ const struct nf_conntrack_tuple_hash *found; |
3598 |
+ struct nf_conncount_tuple *conn, *conn_n; |
3599 |
+ struct nf_conn *found_ct; |
3600 |
+ unsigned int collect = 0; |
3601 |
+- bool free_entry = false; |
3602 |
+- |
3603 |
+- /* best effort only */ |
3604 |
+- *addit = tuple ? true : false; |
3605 |
+ |
3606 |
+ /* check the saved connections */ |
3607 |
+ list_for_each_entry_safe(conn, conn_n, &list->head, node) { |
3608 |
+ if (collect > CONNCOUNT_GC_MAX_NODES) |
3609 |
+ break; |
3610 |
+ |
3611 |
+- found = find_or_evict(net, list, conn, &free_entry); |
3612 |
++ found = find_or_evict(net, list, conn); |
3613 |
+ if (IS_ERR(found)) { |
3614 |
+ /* Not found, but might be about to be confirmed */ |
3615 |
+ if (PTR_ERR(found) == -EAGAIN) { |
3616 |
+- if (!tuple) |
3617 |
+- continue; |
3618 |
+- |
3619 |
+ if (nf_ct_tuple_equal(&conn->tuple, tuple) && |
3620 |
+ nf_ct_zone_id(&conn->zone, conn->zone.dir) == |
3621 |
+ nf_ct_zone_id(zone, zone->dir)) |
3622 |
+- *addit = false; |
3623 |
+- } else if (PTR_ERR(found) == -ENOENT) |
3624 |
++ return 0; /* already exists */ |
3625 |
++ } else { |
3626 |
+ collect++; |
3627 |
++ } |
3628 |
+ continue; |
3629 |
+ } |
3630 |
+ |
3631 |
+ found_ct = nf_ct_tuplehash_to_ctrack(found); |
3632 |
+ |
3633 |
+- if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) && |
3634 |
++ if (nf_ct_tuple_equal(&conn->tuple, tuple) && |
3635 |
+ nf_ct_zone_equal(found_ct, zone, zone->dir)) { |
3636 |
+ /* |
3637 |
+ * We should not see tuples twice unless someone hooks |
3638 |
+@@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net, |
3639 |
+ * |
3640 |
+ * Attempt to avoid a re-add in this case. |
3641 |
+ */ |
3642 |
+- *addit = false; |
3643 |
++ nf_ct_put(found_ct); |
3644 |
++ return 0; |
3645 |
+ } else if (already_closed(found_ct)) { |
3646 |
+ /* |
3647 |
+ * we do not care about connections which are |
3648 |
+@@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net, |
3649 |
+ |
3650 |
+ nf_ct_put(found_ct); |
3651 |
+ } |
3652 |
++ |
3653 |
++ if (WARN_ON_ONCE(list->count > INT_MAX)) |
3654 |
++ return -EOVERFLOW; |
3655 |
++ |
3656 |
++ conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); |
3657 |
++ if (conn == NULL) |
3658 |
++ return -ENOMEM; |
3659 |
++ |
3660 |
++ conn->tuple = *tuple; |
3661 |
++ conn->zone = *zone; |
3662 |
++ conn->cpu = raw_smp_processor_id(); |
3663 |
++ conn->jiffies32 = (u32)jiffies; |
3664 |
++ list_add_tail(&conn->node, &list->head); |
3665 |
++ list->count++; |
3666 |
++ return 0; |
3667 |
+ } |
3668 |
+-EXPORT_SYMBOL_GPL(nf_conncount_lookup); |
3669 |
++ |
3670 |
++int nf_conncount_add(struct net *net, |
3671 |
++ struct nf_conncount_list *list, |
3672 |
++ const struct nf_conntrack_tuple *tuple, |
3673 |
++ const struct nf_conntrack_zone *zone) |
3674 |
++{ |
3675 |
++ int ret; |
3676 |
++ |
3677 |
++ /* check the saved connections */ |
3678 |
++ spin_lock_bh(&list->list_lock); |
3679 |
++ ret = __nf_conncount_add(net, list, tuple, zone); |
3680 |
++ spin_unlock_bh(&list->list_lock); |
3681 |
++ |
3682 |
++ return ret; |
3683 |
++} |
3684 |
++EXPORT_SYMBOL_GPL(nf_conncount_add); |
3685 |
+ |
3686 |
+ void nf_conncount_list_init(struct nf_conncount_list *list) |
3687 |
+ { |
3688 |
+ spin_lock_init(&list->list_lock); |
3689 |
+ INIT_LIST_HEAD(&list->head); |
3690 |
+ list->count = 0; |
3691 |
+- list->dead = false; |
3692 |
+ } |
3693 |
+ EXPORT_SYMBOL_GPL(nf_conncount_list_init); |
3694 |
+ |
3695 |
+-/* Return true if the list is empty */ |
3696 |
++/* Return true if the list is empty. Must be called with BH disabled. */ |
3697 |
+ bool nf_conncount_gc_list(struct net *net, |
3698 |
+ struct nf_conncount_list *list) |
3699 |
+ { |
3700 |
+@@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net, |
3701 |
+ struct nf_conncount_tuple *conn, *conn_n; |
3702 |
+ struct nf_conn *found_ct; |
3703 |
+ unsigned int collected = 0; |
3704 |
+- bool free_entry = false; |
3705 |
+ bool ret = false; |
3706 |
+ |
3707 |
++ /* don't bother if other cpu is already doing GC */ |
3708 |
++ if (!spin_trylock(&list->list_lock)) |
3709 |
++ return false; |
3710 |
++ |
3711 |
+ list_for_each_entry_safe(conn, conn_n, &list->head, node) { |
3712 |
+- found = find_or_evict(net, list, conn, &free_entry); |
3713 |
++ found = find_or_evict(net, list, conn); |
3714 |
+ if (IS_ERR(found)) { |
3715 |
+- if (PTR_ERR(found) == -ENOENT) { |
3716 |
+- if (free_entry) |
3717 |
+- return true; |
3718 |
++ if (PTR_ERR(found) == -ENOENT) |
3719 |
+ collected++; |
3720 |
+- } |
3721 |
+ continue; |
3722 |
+ } |
3723 |
+ |
3724 |
+@@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net, |
3725 |
+ * closed already -> ditch it |
3726 |
+ */ |
3727 |
+ nf_ct_put(found_ct); |
3728 |
+- if (conn_free(list, conn)) |
3729 |
+- return true; |
3730 |
++ conn_free(list, conn); |
3731 |
+ collected++; |
3732 |
+ continue; |
3733 |
+ } |
3734 |
+ |
3735 |
+ nf_ct_put(found_ct); |
3736 |
+ if (collected > CONNCOUNT_GC_MAX_NODES) |
3737 |
+- return false; |
3738 |
++ break; |
3739 |
+ } |
3740 |
+ |
3741 |
+- spin_lock_bh(&list->list_lock); |
3742 |
+- if (!list->count) { |
3743 |
+- list->dead = true; |
3744 |
++ if (!list->count) |
3745 |
+ ret = true; |
3746 |
+- } |
3747 |
+- spin_unlock_bh(&list->list_lock); |
3748 |
++ spin_unlock(&list->list_lock); |
3749 |
+ |
3750 |
+ return ret; |
3751 |
+ } |
3752 |
+@@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h) |
3753 |
+ kmem_cache_free(conncount_rb_cachep, rbconn); |
3754 |
+ } |
3755 |
+ |
3756 |
++/* caller must hold tree nf_conncount_locks[] lock */ |
3757 |
+ static void tree_nodes_free(struct rb_root *root, |
3758 |
+ struct nf_conncount_rb *gc_nodes[], |
3759 |
+ unsigned int gc_count) |
3760 |
+@@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root, |
3761 |
+ while (gc_count) { |
3762 |
+ rbconn = gc_nodes[--gc_count]; |
3763 |
+ spin_lock(&rbconn->list.list_lock); |
3764 |
+- rb_erase(&rbconn->node, root); |
3765 |
+- call_rcu(&rbconn->rcu_head, __tree_nodes_free); |
3766 |
++ if (!rbconn->list.count) { |
3767 |
++ rb_erase(&rbconn->node, root); |
3768 |
++ call_rcu(&rbconn->rcu_head, __tree_nodes_free); |
3769 |
++ } |
3770 |
+ spin_unlock(&rbconn->list.list_lock); |
3771 |
+ } |
3772 |
+ } |
3773 |
+@@ -341,20 +301,19 @@ insert_tree(struct net *net, |
3774 |
+ struct rb_root *root, |
3775 |
+ unsigned int hash, |
3776 |
+ const u32 *key, |
3777 |
+- u8 keylen, |
3778 |
+ const struct nf_conntrack_tuple *tuple, |
3779 |
+ const struct nf_conntrack_zone *zone) |
3780 |
+ { |
3781 |
+- enum nf_conncount_list_add ret; |
3782 |
+ struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; |
3783 |
+ struct rb_node **rbnode, *parent; |
3784 |
+ struct nf_conncount_rb *rbconn; |
3785 |
+ struct nf_conncount_tuple *conn; |
3786 |
+ unsigned int count = 0, gc_count = 0; |
3787 |
+- bool node_found = false; |
3788 |
+- |
3789 |
+- spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); |
3790 |
++ u8 keylen = data->keylen; |
3791 |
++ bool do_gc = true; |
3792 |
+ |
3793 |
++ spin_lock_bh(&nf_conncount_locks[hash]); |
3794 |
++restart: |
3795 |
+ parent = NULL; |
3796 |
+ rbnode = &(root->rb_node); |
3797 |
+ while (*rbnode) { |
3798 |
+@@ -368,45 +327,32 @@ insert_tree(struct net *net, |
3799 |
+ } else if (diff > 0) { |
3800 |
+ rbnode = &((*rbnode)->rb_right); |
3801 |
+ } else { |
3802 |
+- /* unlikely: other cpu added node already */ |
3803 |
+- node_found = true; |
3804 |
+- ret = nf_conncount_add(&rbconn->list, tuple, zone); |
3805 |
+- if (ret == NF_CONNCOUNT_ERR) { |
3806 |
++ int ret; |
3807 |
++ |
3808 |
++ ret = nf_conncount_add(net, &rbconn->list, tuple, zone); |
3809 |
++ if (ret) |
3810 |
+ count = 0; /* hotdrop */ |
3811 |
+- } else if (ret == NF_CONNCOUNT_ADDED) { |
3812 |
++ else |
3813 |
+ count = rbconn->list.count; |
3814 |
+- } else { |
3815 |
+- /* NF_CONNCOUNT_SKIP, rbconn is already |
3816 |
+- * reclaimed by gc, insert a new tree node |
3817 |
+- */ |
3818 |
+- node_found = false; |
3819 |
+- } |
3820 |
+- break; |
3821 |
++ tree_nodes_free(root, gc_nodes, gc_count); |
3822 |
++ goto out_unlock; |
3823 |
+ } |
3824 |
+ |
3825 |
+ if (gc_count >= ARRAY_SIZE(gc_nodes)) |
3826 |
+ continue; |
3827 |
+ |
3828 |
+- if (nf_conncount_gc_list(net, &rbconn->list)) |
3829 |
++ if (do_gc && nf_conncount_gc_list(net, &rbconn->list)) |
3830 |
+ gc_nodes[gc_count++] = rbconn; |
3831 |
+ } |
3832 |
+ |
3833 |
+ if (gc_count) { |
3834 |
+ tree_nodes_free(root, gc_nodes, gc_count); |
3835 |
+- /* tree_node_free before new allocation permits |
3836 |
+- * allocator to re-use newly free'd object. |
3837 |
+- * |
3838 |
+- * This is a rare event; in most cases we will find |
3839 |
+- * existing node to re-use. (or gc_count is 0). |
3840 |
+- */ |
3841 |
+- |
3842 |
+- if (gc_count >= ARRAY_SIZE(gc_nodes)) |
3843 |
+- schedule_gc_worker(data, hash); |
3844 |
++ schedule_gc_worker(data, hash); |
3845 |
++ gc_count = 0; |
3846 |
++ do_gc = false; |
3847 |
++ goto restart; |
3848 |
+ } |
3849 |
+ |
3850 |
+- if (node_found) |
3851 |
+- goto out_unlock; |
3852 |
+- |
3853 |
+ /* expected case: match, insert new node */ |
3854 |
+ rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); |
3855 |
+ if (rbconn == NULL) |
3856 |
+@@ -430,7 +376,7 @@ insert_tree(struct net *net, |
3857 |
+ rb_link_node_rcu(&rbconn->node, parent, rbnode); |
3858 |
+ rb_insert_color(&rbconn->node, root); |
3859 |
+ out_unlock: |
3860 |
+- spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); |
3861 |
++ spin_unlock_bh(&nf_conncount_locks[hash]); |
3862 |
+ return count; |
3863 |
+ } |
3864 |
+ |
3865 |
+@@ -441,7 +387,6 @@ count_tree(struct net *net, |
3866 |
+ const struct nf_conntrack_tuple *tuple, |
3867 |
+ const struct nf_conntrack_zone *zone) |
3868 |
+ { |
3869 |
+- enum nf_conncount_list_add ret; |
3870 |
+ struct rb_root *root; |
3871 |
+ struct rb_node *parent; |
3872 |
+ struct nf_conncount_rb *rbconn; |
3873 |
+@@ -454,7 +399,6 @@ count_tree(struct net *net, |
3874 |
+ parent = rcu_dereference_raw(root->rb_node); |
3875 |
+ while (parent) { |
3876 |
+ int diff; |
3877 |
+- bool addit; |
3878 |
+ |
3879 |
+ rbconn = rb_entry(parent, struct nf_conncount_rb, node); |
3880 |
+ |
3881 |
+@@ -464,31 +408,36 @@ count_tree(struct net *net, |
3882 |
+ } else if (diff > 0) { |
3883 |
+ parent = rcu_dereference_raw(parent->rb_right); |
3884 |
+ } else { |
3885 |
+- /* same source network -> be counted! */ |
3886 |
+- nf_conncount_lookup(net, &rbconn->list, tuple, zone, |
3887 |
+- &addit); |
3888 |
++ int ret; |
3889 |
+ |
3890 |
+- if (!addit) |
3891 |
++ if (!tuple) { |
3892 |
++ nf_conncount_gc_list(net, &rbconn->list); |
3893 |
+ return rbconn->list.count; |
3894 |
++ } |
3895 |
+ |
3896 |
+- ret = nf_conncount_add(&rbconn->list, tuple, zone); |
3897 |
+- if (ret == NF_CONNCOUNT_ERR) { |
3898 |
+- return 0; /* hotdrop */ |
3899 |
+- } else if (ret == NF_CONNCOUNT_ADDED) { |
3900 |
+- return rbconn->list.count; |
3901 |
+- } else { |
3902 |
+- /* NF_CONNCOUNT_SKIP, rbconn is already |
3903 |
+- * reclaimed by gc, insert a new tree node |
3904 |
+- */ |
3905 |
++ spin_lock_bh(&rbconn->list.list_lock); |
3906 |
++ /* Node might be about to be free'd. |
3907 |
++ * We need to defer to insert_tree() in this case. |
3908 |
++ */ |
3909 |
++ if (rbconn->list.count == 0) { |
3910 |
++ spin_unlock_bh(&rbconn->list.list_lock); |
3911 |
+ break; |
3912 |
+ } |
3913 |
++ |
3914 |
++ /* same source network -> be counted! */ |
3915 |
++ ret = __nf_conncount_add(net, &rbconn->list, tuple, zone); |
3916 |
++ spin_unlock_bh(&rbconn->list.list_lock); |
3917 |
++ if (ret) |
3918 |
++ return 0; /* hotdrop */ |
3919 |
++ else |
3920 |
++ return rbconn->list.count; |
3921 |
+ } |
3922 |
+ } |
3923 |
+ |
3924 |
+ if (!tuple) |
3925 |
+ return 0; |
3926 |
+ |
3927 |
+- return insert_tree(net, data, root, hash, key, keylen, tuple, zone); |
3928 |
++ return insert_tree(net, data, root, hash, key, tuple, zone); |
3929 |
+ } |
3930 |
+ |
3931 |
+ static void tree_gc_worker(struct work_struct *work) |
3932 |
+@@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work) |
3933 |
+ struct rb_node *node; |
3934 |
+ unsigned int tree, next_tree, gc_count = 0; |
3935 |
+ |
3936 |
+- tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS; |
3937 |
++ tree = data->gc_tree % CONNCOUNT_SLOTS; |
3938 |
+ root = &data->root[tree]; |
3939 |
+ |
3940 |
++ local_bh_disable(); |
3941 |
+ rcu_read_lock(); |
3942 |
+ for (node = rb_first(root); node != NULL; node = rb_next(node)) { |
3943 |
+ rbconn = rb_entry(node, struct nf_conncount_rb, node); |
3944 |
+ if (nf_conncount_gc_list(data->net, &rbconn->list)) |
3945 |
+- gc_nodes[gc_count++] = rbconn; |
3946 |
++ gc_count++; |
3947 |
+ } |
3948 |
+ rcu_read_unlock(); |
3949 |
++ local_bh_enable(); |
3950 |
++ |
3951 |
++ cond_resched(); |
3952 |
+ |
3953 |
+ spin_lock_bh(&nf_conncount_locks[tree]); |
3954 |
++ if (gc_count < ARRAY_SIZE(gc_nodes)) |
3955 |
++ goto next; /* do not bother */ |
3956 |
+ |
3957 |
+- if (gc_count) { |
3958 |
+- tree_nodes_free(root, gc_nodes, gc_count); |
3959 |
++ gc_count = 0; |
3960 |
++ node = rb_first(root); |
3961 |
++ while (node != NULL) { |
3962 |
++ rbconn = rb_entry(node, struct nf_conncount_rb, node); |
3963 |
++ node = rb_next(node); |
3964 |
++ |
3965 |
++ if (rbconn->list.count > 0) |
3966 |
++ continue; |
3967 |
++ |
3968 |
++ gc_nodes[gc_count++] = rbconn; |
3969 |
++ if (gc_count >= ARRAY_SIZE(gc_nodes)) { |
3970 |
++ tree_nodes_free(root, gc_nodes, gc_count); |
3971 |
++ gc_count = 0; |
3972 |
++ } |
3973 |
+ } |
3974 |
+ |
3975 |
++ tree_nodes_free(root, gc_nodes, gc_count); |
3976 |
++next: |
3977 |
+ clear_bit(tree, data->pending_trees); |
3978 |
+ |
3979 |
+ next_tree = (tree + 1) % CONNCOUNT_SLOTS; |
3980 |
+- next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS); |
3981 |
++ next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree); |
3982 |
+ |
3983 |
+ if (next_tree < CONNCOUNT_SLOTS) { |
3984 |
+ data->gc_tree = next_tree; |
3985 |
+@@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void) |
3986 |
+ { |
3987 |
+ int i; |
3988 |
+ |
3989 |
+- BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS); |
3990 |
+- BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0); |
3991 |
+- |
3992 |
+- for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i) |
3993 |
++ for (i = 0; i < CONNCOUNT_SLOTS; ++i) |
3994 |
+ spin_lock_init(&nf_conncount_locks[i]); |
3995 |
+ |
3996 |
+ conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple", |
3997 |
+diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c |
3998 |
+index b90d96ba4a12..af1497ab9464 100644 |
3999 |
+--- a/net/netfilter/nft_connlimit.c |
4000 |
++++ b/net/netfilter/nft_connlimit.c |
4001 |
+@@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, |
4002 |
+ enum ip_conntrack_info ctinfo; |
4003 |
+ const struct nf_conn *ct; |
4004 |
+ unsigned int count; |
4005 |
+- bool addit; |
4006 |
+ |
4007 |
+ tuple_ptr = &tuple; |
4008 |
+ |
4009 |
+@@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, |
4010 |
+ return; |
4011 |
+ } |
4012 |
+ |
4013 |
+- nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone, |
4014 |
+- &addit); |
4015 |
+- count = priv->list.count; |
4016 |
+- |
4017 |
+- if (!addit) |
4018 |
+- goto out; |
4019 |
+- |
4020 |
+- if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) { |
4021 |
++ if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) { |
4022 |
+ regs->verdict.code = NF_DROP; |
4023 |
+ return; |
4024 |
+ } |
4025 |
+- count++; |
4026 |
+-out: |
4027 |
++ |
4028 |
++ count = priv->list.count; |
4029 |
+ |
4030 |
+ if ((count > priv->limit) ^ priv->invert) { |
4031 |
+ regs->verdict.code = NFT_BREAK; |
4032 |
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
4033 |
+index eedacdebcd4c..d0945253f43b 100644 |
4034 |
+--- a/net/packet/af_packet.c |
4035 |
++++ b/net/packet/af_packet.c |
4036 |
+@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) |
4037 |
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
4038 |
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); |
4039 |
+ if (addr && dev && saddr->sll_halen < dev->addr_len) |
4040 |
+- goto out; |
4041 |
++ goto out_put; |
4042 |
+ } |
4043 |
+ |
4044 |
+ err = -ENXIO; |
4045 |
+@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
4046 |
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL; |
4047 |
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); |
4048 |
+ if (addr && dev && saddr->sll_halen < dev->addr_len) |
4049 |
+- goto out; |
4050 |
++ goto out_unlock; |
4051 |
+ } |
4052 |
+ |
4053 |
+ err = -ENXIO; |
4054 |
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c |
4055 |
+index 7f0539db5604..0bae07e9c9e7 100644 |
4056 |
+--- a/net/sctp/ipv6.c |
4057 |
++++ b/net/sctp/ipv6.c |
4058 |
+@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, |
4059 |
+ |
4060 |
+ switch (ev) { |
4061 |
+ case NETDEV_UP: |
4062 |
+- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); |
4063 |
++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
4064 |
+ if (addr) { |
4065 |
+ addr->a.v6.sin6_family = AF_INET6; |
4066 |
+- addr->a.v6.sin6_port = 0; |
4067 |
+- addr->a.v6.sin6_flowinfo = 0; |
4068 |
+ addr->a.v6.sin6_addr = ifa->addr; |
4069 |
+ addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; |
4070 |
+ addr->valid = 1; |
4071 |
+@@ -431,7 +429,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, |
4072 |
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
4073 |
+ if (addr) { |
4074 |
+ addr->a.v6.sin6_family = AF_INET6; |
4075 |
+- addr->a.v6.sin6_port = 0; |
4076 |
+ addr->a.v6.sin6_addr = ifp->addr; |
4077 |
+ addr->a.v6.sin6_scope_id = dev->ifindex; |
4078 |
+ addr->valid = 1; |
4079 |
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c |
4080 |
+index 9b277bd36d1a..85af878f5668 100644 |
4081 |
+--- a/net/sctp/protocol.c |
4082 |
++++ b/net/sctp/protocol.c |
4083 |
+@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, |
4084 |
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
4085 |
+ if (addr) { |
4086 |
+ addr->a.v4.sin_family = AF_INET; |
4087 |
+- addr->a.v4.sin_port = 0; |
4088 |
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
4089 |
+ addr->valid = 1; |
4090 |
+ INIT_LIST_HEAD(&addr->list); |
4091 |
+@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, |
4092 |
+ |
4093 |
+ switch (ev) { |
4094 |
+ case NETDEV_UP: |
4095 |
+- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); |
4096 |
++ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); |
4097 |
+ if (addr) { |
4098 |
+ addr->a.v4.sin_family = AF_INET; |
4099 |
+- addr->a.v4.sin_port = 0; |
4100 |
+ addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
4101 |
+ addr->valid = 1; |
4102 |
+ spin_lock_bh(&net->sctp.local_addr_lock); |
4103 |
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c |
4104 |
+index 82cb0e5634bc..5d2214183601 100644 |
4105 |
+--- a/net/smc/af_smc.c |
4106 |
++++ b/net/smc/af_smc.c |
4107 |
+@@ -146,6 +146,9 @@ static int smc_release(struct socket *sock) |
4108 |
+ sock_set_flag(sk, SOCK_DEAD); |
4109 |
+ sk->sk_shutdown |= SHUTDOWN_MASK; |
4110 |
+ } |
4111 |
++ |
4112 |
++ sk->sk_prot->unhash(sk); |
4113 |
++ |
4114 |
+ if (smc->clcsock) { |
4115 |
+ if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { |
4116 |
+ /* wake up clcsock accept */ |
4117 |
+@@ -170,7 +173,6 @@ static int smc_release(struct socket *sock) |
4118 |
+ smc_conn_free(&smc->conn); |
4119 |
+ release_sock(sk); |
4120 |
+ |
4121 |
+- sk->sk_prot->unhash(sk); |
4122 |
+ sock_put(sk); /* final sock_put */ |
4123 |
+ out: |
4124 |
+ return rc; |
4125 |
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c |
4126 |
+index c7872bc13860..08b5fa4a2852 100644 |
4127 |
+--- a/net/sunrpc/rpcb_clnt.c |
4128 |
++++ b/net/sunrpc/rpcb_clnt.c |
4129 |
+@@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task) |
4130 |
+ case RPCBVERS_3: |
4131 |
+ map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; |
4132 |
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC); |
4133 |
++ if (!map->r_addr) { |
4134 |
++ status = -ENOMEM; |
4135 |
++ dprintk("RPC: %5u %s: no memory available\n", |
4136 |
++ task->tk_pid, __func__); |
4137 |
++ goto bailout_free_args; |
4138 |
++ } |
4139 |
+ map->r_owner = ""; |
4140 |
+ break; |
4141 |
+ case RPCBVERS_2: |
4142 |
+@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task) |
4143 |
+ rpc_put_task(child); |
4144 |
+ return; |
4145 |
+ |
4146 |
++bailout_free_args: |
4147 |
++ kfree(map); |
4148 |
+ bailout_release_client: |
4149 |
+ rpc_release_client(rpcb_clnt); |
4150 |
+ bailout_nofree: |
4151 |
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
4152 |
+index f0b3700cec95..9cdbb6d6e7f5 100644 |
4153 |
+--- a/net/sunrpc/xprtsock.c |
4154 |
++++ b/net/sunrpc/xprtsock.c |
4155 |
+@@ -48,6 +48,7 @@ |
4156 |
+ #include <net/udp.h> |
4157 |
+ #include <net/tcp.h> |
4158 |
+ #include <linux/bvec.h> |
4159 |
++#include <linux/highmem.h> |
4160 |
+ #include <linux/uio.h> |
4161 |
+ |
4162 |
+ #include <trace/events/sunrpc.h> |
4163 |
+@@ -380,6 +381,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags, |
4164 |
+ return sock_recvmsg(sock, msg, flags); |
4165 |
+ } |
4166 |
+ |
4167 |
++#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE |
4168 |
++static void |
4169 |
++xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) |
4170 |
++{ |
4171 |
++ struct bvec_iter bi = { |
4172 |
++ .bi_size = count, |
4173 |
++ }; |
4174 |
++ struct bio_vec bv; |
4175 |
++ |
4176 |
++ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK); |
4177 |
++ for_each_bvec(bv, bvec, bi, bi) |
4178 |
++ flush_dcache_page(bv.bv_page); |
4179 |
++} |
4180 |
++#else |
4181 |
++static inline void |
4182 |
++xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek) |
4183 |
++{ |
4184 |
++} |
4185 |
++#endif |
4186 |
++ |
4187 |
+ static ssize_t |
4188 |
+ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, |
4189 |
+ struct xdr_buf *buf, size_t count, size_t seek, size_t *read) |
4190 |
+@@ -413,6 +434,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, |
4191 |
+ seek + buf->page_base); |
4192 |
+ if (ret <= 0) |
4193 |
+ goto sock_err; |
4194 |
++ xs_flush_bvec(buf->bvec, ret, seek + buf->page_base); |
4195 |
+ offset += ret - buf->page_base; |
4196 |
+ if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) |
4197 |
+ goto out; |
4198 |
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 6376467e78f8..0b21187d74df 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
+ return limit;
+ }
+
++static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
++{
++ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
++}
++
+ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
+ {
+ struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
+@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
+ return buf;
+ }
+
++static inline bool string_is_valid(char *s, int len)
++{
++ return memchr(s, '\0', len) ? true : false;
++}
++
+ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ struct tipc_nl_compat_msg *msg,
+ struct sk_buff *arg)
+@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_bearer_config *b;
++ int len;
+
+ b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
+
+@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ if (!string_is_valid(b->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
+ return -EMSGSIZE;
+
+@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ char *name;
+ struct nlattr *bearer;
++ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
+ return -EMSGSIZE;
+
+@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
+ struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int err;
++ int len;
+
+ if (!attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
+ return err;
+
+ name = (char *)TLV_DATA(msg->req);
++
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
+ return 0;
+
+@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ struct nlattr *prop;
+ struct nlattr *media;
+ struct tipc_link_config *lc;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
+ if (!media)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+ return -EMSGSIZE;
+
+@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ struct nlattr *prop;
+ struct nlattr *bearer;
+ struct tipc_link_config *lc;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+ if (!bearer)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+ return -EMSGSIZE;
+
+@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+ struct tipc_link_config *lc;
+ struct tipc_bearer *bearer;
+ struct tipc_media *media;
++ int len;
+
+ lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(lc->name, len))
++ return -EINVAL;
++
+ media = tipc_media_find(lc->name);
+ if (media) {
+ cmd->doit = &__tipc_nl_media_set;
+@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ char *name;
+ struct nlattr *link;
++ int len;
+
+ name = (char *)TLV_DATA(msg->req);
+
+@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+ if (!link)
+ return -EMSGSIZE;
+
++ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
++ if (!string_is_valid(name, len))
++ return -EINVAL;
++
+ if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
+ return -EMSGSIZE;
+
+@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
+ };
+
+ ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
++ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
++ return -EINVAL;
+
+ depth = ntohl(ntq->depth);
+
+@@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+- if (len && !TLV_OK(msg.req, len)) {
++ if (!len || !TLV_OK(msg.req, len)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index efb16f69bd2c..a457c0fbbef1 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+ ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
+ if (ret == -EWOULDBLOCK)
+ return -EWOULDBLOCK;
+- if (ret > 0) {
++ if (ret == sizeof(s)) {
+ read_lock_bh(&sk->sk_callback_lock);
+ ret = tipc_conn_rcv_sub(srv, con, &s);
+ read_unlock_bh(&sk->sk_callback_lock);
+diff --git a/security/security.c b/security/security.c
+index 04d173eb93f6..414a45d70c7b 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1014,6 +1014,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+
+ void security_cred_free(struct cred *cred)
+ {
++ /*
++ * There is a failure case in prepare_creds() that
++ * may result in a call here with ->security being NULL.
++ */
++ if (unlikely(cred->security == NULL))
++ return;
++
+ call_void_hook(cred_free, cred);
+ }
+
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index b63ef865ce1e..d31a52e56b9e 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
+ kfree(key);
+ if (datum) {
+ levdatum = datum;
+- ebitmap_destroy(&levdatum->level->cat);
++ if (levdatum->level)
++ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
+ kfree(datum);
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index ffda91a4a1aa..02514fe558b4 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
+ break;
+ case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
+- if (!task_is_descendant(current, child) &&
++ if (!pid_alive(child))
++ rc = -EPERM;
++ if (!rc && !task_is_descendant(current, child) &&
+ !ptracer_exception_found(current, child) &&
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;