Gentoo Archives: gentoo-commits

From: "Tom Wijsman (tomwij)" <tomwij@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] linux-patches r2500 - genpatches-2.6/trunk/3.10
Date: Fri, 30 Aug 2013 12:09:12
Message-Id: 20130830120906.11EB82004C@flycatcher.gentoo.org
Author: tomwij
Date: 2013-08-30 12:09:05 +0000 (Fri, 30 Aug 2013)
New Revision: 2500

Added:
genpatches-2.6/trunk/3.10/1009_linux-3.10.10.patch
Modified:
genpatches-2.6/trunk/3.10/0000_README
Log:
Linux patch 3.10.10.

Modified: genpatches-2.6/trunk/3.10/0000_README
===================================================================
--- genpatches-2.6/trunk/3.10/0000_README 2013-08-29 13:09:44 UTC (rev 2499)
+++ genpatches-2.6/trunk/3.10/0000_README 2013-08-30 12:09:05 UTC (rev 2500)
@@ -75,6 +75,10 @@
From: http://www.kernel.org
Desc: Linux 3.10.9

+Patch: 1009_linux-3.10.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.10.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

Added: genpatches-2.6/trunk/3.10/1009_linux-3.10.10.patch
===================================================================
--- genpatches-2.6/trunk/3.10/1009_linux-3.10.10.patch (rev 0)
+++ genpatches-2.6/trunk/3.10/1009_linux-3.10.10.patch 2013-08-30 12:09:05 UTC (rev 2500)
@@ -0,0 +1,2993 @@
+diff --git a/Makefile b/Makefile
+index 4b31d62..b119684 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 10
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = TOSSUG Baby Fish
+
+diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
+index 6179de7..2046a89 100644
+--- a/arch/arc/include/asm/ptrace.h
++++ b/arch/arc/include/asm/ptrace.h
+@@ -52,12 +52,14 @@ struct pt_regs {
+
+ /*to distinguish bet excp, syscall, irq */
+ union {
++ struct {
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ /* so that assembly code is same for LE/BE */
+ unsigned long orig_r8:16, event:16;
+ #else
+ unsigned long event:16, orig_r8:16;
+ #endif
++ };
+ long orig_r8_word;
+ };
+ };
+diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
+index 33ab304..29de098 100644
+--- a/arch/arc/include/asm/syscall.h
++++ b/arch/arc/include/asm/syscall.h
+@@ -18,7 +18,7 @@ static inline long
+ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+ {
+ if (user_mode(regs) && in_syscall(regs))
+- return regs->orig_r8;
++ return regs->r8;
+ else
+ return -1;
+ }
+@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+ static inline void
+ syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+ {
+- /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+- regs->r8 = regs->orig_r8;
++ regs->r0 = regs->orig_r0;
+ }
+
+ static inline long
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 0c6d664..6dbe359 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -498,7 +498,7 @@ tracesys_exit:
+ trap_with_param:
+
+ ; stop_pc info by gdb needs this info
+- stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
++ st orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+ mov r0, r12
+ lr r1, [efa]
+@@ -723,7 +723,7 @@ not_exception:
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+- ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
++ ld r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+ brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
+ ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
+diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
+index 99c1047..9c548c7 100644
+--- a/arch/arc/lib/strchr-700.S
++++ b/arch/arc/lib/strchr-700.S
+@@ -39,9 +39,18 @@ ARC_ENTRY strchr
+ ld.a r2,[r0,4]
+ sub r12,r6,r7
+ bic r12,r12,r6
++#ifdef __LITTLE_ENDIAN__
+ and r7,r12,r4
+ breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
+ b .Lfound_char ; Likewise this one.
++#else
++ and r12,r12,r4
++ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
++ lsr_s r12,r12,7
++ bic r2,r7,r6
++ b.d .Lfound_char_b
++ and_s r2,r2,r12
++#endif
+ ; /* We require this code address to be unaligned for speed... */
+ .Laligned:
+ ld_s r2,[r0]
+@@ -95,6 +104,7 @@ ARC_ENTRY strchr
+ lsr r7,r7,7
+
+ bic r2,r7,r6
++.Lfound_char_b:
+ norm r2,r2
+ sub_s r0,r0,4
+ asr_s r2,r2,3
+diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
+index d30e48b..28ba798 100644
+--- a/arch/arm/boot/dts/at91sam9n12ek.dts
++++ b/arch/arm/boot/dts/at91sam9n12ek.dts
+@@ -14,11 +14,11 @@
+ compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
+
+ chosen {
+- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
++ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ };
+
+ memory {
+- reg = <0x20000000 0x10000000>;
++ reg = <0x20000000 0x8000000>;
+ };
+
+ clocks {
+diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
+index 1145ac3..b5833d1f 100644
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -643,7 +643,7 @@
+ };
+
+ rtc@fffffeb0 {
+- compatible = "atmel,at91rm9200-rtc";
++ compatible = "atmel,at91sam9x5-rtc";
+ reg = <0xfffffeb0 0x40>;
+ interrupts = <1 4 7>;
+ status = "disabled";
+diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
+index dff4ddc..139e42d 100644
+--- a/arch/arm/mach-davinci/board-dm355-leopard.c
++++ b/arch/arm/mach-davinci/board-dm355-leopard.c
+@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
+ .parts = davinci_nand_partitions,
+ .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
+ .ecc_mode = NAND_ECC_HW_SYNDROME,
++ .ecc_bits = 4,
+ .bbt_options = NAND_BBT_USE_FLASH,
+ };
+
+diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
+index a33686a..fa4bfaf 100644
+--- a/arch/arm/mach-davinci/board-dm644x-evm.c
++++ b/arch/arm/mach-davinci/board-dm644x-evm.c
+@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
+ .parts = davinci_evm_nandflash_partition,
+ .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
+ .ecc_mode = NAND_ECC_HW,
++ .ecc_bits = 1,
+ .bbt_options = NAND_BBT_USE_FLASH,
+ .timing = &davinci_evm_nandflash_timing,
+ };
+diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
+index fbb8e5a..0c005e8 100644
+--- a/arch/arm/mach-davinci/board-dm646x-evm.c
++++ b/arch/arm/mach-davinci/board-dm646x-evm.c
+@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
+ .parts = davinci_nand_partitions,
+ .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
+ .ecc_mode = NAND_ECC_HW,
++ .ecc_bits = 1,
+ .options = 0,
+ };
+
+diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
+index 2bc112a..808233b 100644
+--- a/arch/arm/mach-davinci/board-neuros-osd2.c
++++ b/arch/arm/mach-davinci/board-neuros-osd2.c
+@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
+ .parts = davinci_ntosd2_nandflash_partition,
+ .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
+ .ecc_mode = NAND_ECC_HW,
++ .ecc_bits = 1,
+ .bbt_options = NAND_BBT_USE_FLASH,
+ };
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 2950082..08c9fe9 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -789,15 +789,18 @@ config KUSER_HELPERS
+ the CPU type fitted to the system. This permits binaries to be
+ run on ARMv4 through to ARMv7 without modification.
+
++ See Documentation/arm/kernel_user_helpers.txt for details.
++
+ However, the fixed address nature of these helpers can be used
+ by ROP (return orientated programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+- these helpers, then you can turn this option off. However,
+- when such an binary or library is run, it will receive a SIGILL
+- signal, which will terminate the program.
++ these helpers, then you can turn this option off to hinder
++ such exploits. However, in that case, if a binary or library
++ relying on those helpers is run, it will receive a SIGILL signal,
++ which will terminate the program.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 9ba33c4..12e6ccb 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
+ static int
+ armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ {
+- int mapping = (*event_map)[config];
++ int mapping;
++
++ if (config >= PERF_COUNT_HW_MAX)
++ return -EINVAL;
++
++ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+ }
+
+@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
++ if (is_software_event(event))
++ return 1;
++
+ if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ return 1;
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index c1c7c68..698fb82 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -622,14 +622,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ kvm_s390_deliver_pending_interrupts(vcpu);
+
+ vcpu->arch.sie_block->icptcode = 0;
+- preempt_disable();
+- kvm_guest_enter();
+- preempt_enable();
+ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+ atomic_read(&vcpu->arch.sie_block->cpuflags));
+ trace_kvm_s390_sie_enter(vcpu,
+ atomic_read(&vcpu->arch.sie_block->cpuflags));
++
++ /*
++ * As PF_VCPU will be used in fault handler, between guest_enter
++ * and guest_exit should be no uaccess.
++ */
++ preempt_disable();
++ kvm_guest_enter();
++ preempt_enable();
+ rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
++ kvm_guest_exit();
++
++ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
++ vcpu->arch.sie_block->icptcode);
++ trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
++
+ if (rc) {
+ if (kvm_is_ucontrol(vcpu->kvm)) {
+ rc = SIE_INTERCEPT_UCONTROL;
+@@ -639,10 +650,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ }
+ }
+- VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+- vcpu->arch.sie_block->icptcode);
+- trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+- kvm_guest_exit();
+
+ memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+ return rc;
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index 653668d..4a8cb8d 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+ */
+ if (boot_params->sentinel) {
+ /* fields in boot_params are left uninitialized, clear them */
+- memset(&boot_params->olpc_ofw_header, 0,
++ memset(&boot_params->ext_ramdisk_image, 0,
+ (char *)&boot_params->efi_info -
+- (char *)&boot_params->olpc_ofw_header);
++ (char *)&boot_params->ext_ramdisk_image);
+ memset(&boot_params->kbd_status, 0,
+ (char *)&boot_params->hdr -
+ (char *)&boot_params->kbd_status);
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 48f8375..30277e2 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ *begin = new_begin;
+ }
+ } else {
+- *begin = mmap_legacy_base();
++ *begin = current->mm->mmap_legacy_base;
+ *end = TASK_SIZE;
+ }
+ }
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index c1af323..5c1ae28 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
+ * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
+ * does, but not when emulating X86_32
+ */
+-unsigned long mmap_legacy_base(void)
++static unsigned long mmap_legacy_base(void)
+ {
+ if (mmap_is_ia32())
+ return TASK_UNMAPPED_BASE;
+@@ -112,12 +112,14 @@ unsigned long mmap_legacy_base(void)
+ */
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
++ mm->mmap_legacy_base = mmap_legacy_base();
++ mm->mmap_base = mmap_base();
++
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 94eac5c..0a9fb7a 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+ e820_add_region(start, end - start, type);
+ }
+
++void xen_ignore_unusable(struct e820entry *list, size_t map_size)
++{
++ struct e820entry *entry;
++ unsigned int i;
++
++ for (i = 0, entry = list; i < map_size; i++, entry++) {
++ if (entry->type == E820_UNUSABLE)
++ entry->type = E820_RAM;
++ }
++}
++
+ /**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
+ }
+ BUG_ON(rc);
+
++ /*
++ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
++ * regions, so if we're using the machine memory map leave the
++ * region as RAM as it is in the pseudo-physical map.
++ *
++ * UNUSABLE regions in domUs are not handled and will need
++ * a patch in the future.
++ */
++ if (xen_initial_domain())
++ xen_ignore_unusable(map, memmap.nr_entries);
++
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index d99cae8..a1e58e1 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -667,8 +667,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
+ static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
+ {
+ int rc;
+- rc = native_cpu_up(cpu, tidle);
+- WARN_ON (xen_smp_intr_init(cpu));
++ /*
++ * xen_smp_intr_init() needs to run before native_cpu_up()
++ * so that IPI vectors are set up on the booting CPU before
++ * it is marked online in native_cpu_up().
++ */
++ rc = xen_smp_intr_init(cpu);
++ WARN_ON(rc);
++ if (!rc)
++ rc = native_cpu_up(cpu, tidle);
+ return rc;
+ }
+
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 40a84cc..2384120 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -78,32 +78,99 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
+ return ret;
+ }
+
+-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
+- void *addr_p, void **ret_p)
++static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
++ void *not_used, void **ret_p)
+ {
+- unsigned long long addr;
+- acpi_status status;
++ struct acpi_device *adev = NULL;
+
+- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
+- if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
++ acpi_bus_get_device(handle, &adev);
++ if (adev) {
+ *ret_p = handle;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+ }
+
+-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
++static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
+ {
+- void *ret = NULL;
++ unsigned long long sta;
++ acpi_status status;
+
+- if (!parent)
+- return NULL;
++ status = acpi_bus_get_status_handle(handle, &sta);
++ if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
++ return false;
++
++ if (is_bridge) {
++ void *test = NULL;
++
++ /* Check if this object has at least one child device. */
++ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
++ acpi_dev_present, NULL, NULL, &test);
++ return !!test;
++ }
++ return true;
++}
++
++struct find_child_context {
++ u64 addr;
++ bool is_bridge;
++ acpi_handle ret;
++ bool ret_checked;
++};
++
++static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
++ void *data, void **not_used)
++{
++ struct find_child_context *context = data;
++ unsigned long long addr;
++ acpi_status status;
+
+- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
+- do_acpi_find_child, &address, &ret);
+- return (acpi_handle)ret;
++ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
++ if (ACPI_FAILURE(status) || addr != context->addr)
++ return AE_OK;
++
++ if (!context->ret) {
++ /* This is the first matching object. Save its handle. */
++ context->ret = handle;
++ return AE_OK;
++ }
++ /*
++ * There is more than one matching object with the same _ADR value.
++ * That really is unexpected, so we are kind of beyond the scope of the
++ * spec here. We have to choose which one to return, though.
++ *
++ * First, check if the previously found object is good enough and return
++ * its handle if so. Second, check the same for the object that we've
++ * just found.
++ */
++ if (!context->ret_checked) {
++ if (acpi_extra_checks_passed(context->ret, context->is_bridge))
++ return AE_CTRL_TERMINATE;
++ else
++ context->ret_checked = true;
++ }
++ if (acpi_extra_checks_passed(handle, context->is_bridge)) {
++ context->ret = handle;
++ return AE_CTRL_TERMINATE;
++ }
++ return AE_OK;
++}
++
++acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
++{
++ if (parent) {
++ struct find_child_context context = {
++ .addr = addr,
++ .is_bridge = is_bridge,
++ };
++
++ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
++ NULL, &context, NULL);
++ return context.ret;
++ }
++ return NULL;
+ }
+-EXPORT_SYMBOL(acpi_get_child);
++EXPORT_SYMBOL_GPL(acpi_find_child);
+
+ static int acpi_bind_one(struct device *dev, acpi_handle handle)
+ {
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 1c41722..20fd337 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
+
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+- * host controller, 3726 PMP will very rarely drop a deferred
++ * host controller, 3x26 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+- if (vendor == 0x1095 && devid == 0x3726) {
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to read Sil3726 Private Register";
++ reason = "failed to read Sil3x26 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to write Sil3726 Private Register";
++ reason = "failed to write Sil3x26 Private Register";
+ goto fail;
+ }
+ }
+@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+- if (vendor == 0x1095 && devid == 0x3726) {
+- /* sil3726 quirks */
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
++ /* sil3x26 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
+index d40e403..8401061 100644
+--- a/drivers/ata/sata_fsl.c
++++ b/drivers/ata/sata_fsl.c
+@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
+ {
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+ void __iomem *hcr_base = host_priv->hcr_base;
++ unsigned long flags;
+
+ if (count > ICC_MAX_INT_COUNT_THRESHOLD)
+ count = ICC_MAX_INT_COUNT_THRESHOLD;
+@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
+ (count > ICC_MIN_INT_COUNT_THRESHOLD))
+ ticks = ICC_SAFE_INT_TICKS;
+
+- spin_lock(&host->lock);
++ spin_lock_irqsave(&host->lock, flags);
+ iowrite32((count << 24 | ticks), hcr_base + ICC);
+
+ intr_coalescing_count = count;
+ intr_coalescing_ticks = ticks;
+- spin_unlock(&host->lock);
++ spin_unlock_irqrestore(&host->lock, flags);
+
+ DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
+ intr_coalescing_count, intr_coalescing_ticks);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 80b0a66..01f6c2c 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -617,6 +617,8 @@
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
++#define INSTPM_TLB_INVALIDATE (1<<9)
++#define INSTPM_SYNC_FLUSH (1<<5)
+ #define ACTHD 0x020c8
+ #define FW_BLC 0x020d8
+ #define FW_BLC2 0x020dc
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 1424f20..48fe23e 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -907,6 +907,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
++
++ /* Flush the TLB for this page */
++ if (INTEL_INFO(dev)->gen >= 6) {
++ u32 reg = RING_INSTPM(ring->mmio_base);
++ I915_WRITE(reg,
++ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
++ INSTPM_SYNC_FLUSH));
++ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
++ 1000))
++ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
++ ring->name);
++ }
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index bdd9d56..d4ff48c 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1764,7 +1764,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+ WREG32(reg, tmp_); \
+ } while (0)
+ #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
++#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+ #define WREG32_PLL_P(reg, val, mask) \
+ do { \
+ uint32_t tmp_ = RREG32_PLL(reg); \
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 97002a0..f3ccf6d 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -359,6 +359,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
+ return -EINVAL;
+ }
+
++ if (bo->tbo.sync_obj) {
++ r = radeon_fence_wait(bo->tbo.sync_obj, false);
++ if (r) {
++ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
++ return r;
++ }
++ }
++
+ r = radeon_bo_kmap(bo, &ptr);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index bcc68ec..f5e92cf 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_golden_registers,
+- (const u32)ARRAY_SIZE(rv770_golden_registers));
++ (const u32)ARRAY_SIZE(rv730_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv730_mgcg_init,
+- (const u32)ARRAY_SIZE(rv770_mgcg_init));
++ (const u32)ARRAY_SIZE(rv730_mgcg_init));
+ break;
+ case CHIP_RV710:
+ radeon_program_register_sequence(rdev,
+@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
+ (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_golden_registers,
+- (const u32)ARRAY_SIZE(rv770_golden_registers));
++ (const u32)ARRAY_SIZE(rv710_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv710_mgcg_init,
+- (const u32)ARRAY_SIZE(rv770_mgcg_init));
++ (const u32)ARRAY_SIZE(rv710_mgcg_init));
+ break;
+ case CHIP_RV740:
+ radeon_program_register_sequence(rdev,
+ rv740_golden_registers,
+- (const u32)ARRAY_SIZE(rv770_golden_registers));
++ (const u32)ARRAY_SIZE(rv740_golden_registers));
+ radeon_program_register_sequence(rdev,
+ rv740_mgcg_init,
+- (const u32)ARRAY_SIZE(rv770_mgcg_init));
++ (const u32)ARRAY_SIZE(rv740_mgcg_init));
+ break;
+ default:
+ break;
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 7b687a6..833c590 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -326,10 +326,25 @@ static void do_btree_write(struct btree *b)
+ i->csum = btree_csum_set(b, i);
+
+ btree_bio_init(b);
+- b->bio->bi_rw = REQ_META|WRITE_SYNC;
++ b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
+ b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ bch_bio_map(b->bio, i);
+
++ /*
++ * If we're appending to a leaf node, we don't technically need FUA -
++ * this write just needs to be persisted before the next journal write,
++ * which will be marked FLUSH|FUA.
++ *
++ * Similarly if we're writing a new btree root - the pointer is going to
++ * be in the next journal entry.
++ *
++ * But if we're writing a new btree node (that isn't a root) or
++ * appending to a non leaf btree node, we need either FUA or a flush
++ * when we write the parent with the new pointer. FUA is cheaper than a
++ * flush, and writes appending to leaf nodes aren't blocking anything so
++ * just make all btree node writes FUA to keep things sane.
++ */
++
+ bkey_copy(&k.key, &b->key);
+ SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
+
+@@ -2142,6 +2157,9 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
+ void bch_btree_set_root(struct btree *b)
+ {
+ unsigned i;
++ struct closure cl;
++
++ closure_init_stack(&cl);
+
+ BUG_ON(!b->written);
+
+@@ -2155,8 +2173,9 @@ void bch_btree_set_root(struct btree *b)
+ b->c->root = b;
+ __bkey_put(b->c, &b->key);
+
+- bch_journal_meta(b->c, NULL);
++ bch_journal_meta(b->c, &cl);
+ pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
++ closure_sync(&cl);
+ }
+
+ /* Cache lookup */
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index 48efd4d..d285cd4 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -97,6 +97,8 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
+
+ if (bio->bi_rw & REQ_DISCARD) {
+ ret = bio_alloc_bioset(gfp, 1, bs);
++ if (!ret)
++ return NULL;
+ idx = 0;
+ goto out;
+ }
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 8a54d3b..b49abb2 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -622,7 +622,7 @@ static void journal_write_unlocked(struct closure *cl)
+ bio_reset(bio);
+ bio->bi_sector = PTR_OFFSET(k, i);
+ bio->bi_bdev = ca->bdev;
+- bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
++ bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
+ bio->bi_size = sectors << 9;
+
+ bio->bi_end_io = journal_write_endio;
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index 2f36743..afb9a99 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -1053,9 +1053,20 @@ static void request_write(struct cached_dev *dc, struct search *s)
+ trace_bcache_writethrough(s->orig_bio);
+ closure_bio_submit(bio, cl, s->d);
+ } else {
+- s->op.cache_bio = bio;
+ trace_bcache_writeback(s->orig_bio);
+ bch_writeback_add(dc, bio_sectors(bio));
++
++ if (s->op.flush_journal) {
++ /* Also need to send a flush to the backing device */
++ s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
++ dc->disk.bio_split);
++
++ bio->bi_size = 0;
++ bio->bi_vcnt = 0;
++ closure_bio_submit(bio, cl, s->d);
++ } else {
++ s->op.cache_bio = bio;
++ }
+ }
+ out:
+ closure_call(&s->op.cl, bch_insert_data, NULL, cl);
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 822170f..700fe55 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -238,14 +238,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
+ if (mei_me_hw_is_ready(dev))
+ return 0;
+
++ dev->recvd_hw_ready = false;
+ mutex_unlock(&dev->device_lock);
+ err = wait_event_interruptible_timeout(dev->wait_hw_ready,
+- dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT);
++ dev->recvd_hw_ready,
++ mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
+ mutex_lock(&dev->device_lock);
+ if (!err && !dev->recvd_hw_ready) {
++ if (!err)
++ err = -ETIMEDOUT;
+ dev_err(&dev->pdev->dev,
+- "wait hw ready failed. status = 0x%x\n", err);
+- return -ETIMEDOUT;
++ "wait hw ready failed. status = %d\n", err);
++ return err;
+ }
+
+ dev->recvd_hw_ready = false;
+@@ -482,7 +486,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ /* check if ME wants a reset */
+ if (!mei_hw_is_ready(dev) &&
+ dev->dev_state != MEI_DEV_RESETTING &&
+- dev->dev_state != MEI_DEV_INITIALIZING) {
++ dev->dev_state != MEI_DEV_INITIALIZING &&
++ dev->dev_state != MEI_DEV_POWER_DOWN &&
++ dev->dev_state != MEI_DEV_POWER_UP) {
+ dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+ mei_reset(dev, 1);
+ mutex_unlock(&dev->device_lock);
+diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
+index f580d30..6eec689 100644
+--- a/drivers/misc/mei/init.c
++++ b/drivers/misc/mei/init.c
+@@ -143,7 +143,8 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
+
+ dev->hbm_state = MEI_HBM_IDLE;
+
+- if (dev->dev_state != MEI_DEV_INITIALIZING) {
++ if (dev->dev_state != MEI_DEV_INITIALIZING &&
++ dev->dev_state != MEI_DEV_POWER_UP) {
+ if (dev->dev_state != MEI_DEV_DISABLED &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)
+ dev->dev_state = MEI_DEV_RESETTING;
+diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
+index ac07473..e509030 100644
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+
+ data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
++ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+ data->flags = 1; /* has quality information */
+- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
++ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+ sizeof(struct iw_quality) * data->length);
+
+ kfree(addr);
+diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+index cab23af..e04f3da 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+@@ -1059,7 +1059,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
++ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
++ return;
++
++ if (ctx->vif)
+ ieee80211_chswitch_done(ctx->vif, is_success);
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
+index 50263e8..dc94d44 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
+@@ -67,16 +67,16 @@
+ #include "iwl-agn-hw.h"
+
+ /* Highest firmware API version supported */
+-#define IWL7260_UCODE_API_MAX 6
+-#define IWL3160_UCODE_API_MAX 6
++#define IWL7260_UCODE_API_MAX 7
++#define IWL3160_UCODE_API_MAX 7
+
+ /* Oldest version we won't warn about */
+-#define IWL7260_UCODE_API_OK 6
+-#define IWL3160_UCODE_API_OK 6
++#define IWL7260_UCODE_API_OK 7
++#define IWL3160_UCODE_API_OK 7
+
+ /* Lowest firmware API version supported */
+-#define IWL7260_UCODE_API_MIN 6
+-#define IWL3160_UCODE_API_MIN 6
++#define IWL7260_UCODE_API_MIN 7
++#define IWL3160_UCODE_API_MIN 7
+
+ /* NVM versions */
+ #define IWL7260_NVM_VERSION 0x0a1d
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+index 51e015d..6f8b2c1 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+@@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags {
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
++ * @wakeup_host_timer: force wakeup after this many seconds
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+ struct iwl_d3_manager_config {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+-} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
++ __le32 wakeup_host_timer;
++} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
+
+
+ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+index d68640e..98b1feb 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+@@ -71,7 +71,13 @@
+ #define MAC_INDEX_MIN_DRIVER 0
+ #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
+
+-#define AC_NUM 4 /* Number of access categories */
++enum iwl_ac {
++ AC_BK,
++ AC_BE,
++ AC_VI,
++ AC_VO,
++ AC_NUM,
++};
+
+ /**
+ * enum iwl_mac_protection_flags - MAC context flags
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+index b2cc3d9..d8e858c 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+@@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+ u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+ {
+- u32 qmask, ac;
++ u32 qmask = 0, ac;
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+- qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
+- BIT(vif->cab_queue) : 0;
+-
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ qmask |= BIT(vif->hw_queue[ac]);
+@@ -362,7 +359,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
+- IWL_MVM_TX_FIFO_VO);
++ IWL_MVM_TX_FIFO_MCAST);
+ /* fall through */
+ default:
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+@@ -550,6 +547,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+ cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
+ }
+
++ /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
++ if (vif->type == NL80211_IFTYPE_AP)
++ cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
++
+ if (vif->bss_conf.qos)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index b7e95b0..f7545e0 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -243,7 +243,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ if (ret)
+ return ret;
+
+- return ieee80211_register_hw(mvm->hw);
++ ret = ieee80211_register_hw(mvm->hw);
++ if (ret)
++ iwl_mvm_leds_exit(mvm);
++
++ return ret;
+ }
+
+ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index 9f46b23..8086231 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -88,6 +88,7 @@ enum iwl_mvm_tx_fifo {
+ IWL_MVM_TX_FIFO_BE,
+ IWL_MVM_TX_FIFO_VI,
+ IWL_MVM_TX_FIFO_VO,
++ IWL_MVM_TX_FIFO_MCAST = 5,
+ };
+
+ extern struct ieee80211_ops iwl_mvm_hw_ops;
+diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
+index 736b50b..68f0bbe 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
+@@ -226,9 +226,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
+ mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
+
+- if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
+- mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
+-
+ /* for HW restart - need to reset the seq_number etc... */
+ memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+
+@@ -1296,17 +1293,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+ struct iwl_mvm_add_sta_cmd cmd = {
+ .add_modify = STA_MODE_MODIFY,
+ .sta_id = mvmsta->sta_id,
+- .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+- .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
++ .station_flags_msk = cpu_to_le32(STA_FLG_PS),
+ .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+ };
+ int ret;
+
+- /*
+- * Same modify mask for sleep_tx_count and sleep_state_flags but this
+- * should be fine since if we set the STA as "awake", then
+- * sleep_tx_count is not relevant.
+- */
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index 48c1891..a2e6112e 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
+ * table is controlled by LINK_QUALITY commands
+ */
+
+- if (ieee80211_is_data(fc)) {
++ if (ieee80211_is_data(fc) && sta) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+@@ -610,8 +610,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+ !(info->flags & IEEE80211_TX_STAT_ACK))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+- /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
+- if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
++ /* W/A FW bug: seq_ctl is wrong when the status isn't success */
++ if (status != TX_STATUS_SUCCESS) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+ }
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 50ba0a4..aeb70e1 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -1481,16 +1481,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ spin_lock_init(&trans_pcie->reg_lock);
+ init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+- /* W/A - seems to solve weird behavior. We need to remove this if we
+- * don't want to stay in L1 all the time. This wastes a lot of power */
+- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+- PCIE_LINK_STATE_CLKPM);
+-
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_no_pci;
+ }
+
++ /* W/A - seems to solve weird behavior. We need to remove this if we
++ * don't want to stay in L1 all the time. This wastes a lot of power */
++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
++ PCIE_LINK_STATE_CLKPM);
++
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
+index 4941f20..b8ba1f9 100644
+--- a/drivers/net/wireless/zd1201.c
++++ b/drivers/net/wireless/zd1201.c
+@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
++ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
++ memcpy(&ret, buf, sizeof(ret));
++
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 808be06..1187737 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
++ memset((void *)mem, 0, size);
++
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index e4b1fb2..336b3f9 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
+ /* ACPI bus type */
+ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
+ {
+- struct pci_dev * pci_dev;
+- u64 addr;
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ bool is_bridge;
++ u64 addr;
+
+- pci_dev = to_pci_dev(dev);
++ /*
++ * pci_is_bridge() is not suitable here, because pci_dev->subordinate
++ * is set only after acpi_pci_find_device() has been called for the
++ * given device.
++ */
++ is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
++ || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
+ /* Please ref to ACPI spec for the syntax of _ADR */
+ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
+- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
++ *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
+ if (!*handle)
+ return -ENODEV;
+ return 0;
+diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
+index 0f9f859..f911952 100644
+--- a/drivers/platform/olpc/olpc-ec.c
++++ b/drivers/platform/olpc/olpc-ec.c
+@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
+ return platform_driver_register(&olpc_ec_plat_driver);
+ }
+
+-module_init(olpc_ec_init_module);
++arch_initcall(olpc_ec_init_module);
+
+ MODULE_AUTHOR("Andres Salomon <dilinger@××××××.net>");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index 4133ab6..8e8f353 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&port->erp_action);
+- else
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ else {
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
++ spin_unlock(port->adapter->scsi_host->host_lock);
++ }
+ }
+
+ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ {
+ struct scsi_device *sdev;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
++ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
+
+ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host)
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host) {
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+ {
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
++ unsigned long flags;
+
+ atomic_set_mask(mask, &port->status);
+
+ if (!common_mask)
+ return;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
++ unsigned long flags;
+
+ atomic_clear_mask(mask, &port->status);
+
+@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index 665e3cf..de0598e 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+
+ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+ {
+- spin_lock_irq(&qdio->req_q_lock);
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+- spin_unlock_irq(&qdio->req_q_lock);
+ return 0;
+ }
+
+@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ {
+ long ret;
+
+- spin_unlock_irq(&qdio->req_q_lock);
+- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+- zfcp_qdio_sbal_check(qdio), 5 * HZ);
++ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
++ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
+@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
+
+- spin_lock_irq(&qdio->req_q_lock);
+ return -EIO;
+ }
+
+diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
+index 86af29f..1348fa4 100644
+--- a/drivers/scsi/Kconfig
++++ b/drivers/scsi/Kconfig
+@@ -1353,7 +1353,6 @@ config SCSI_LPFC
+ tristate "Emulex LightPulse Fibre Channel Support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+- select GENERIC_CSUM
+ select CRC_T10DIF
+ help
+ This lpfc driver supports the Emulex LightPulse
+diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
+index 06d190f..4a2b042 100644
+--- a/drivers/staging/comedi/drivers.c
++++ b/drivers/staging/comedi/drivers.c
+@@ -464,7 +464,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+ ret = comedi_device_postconfig(dev);
+ if (ret < 0) {
+ comedi_device_detach(dev);
+- module_put(dev->driver->module);
++ module_put(driv->module);
+ }
+ /* On success, the driver module count has been incremented. */
+ return ret;
+diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
+index ca26628..e1859b8 100644
+--- a/drivers/usb/phy/phy-fsl-usb.h
++++ b/drivers/usb/phy/phy-fsl-usb.h
+@@ -15,7 +15,7 @@
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+-#include "otg_fsm.h"
++#include "phy-fsm-usb.h"
+ #include <linux/usb/otg.h>
+ #include <linux/ioctl.h>
+
+diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
+index c520b35..7f45966 100644
+--- a/drivers/usb/phy/phy-fsm-usb.c
++++ b/drivers/usb/phy/phy-fsm-usb.c
+@@ -29,7 +29,7 @@
+ #include <linux/usb/gadget.h>
+ #include <linux/usb/otg.h>
+
+-#include "phy-otg-fsm.h"
++#include "phy-fsm-usb.h"
+
+ /* Change USB protocol when there is a protocol change */
+ static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 6a6bbe4..1faa130 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -346,7 +346,7 @@ static void init_evtchn_cpu_bindings(void)
+
+ for_each_possible_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i),
+- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
++ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
+ }
+
+ static inline void clear_evtchn(int port)
+@@ -1492,8 +1492,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ {
++ struct shared_info *s = HYPERVISOR_shared_info;
+ struct evtchn_bind_vcpu bind_vcpu;
+ int evtchn = evtchn_from_irq(irq);
++ int masked;
+
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+@@ -1510,6 +1512,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ bind_vcpu.vcpu = tcpu;
+
+ /*
++ * Mask the event while changing the VCPU binding to prevent
++ * it being delivered on an unexpected VCPU.
++ */
++ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
++
++ /*
+ * If this fails, it usually just indicates that we're dealing with a
+ * virq or IPI channel, which don't actually need to be rebound. Ignore
+ * it, but don't do the xenlinux-level rebind in that case.
+@@ -1517,6 +1525,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+
++ if (!masked)
++ unmask_evtchn(evtchn);
++
+ return 0;
+ }
+
1454 +diff --git a/fs/bio.c b/fs/bio.c
1455 +index 94bbc04..c5eae72 100644
1456 +--- a/fs/bio.c
1457 ++++ b/fs/bio.c
1458 +@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1459 + int bio_uncopy_user(struct bio *bio)
1460 + {
1461 + struct bio_map_data *bmd = bio->bi_private;
1462 +- int ret = 0;
1463 ++ struct bio_vec *bvec;
1464 ++ int ret = 0, i;
1465 +
1466 +- if (!bio_flagged(bio, BIO_NULL_MAPPED))
1467 +- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1468 +- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1469 +- 0, bmd->is_our_pages);
1470 ++ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1471 ++ /*
1472 ++ * if we're in a workqueue, the request is orphaned, so
1473 ++ * don't copy into a random user address space, just free.
1474 ++ */
1475 ++ if (current->mm)
1476 ++ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1477 ++ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1478 ++ 0, bmd->is_our_pages);
1479 ++ else if (bmd->is_our_pages)
1480 ++ bio_for_each_segment_all(bvec, bio, i)
1481 ++ __free_page(bvec->bv_page);
1482 ++ }
1483 + bio_free_map_data(bmd);
1484 + bio_put(bio);
1485 + return ret;
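The guard above keys off current->mm: a bio completed from a workqueue runs in a kernel thread, and kernel threads have no user address space, so copying back would land in whatever mm happens to be current. A hedged kernel-style sketch of the same test, with an illustrative helper name not taken from fs/bio.c:

#include <linux/sched.h>

/* True when we are still in the context of a task that owns a user
 * address space; kernel threads (e.g. workqueue workers) have
 * current->mm == NULL, so copy-back must be skipped for them. */
static inline bool may_copy_to_user_ctx(void)
{
	return current->mm != NULL;
}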
1486 +diff --git a/fs/namespace.c b/fs/namespace.c
1487 +index 7b1ca9b..a45ba4f 100644
1488 +--- a/fs/namespace.c
1489 ++++ b/fs/namespace.c
1490 +@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
1491 + CL_COPY_ALL | CL_PRIVATE);
1492 + namespace_unlock();
1493 + if (IS_ERR(tree))
1494 +- return NULL;
1495 ++ return ERR_CAST(tree);
1496 + return &tree->mnt;
1497 + }
1498 +
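The collect_mounts() change matters because callers distinguish error pointers from valid pointers with IS_ERR(); returning NULL on failure drops the error code and confuses callers that only check IS_ERR(). A self-contained userspace re-implementation of the ERR_PTR()/IS_ERR()/ERR_CAST() encoding shows why (these are illustrative re-creations, not the kernel's headers):

#include <stdio.h>
#include <errno.h>

/* The kernel packs small negative errnos into the very top of the
 * pointer range; userspace re-implementations for illustration only. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST: propagate an error pointer under a different pointer type. */
static void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct mount { int dummy; };
struct vfsmount { int dummy; };

static struct mount *copy_tree_stub(void) { return ERR_PTR(-ENOMEM); }

static struct vfsmount *collect_mounts_sketch(void)
{
	struct mount *tree = copy_tree_stub();

	if (IS_ERR(tree))
		return ERR_CAST(tree);	/* not NULL: the errno survives */
	return (struct vfsmount *)tree;
}

int main(void)
{
	struct vfsmount *m = collect_mounts_sketch();

	if (IS_ERR(m))
		printf("collect_mounts failed: %ld\n", PTR_ERR(m));
	return 0;
}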
1499 +diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
1500 +index dc9a913..2d8be51 100644
1501 +--- a/fs/nilfs2/segbuf.c
1502 ++++ b/fs/nilfs2/segbuf.c
1503 +@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
1504 +
1505 + if (err == -EOPNOTSUPP) {
1506 + set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
1507 +- bio_put(bio);
1508 +- /* to be detected by submit_seg_bio() */
1509 ++ /* to be detected by nilfs_segbuf_submit_bio() */
1510 + }
1511 +
1512 + if (!uptodate)
1513 +@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
1514 + bio->bi_private = segbuf;
1515 + bio_get(bio);
1516 + submit_bio(mode, bio);
1517 ++ segbuf->sb_nbio++;
1518 + if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
1519 + bio_put(bio);
1520 + err = -EOPNOTSUPP;
1521 + goto failed;
1522 + }
1523 +- segbuf->sb_nbio++;
1524 + bio_put(bio);
1525 +
1526 + wi->bio = NULL;
1527 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
1528 +index c13c919..f45b2a78 100644
1529 +--- a/include/acpi/acpi_bus.h
1530 ++++ b/include/acpi/acpi_bus.h
1531 +@@ -455,7 +455,11 @@ struct acpi_pci_root {
1532 + };
1533 +
1534 + /* helper */
1535 +-acpi_handle acpi_get_child(acpi_handle, u64);
1536 ++acpi_handle acpi_find_child(acpi_handle, u64, bool);
1537 ++static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
1538 ++{
1539 ++ return acpi_find_child(handle, addr, false);
1540 ++}
1541 + int acpi_is_root_bridge(acpi_handle);
1542 + struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
1543 + #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
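The acpi_get_child() change keeps the old entry point as a static inline wrapper while the real work moves to acpi_find_child() with an extra flag, so existing callers recompile unchanged. The same compatibility trick in miniature, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

/* New, more general primitive: an extra flag selects the search mode. */
static long find_child(long parent, unsigned long long addr, bool check_sta)
{
	/* stub: pretend we looked up a child handle */
	return check_sta ? parent + 2 : parent + 1;
}

/* Old API preserved as a zero-cost wrapper over the new one. */
static inline long get_child(long parent, unsigned long long addr)
{
	return find_child(parent, addr, false);
}

int main(void)
{
	printf("child = %ld\n", get_child(40, 0));
	return 0;
}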
1544 +diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
1545 +index 44cdc11..120d57a 100644
1546 +--- a/include/linux/ftrace_event.h
1547 ++++ b/include/linux/ftrace_event.h
1548 +@@ -334,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
1549 + const char *name, int offset, int size,
1550 + int is_signed, int filter_type);
1551 + extern int trace_add_event_call(struct ftrace_event_call *call);
1552 +-extern void trace_remove_event_call(struct ftrace_event_call *call);
1553 ++extern int trace_remove_event_call(struct ftrace_event_call *call);
1554 +
1555 + #define is_signed_type(type) (((type)(-1)) < (type)1)
1556 +
1557 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
1558 +index ace9a5f..4a189ba 100644
1559 +--- a/include/linux/mm_types.h
1560 ++++ b/include/linux/mm_types.h
1561 +@@ -333,6 +333,7 @@ struct mm_struct {
1562 + void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
1563 + #endif
1564 + unsigned long mmap_base; /* base of mmap area */
1565 ++ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
1566 + unsigned long task_size; /* size of task vm space */
1567 + unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
1568 + unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
1569 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1570 +index 3aeb14b..178a8d9 100644
1571 +--- a/include/linux/sched.h
1572 ++++ b/include/linux/sched.h
1573 +@@ -314,7 +314,6 @@ struct nsproxy;
1574 + struct user_namespace;
1575 +
1576 + #ifdef CONFIG_MMU
1577 +-extern unsigned long mmap_legacy_base(void);
1578 + extern void arch_pick_mmap_layout(struct mm_struct *mm);
1579 + extern unsigned long
1580 + arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
1581 +diff --git a/include/linux/wait.h b/include/linux/wait.h
1582 +index 1133695..c8e5760 100644
1583 +--- a/include/linux/wait.h
1584 ++++ b/include/linux/wait.h
1585 +@@ -805,6 +805,63 @@ do { \
1586 + __ret; \
1587 + })
1588 +
1589 ++#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
1590 ++ lock, ret) \
1591 ++do { \
1592 ++ DEFINE_WAIT(__wait); \
1593 ++ \
1594 ++ for (;;) { \
1595 ++ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
1596 ++ if (condition) \
1597 ++ break; \
1598 ++ if (signal_pending(current)) { \
1599 ++ ret = -ERESTARTSYS; \
1600 ++ break; \
1601 ++ } \
1602 ++ spin_unlock_irq(&lock); \
1603 ++ ret = schedule_timeout(ret); \
1604 ++ spin_lock_irq(&lock); \
1605 ++ if (!ret) \
1606 ++ break; \
1607 ++ } \
1608 ++ finish_wait(&wq, &__wait); \
1609 ++} while (0)
1610 ++
1611 ++/**
1612 ++ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
1613 ++ * The condition is checked under the lock. This is expected
1614 ++ * to be called with the lock taken.
1615 ++ * @wq: the waitqueue to wait on
1616 ++ * @condition: a C expression for the event to wait for
1617 ++ * @lock: a locked spinlock_t, which will be released before schedule()
1618 ++ * and reacquired afterwards.
1619 ++ * @timeout: timeout, in jiffies
1620 ++ *
1621 ++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1622 ++ * @condition evaluates to true or signal is received. The @condition is
1623 ++ * checked each time the waitqueue @wq is woken up.
1624 ++ *
1625 ++ * wake_up() has to be called after changing any variable that could
1626 ++ * change the result of the wait condition.
1627 ++ *
1628 ++ * This is supposed to be called while holding the lock. The lock is
1629 ++ * dropped before going to sleep and is reacquired afterwards.
1630 ++ *
1631 ++ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
1632 ++ * was interrupted by a signal, and the remaining jiffies otherwise
1633 ++ * if the condition evaluated to true before the timeout elapsed.
1634 ++ */
1635 ++#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
1636 ++ timeout) \
1637 ++({ \
1638 ++ int __ret = timeout; \
1639 ++ \
1640 ++ if (!(condition)) \
1641 ++ __wait_event_interruptible_lock_irq_timeout( \
1642 ++ wq, condition, lock, __ret); \
1643 ++ __ret; \
1644 ++})
1645 ++
1646 +
1647 + /*
1648 + * These are the old interfaces to sleep waiting for an event.
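The new helper is meant for code that must sleep on a condition protected by a spinlock taken with spin_lock_irq(). Below is a minimal sketch of how a driver might call it, assuming a device structure with a lock, a waitqueue and a ready flag; the structure and field names are hypothetical, and this is kernel-context code, shown for illustration rather than as a standalone build.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct mydev {
	spinlock_t lock;		/* protects data_ready */
	wait_queue_head_t wq;
	int data_ready;
};

static int mydev_wait_for_data(struct mydev *dev)
{
	long ret;

	spin_lock_irq(&dev->lock);
	/* The helper drops dev->lock around schedule_timeout() and
	 * retakes it before rechecking dev->data_ready. */
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
							dev->data_ready,
							dev->lock, 5 * HZ);
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	return 0;			/* condition became true in time */
}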
1649 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1650 +index 6c508ff..f23449d 100644
1651 +--- a/kernel/trace/ftrace.c
1652 ++++ b/kernel/trace/ftrace.c
1653 +@@ -1416,12 +1416,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
1654 + * the hashes are freed with call_rcu_sched().
1655 + */
1656 + static int
1657 +-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1658 ++ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1659 + {
1660 + struct ftrace_hash *filter_hash;
1661 + struct ftrace_hash *notrace_hash;
1662 + int ret;
1663 +
1664 ++#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1665 ++ /*
1666 ++ * There's a small race when adding ops: an ftrace handler
1667 ++ * that wants regs may be called without them. We cannot
1668 ++ * allow that handler to be called if regs is NULL.
1669 ++ */
1670 ++ if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1671 ++ return 0;
1672 ++#endif
1673 ++
1674 + filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1675 + notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1676 +
1677 +@@ -2134,12 +2144,57 @@ static cycle_t ftrace_update_time;
1678 + static unsigned long ftrace_update_cnt;
1679 + unsigned long ftrace_update_tot_cnt;
1680 +
1681 +-static int ops_traces_mod(struct ftrace_ops *ops)
1682 ++static inline int ops_traces_mod(struct ftrace_ops *ops)
1683 + {
1684 +- struct ftrace_hash *hash;
1685 ++ /*
1686 ++ * An empty filter_hash defaults to tracing the whole module,
1687 ++ * but the notrace hash requires testing individual module functions.
1688 ++ */
1689 ++ return ftrace_hash_empty(ops->filter_hash) &&
1690 ++ ftrace_hash_empty(ops->notrace_hash);
1691 ++}
1692 ++
1693 ++/*
1694 ++ * Check if the current ops references the record.
1695 ++ *
1696 ++ * If the ops traces all functions, then it was already accounted for.
1697 ++ * If the ops does not trace the current record function, skip it.
1698 ++ * If the ops ignores the function via notrace filter, skip it.
1699 ++ */
1700 ++static inline bool
1701 ++ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
1702 ++{
1703 ++ /* If ops isn't enabled, ignore it */
1704 ++ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1705 ++ return 0;
1706 ++
1707 ++ /* If ops traces all mods, we already accounted for it */
1708 ++ if (ops_traces_mod(ops))
1709 ++ return 0;
1710 +
1711 +- hash = ops->filter_hash;
1712 +- return ftrace_hash_empty(hash);
1713 ++ /* The function must be in the filter */
1714 ++ if (!ftrace_hash_empty(ops->filter_hash) &&
1715 ++ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
1716 ++ return 0;
1717 ++
1718 ++ /* If in notrace hash, we ignore it too */
1719 ++ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
1720 ++ return 0;
1721 ++
1722 ++ return 1;
1723 ++}
1724 ++
1725 ++static int referenced_filters(struct dyn_ftrace *rec)
1726 ++{
1727 ++ struct ftrace_ops *ops;
1728 ++ int cnt = 0;
1729 ++
1730 ++ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
1731 ++ if (ops_references_rec(ops, rec))
1732 ++ cnt++;
1733 ++ }
1734 ++
1735 ++ return cnt;
1736 + }
1737 +
1738 + static int ftrace_update_code(struct module *mod)
1739 +@@ -2148,6 +2203,7 @@ static int ftrace_update_code(struct module *mod)
1740 + struct dyn_ftrace *p;
1741 + cycle_t start, stop;
1742 + unsigned long ref = 0;
1743 ++ bool test = false;
1744 + int i;
1745 +
1746 + /*
1747 +@@ -2161,9 +2217,12 @@ static int ftrace_update_code(struct module *mod)
1748 +
1749 + for (ops = ftrace_ops_list;
1750 + ops != &ftrace_list_end; ops = ops->next) {
1751 +- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1752 +- ops_traces_mod(ops))
1753 +- ref++;
1754 ++ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
1755 ++ if (ops_traces_mod(ops))
1756 ++ ref++;
1757 ++ else
1758 ++ test = true;
1759 ++ }
1760 + }
1761 + }
1762 +
1763 +@@ -2173,12 +2232,16 @@ static int ftrace_update_code(struct module *mod)
1764 + for (pg = ftrace_new_pgs; pg; pg = pg->next) {
1765 +
1766 + for (i = 0; i < pg->index; i++) {
1767 ++ int cnt = ref;
1768 ++
1769 + /* If something went wrong, bail without enabling anything */
1770 + if (unlikely(ftrace_disabled))
1771 + return -1;
1772 +
1773 + p = &pg->records[i];
1774 +- p->flags = ref;
1775 ++ if (test)
1776 ++ cnt += referenced_filters(p);
1777 ++ p->flags = cnt;
1778 +
1779 + /*
1780 + * Do the initial record conversion from mcount jump
1781 +@@ -2198,7 +2261,7 @@ static int ftrace_update_code(struct module *mod)
1782 + * conversion puts the module to the correct state, thus
1783 + * passing the ftrace_make_call check.
1784 + */
1785 +- if (ftrace_start_up && ref) {
1786 ++ if (ftrace_start_up && cnt) {
1787 + int failed = __ftrace_replace_code(p, 1);
1788 + if (failed)
1789 + ftrace_bug(failed, p->ip);
1790 +@@ -4188,7 +4251,7 @@ static inline void ftrace_startup_enable(int command) { }
1791 + # define ftrace_shutdown_sysctl() do { } while (0)
1792 +
1793 + static inline int
1794 +-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1795 ++ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1796 + {
1797 + return 1;
1798 + }
1799 +@@ -4211,7 +4274,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
1800 + do_for_each_ftrace_op(op, ftrace_control_list) {
1801 + if (!(op->flags & FTRACE_OPS_FL_STUB) &&
1802 + !ftrace_function_local_disabled(op) &&
1803 +- ftrace_ops_test(op, ip))
1804 ++ ftrace_ops_test(op, ip, regs))
1805 + op->func(ip, parent_ip, op, regs);
1806 + } while_for_each_ftrace_op(op);
1807 + trace_recursion_clear(TRACE_CONTROL_BIT);
1808 +@@ -4244,7 +4307,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
1809 + */
1810 + preempt_disable_notrace();
1811 + do_for_each_ftrace_op(op, ftrace_ops_list) {
1812 +- if (ftrace_ops_test(op, ip))
1813 ++ if (ftrace_ops_test(op, ip, regs))
1814 + op->func(ip, parent_ip, op, regs);
1815 + } while_for_each_ftrace_op(op);
1816 + preempt_enable_notrace();
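The module-load accounting above gives each new record a reference count of the enabled ops that can actually trace it: ops that trace everything are counted once up front, while ops with filters are tested per record by referenced_filters(). A compact userspace model of the per-record decision, using simple string lists in place of ftrace hashes (all names illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ops {
	bool enabled;
	const char *filter[4];	/* empty list => traces everything */
	const char *notrace[4];
};

static bool in_list(const char *const *list, const char *fn)
{
	for (int i = 0; list[i]; i++)
		if (!strcmp(list[i], fn))
			return true;
	return false;
}

static bool references_rec(const struct ops *ops, const char *fn)
{
	if (!ops->enabled)
		return false;		/* disabled ops never count */
	if (!ops->filter[0] && !ops->notrace[0])
		return false;		/* traces all: counted once already */
	if (ops->filter[0] && !in_list(ops->filter, fn))
		return false;		/* not in the filter */
	if (in_list(ops->notrace, fn))
		return false;		/* explicitly excluded */
	return true;
}

int main(void)
{
	struct ops ops = { true, { "foo", NULL }, { NULL } };

	printf("foo referenced: %d\n", references_rec(&ops, "foo"));
	printf("bar referenced: %d\n", references_rec(&ops, "bar"));
	return 0;
}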
1817 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1818 +index 06a5bce..0582a01 100644
1819 +--- a/kernel/trace/trace.c
1820 ++++ b/kernel/trace/trace.c
1821 +@@ -2834,6 +2834,17 @@ static int s_show(struct seq_file *m, void *v)
1822 + return 0;
1823 + }
1824 +
1825 ++/*
1826 ++ * Should be used after trace_array_get(), trace_types_lock
1827 ++ * ensures that i_cdev was already initialized.
1828 ++ */
1829 ++static inline int tracing_get_cpu(struct inode *inode)
1830 ++{
1831 ++ if (inode->i_cdev) /* See trace_create_cpu_file() */
1832 ++ return (long)inode->i_cdev - 1;
1833 ++ return RING_BUFFER_ALL_CPUS;
1834 ++}
1835 ++
1836 + static const struct seq_operations tracer_seq_ops = {
1837 + .start = s_start,
1838 + .next = s_next,
1839 +@@ -2842,9 +2853,9 @@ static const struct seq_operations tracer_seq_ops = {
1840 + };
1841 +
1842 + static struct trace_iterator *
1843 +-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
1844 +- struct inode *inode, struct file *file, bool snapshot)
1845 ++__tracing_open(struct inode *inode, struct file *file, bool snapshot)
1846 + {
1847 ++ struct trace_array *tr = inode->i_private;
1848 + struct trace_iterator *iter;
1849 + int cpu;
1850 +
1851 +@@ -2885,8 +2896,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
1852 + iter->trace_buffer = &tr->trace_buffer;
1853 + iter->snapshot = snapshot;
1854 + iter->pos = -1;
1855 ++ iter->cpu_file = tracing_get_cpu(inode);
1856 + mutex_init(&iter->mutex);
1857 +- iter->cpu_file = tc->cpu;
1858 +
1859 + /* Notify the tracer early; before we stop tracing. */
1860 + if (iter->trace && iter->trace->open)
1861 +@@ -2962,44 +2973,22 @@ int tracing_open_generic_tr(struct inode *inode, struct file *filp)
1862 + filp->private_data = inode->i_private;
1863 +
1864 + return 0;
1865 +-
1866 +-}
1867 +-
1868 +-int tracing_open_generic_tc(struct inode *inode, struct file *filp)
1869 +-{
1870 +- struct trace_cpu *tc = inode->i_private;
1871 +- struct trace_array *tr = tc->tr;
1872 +-
1873 +- if (tracing_disabled)
1874 +- return -ENODEV;
1875 +-
1876 +- if (trace_array_get(tr) < 0)
1877 +- return -ENODEV;
1878 +-
1879 +- filp->private_data = inode->i_private;
1880 +-
1881 +- return 0;
1882 +-
1883 + }
1884 +
1885 + static int tracing_release(struct inode *inode, struct file *file)
1886 + {
1887 ++ struct trace_array *tr = inode->i_private;
1888 + struct seq_file *m = file->private_data;
1889 + struct trace_iterator *iter;
1890 +- struct trace_array *tr;
1891 + int cpu;
1892 +
1893 +- /* Writes do not use seq_file, need to grab tr from inode */
1894 + if (!(file->f_mode & FMODE_READ)) {
1895 +- struct trace_cpu *tc = inode->i_private;
1896 +-
1897 +- trace_array_put(tc->tr);
1898 ++ trace_array_put(tr);
1899 + return 0;
1900 + }
1901 +
1902 ++ /* Writes do not use seq_file */
1903 + iter = m->private;
1904 +- tr = iter->tr;
1905 +-
1906 + mutex_lock(&trace_types_lock);
1907 +
1908 + for_each_tracing_cpu(cpu) {
1909 +@@ -3035,15 +3024,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
1910 + return 0;
1911 + }
1912 +
1913 +-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
1914 +-{
1915 +- struct trace_cpu *tc = inode->i_private;
1916 +- struct trace_array *tr = tc->tr;
1917 +-
1918 +- trace_array_put(tr);
1919 +- return 0;
1920 +-}
1921 +-
1922 + static int tracing_single_release_tr(struct inode *inode, struct file *file)
1923 + {
1924 + struct trace_array *tr = inode->i_private;
1925 +@@ -3055,8 +3035,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
1926 +
1927 + static int tracing_open(struct inode *inode, struct file *file)
1928 + {
1929 +- struct trace_cpu *tc = inode->i_private;
1930 +- struct trace_array *tr = tc->tr;
1931 ++ struct trace_array *tr = inode->i_private;
1932 + struct trace_iterator *iter;
1933 + int ret = 0;
1934 +
1935 +@@ -3064,16 +3043,17 @@ static int tracing_open(struct inode *inode, struct file *file)
1936 + return -ENODEV;
1937 +
1938 + /* If this file was open for write, then erase contents */
1939 +- if ((file->f_mode & FMODE_WRITE) &&
1940 +- (file->f_flags & O_TRUNC)) {
1941 +- if (tc->cpu == RING_BUFFER_ALL_CPUS)
1942 ++ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1943 ++ int cpu = tracing_get_cpu(inode);
1944 ++
1945 ++ if (cpu == RING_BUFFER_ALL_CPUS)
1946 + tracing_reset_online_cpus(&tr->trace_buffer);
1947 + else
1948 +- tracing_reset(&tr->trace_buffer, tc->cpu);
1949 ++ tracing_reset(&tr->trace_buffer, cpu);
1950 + }
1951 +
1952 + if (file->f_mode & FMODE_READ) {
1953 +- iter = __tracing_open(tr, tc, inode, file, false);
1954 ++ iter = __tracing_open(inode, file, false);
1955 + if (IS_ERR(iter))
1956 + ret = PTR_ERR(iter);
1957 + else if (trace_flags & TRACE_ITER_LATENCY_FMT)
1958 +@@ -3939,8 +3919,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
1959 +
1960 + static int tracing_open_pipe(struct inode *inode, struct file *filp)
1961 + {
1962 +- struct trace_cpu *tc = inode->i_private;
1963 +- struct trace_array *tr = tc->tr;
1964 ++ struct trace_array *tr = inode->i_private;
1965 + struct trace_iterator *iter;
1966 + int ret = 0;
1967 +
1968 +@@ -3986,9 +3965,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
1969 + if (trace_clocks[tr->clock_id].in_ns)
1970 + iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
1971 +
1972 +- iter->cpu_file = tc->cpu;
1973 +- iter->tr = tc->tr;
1974 +- iter->trace_buffer = &tc->tr->trace_buffer;
1975 ++ iter->tr = tr;
1976 ++ iter->trace_buffer = &tr->trace_buffer;
1977 ++ iter->cpu_file = tracing_get_cpu(inode);
1978 + mutex_init(&iter->mutex);
1979 + filp->private_data = iter;
1980 +
1981 +@@ -4011,8 +3990,7 @@ fail:
1982 + static int tracing_release_pipe(struct inode *inode, struct file *file)
1983 + {
1984 + struct trace_iterator *iter = file->private_data;
1985 +- struct trace_cpu *tc = inode->i_private;
1986 +- struct trace_array *tr = tc->tr;
1987 ++ struct trace_array *tr = inode->i_private;
1988 +
1989 + mutex_lock(&trace_types_lock);
1990 +
1991 +@@ -4366,15 +4344,16 @@ static ssize_t
1992 + tracing_entries_read(struct file *filp, char __user *ubuf,
1993 + size_t cnt, loff_t *ppos)
1994 + {
1995 +- struct trace_cpu *tc = filp->private_data;
1996 +- struct trace_array *tr = tc->tr;
1997 ++ struct inode *inode = file_inode(filp);
1998 ++ struct trace_array *tr = inode->i_private;
1999 ++ int cpu = tracing_get_cpu(inode);
2000 + char buf[64];
2001 + int r = 0;
2002 + ssize_t ret;
2003 +
2004 + mutex_lock(&trace_types_lock);
2005 +
2006 +- if (tc->cpu == RING_BUFFER_ALL_CPUS) {
2007 ++ if (cpu == RING_BUFFER_ALL_CPUS) {
2008 + int cpu, buf_size_same;
2009 + unsigned long size;
2010 +
2011 +@@ -4401,7 +4380,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
2012 + } else
2013 + r = sprintf(buf, "X\n");
2014 + } else
2015 +- r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
2016 ++ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
2017 +
2018 + mutex_unlock(&trace_types_lock);
2019 +
2020 +@@ -4413,7 +4392,8 @@ static ssize_t
2021 + tracing_entries_write(struct file *filp, const char __user *ubuf,
2022 + size_t cnt, loff_t *ppos)
2023 + {
2024 +- struct trace_cpu *tc = filp->private_data;
2025 ++ struct inode *inode = file_inode(filp);
2026 ++ struct trace_array *tr = inode->i_private;
2027 + unsigned long val;
2028 + int ret;
2029 +
2030 +@@ -4427,8 +4407,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2031 +
2032 + /* value is in KB */
2033 + val <<= 10;
2034 +-
2035 +- ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
2036 ++ ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
2037 + if (ret < 0)
2038 + return ret;
2039 +
2040 +@@ -4689,8 +4668,7 @@ struct ftrace_buffer_info {
2041 + #ifdef CONFIG_TRACER_SNAPSHOT
2042 + static int tracing_snapshot_open(struct inode *inode, struct file *file)
2043 + {
2044 +- struct trace_cpu *tc = inode->i_private;
2045 +- struct trace_array *tr = tc->tr;
2046 ++ struct trace_array *tr = inode->i_private;
2047 + struct trace_iterator *iter;
2048 + struct seq_file *m;
2049 + int ret = 0;
2050 +@@ -4699,7 +4677,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
2051 + return -ENODEV;
2052 +
2053 + if (file->f_mode & FMODE_READ) {
2054 +- iter = __tracing_open(tr, tc, inode, file, true);
2055 ++ iter = __tracing_open(inode, file, true);
2056 + if (IS_ERR(iter))
2057 + ret = PTR_ERR(iter);
2058 + } else {
2059 +@@ -4716,8 +4694,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
2060 + ret = 0;
2061 +
2062 + iter->tr = tr;
2063 +- iter->trace_buffer = &tc->tr->max_buffer;
2064 +- iter->cpu_file = tc->cpu;
2065 ++ iter->trace_buffer = &tr->max_buffer;
2066 ++ iter->cpu_file = tracing_get_cpu(inode);
2067 + m->private = iter;
2068 + file->private_data = m;
2069 + }
2070 +@@ -4876,11 +4854,11 @@ static const struct file_operations tracing_pipe_fops = {
2071 + };
2072 +
2073 + static const struct file_operations tracing_entries_fops = {
2074 +- .open = tracing_open_generic_tc,
2075 ++ .open = tracing_open_generic_tr,
2076 + .read = tracing_entries_read,
2077 + .write = tracing_entries_write,
2078 + .llseek = generic_file_llseek,
2079 +- .release = tracing_release_generic_tc,
2080 ++ .release = tracing_release_generic_tr,
2081 + };
2082 +
2083 + static const struct file_operations tracing_total_entries_fops = {
2084 +@@ -4932,8 +4910,7 @@ static const struct file_operations snapshot_raw_fops = {
2085 +
2086 + static int tracing_buffers_open(struct inode *inode, struct file *filp)
2087 + {
2088 +- struct trace_cpu *tc = inode->i_private;
2089 +- struct trace_array *tr = tc->tr;
2090 ++ struct trace_array *tr = inode->i_private;
2091 + struct ftrace_buffer_info *info;
2092 + int ret;
2093 +
2094 +@@ -4952,7 +4929,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
2095 + mutex_lock(&trace_types_lock);
2096 +
2097 + info->iter.tr = tr;
2098 +- info->iter.cpu_file = tc->cpu;
2099 ++ info->iter.cpu_file = tracing_get_cpu(inode);
2100 + info->iter.trace = tr->current_trace;
2101 + info->iter.trace_buffer = &tr->trace_buffer;
2102 + info->spare = NULL;
2103 +@@ -5269,14 +5246,14 @@ static ssize_t
2104 + tracing_stats_read(struct file *filp, char __user *ubuf,
2105 + size_t count, loff_t *ppos)
2106 + {
2107 +- struct trace_cpu *tc = filp->private_data;
2108 +- struct trace_array *tr = tc->tr;
2109 ++ struct inode *inode = file_inode(filp);
2110 ++ struct trace_array *tr = inode->i_private;
2111 + struct trace_buffer *trace_buf = &tr->trace_buffer;
2112 ++ int cpu = tracing_get_cpu(inode);
2113 + struct trace_seq *s;
2114 + unsigned long cnt;
2115 + unsigned long long t;
2116 + unsigned long usec_rem;
2117 +- int cpu = tc->cpu;
2118 +
2119 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2120 + if (!s)
2121 +@@ -5329,10 +5306,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
2122 + }
2123 +
2124 + static const struct file_operations tracing_stats_fops = {
2125 +- .open = tracing_open_generic_tc,
2126 ++ .open = tracing_open_generic_tr,
2127 + .read = tracing_stats_read,
2128 + .llseek = generic_file_llseek,
2129 +- .release = tracing_release_generic_tc,
2130 ++ .release = tracing_release_generic_tr,
2131 + };
2132 +
2133 + #ifdef CONFIG_DYNAMIC_FTRACE
2134 +@@ -5521,10 +5498,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
2135 + return tr->percpu_dir;
2136 + }
2137 +
2138 ++static struct dentry *
2139 ++trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
2140 ++ void *data, long cpu, const struct file_operations *fops)
2141 ++{
2142 ++ struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
2143 ++
2144 ++ if (ret) /* See tracing_get_cpu() */
2145 ++ ret->d_inode->i_cdev = (void *)(cpu + 1);
2146 ++ return ret;
2147 ++}
2148 ++
2149 + static void
2150 + tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
2151 + {
2152 +- struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
2153 + struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
2154 + struct dentry *d_cpu;
2155 + char cpu_dir[30]; /* 30 characters should be more than enough */
2156 +@@ -5540,28 +5527,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
2157 + }
2158 +
2159 + /* per cpu trace_pipe */
2160 +- trace_create_file("trace_pipe", 0444, d_cpu,
2161 +- (void *)&data->trace_cpu, &tracing_pipe_fops);
2162 ++ trace_create_cpu_file("trace_pipe", 0444, d_cpu,
2163 ++ tr, cpu, &tracing_pipe_fops);
2164 +
2165 + /* per cpu trace */
2166 +- trace_create_file("trace", 0644, d_cpu,
2167 +- (void *)&data->trace_cpu, &tracing_fops);
2168 ++ trace_create_cpu_file("trace", 0644, d_cpu,
2169 ++ tr, cpu, &tracing_fops);
2170 +
2171 +- trace_create_file("trace_pipe_raw", 0444, d_cpu,
2172 +- (void *)&data->trace_cpu, &tracing_buffers_fops);
2173 ++ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
2174 ++ tr, cpu, &tracing_buffers_fops);
2175 +
2176 +- trace_create_file("stats", 0444, d_cpu,
2177 +- (void *)&data->trace_cpu, &tracing_stats_fops);
2178 ++ trace_create_cpu_file("stats", 0444, d_cpu,
2179 ++ tr, cpu, &tracing_stats_fops);
2180 +
2181 +- trace_create_file("buffer_size_kb", 0444, d_cpu,
2182 +- (void *)&data->trace_cpu, &tracing_entries_fops);
2183 ++ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
2184 ++ tr, cpu, &tracing_entries_fops);
2185 +
2186 + #ifdef CONFIG_TRACER_SNAPSHOT
2187 +- trace_create_file("snapshot", 0644, d_cpu,
2188 +- (void *)&data->trace_cpu, &snapshot_fops);
2189 ++ trace_create_cpu_file("snapshot", 0644, d_cpu,
2190 ++ tr, cpu, &snapshot_fops);
2191 +
2192 +- trace_create_file("snapshot_raw", 0444, d_cpu,
2193 +- (void *)&data->trace_cpu, &snapshot_raw_fops);
2194 ++ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
2195 ++ tr, cpu, &snapshot_raw_fops);
2196 + #endif
2197 + }
2198 +
2199 +@@ -6124,13 +6111,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
2200 + tr, &tracing_iter_fops);
2201 +
2202 + trace_create_file("trace", 0644, d_tracer,
2203 +- (void *)&tr->trace_cpu, &tracing_fops);
2204 ++ tr, &tracing_fops);
2205 +
2206 + trace_create_file("trace_pipe", 0444, d_tracer,
2207 +- (void *)&tr->trace_cpu, &tracing_pipe_fops);
2208 ++ tr, &tracing_pipe_fops);
2209 +
2210 + trace_create_file("buffer_size_kb", 0644, d_tracer,
2211 +- (void *)&tr->trace_cpu, &tracing_entries_fops);
2212 ++ tr, &tracing_entries_fops);
2213 +
2214 + trace_create_file("buffer_total_size_kb", 0444, d_tracer,
2215 + tr, &tracing_total_entries_fops);
2216 +@@ -6145,11 +6132,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
2217 + &trace_clock_fops);
2218 +
2219 + trace_create_file("tracing_on", 0644, d_tracer,
2220 +- tr, &rb_simple_fops);
2221 ++ tr, &rb_simple_fops);
2222 +
2223 + #ifdef CONFIG_TRACER_SNAPSHOT
2224 + trace_create_file("snapshot", 0644, d_tracer,
2225 +- (void *)&tr->trace_cpu, &snapshot_fops);
2226 ++ tr, &snapshot_fops);
2227 + #endif
2228 +
2229 + for_each_tracing_cpu(cpu)
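The per-cpu trace files above reuse the otherwise-unused inode->i_cdev as a tiny payload: zero keeps meaning "not set" (the whole-buffer files), so the cpu number is stored biased by one and cpu 0 stays distinguishable from "not set". The encoding in isolation, as plain C with illustrative names:

#include <stdio.h>

#define RING_BUFFER_ALL_CPUS -1

/* Store an optional small integer in a pointer-sized slot whose zero
 * value must keep meaning "not set": bias by one on the way in. */
static void *encode_cpu(long cpu)
{
	return (void *)(cpu + 1);
}

static int decode_cpu(void *slot)
{
	if (slot)			/* see trace_create_cpu_file() */
		return (long)slot - 1;
	return RING_BUFFER_ALL_CPUS;	/* slot was never set */
}

int main(void)
{
	void *per_cpu0 = encode_cpu(0);
	void *unset = NULL;

	printf("cpu file 0 -> %d\n", decode_cpu(per_cpu0));
	printf("top-level  -> %d\n", decode_cpu(unset));
	return 0;
}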
2230 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2231 +index 6953263..3d18aad 100644
2232 +--- a/kernel/trace/trace_events.c
2233 ++++ b/kernel/trace/trace_events.c
2234 +@@ -114,7 +114,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
2235 +
2236 + field = kmem_cache_alloc(field_cachep, GFP_TRACE);
2237 + if (!field)
2238 +- goto err;
2239 ++ return -ENOMEM;
2240 +
2241 + field->name = name;
2242 + field->type = type;
2243 +@@ -131,11 +131,6 @@ static int __trace_define_field(struct list_head *head, const char *type,
2244 + list_add(&field->link, head);
2245 +
2246 + return 0;
2247 +-
2248 +-err:
2249 +- kmem_cache_free(field_cachep, field);
2250 +-
2251 +- return -ENOMEM;
2252 + }
2253 +
2254 + int trace_define_field(struct ftrace_event_call *call, const char *type,
2255 +@@ -412,33 +407,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
2256 + mutex_unlock(&event_mutex);
2257 + }
2258 +
2259 +-/*
2260 +- * Open and update trace_array ref count.
2261 +- * Must have the current trace_array passed to it.
2262 +- */
2263 +-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
2264 ++static void remove_subsystem(struct ftrace_subsystem_dir *dir)
2265 + {
2266 +- struct ftrace_event_file *file = inode->i_private;
2267 +- struct trace_array *tr = file->tr;
2268 +- int ret;
2269 ++ if (!dir)
2270 ++ return;
2271 +
2272 +- if (trace_array_get(tr) < 0)
2273 +- return -ENODEV;
2274 ++ if (!--dir->nr_events) {
2275 ++ debugfs_remove_recursive(dir->entry);
2276 ++ list_del(&dir->list);
2277 ++ __put_system_dir(dir);
2278 ++ }
2279 ++}
2280 +
2281 +- ret = tracing_open_generic(inode, filp);
2282 +- if (ret < 0)
2283 +- trace_array_put(tr);
2284 +- return ret;
2285 ++static void *event_file_data(struct file *filp)
2286 ++{
2287 ++ return ACCESS_ONCE(file_inode(filp)->i_private);
2288 + }
2289 +
2290 +-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
2291 ++static void remove_event_file_dir(struct ftrace_event_file *file)
2292 + {
2293 +- struct ftrace_event_file *file = inode->i_private;
2294 +- struct trace_array *tr = file->tr;
2295 ++ struct dentry *dir = file->dir;
2296 ++ struct dentry *child;
2297 +
2298 +- trace_array_put(tr);
2299 ++ if (dir) {
2300 ++ spin_lock(&dir->d_lock); /* probably unneeded */
2301 ++ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
2302 ++ if (child->d_inode) /* probably unneeded */
2303 ++ child->d_inode->i_private = NULL;
2304 ++ }
2305 ++ spin_unlock(&dir->d_lock);
2306 +
2307 +- return 0;
2308 ++ debugfs_remove_recursive(dir);
2309 ++ }
2310 ++
2311 ++ list_del(&file->list);
2312 ++ remove_subsystem(file->system);
2313 ++ kmem_cache_free(file_cachep, file);
2314 + }
2315 +
2316 + /*
2317 +@@ -682,13 +686,23 @@ static ssize_t
2318 + event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
2319 + loff_t *ppos)
2320 + {
2321 +- struct ftrace_event_file *file = filp->private_data;
2322 ++ struct ftrace_event_file *file;
2323 ++ unsigned long flags;
2324 + char *buf;
2325 +
2326 +- if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2327 +- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
2328 ++ mutex_lock(&event_mutex);
2329 ++ file = event_file_data(filp);
2330 ++ if (likely(file))
2331 ++ flags = file->flags;
2332 ++ mutex_unlock(&event_mutex);
2333 ++
2334 ++ if (!file)
2335 ++ return -ENODEV;
2336 ++
2337 ++ if (flags & FTRACE_EVENT_FL_ENABLED) {
2338 ++ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED)
2339 + buf = "0*\n";
2340 +- else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
2341 ++ else if (flags & FTRACE_EVENT_FL_SOFT_MODE)
2342 + buf = "1*\n";
2343 + else
2344 + buf = "1\n";
2345 +@@ -702,13 +716,10 @@ static ssize_t
2346 + event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2347 + loff_t *ppos)
2348 + {
2349 +- struct ftrace_event_file *file = filp->private_data;
2350 ++ struct ftrace_event_file *file;
2351 + unsigned long val;
2352 + int ret;
2353 +
2354 +- if (!file)
2355 +- return -EINVAL;
2356 +-
2357 + ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2358 + if (ret)
2359 + return ret;
2360 +@@ -720,8 +731,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
2361 + switch (val) {
2362 + case 0:
2363 + case 1:
2364 ++ ret = -ENODEV;
2365 + mutex_lock(&event_mutex);
2366 +- ret = ftrace_event_enable_disable(file, val);
2367 ++ file = event_file_data(filp);
2368 ++ if (likely(file))
2369 ++ ret = ftrace_event_enable_disable(file, val);
2370 + mutex_unlock(&event_mutex);
2371 + break;
2372 +
2373 +@@ -828,7 +842,7 @@ enum {
2374 +
2375 + static void *f_next(struct seq_file *m, void *v, loff_t *pos)
2376 + {
2377 +- struct ftrace_event_call *call = m->private;
2378 ++ struct ftrace_event_call *call = event_file_data(m->private);
2379 + struct ftrace_event_field *field;
2380 + struct list_head *common_head = &ftrace_common_fields;
2381 + struct list_head *head = trace_get_fields(call);
2382 +@@ -872,6 +886,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
2383 + loff_t l = 0;
2384 + void *p;
2385 +
2386 ++ /* ->stop() is called even if ->start() fails */
2387 ++ mutex_lock(&event_mutex);
2388 ++ if (!event_file_data(m->private))
2389 ++ return ERR_PTR(-ENODEV);
2390 ++
2391 + /* Start by showing the header */
2392 + if (!*pos)
2393 + return (void *)FORMAT_HEADER;
2394 +@@ -886,7 +905,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
2395 +
2396 + static int f_show(struct seq_file *m, void *v)
2397 + {
2398 +- struct ftrace_event_call *call = m->private;
2399 ++ struct ftrace_event_call *call = event_file_data(m->private);
2400 + struct ftrace_event_field *field;
2401 + const char *array_descriptor;
2402 +
2403 +@@ -937,6 +956,7 @@ static int f_show(struct seq_file *m, void *v)
2404 +
2405 + static void f_stop(struct seq_file *m, void *p)
2406 + {
2407 ++ mutex_unlock(&event_mutex);
2408 + }
2409 +
2410 + static const struct seq_operations trace_format_seq_ops = {
2411 +@@ -948,7 +968,6 @@ static const struct seq_operations trace_format_seq_ops = {
2412 +
2413 + static int trace_format_open(struct inode *inode, struct file *file)
2414 + {
2415 +- struct ftrace_event_call *call = inode->i_private;
2416 + struct seq_file *m;
2417 + int ret;
2418 +
2419 +@@ -957,7 +976,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
2420 + return ret;
2421 +
2422 + m = file->private_data;
2423 +- m->private = call;
2424 ++ m->private = file;
2425 +
2426 + return 0;
2427 + }
2428 +@@ -965,19 +984,22 @@ static int trace_format_open(struct inode *inode, struct file *file)
2429 + static ssize_t
2430 + event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
2431 + {
2432 +- struct ftrace_event_call *call = filp->private_data;
2433 ++ int id = (long)event_file_data(filp);
2434 + struct trace_seq *s;
2435 + int r;
2436 +
2437 + if (*ppos)
2438 + return 0;
2439 +
2440 ++ if (unlikely(!id))
2441 ++ return -ENODEV;
2442 ++
2443 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2444 + if (!s)
2445 + return -ENOMEM;
2446 +
2447 + trace_seq_init(s);
2448 +- trace_seq_printf(s, "%d\n", call->event.type);
2449 ++ trace_seq_printf(s, "%d\n", id);
2450 +
2451 + r = simple_read_from_buffer(ubuf, cnt, ppos,
2452 + s->buffer, s->len);
2453 +@@ -989,21 +1011,28 @@ static ssize_t
2454 + event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
2455 + loff_t *ppos)
2456 + {
2457 +- struct ftrace_event_call *call = filp->private_data;
2458 ++ struct ftrace_event_call *call;
2459 + struct trace_seq *s;
2460 +- int r;
2461 ++ int r = -ENODEV;
2462 +
2463 + if (*ppos)
2464 + return 0;
2465 +
2466 + s = kmalloc(sizeof(*s), GFP_KERNEL);
2467 ++
2468 + if (!s)
2469 + return -ENOMEM;
2470 +
2471 + trace_seq_init(s);
2472 +
2473 +- print_event_filter(call, s);
2474 +- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
2475 ++ mutex_lock(&event_mutex);
2476 ++ call = event_file_data(filp);
2477 ++ if (call)
2478 ++ print_event_filter(call, s);
2479 ++ mutex_unlock(&event_mutex);
2480 ++
2481 ++ if (call)
2482 ++ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
2483 +
2484 + kfree(s);
2485 +
2486 +@@ -1014,9 +1043,9 @@ static ssize_t
2487 + event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2488 + loff_t *ppos)
2489 + {
2490 +- struct ftrace_event_call *call = filp->private_data;
2491 ++ struct ftrace_event_call *call;
2492 + char *buf;
2493 +- int err;
2494 ++ int err = -ENODEV;
2495 +
2496 + if (cnt >= PAGE_SIZE)
2497 + return -EINVAL;
2498 +@@ -1031,7 +1060,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
2499 + }
2500 + buf[cnt] = '\0';
2501 +
2502 +- err = apply_event_filter(call, buf);
2503 ++ mutex_lock(&event_mutex);
2504 ++ call = event_file_data(filp);
2505 ++ if (call)
2506 ++ err = apply_event_filter(call, buf);
2507 ++ mutex_unlock(&event_mutex);
2508 ++
2509 + free_page((unsigned long) buf);
2510 + if (err < 0)
2511 + return err;
2512 +@@ -1253,10 +1287,9 @@ static const struct file_operations ftrace_set_event_fops = {
2513 + };
2514 +
2515 + static const struct file_operations ftrace_enable_fops = {
2516 +- .open = tracing_open_generic_file,
2517 ++ .open = tracing_open_generic,
2518 + .read = event_enable_read,
2519 + .write = event_enable_write,
2520 +- .release = tracing_release_generic_file,
2521 + .llseek = default_llseek,
2522 + };
2523 +
2524 +@@ -1268,7 +1301,6 @@ static const struct file_operations ftrace_event_format_fops = {
2525 + };
2526 +
2527 + static const struct file_operations ftrace_event_id_fops = {
2528 +- .open = tracing_open_generic,
2529 + .read = event_id_read,
2530 + .llseek = default_llseek,
2531 + };
2532 +@@ -1516,8 +1548,8 @@ event_create_dir(struct dentry *parent,
2533 +
2534 + #ifdef CONFIG_PERF_EVENTS
2535 + if (call->event.type && call->class->reg)
2536 +- trace_create_file("id", 0444, file->dir, call,
2537 +- id);
2538 ++ trace_create_file("id", 0444, file->dir,
2539 ++ (void *)(long)call->event.type, id);
2540 + #endif
2541 +
2542 + /*
2543 +@@ -1542,33 +1574,16 @@ event_create_dir(struct dentry *parent,
2544 + return 0;
2545 + }
2546 +
2547 +-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
2548 +-{
2549 +- if (!dir)
2550 +- return;
2551 +-
2552 +- if (!--dir->nr_events) {
2553 +- debugfs_remove_recursive(dir->entry);
2554 +- list_del(&dir->list);
2555 +- __put_system_dir(dir);
2556 +- }
2557 +-}
2558 +-
2559 + static void remove_event_from_tracers(struct ftrace_event_call *call)
2560 + {
2561 + struct ftrace_event_file *file;
2562 + struct trace_array *tr;
2563 +
2564 + do_for_each_event_file_safe(tr, file) {
2565 +-
2566 + if (file->event_call != call)
2567 + continue;
2568 +
2569 +- list_del(&file->list);
2570 +- debugfs_remove_recursive(file->dir);
2571 +- remove_subsystem(file->system);
2572 +- kmem_cache_free(file_cachep, file);
2573 +-
2574 ++ remove_event_file_dir(file);
2575 + /*
2576 + * The do_for_each_event_file_safe() is
2577 + * a double loop. After finding the call for this
2578 +@@ -1720,16 +1735,47 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
2579 + destroy_preds(call);
2580 + }
2581 +
2582 ++static int probe_remove_event_call(struct ftrace_event_call *call)
2583 ++{
2584 ++ struct trace_array *tr;
2585 ++ struct ftrace_event_file *file;
2586 ++
2587 ++#ifdef CONFIG_PERF_EVENTS
2588 ++ if (call->perf_refcount)
2589 ++ return -EBUSY;
2590 ++#endif
2591 ++ do_for_each_event_file(tr, file) {
2592 ++ if (file->event_call != call)
2593 ++ continue;
2594 ++ /*
2595 ++ * We can't rely on the ftrace_event_enable_disable(enable => 0)
2596 ++ * call we are about to make; FTRACE_EVENT_FL_SOFT_MODE can
2597 ++ * suppress TRACE_REG_UNREGISTER.
2598 ++ */
2599 ++ if (file->flags & FTRACE_EVENT_FL_ENABLED)
2600 ++ return -EBUSY;
2601 ++ break;
2602 ++ } while_for_each_event_file();
2603 ++
2604 ++ __trace_remove_event_call(call);
2605 ++
2606 ++ return 0;
2607 ++}
2608 ++
2609 + /* Remove an event_call */
2610 +-void trace_remove_event_call(struct ftrace_event_call *call)
2611 ++int trace_remove_event_call(struct ftrace_event_call *call)
2612 + {
2613 ++ int ret;
2614 ++
2615 + mutex_lock(&trace_types_lock);
2616 + mutex_lock(&event_mutex);
2617 + down_write(&trace_event_sem);
2618 +- __trace_remove_event_call(call);
2619 ++ ret = probe_remove_event_call(call);
2620 + up_write(&trace_event_sem);
2621 + mutex_unlock(&event_mutex);
2622 + mutex_unlock(&trace_types_lock);
2623 ++
2624 ++ return ret;
2625 + }
2626 +
2627 + #define for_each_event(event, start, end) \
2628 +@@ -2301,12 +2347,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
2629 + {
2630 + struct ftrace_event_file *file, *next;
2631 +
2632 +- list_for_each_entry_safe(file, next, &tr->events, list) {
2633 +- list_del(&file->list);
2634 +- debugfs_remove_recursive(file->dir);
2635 +- remove_subsystem(file->system);
2636 +- kmem_cache_free(file_cachep, file);
2637 +- }
2638 ++ list_for_each_entry_safe(file, next, &tr->events, list)
2639 ++ remove_event_file_dir(file);
2640 + }
2641 +
2642 + static void
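Several handlers above stop caching the event pointer at open() time and instead re-read inode->i_private under event_mutex on every operation, because remove_event_file_dir() can clear it while the file is still open. The shape of that lock-recheck-use pattern in a self-contained userspace form, with a pthread mutex standing in for event_mutex and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *the_private;	/* stands in for inode->i_private */

/* Writer side: tear the object down and clear the published pointer. */
static void remove_event(void)
{
	pthread_mutex_lock(&event_mutex);
	the_private = NULL;
	pthread_mutex_unlock(&event_mutex);
}

/* Reader side: re-read under the lock, fail cleanly if it vanished. */
static int event_read(char *out)
{
	int ret = -ENODEV;

	pthread_mutex_lock(&event_mutex);
	if (the_private) {
		*out = *(char *)the_private;	/* safe: can't vanish here */
		ret = 0;
	}
	pthread_mutex_unlock(&event_mutex);
	return ret;
}

int main(void)
{
	static char payload = 'x';
	char c;

	the_private = &payload;
	printf("before removal: %d\n", event_read(&c));
	remove_event();
	printf("after removal:  %d\n", event_read(&c));
	return 0;
}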
2643 +diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
2644 +index e1b653f..0a1edc6 100644
2645 +--- a/kernel/trace/trace_events_filter.c
2646 ++++ b/kernel/trace/trace_events_filter.c
2647 +@@ -631,17 +631,15 @@ static void append_filter_err(struct filter_parse_state *ps,
2648 + free_page((unsigned long) buf);
2649 + }
2650 +
2651 ++/* caller must hold event_mutex */
2652 + void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
2653 + {
2654 +- struct event_filter *filter;
2655 ++ struct event_filter *filter = call->filter;
2656 +
2657 +- mutex_lock(&event_mutex);
2658 +- filter = call->filter;
2659 + if (filter && filter->filter_string)
2660 + trace_seq_printf(s, "%s\n", filter->filter_string);
2661 + else
2662 + trace_seq_printf(s, "none\n");
2663 +- mutex_unlock(&event_mutex);
2664 + }
2665 +
2666 + void print_subsystem_event_filter(struct event_subsystem *system,
2667 +@@ -1835,23 +1833,22 @@ static int create_system_filter(struct event_subsystem *system,
2668 + return err;
2669 + }
2670 +
2671 ++/* caller must hold event_mutex */
2672 + int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
2673 + {
2674 + struct event_filter *filter;
2675 +- int err = 0;
2676 +-
2677 +- mutex_lock(&event_mutex);
2678 ++ int err;
2679 +
2680 + if (!strcmp(strstrip(filter_string), "0")) {
2681 + filter_disable(call);
2682 + filter = call->filter;
2683 + if (!filter)
2684 +- goto out_unlock;
2685 ++ return 0;
2686 + RCU_INIT_POINTER(call->filter, NULL);
2687 + /* Make sure the filter is not being used */
2688 + synchronize_sched();
2689 + __free_filter(filter);
2690 +- goto out_unlock;
2691 ++ return 0;
2692 + }
2693 +
2694 + err = create_filter(call, filter_string, true, &filter);
2695 +@@ -1878,8 +1875,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
2696 + __free_filter(tmp);
2697 + }
2698 + }
2699 +-out_unlock:
2700 +- mutex_unlock(&event_mutex);
2701 +
2702 + return err;
2703 + }
2704 +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
2705 +index 9f46e98..64abc8c 100644
2706 +--- a/kernel/trace/trace_kprobe.c
2707 ++++ b/kernel/trace/trace_kprobe.c
2708 +@@ -90,7 +90,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
2709 + }
2710 +
2711 + static int register_probe_event(struct trace_probe *tp);
2712 +-static void unregister_probe_event(struct trace_probe *tp);
2713 ++static int unregister_probe_event(struct trace_probe *tp);
2714 +
2715 + static DEFINE_MUTEX(probe_lock);
2716 + static LIST_HEAD(probe_list);
2717 +@@ -281,6 +281,8 @@ trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
2718 + static int
2719 + disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2720 + {
2721 ++ struct ftrace_event_file **old = NULL;
2722 ++ int wait = 0;
2723 + int ret = 0;
2724 +
2725 + mutex_lock(&probe_enable_lock);
2726 +@@ -314,10 +316,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2727 + }
2728 +
2729 + rcu_assign_pointer(tp->files, new);
2730 +-
2731 +- /* Make sure the probe is done with old files */
2732 +- synchronize_sched();
2733 +- kfree(old);
2734 ++ wait = 1;
2735 + } else
2736 + tp->flags &= ~TP_FLAG_PROFILE;
2737 +
2738 +@@ -326,11 +325,25 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
2739 + disable_kretprobe(&tp->rp);
2740 + else
2741 + disable_kprobe(&tp->rp.kp);
2742 ++ wait = 1;
2743 + }
2744 +
2745 + out_unlock:
2746 + mutex_unlock(&probe_enable_lock);
2747 +
2748 ++ if (wait) {
2749 ++ /*
2750 ++ * Synchronize with kprobe_trace_func/kretprobe_trace_func
2751 ++ * to ensure disabled (all running handlers are finished).
2752 ++ * This is not only for kfree(), but also the caller,
2753 ++ * trace_remove_event_call() supposes it for releasing
2754 ++ * event_call related objects, which will be accessed in
2755 ++ * the kprobe_trace_func/kretprobe_trace_func.
2756 ++ */
2757 ++ synchronize_sched();
2758 ++ kfree(old); /* Ignored if link == NULL */
2759 ++ }
2760 ++
2761 + return ret;
2762 + }
2763 +
2764 +@@ -398,9 +411,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
2765 + if (trace_probe_is_enabled(tp))
2766 + return -EBUSY;
2767 +
2768 ++ /* Will fail if probe is being used by ftrace or perf */
2769 ++ if (unregister_probe_event(tp))
2770 ++ return -EBUSY;
2771 ++
2772 + __unregister_trace_probe(tp);
2773 + list_del(&tp->list);
2774 +- unregister_probe_event(tp);
2775 +
2776 + return 0;
2777 + }
2778 +@@ -679,7 +695,9 @@ static int release_all_trace_probes(void)
2779 + /* TODO: Use batch unregistration */
2780 + while (!list_empty(&probe_list)) {
2781 + tp = list_entry(probe_list.next, struct trace_probe, list);
2782 +- unregister_trace_probe(tp);
2783 ++ ret = unregister_trace_probe(tp);
2784 ++ if (ret)
2785 ++ goto end;
2786 + free_trace_probe(tp);
2787 + }
2788 +
2789 +@@ -1312,11 +1330,15 @@ static int register_probe_event(struct trace_probe *tp)
2790 + return ret;
2791 + }
2792 +
2793 +-static void unregister_probe_event(struct trace_probe *tp)
2794 ++static int unregister_probe_event(struct trace_probe *tp)
2795 + {
2796 ++ int ret;
2797 ++
2798 + /* tp->event is unregistered in trace_remove_event_call() */
2799 +- trace_remove_event_call(&tp->call);
2800 +- kfree(tp->call.print_fmt);
2801 ++ ret = trace_remove_event_call(&tp->call);
2802 ++ if (!ret)
2803 ++ kfree(tp->call.print_fmt);
2804 ++ return ret;
2805 + }
2806 +
2807 + /* Make a debugfs interface for controlling probe points */
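The disable path above defers its kfree() until after synchronize_sched(), so every handler that might still hold the old files array has finished before the memory is released. The publish/wait/free sequence in general form, as a hedged kernel-style sketch with illustrative names; real code must use the RCU flavor matching its read side (here sched, since ftrace handlers run with preemption disabled):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct files_list { int nr; /* ... */ };

static struct files_list __rcu *active;

static void replace_files(struct files_list *new)
{
	struct files_list *old;

	old = rcu_dereference_protected(active, 1);	/* update side */
	rcu_assign_pointer(active, new);	/* publish the new array */

	synchronize_sched();	/* wait out every running handler */
	kfree(old);		/* now nothing can still see it */
}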
2808 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
2809 +index d5d0cd3..6fd72b7 100644
2810 +--- a/kernel/trace/trace_uprobe.c
2811 ++++ b/kernel/trace/trace_uprobe.c
2812 +@@ -70,7 +70,7 @@ struct trace_uprobe {
2813 + (sizeof(struct probe_arg) * (n)))
2814 +
2815 + static int register_uprobe_event(struct trace_uprobe *tu);
2816 +-static void unregister_uprobe_event(struct trace_uprobe *tu);
2817 ++static int unregister_uprobe_event(struct trace_uprobe *tu);
2818 +
2819 + static DEFINE_MUTEX(uprobe_lock);
2820 + static LIST_HEAD(uprobe_list);
2821 +@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
2822 + }
2823 +
2824 + /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
2825 +-static void unregister_trace_uprobe(struct trace_uprobe *tu)
2826 ++static int unregister_trace_uprobe(struct trace_uprobe *tu)
2827 + {
2828 ++ int ret;
2829 ++
2830 ++ ret = unregister_uprobe_event(tu);
2831 ++ if (ret)
2832 ++ return ret;
2833 ++
2834 + list_del(&tu->list);
2835 +- unregister_uprobe_event(tu);
2836 + free_trace_uprobe(tu);
2837 ++ return 0;
2838 + }
2839 +
2840 + /* Register a trace_uprobe and probe_event */
2841 +@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
2842 +
2843 + /* register as an event */
2844 + old_tp = find_probe_event(tu->call.name, tu->call.class->system);
2845 +- if (old_tp)
2846 ++ if (old_tp) {
2847 + /* delete old event */
2848 +- unregister_trace_uprobe(old_tp);
2849 ++ ret = unregister_trace_uprobe(old_tp);
2850 ++ if (ret)
2851 ++ goto end;
2852 ++ }
2853 +
2854 + ret = register_uprobe_event(tu);
2855 + if (ret) {
2856 +@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
2857 + group = UPROBE_EVENT_SYSTEM;
2858 +
2859 + if (is_delete) {
2860 ++ int ret;
2861 ++
2862 + if (!event) {
2863 + pr_info("Delete command needs an event name.\n");
2864 + return -EINVAL;
2865 +@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
2866 + return -ENOENT;
2867 + }
2868 + /* delete an event */
2869 +- unregister_trace_uprobe(tu);
2870 ++ ret = unregister_trace_uprobe(tu);
2871 + mutex_unlock(&uprobe_lock);
2872 +- return 0;
2873 ++ return ret;
2874 + }
2875 +
2876 + if (argc < 2) {
2877 +@@ -408,16 +419,20 @@ fail_address_parse:
2878 + return ret;
2879 + }
2880 +
2881 +-static void cleanup_all_probes(void)
2882 ++static int cleanup_all_probes(void)
2883 + {
2884 + struct trace_uprobe *tu;
2885 ++ int ret = 0;
2886 +
2887 + mutex_lock(&uprobe_lock);
2888 + while (!list_empty(&uprobe_list)) {
2889 + tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
2890 +- unregister_trace_uprobe(tu);
2891 ++ ret = unregister_trace_uprobe(tu);
2892 ++ if (ret)
2893 ++ break;
2894 + }
2895 + mutex_unlock(&uprobe_lock);
2896 ++ return ret;
2897 + }
2898 +
2899 + /* Probes listing interfaces */
2900 +@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
2901 +
2902 + static int probes_open(struct inode *inode, struct file *file)
2903 + {
2904 +- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
2905 +- cleanup_all_probes();
2906 ++ int ret;
2907 ++
2908 ++ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2909 ++ ret = cleanup_all_probes();
2910 ++ if (ret)
2911 ++ return ret;
2912 ++ }
2913 +
2914 + return seq_open(file, &probes_seq_op);
2915 + }
2916 +@@ -970,12 +990,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
2917 + return ret;
2918 + }
2919 +
2920 +-static void unregister_uprobe_event(struct trace_uprobe *tu)
2921 ++static int unregister_uprobe_event(struct trace_uprobe *tu)
2922 + {
2923 ++ int ret;
2924 ++
2925 + /* tu->event is unregistered in trace_remove_event_call() */
2926 +- trace_remove_event_call(&tu->call);
2927 ++ ret = trace_remove_event_call(&tu->call);
2928 ++ if (ret)
2929 ++ return ret;
2930 + kfree(tu->call.print_fmt);
2931 + tu->call.print_fmt = NULL;
2932 ++ return 0;
2933 + }
2934 +
2935 + /* Make a trace interface for controling probe points */
2936 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2937 +index 55a42f9..5b4328d 100644
2938 +--- a/net/mac80211/mlme.c
2939 ++++ b/net/mac80211/mlme.c
2940 +@@ -31,10 +31,12 @@
2941 + #include "led.h"
2942 +
2943 + #define IEEE80211_AUTH_TIMEOUT (HZ / 5)
2944 ++#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
2945 + #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
2946 + #define IEEE80211_AUTH_MAX_TRIES 3
2947 + #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
2948 + #define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
2949 ++#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
2950 + #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
2951 + #define IEEE80211_ASSOC_MAX_TRIES 3
2952 +
2953 +@@ -3470,10 +3472,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
2954 +
2955 + if (tx_flags == 0) {
2956 + auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
2957 +- ifmgd->auth_data->timeout_started = true;
2958 ++ auth_data->timeout_started = true;
2959 + run_again(ifmgd, auth_data->timeout);
2960 + } else {
2961 +- auth_data->timeout_started = false;
2962 ++ auth_data->timeout =
2963 ++ round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
2964 ++ auth_data->timeout_started = true;
2965 ++ run_again(ifmgd, auth_data->timeout);
2966 + }
2967 +
2968 + return 0;
2969 +@@ -3510,7 +3515,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
2970 + assoc_data->timeout_started = true;
2971 + run_again(&sdata->u.mgd, assoc_data->timeout);
2972 + } else {
2973 +- assoc_data->timeout_started = false;
2974 ++ assoc_data->timeout =
2975 ++ round_jiffies_up(jiffies +
2976 ++ IEEE80211_ASSOC_TIMEOUT_LONG);
2977 ++ assoc_data->timeout_started = true;
2978 ++ run_again(&sdata->u.mgd, assoc_data->timeout);
2979 + }
2980 +
2981 + return 0;
2982 +diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h
2983 +index ff8c434..f924dd2 100644
2984 +--- a/net/nfc/llcp.h
2985 ++++ b/net/nfc/llcp.h
2986 +@@ -19,6 +19,7 @@
2987 +
2988 + enum llcp_state {
2989 + LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
2990 ++ LLCP_CONNECTING,
2991 + LLCP_CLOSED,
2992 + LLCP_BOUND,
2993 + LLCP_LISTEN,
2994 +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
2995 +index 380253e..7522c37 100644
2996 +--- a/net/nfc/llcp_sock.c
2997 ++++ b/net/nfc/llcp_sock.c
2998 +@@ -571,7 +571,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
2999 + if (sk->sk_shutdown == SHUTDOWN_MASK)
3000 + mask |= POLLHUP;
3001 +
3002 +- if (sock_writeable(sk))
3003 ++ if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
3004 + mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
3005 + else
3006 + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
3007 +@@ -722,14 +722,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
3008 + if (ret)
3009 + goto sock_unlink;
3010 +
3011 ++ sk->sk_state = LLCP_CONNECTING;
3012 ++
3013 + ret = sock_wait_state(sk, LLCP_CONNECTED,
3014 + sock_sndtimeo(sk, flags & O_NONBLOCK));
3015 +- if (ret)
3016 ++ if (ret && ret != -EINPROGRESS)
3017 + goto sock_unlink;
3018 +
3019 + release_sock(sk);
3020 +
3021 +- return 0;
3022 ++ return ret;
3023 +
3024 + sock_unlink:
3025 + nfc_llcp_put_ssap(local, llcp_sock->ssap);