Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: /
Date: Fri, 04 Mar 2016 00:16:38 +0000
Message-Id: 1457050625.6dc320cec290ffd2f75628c4e3cacdd1a6a84ddd.mpagano@gentoo
1 commit: 6dc320cec290ffd2f75628c4e3cacdd1a6a84ddd
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Mar 4 00:17:05 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Mar 4 00:17:05 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6dc320ce
7
8 Linux patch 3.14.63
9
10 0000_README | 4 +
11 1062_linux-3.14.63.patch | 4346 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 4350 insertions(+)
13
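Editor's note: the commit registers the new patch in 0000_README and ships it as 1062_linux-3.14.63.patch; the numeric filename prefix encodes the order in which the genpatches series is applied on top of a vanilla kernel tree. As a rough illustration only — the directory layout, the file names, and the use of patch -p1 below are assumptions for the sketch, not part of this commit — a series like this could be applied as follows:

#!/usr/bin/env python3
"""Illustrative sketch: apply a genpatches-style series in numeric order.

Assumptions (not part of the commit above): patches live in ./patches,
the kernel tree is ./linux-3.14, and each patch applies with -p1, as
upstream stable patches such as 1062_linux-3.14.63.patch conventionally do.
"""
import re
import subprocess
import sys
from pathlib import Path

PATCH_DIR = Path("patches")        # hypothetical checkout of proj/linux-patches
KERNEL_TREE = Path("linux-3.14")   # hypothetical unpacked kernel source tree

def numbered_patches(patch_dir):
    """Return *.patch files whose names start with a number, sorted by it."""
    entries = []
    for p in patch_dir.glob("*.patch"):
        m = re.match(r"(\d+)_", p.name)
        if m:
            entries.append((int(m.group(1)), p))
    return [p for _, p in sorted(entries)]

def main():
    for patch in numbered_patches(PATCH_DIR):
        print("Applying " + patch.name)
        # Strip one leading path component (-p1), as kernel patches expect.
        result = subprocess.run(
            ["patch", "-p1", "-s", "-i", str(patch.resolve())],
            cwd=KERNEL_TREE,
        )
        if result.returncode != 0:
            print(patch.name + " failed to apply", file=sys.stderr)
            return 1
    return 0

if __name__ == "__main__":
    sys.exit(main())

In practice the gentoo-sources ebuilds drive this through the kernel eclasses rather than a standalone script; the sketch above only shows the numeric ordering convention that the 1062_ prefix implies.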
14 diff --git a/0000_README b/0000_README
15 index 705f2d2..1173db7 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -290,6 +290,10 @@ Patch: 1061_linux-3.14.62.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.14.62
21
22 +Patch: 1062_linux-3.14.63.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.14.63
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1062_linux-3.14.63.patch b/1062_linux-3.14.63.patch
31 new file mode 100644
32 index 0000000..5dcb8ea
33 --- /dev/null
34 +++ b/1062_linux-3.14.63.patch
35 @@ -0,0 +1,4346 @@
36 +diff --git a/Makefile b/Makefile
37 +index b738f644c71e..0843ef4cc0a4 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 3
42 + PATCHLEVEL = 14
43 +-SUBLEVEL = 62
44 ++SUBLEVEL = 63
45 + EXTRAVERSION =
46 + NAME = Remembering Coco
47 +
48 +diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
49 +index e550b117ec4f..2d6a36ea8aaf 100644
50 +--- a/arch/arc/kernel/unwind.c
51 ++++ b/arch/arc/kernel/unwind.c
52 +@@ -986,42 +986,13 @@ int arc_unwind(struct unwind_frame_info *frame)
53 + (const u8 *)(fde +
54 + 1) +
55 + *fde, ptrType);
56 +- if (pc >= endLoc)
57 ++ if (pc >= endLoc) {
58 + fde = NULL;
59 +- } else
60 +- fde = NULL;
61 +- }
62 +- if (fde == NULL) {
63 +- for (fde = table->address, tableSize = table->size;
64 +- cie = NULL, tableSize > sizeof(*fde)
65 +- && tableSize - sizeof(*fde) >= *fde;
66 +- tableSize -= sizeof(*fde) + *fde,
67 +- fde += 1 + *fde / sizeof(*fde)) {
68 +- cie = cie_for_fde(fde, table);
69 +- if (cie == &bad_cie) {
70 + cie = NULL;
71 +- break;
72 + }
73 +- if (cie == NULL
74 +- || cie == &not_fde
75 +- || (ptrType = fde_pointer_type(cie)) < 0)
76 +- continue;
77 +- ptr = (const u8 *)(fde + 2);
78 +- startLoc = read_pointer(&ptr,
79 +- (const u8 *)(fde + 1) +
80 +- *fde, ptrType);
81 +- if (!startLoc)
82 +- continue;
83 +- if (!(ptrType & DW_EH_PE_indirect))
84 +- ptrType &=
85 +- DW_EH_PE_FORM | DW_EH_PE_signed;
86 +- endLoc =
87 +- startLoc + read_pointer(&ptr,
88 +- (const u8 *)(fde +
89 +- 1) +
90 +- *fde, ptrType);
91 +- if (pc >= startLoc && pc < endLoc)
92 +- break;
93 ++ } else {
94 ++ fde = NULL;
95 ++ cie = NULL;
96 + }
97 + }
98 + }
99 +diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi
100 +index 7525982262ac..2897c1ac47d8 100644
101 +--- a/arch/arm/boot/dts/wm8650.dtsi
102 ++++ b/arch/arm/boot/dts/wm8650.dtsi
103 +@@ -187,6 +187,15 @@
104 + interrupts = <43>;
105 + };
106 +
107 ++ sdhc@d800a000 {
108 ++ compatible = "wm,wm8505-sdhc";
109 ++ reg = <0xd800a000 0x400>;
110 ++ interrupts = <20>, <21>;
111 ++ clocks = <&clksdhc>;
112 ++ bus-width = <4>;
113 ++ sdon-inverted;
114 ++ };
115 ++
116 + fb: fb@d8050800 {
117 + compatible = "wm,wm8505-fb";
118 + reg = <0xd8050800 0x200>;
119 +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
120 +index 03a2db58b22d..ba5ce99c021d 100644
121 +--- a/arch/mips/kvm/kvm_locore.S
122 ++++ b/arch/mips/kvm/kvm_locore.S
123 +@@ -159,9 +159,11 @@ FEXPORT(__kvm_mips_vcpu_run)
124 +
125 + FEXPORT(__kvm_mips_load_asid)
126 + /* Set the ASID for the Guest Kernel */
127 +- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
128 +- /* addresses shift to 0x80000000 */
129 +- bltz t0, 1f /* If kernel */
130 ++ PTR_L t0, VCPU_COP0(k1)
131 ++ LONG_L t0, COP0_STATUS(t0)
132 ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL
133 ++ xori t0, KSU_USER
134 ++ bnez t0, 1f /* If kernel */
135 + INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
136 + INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
137 + 1:
138 +@@ -438,9 +440,11 @@ __kvm_mips_return_to_guest:
139 + mtc0 t0, CP0_EPC
140 +
141 + /* Set the ASID for the Guest Kernel */
142 +- INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
143 +- /* addresses shift to 0x80000000 */
144 +- bltz t0, 1f /* If kernel */
145 ++ PTR_L t0, VCPU_COP0(k1)
146 ++ LONG_L t0, COP0_STATUS(t0)
147 ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL
148 ++ xori t0, KSU_USER
149 ++ bnez t0, 1f /* If kernel */
150 + INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
151 + INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
152 + 1:
153 +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
154 +index 897c605263f2..12d850b68763 100644
155 +--- a/arch/mips/kvm/kvm_mips.c
156 ++++ b/arch/mips/kvm/kvm_mips.c
157 +@@ -313,7 +313,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
158 +
159 + if (!gebase) {
160 + err = -ENOMEM;
161 +- goto out_free_cpu;
162 ++ goto out_uninit_cpu;
163 + }
164 + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
165 + ALIGN(size, PAGE_SIZE), gebase);
166 +@@ -373,6 +373,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
167 + out_free_gebase:
168 + kfree(gebase);
169 +
170 ++out_uninit_cpu:
171 ++ kvm_vcpu_uninit(vcpu);
172 ++
173 + out_free_cpu:
174 + kfree(vcpu);
175 +
176 +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
177 +index c76f297b7149..33085819cd89 100644
178 +--- a/arch/mips/kvm/kvm_mips_emul.c
179 ++++ b/arch/mips/kvm/kvm_mips_emul.c
180 +@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
181 +
182 + base = (inst >> 21) & 0x1f;
183 + op_inst = (inst >> 16) & 0x1f;
184 +- offset = inst & 0xffff;
185 ++ offset = (int16_t)inst;
186 + cache = (inst >> 16) & 0x3;
187 + op = (inst >> 18) & 0x7;
188 +
189 +diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
190 +index 4d1ee88864e8..18c8b819b0aa 100644
191 +--- a/arch/s390/mm/extable.c
192 ++++ b/arch/s390/mm/extable.c
193 +@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
194 + int i;
195 +
196 + /* Normalize entries to being relative to the start of the section */
197 +- for (p = start, i = 0; p < finish; p++, i += 8)
198 ++ for (p = start, i = 0; p < finish; p++, i += 8) {
199 + p->insn += i;
200 ++ p->fixup += i + 4;
201 ++ }
202 + sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
203 + /* Denormalize all entries */
204 +- for (p = start, i = 0; p < finish; p++, i += 8)
205 ++ for (p = start, i = 0; p < finish; p++, i += 8) {
206 + p->insn -= i;
207 ++ p->fixup -= i + 4;
208 ++ }
209 + }
210 +
211 + #ifdef CONFIG_MODULES
212 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
213 +index 25db14a33d03..47ae8d757773 100644
214 +--- a/arch/sparc/kernel/sys_sparc_64.c
215 ++++ b/arch/sparc/kernel/sys_sparc_64.c
216 +@@ -412,7 +412,7 @@ out:
217 +
218 + SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
219 + {
220 +- int ret;
221 ++ long ret;
222 +
223 + if (personality(current->personality) == PER_LINUX32 &&
224 + personality(personality) == PER_LINUX)
225 +diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
226 +index 337518c5042a..b412c62486f0 100644
227 +--- a/arch/um/os-Linux/start_up.c
228 ++++ b/arch/um/os-Linux/start_up.c
229 +@@ -95,6 +95,8 @@ static int start_ptraced_child(void)
230 + {
231 + int pid, n, status;
232 +
233 ++ fflush(stdout);
234 ++
235 + pid = fork();
236 + if (pid == 0)
237 + ptrace_child();
238 +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
239 +index ae7d543f23ed..8894f5bc4620 100644
240 +--- a/arch/x86/platform/efi/efi.c
241 ++++ b/arch/x86/platform/efi/efi.c
242 +@@ -248,12 +248,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
243 + efi_memory_desc_t *virtual_map)
244 + {
245 + efi_status_t status;
246 ++ unsigned long flags;
247 +
248 + efi_call_phys_prelog();
249 ++
250 ++ /* Disable interrupts around EFI calls: */
251 ++ local_irq_save(flags);
252 + status = efi_call_phys4(efi_phys.set_virtual_address_map,
253 + memory_map_size, descriptor_size,
254 + descriptor_version, virtual_map);
255 ++ local_irq_restore(flags);
256 ++
257 + efi_call_phys_epilog();
258 ++
259 + return status;
260 + }
261 +
262 +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
263 +index 9ee3491e31fb..be4e7eb41674 100644
264 +--- a/arch/x86/platform/efi/efi_32.c
265 ++++ b/arch/x86/platform/efi/efi_32.c
266 +@@ -33,11 +33,10 @@
267 +
268 + /*
269 + * To make EFI call EFI runtime service in physical addressing mode we need
270 +- * prelog/epilog before/after the invocation to disable interrupt, to
271 +- * claim EFI runtime service handler exclusively and to duplicate a memory in
272 +- * low memory space say 0 - 3G.
273 ++ * prolog/epilog before/after the invocation to claim the EFI runtime service
274 ++ * handler exclusively and to duplicate a memory mapping in low memory space,
275 ++ * say 0 - 3G.
276 + */
277 +-static unsigned long efi_rt_eflags;
278 +
279 + void efi_sync_low_kernel_mappings(void) {}
280 + void __init efi_dump_pagetable(void) {}
281 +@@ -59,8 +58,6 @@ void efi_call_phys_prelog(void)
282 + {
283 + struct desc_ptr gdt_descr;
284 +
285 +- local_irq_save(efi_rt_eflags);
286 +-
287 + load_cr3(initial_page_table);
288 + __flush_tlb_all();
289 +
290 +@@ -79,8 +76,6 @@ void efi_call_phys_epilog(void)
291 +
292 + load_cr3(swapper_pg_dir);
293 + __flush_tlb_all();
294 +-
295 +- local_irq_restore(efi_rt_eflags);
296 + }
297 +
298 + void __init efi_runtime_mkexec(void)
299 +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
300 +index 666b74a09092..b1be0425c686 100644
301 +--- a/arch/x86/platform/efi/efi_64.c
302 ++++ b/arch/x86/platform/efi/efi_64.c
303 +@@ -41,7 +41,6 @@
304 + #include <asm/realmode.h>
305 +
306 + static pgd_t *save_pgd __initdata;
307 +-static unsigned long efi_flags __initdata;
308 +
309 + /*
310 + * We allocate runtime services regions bottom-up, starting from -4G, i.e.
311 +@@ -87,7 +86,6 @@ void __init efi_call_phys_prelog(void)
312 + return;
313 +
314 + early_code_mapping_set_exec(1);
315 +- local_irq_save(efi_flags);
316 +
317 + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
318 + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
319 +@@ -115,7 +113,6 @@ void __init efi_call_phys_epilog(void)
320 + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
321 + kfree(save_pgd);
322 + __flush_tlb_all();
323 +- local_irq_restore(efi_flags);
324 + early_code_mapping_set_exec(0);
325 + }
326 +
327 +diff --git a/block/partitions/mac.c b/block/partitions/mac.c
328 +index 76d8ba6379a9..bd5b91465230 100644
329 +--- a/block/partitions/mac.c
330 ++++ b/block/partitions/mac.c
331 +@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
332 + Sector sect;
333 + unsigned char *data;
334 + int slot, blocks_in_map;
335 +- unsigned secsize;
336 ++ unsigned secsize, datasize, partoffset;
337 + #ifdef CONFIG_PPC_PMAC
338 + int found_root = 0;
339 + int found_root_goodness = 0;
340 +@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
341 + }
342 + secsize = be16_to_cpu(md->block_size);
343 + put_dev_sector(sect);
344 +- data = read_part_sector(state, secsize/512, &sect);
345 ++ datasize = round_down(secsize, 512);
346 ++ data = read_part_sector(state, datasize / 512, &sect);
347 + if (!data)
348 + return -1;
349 +- part = (struct mac_partition *) (data + secsize%512);
350 ++ partoffset = secsize % 512;
351 ++ if (partoffset + sizeof(*part) > datasize)
352 ++ return -1;
353 ++ part = (struct mac_partition *) (data + partoffset);
354 + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
355 + put_dev_sector(sect);
356 + return 0; /* not a MacOS disk */
357 +diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
358 +index f8c0b8dbeb75..88bc8e6b2a54 100644
359 +--- a/crypto/async_tx/async_memcpy.c
360 ++++ b/crypto/async_tx/async_memcpy.c
361 +@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
362 + struct dmaengine_unmap_data *unmap = NULL;
363 +
364 + if (device)
365 +- unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
366 ++ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
367 +
368 + if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
369 + unsigned long dma_prep_flags = 0;
370 +diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
371 +index d05327caf69d..7eb264e65267 100644
372 +--- a/crypto/async_tx/async_pq.c
373 ++++ b/crypto/async_tx/async_pq.c
374 +@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
375 + BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
376 +
377 + if (device)
378 +- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
379 ++ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
380 +
381 + if (unmap &&
382 + (src_cnt <= dma_maxpq(device, 0) ||
383 +@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
384 + BUG_ON(disks < 4);
385 +
386 + if (device)
387 +- unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
388 ++ unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
389 +
390 + if (unmap && disks <= dma_maxpq(device, 0) &&
391 + is_dma_pq_aligned(device, offset, 0, len)) {
392 +diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
393 +index 934a84981495..8fab6275ea1f 100644
394 +--- a/crypto/async_tx/async_raid6_recov.c
395 ++++ b/crypto/async_tx/async_raid6_recov.c
396 +@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
397 + u8 *a, *b, *c;
398 +
399 + if (dma)
400 +- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
401 ++ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
402 +
403 + if (unmap) {
404 + struct device *dev = dma->dev;
405 +@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
406 + u8 *d, *s;
407 +
408 + if (dma)
409 +- unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
410 ++ unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
411 +
412 + if (unmap) {
413 + dma_addr_t dma_dest[2];
414 +diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
415 +index e1bce26cd4f9..da75777f2b3f 100644
416 +--- a/crypto/async_tx/async_xor.c
417 ++++ b/crypto/async_tx/async_xor.c
418 +@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
419 + BUG_ON(src_cnt <= 1);
420 +
421 + if (device)
422 +- unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
423 ++ unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
424 +
425 + if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
426 + struct dma_async_tx_descriptor *tx;
427 +@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
428 + BUG_ON(src_cnt <= 1);
429 +
430 + if (device)
431 +- unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
432 ++ unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
433 +
434 + if (unmap && src_cnt <= device->max_xor &&
435 + is_dma_xor_aligned(device, offset, 0, len)) {
436 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
437 +index 136803c47cdb..96e5ed188636 100644
438 +--- a/drivers/ata/libata-sff.c
439 ++++ b/drivers/ata/libata-sff.c
440 +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
441 + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
442 + {
443 + struct ata_port *ap = qc->ap;
444 +- unsigned long flags;
445 +
446 + if (ap->ops->error_handler) {
447 + if (in_wq) {
448 +- spin_lock_irqsave(ap->lock, flags);
449 +-
450 + /* EH might have kicked in while host lock is
451 + * released.
452 + */
453 +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
454 + } else
455 + ata_port_freeze(ap);
456 + }
457 +-
458 +- spin_unlock_irqrestore(ap->lock, flags);
459 + } else {
460 + if (likely(!(qc->err_mask & AC_ERR_HSM)))
461 + ata_qc_complete(qc);
462 +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
463 + }
464 + } else {
465 + if (in_wq) {
466 +- spin_lock_irqsave(ap->lock, flags);
467 + ata_sff_irq_on(ap);
468 + ata_qc_complete(qc);
469 +- spin_unlock_irqrestore(ap->lock, flags);
470 + } else
471 + ata_qc_complete(qc);
472 + }
473 +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
474 + {
475 + struct ata_link *link = qc->dev->link;
476 + struct ata_eh_info *ehi = &link->eh_info;
477 +- unsigned long flags = 0;
478 + int poll_next;
479 +
480 ++ lockdep_assert_held(ap->lock);
481 ++
482 + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
483 +
484 + /* Make sure ata_sff_qc_issue() does not throw things
485 +@@ -1112,14 +1106,6 @@ fsm_start:
486 + }
487 + }
488 +
489 +- /* Send the CDB (atapi) or the first data block (ata pio out).
490 +- * During the state transition, interrupt handler shouldn't
491 +- * be invoked before the data transfer is complete and
492 +- * hsm_task_state is changed. Hence, the following locking.
493 +- */
494 +- if (in_wq)
495 +- spin_lock_irqsave(ap->lock, flags);
496 +-
497 + if (qc->tf.protocol == ATA_PROT_PIO) {
498 + /* PIO data out protocol.
499 + * send first data block.
500 +@@ -1135,9 +1121,6 @@ fsm_start:
501 + /* send CDB */
502 + atapi_send_cdb(ap, qc);
503 +
504 +- if (in_wq)
505 +- spin_unlock_irqrestore(ap->lock, flags);
506 +-
507 + /* if polling, ata_sff_pio_task() handles the rest.
508 + * otherwise, interrupt handler takes over from here.
509 + */
510 +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
511 + u8 status;
512 + int poll_next;
513 +
514 ++ spin_lock_irq(ap->lock);
515 ++
516 + BUG_ON(ap->sff_pio_task_link == NULL);
517 + /* qc can be NULL if timeout occurred */
518 + qc = ata_qc_from_tag(ap, link->active_tag);
519 + if (!qc) {
520 + ap->sff_pio_task_link = NULL;
521 +- return;
522 ++ goto out_unlock;
523 + }
524 +
525 + fsm_start:
526 +@@ -1381,11 +1366,14 @@ fsm_start:
527 + */
528 + status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
529 + if (status & ATA_BUSY) {
530 ++ spin_unlock_irq(ap->lock);
531 + ata_msleep(ap, 2);
532 ++ spin_lock_irq(ap->lock);
533 ++
534 + status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
535 + if (status & ATA_BUSY) {
536 + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
537 +- return;
538 ++ goto out_unlock;
539 + }
540 + }
541 +
542 +@@ -1402,6 +1390,8 @@ fsm_start:
543 + */
544 + if (poll_next)
545 + goto fsm_start;
546 ++out_unlock:
547 ++ spin_unlock_irq(ap->lock);
548 + }
549 +
550 + /**
551 +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
552 +index b7695e804635..fa94fba8fa21 100644
553 +--- a/drivers/ata/sata_sil.c
554 ++++ b/drivers/ata/sata_sil.c
555 +@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev)
556 + unsigned int n, quirks = 0;
557 + unsigned char model_num[ATA_ID_PROD_LEN + 1];
558 +
559 ++ /* This controller doesn't support trim */
560 ++ dev->horkage |= ATA_HORKAGE_NOTRIM;
561 ++
562 + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
563 +
564 + for (n = 0; sil_blacklist[n].product; n++)
565 +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
566 +index 1098ed3b9b89..dc45ddb36117 100644
567 +--- a/drivers/clocksource/vt8500_timer.c
568 ++++ b/drivers/clocksource/vt8500_timer.c
569 +@@ -50,6 +50,8 @@
570 +
571 + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
572 +
573 ++#define MIN_OSCR_DELTA 16
574 ++
575 + static void __iomem *regbase;
576 +
577 + static cycle_t vt8500_timer_read(struct clocksource *cs)
578 +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
579 + cpu_relax();
580 + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
581 +
582 +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
583 ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
584 + return -ETIME;
585 +
586 + writel(1, regbase + TIMER_IER_VAL);
587 +@@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np)
588 + pr_err("%s: setup_irq failed for %s\n", __func__,
589 + clockevent.name);
590 + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
591 +- 4, 0xf0000000);
592 ++ MIN_OSCR_DELTA * 2, 0xf0000000);
593 + }
594 +
595 + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
596 +diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
597 +index 3ae48ee2f488..df79cb0bf04e 100644
598 +--- a/drivers/dma/dw/core.c
599 ++++ b/drivers/dma/dw/core.c
600 +@@ -176,7 +176,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
601 +
602 + /*----------------------------------------------------------------------*/
603 +
604 +-static inline unsigned int dwc_fast_fls(unsigned long long v)
605 ++static inline unsigned int dwc_fast_ffs(unsigned long long v)
606 + {
607 + /*
608 + * We can be a lot more clever here, but this should take care
609 +@@ -720,7 +720,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
610 + dw->data_width[dwc->dst_master]);
611 +
612 + src_width = dst_width = min_t(unsigned int, data_width,
613 +- dwc_fast_fls(src | dest | len));
614 ++ dwc_fast_ffs(src | dest | len));
615 +
616 + ctllo = DWC_DEFAULT_CTLLO(chan)
617 + | DWC_CTLL_DST_WIDTH(dst_width)
618 +@@ -799,7 +799,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
619 +
620 + switch (direction) {
621 + case DMA_MEM_TO_DEV:
622 +- reg_width = __fls(sconfig->dst_addr_width);
623 ++ reg_width = __ffs(sconfig->dst_addr_width);
624 + reg = sconfig->dst_addr;
625 + ctllo = (DWC_DEFAULT_CTLLO(chan)
626 + | DWC_CTLL_DST_WIDTH(reg_width)
627 +@@ -819,7 +819,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
628 + len = sg_dma_len(sg);
629 +
630 + mem_width = min_t(unsigned int,
631 +- data_width, dwc_fast_fls(mem | len));
632 ++ data_width, dwc_fast_ffs(mem | len));
633 +
634 + slave_sg_todev_fill_desc:
635 + desc = dwc_desc_get(dwc);
636 +@@ -859,7 +859,7 @@ slave_sg_todev_fill_desc:
637 + }
638 + break;
639 + case DMA_DEV_TO_MEM:
640 +- reg_width = __fls(sconfig->src_addr_width);
641 ++ reg_width = __ffs(sconfig->src_addr_width);
642 + reg = sconfig->src_addr;
643 + ctllo = (DWC_DEFAULT_CTLLO(chan)
644 + | DWC_CTLL_SRC_WIDTH(reg_width)
645 +@@ -879,7 +879,7 @@ slave_sg_todev_fill_desc:
646 + len = sg_dma_len(sg);
647 +
648 + mem_width = min_t(unsigned int,
649 +- data_width, dwc_fast_fls(mem | len));
650 ++ data_width, dwc_fast_ffs(mem | len));
651 +
652 + slave_sg_fromdev_fill_desc:
653 + desc = dwc_desc_get(dwc);
654 +diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
655 +index 592af5f0cf39..53587377e672 100644
656 +--- a/drivers/edac/edac_device.c
657 ++++ b/drivers/edac/edac_device.c
658 +@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
659 + */
660 + void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
661 + {
662 +- int status;
663 +-
664 + if (!edac_dev->edac_check)
665 + return;
666 +
667 +- status = cancel_delayed_work(&edac_dev->work);
668 +- if (status == 0) {
669 +- /* workq instance might be running, wait for it */
670 +- flush_workqueue(edac_workqueue);
671 +- }
672 ++ edac_dev->op_state = OP_OFFLINE;
673 ++
674 ++ cancel_delayed_work_sync(&edac_dev->work);
675 ++ flush_workqueue(edac_workqueue);
676 + }
677 +
678 + /*
679 +diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
680 +index 33edd6766344..19dc0bc9b136 100644
681 +--- a/drivers/edac/edac_mc.c
682 ++++ b/drivers/edac/edac_mc.c
683 +@@ -584,18 +584,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
684 + */
685 + static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
686 + {
687 +- int status;
688 +-
689 +- if (mci->op_state != OP_RUNNING_POLL)
690 +- return;
691 +-
692 +- status = cancel_delayed_work(&mci->work);
693 +- if (status == 0) {
694 +- edac_dbg(0, "not canceled, flush the queue\n");
695 ++ mci->op_state = OP_OFFLINE;
696 +
697 +- /* workq instance might be running, wait for it */
698 +- flush_workqueue(edac_workqueue);
699 +- }
700 ++ cancel_delayed_work_sync(&mci->work);
701 ++ flush_workqueue(edac_workqueue);
702 + }
703 +
704 + /*
705 +diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
706 +index 2cf44b4db80c..b4b38603b804 100644
707 +--- a/drivers/edac/edac_pci.c
708 ++++ b/drivers/edac/edac_pci.c
709 +@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
710 + */
711 + static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
712 + {
713 +- int status;
714 +-
715 + edac_dbg(0, "\n");
716 +
717 +- status = cancel_delayed_work(&pci->work);
718 +- if (status == 0)
719 +- flush_workqueue(edac_workqueue);
720 ++ pci->op_state = OP_OFFLINE;
721 ++
722 ++ cancel_delayed_work_sync(&pci->work);
723 ++ flush_workqueue(edac_workqueue);
724 + }
725 +
726 + /*
727 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
728 +index 9833a1b1acc1..3fc122306f1f 100644
729 +--- a/drivers/gpu/drm/ast/ast_drv.h
730 ++++ b/drivers/gpu/drm/ast/ast_drv.h
731 +@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev,
732 + int ast_fbdev_init(struct drm_device *dev);
733 + void ast_fbdev_fini(struct drm_device *dev);
734 + void ast_fbdev_set_suspend(struct drm_device *dev, int state);
735 ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
736 +
737 + struct ast_bo {
738 + struct ttm_buffer_object bo;
739 +diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
740 +index a28640f47c27..b55b6b1c9fe2 100644
741 +--- a/drivers/gpu/drm/ast/ast_fb.c
742 ++++ b/drivers/gpu/drm/ast/ast_fb.c
743 +@@ -367,3 +367,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
744 +
745 + fb_set_suspend(ast->fbdev->helper.fbdev, state);
746 + }
747 ++
748 ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
749 ++{
750 ++ ast->fbdev->helper.fbdev->fix.smem_start =
751 ++ ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
752 ++ ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
753 ++}
754 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
755 +index d830b38e54f6..c0f284230a39 100644
756 +--- a/drivers/gpu/drm/ast/ast_main.c
757 ++++ b/drivers/gpu/drm/ast/ast_main.c
758 +@@ -312,6 +312,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
759 + dev->mode_config.min_height = 0;
760 + dev->mode_config.preferred_depth = 24;
761 + dev->mode_config.prefer_shadow = 1;
762 ++ dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
763 +
764 + if (ast->chip == AST2100 ||
765 + ast->chip == AST2200 ||
766 +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
767 +index d2e56e95d886..cea916fa164b 100644
768 +--- a/drivers/gpu/drm/ast/ast_mode.c
769 ++++ b/drivers/gpu/drm/ast/ast_mode.c
770 +@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
771 + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
772 + if (ret)
773 + DRM_ERROR("failed to kmap fbcon\n");
774 ++ else
775 ++ ast_fbdev_set_base(ast, gpu_addr);
776 + }
777 + ast_bo_unreserve(bo);
778 +
779 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
780 +index 958b26dcac8a..0a9d1fd32994 100644
781 +--- a/drivers/gpu/drm/i915/intel_display.c
782 ++++ b/drivers/gpu/drm/i915/intel_display.c
783 +@@ -8821,11 +8821,21 @@ connected_sink_compute_bpp(struct intel_connector * connector,
784 + pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
785 + }
786 +
787 +- /* Clamp bpp to 8 on screens without EDID 1.4 */
788 +- if (connector->base.display_info.bpc == 0 && bpp > 24) {
789 +- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
790 +- bpp);
791 +- pipe_config->pipe_bpp = 24;
792 ++ /* Clamp bpp to default limit on screens without EDID 1.4 */
793 ++ if (connector->base.display_info.bpc == 0) {
794 ++ int type = connector->base.connector_type;
795 ++ int clamp_bpp = 24;
796 ++
797 ++ /* Fall back to 18 bpp when DP sink capability is unknown. */
798 ++ if (type == DRM_MODE_CONNECTOR_DisplayPort ||
799 ++ type == DRM_MODE_CONNECTOR_eDP)
800 ++ clamp_bpp = 18;
801 ++
802 ++ if (bpp > clamp_bpp) {
803 ++ DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
804 ++ bpp, clamp_bpp);
805 ++ pipe_config->pipe_bpp = clamp_bpp;
806 ++ }
807 + }
808 + }
809 +
810 +diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
811 +index 56a13a915155..0928c5e2bafd 100644
812 +--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
813 ++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
814 +@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
815 + cmd->command_size))
816 + return -EFAULT;
817 +
818 +- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
819 ++ reloc_info = kmalloc_array(cmd->relocs_num,
820 ++ sizeof(struct qxl_reloc_info), GFP_KERNEL);
821 + if (!reloc_info)
822 + return -ENOMEM;
823 +
824 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
825 +index 2fa3cf615a67..6a3b5f92219f 100644
826 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
827 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
828 +@@ -436,7 +436,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
829 + }
830 +
831 + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
832 +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
833 ++ if (((dev->pdev->device == 0x9802) ||
834 ++ (dev->pdev->device == 0x9805) ||
835 ++ (dev->pdev->device == 0x9806)) &&
836 + (dev->pdev->subsystem_vendor == 0x1734) &&
837 + (dev->pdev->subsystem_device == 0x11bd)) {
838 + if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
839 +@@ -447,14 +449,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
840 + }
841 + }
842 +
843 +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
844 +- if ((dev->pdev->device == 0x9805) &&
845 +- (dev->pdev->subsystem_vendor == 0x1734) &&
846 +- (dev->pdev->subsystem_device == 0x11bd)) {
847 +- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
848 +- return false;
849 +- }
850 +-
851 + return true;
852 + }
853 +
854 +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
855 +index f8b20e1c0820..614144d34aea 100644
856 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
857 ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
858 +@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
859 + struct drm_mode_config *mode_config = &dev->mode_config;
860 + struct drm_connector *connector;
861 +
862 ++ /* we can race here at startup, some boards seem to trigger
863 ++ * hotplug irqs when they shouldn't. */
864 ++ if (!rdev->mode_info.mode_config_initialized)
865 ++ return;
866 ++
867 + mutex_lock(&mode_config->mutex);
868 + if (mode_config->num_connector) {
869 + list_for_each_entry(connector, &mode_config->connector_list, head)
870 +diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
871 +index 0b00de55b2a4..9a559140e4a3 100644
872 +--- a/drivers/gpu/drm/radeon/radeon_pm.c
873 ++++ b/drivers/gpu/drm/radeon/radeon_pm.c
874 +@@ -915,8 +915,6 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
875 +
876 + /* update display watermarks based on new power state */
877 + radeon_bandwidth_update(rdev);
878 +- /* update displays */
879 +- radeon_dpm_display_configuration_changed(rdev);
880 +
881 + rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
882 + rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
883 +@@ -936,6 +934,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
884 +
885 + radeon_dpm_post_set_power_state(rdev);
886 +
887 ++ /* update displays */
888 ++ radeon_dpm_display_configuration_changed(rdev);
889 ++
890 + if (rdev->asic->dpm.force_performance_level) {
891 + if (rdev->pm.dpm.thermal_active) {
892 + enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
893 +@@ -1364,8 +1365,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
894 + ret = device_create_file(rdev->dev, &dev_attr_power_method);
895 + if (ret)
896 + DRM_ERROR("failed to create device file for power method\n");
897 +- if (!ret)
898 +- rdev->pm.sysfs_initialized = true;
899 ++ rdev->pm.sysfs_initialized = true;
900 + }
901 +
902 + mutex_lock(&rdev->pm.mutex);
903 +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
904 +index c0625805cdd7..a1d684266549 100644
905 +--- a/drivers/gpu/drm/radeon/radeon_sa.c
906 ++++ b/drivers/gpu/drm/radeon/radeon_sa.c
907 +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
908 + /* see if we can skip over some allocations */
909 + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
910 +
911 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
912 ++ radeon_fence_ref(fences[i]);
913 ++
914 + spin_unlock(&sa_manager->wq.lock);
915 + r = radeon_fence_wait_any(rdev, fences, false);
916 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
917 ++ radeon_fence_unref(&fences[i]);
918 + spin_lock(&sa_manager->wq.lock);
919 + /* if we have nothing to wait for block */
920 + if (r == -ENOENT && block) {
921 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
922 +index 45a9a03efc06..2d50433b8c72 100644
923 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
924 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
925 +@@ -623,7 +623,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
926 + 0, PAGE_SIZE,
927 + PCI_DMA_BIDIRECTIONAL);
928 + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
929 +- while (--i) {
930 ++ while (i--) {
931 + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
932 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
933 + gtt->ttm.dma_address[i] = 0;
934 +diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
935 +index 8fcb932a3a55..aaefb10aa09e 100644
936 +--- a/drivers/gpu/drm/radeon/rv770_dpm.c
937 ++++ b/drivers/gpu/drm/radeon/rv770_dpm.c
938 +@@ -1415,7 +1415,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
939 + int rv770_set_sw_state(struct radeon_device *rdev)
940 + {
941 + if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
942 +- return -EINVAL;
943 ++ DRM_ERROR("rv770_set_sw_state failed\n");
944 + return 0;
945 + }
946 +
947 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
948 +index 0771dcbf9ed0..7c48070cf9d8 100644
949 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
950 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
951 +@@ -25,6 +25,7 @@
952 + *
953 + **************************************************************************/
954 + #include <linux/module.h>
955 ++#include <linux/console.h>
956 +
957 + #include <drm/drmP.h>
958 + #include "vmwgfx_drv.h"
959 +@@ -1383,6 +1384,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
960 + static int __init vmwgfx_init(void)
961 + {
962 + int ret;
963 ++
964 ++#ifdef CONFIG_VGA_CONSOLE
965 ++ if (vgacon_text_force())
966 ++ return -EINVAL;
967 ++#endif
968 ++
969 + ret = drm_pci_init(&driver, &vmw_pci_driver);
970 + if (ret)
971 + DRM_ERROR("Failed initializing DRM.\n");
972 +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
973 +index af0259708358..bbb554d586d4 100644
974 +--- a/drivers/gpu/vga/vgaarb.c
975 ++++ b/drivers/gpu/vga/vgaarb.c
976 +@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
977 + set_current_state(interruptible ?
978 + TASK_INTERRUPTIBLE :
979 + TASK_UNINTERRUPTIBLE);
980 +- if (signal_pending(current)) {
981 +- rc = -EINTR;
982 ++ if (interruptible && signal_pending(current)) {
983 ++ __set_current_state(TASK_RUNNING);
984 ++ remove_wait_queue(&vga_wait_queue, &wait);
985 ++ rc = -ERESTARTSYS;
986 + break;
987 + }
988 + schedule();
989 +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
990 +index 126516414c11..44223f5d92d8 100644
991 +--- a/drivers/hwmon/ads1015.c
992 ++++ b/drivers/hwmon/ads1015.c
993 +@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
994 + struct ads1015_data *data = i2c_get_clientdata(client);
995 + unsigned int pga = data->channel_data[channel].pga;
996 + int fullscale = fullscale_table[pga];
997 +- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
998 ++ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
999 +
1000 + return DIV_ROUND_CLOSEST(reg * fullscale, mask);
1001 + }
1002 +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1003 +index 095bb046e2c8..875348699e6e 100644
1004 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
1005 ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1006 +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
1007 + error = l2t_send(tdev, skb, l2e);
1008 + if (error < 0)
1009 + kfree_skb(skb);
1010 +- return error;
1011 ++ return error < 0 ? error : 0;
1012 + }
1013 +
1014 + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
1015 +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
1016 + error = cxgb3_ofld_send(tdev, skb);
1017 + if (error < 0)
1018 + kfree_skb(skb);
1019 +- return error;
1020 ++ return error < 0 ? error : 0;
1021 + }
1022 +
1023 + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
1024 +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
1025 +index dabb697b1c2a..48ba1c3e945a 100644
1026 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
1027 ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
1028 +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1029 + struct qib_ibdev *dev = to_idev(ibqp->device);
1030 + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
1031 + struct qib_mcast *mcast = NULL;
1032 +- struct qib_mcast_qp *p, *tmp;
1033 ++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
1034 + struct rb_node *n;
1035 + int last = 0;
1036 + int ret;
1037 +
1038 +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
1039 +- ret = -EINVAL;
1040 +- goto bail;
1041 +- }
1042 ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
1043 ++ return -EINVAL;
1044 +
1045 + spin_lock_irq(&ibp->lock);
1046 +
1047 +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1048 + while (1) {
1049 + if (n == NULL) {
1050 + spin_unlock_irq(&ibp->lock);
1051 +- ret = -EINVAL;
1052 +- goto bail;
1053 ++ return -EINVAL;
1054 + }
1055 +
1056 + mcast = rb_entry(n, struct qib_mcast, rb_node);
1057 +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1058 + */
1059 + list_del_rcu(&p->list);
1060 + mcast->n_attached--;
1061 ++ delp = p;
1062 +
1063 + /* If this was the last attached QP, remove the GID too. */
1064 + if (list_empty(&mcast->qp_list)) {
1065 +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1066 + }
1067 +
1068 + spin_unlock_irq(&ibp->lock);
1069 ++ /* QP not attached */
1070 ++ if (!delp)
1071 ++ return -EINVAL;
1072 ++ /*
1073 ++ * Wait for any list walkers to finish before freeing the
1074 ++ * list element.
1075 ++ */
1076 ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
1077 ++ qib_mcast_qp_free(delp);
1078 +
1079 +- if (p) {
1080 +- /*
1081 +- * Wait for any list walkers to finish before freeing the
1082 +- * list element.
1083 +- */
1084 +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
1085 +- qib_mcast_qp_free(p);
1086 +- }
1087 + if (last) {
1088 + atomic_dec(&mcast->refcount);
1089 + wait_event(mcast->wait, !atomic_read(&mcast->refcount));
1090 +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1091 + dev->n_mcast_grps_allocated--;
1092 + spin_unlock_irq(&dev->n_mcast_grps_lock);
1093 + }
1094 +-
1095 +- ret = 0;
1096 +-
1097 +-bail:
1098 +- return ret;
1099 ++ return 0;
1100 + }
1101 +
1102 + int qib_mcast_tree_empty(struct qib_ibport *ibp)
1103 +diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
1104 +index 3ae2bb8d9cf2..21a44b168d46 100644
1105 +--- a/drivers/irqchip/irq-versatile-fpga.c
1106 ++++ b/drivers/irqchip/irq-versatile-fpga.c
1107 +@@ -204,7 +204,12 @@ int __init fpga_irq_of_init(struct device_node *node,
1108 + if (!parent_irq)
1109 + parent_irq = -1;
1110 +
1111 ++#ifdef CONFIG_ARCH_VERSATILE
1112 ++ fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
1113 ++ node);
1114 ++#else
1115 + fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
1116 ++#endif
1117 +
1118 + writel(clear_mask, base + IRQ_ENABLE_CLEAR);
1119 + writel(clear_mask, base + FIQ_ENABLE_CLEAR);
1120 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
1121 +index fbcb6225f794..74a5786ddcce 100644
1122 +--- a/drivers/md/bcache/btree.c
1123 ++++ b/drivers/md/bcache/btree.c
1124 +@@ -1641,6 +1641,7 @@ static void bch_btree_gc(struct cache_set *c)
1125 + do {
1126 + ret = btree_root(gc_root, c, &op, &writes, &stats);
1127 + closure_sync(&writes);
1128 ++ cond_resched();
1129 +
1130 + if (ret && ret != -EAGAIN)
1131 + pr_warn("gc failed!");
1132 +@@ -2037,8 +2038,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
1133 + rw_lock(true, b, b->level);
1134 +
1135 + if (b->key.ptr[0] != btree_ptr ||
1136 +- b->seq != seq + 1)
1137 ++ b->seq != seq + 1) {
1138 ++ op->lock = b->level;
1139 + goto out;
1140 ++ }
1141 + }
1142 +
1143 + SET_KEY_PTRS(check_key, 1);
1144 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
1145 +index 24a3a1546caa..1b6beb1e3142 100644
1146 +--- a/drivers/md/bcache/super.c
1147 ++++ b/drivers/md/bcache/super.c
1148 +@@ -712,6 +712,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
1149 + WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
1150 + sysfs_create_link(&c->kobj, &d->kobj, d->name),
1151 + "Couldn't create device <-> cache set symlinks");
1152 ++
1153 ++ clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
1154 + }
1155 +
1156 + static void bcache_device_detach(struct bcache_device *d)
1157 +@@ -882,8 +884,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
1158 + buf[SB_LABEL_SIZE] = '\0';
1159 + env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
1160 +
1161 +- if (atomic_xchg(&dc->running, 1))
1162 ++ if (atomic_xchg(&dc->running, 1)) {
1163 ++ kfree(env[1]);
1164 ++ kfree(env[2]);
1165 + return;
1166 ++ }
1167 +
1168 + if (!d->c &&
1169 + BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
1170 +@@ -2081,8 +2086,10 @@ static int __init bcache_init(void)
1171 + closure_debug_init();
1172 +
1173 + bcache_major = register_blkdev(0, "bcache");
1174 +- if (bcache_major < 0)
1175 ++ if (bcache_major < 0) {
1176 ++ unregister_reboot_notifier(&reboot);
1177 + return bcache_major;
1178 ++ }
1179 +
1180 + if (!(bcache_wq = create_workqueue("bcache")) ||
1181 + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
1182 +diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
1183 +index f4300e4c0114..d6525c12c8d8 100644
1184 +--- a/drivers/md/bcache/writeback.c
1185 ++++ b/drivers/md/bcache/writeback.c
1186 +@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
1187 +
1188 + static bool dirty_pred(struct keybuf *buf, struct bkey *k)
1189 + {
1190 ++ struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
1191 ++
1192 ++ BUG_ON(KEY_INODE(k) != dc->disk.id);
1193 ++
1194 + return KEY_DIRTY(k);
1195 + }
1196 +
1197 +@@ -372,11 +376,24 @@ next:
1198 + }
1199 + }
1200 +
1201 ++/*
1202 ++ * Returns true if we scanned the entire disk
1203 ++ */
1204 + static bool refill_dirty(struct cached_dev *dc)
1205 + {
1206 + struct keybuf *buf = &dc->writeback_keys;
1207 ++ struct bkey start = KEY(dc->disk.id, 0, 0);
1208 + struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
1209 +- bool searched_from_start = false;
1210 ++ struct bkey start_pos;
1211 ++
1212 ++ /*
1213 ++ * make sure keybuf pos is inside the range for this disk - at bringup
1214 ++ * we might not be attached yet so this disk's inode nr isn't
1215 ++ * initialized then
1216 ++ */
1217 ++ if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
1218 ++ bkey_cmp(&buf->last_scanned, &end) > 0)
1219 ++ buf->last_scanned = start;
1220 +
1221 + if (dc->partial_stripes_expensive) {
1222 + refill_full_stripes(dc);
1223 +@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
1224 + return false;
1225 + }
1226 +
1227 +- if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
1228 +- buf->last_scanned = KEY(dc->disk.id, 0, 0);
1229 +- searched_from_start = true;
1230 +- }
1231 +-
1232 ++ start_pos = buf->last_scanned;
1233 + bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
1234 +
1235 +- return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
1236 ++ if (bkey_cmp(&buf->last_scanned, &end) < 0)
1237 ++ return false;
1238 ++
1239 ++ /*
1240 ++ * If we get to the end start scanning again from the beginning, and
1241 ++ * only scan up to where we initially started scanning from:
1242 ++ */
1243 ++ buf->last_scanned = start;
1244 ++ bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
1245 ++
1246 ++ return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
1247 + }
1248 +
1249 + static int bch_writeback_thread(void *arg)
1250 +diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
1251 +index e2f8598937ac..afe7ecada503 100644
1252 +--- a/drivers/md/bcache/writeback.h
1253 ++++ b/drivers/md/bcache/writeback.h
1254 +@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
1255 +
1256 + static inline void bch_writeback_queue(struct cached_dev *dc)
1257 + {
1258 +- wake_up_process(dc->writeback_thread);
1259 ++ if (!IS_ERR_OR_NULL(dc->writeback_thread))
1260 ++ wake_up_process(dc->writeback_thread);
1261 + }
1262 +
1263 + static inline void bch_writeback_add(struct cached_dev *dc)
1264 +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
1265 +index 0b2536247cf5..84e27708ad97 100644
1266 +--- a/drivers/md/dm-exception-store.h
1267 ++++ b/drivers/md/dm-exception-store.h
1268 +@@ -70,7 +70,7 @@ struct dm_exception_store_type {
1269 + * Update the metadata with this exception.
1270 + */
1271 + void (*commit_exception) (struct dm_exception_store *store,
1272 +- struct dm_exception *e,
1273 ++ struct dm_exception *e, int valid,
1274 + void (*callback) (void *, int success),
1275 + void *callback_context);
1276 +
1277 +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
1278 +index d6e88178d22c..d3272acc0f0e 100644
1279 +--- a/drivers/md/dm-snap-persistent.c
1280 ++++ b/drivers/md/dm-snap-persistent.c
1281 +@@ -700,7 +700,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
1282 + }
1283 +
1284 + static void persistent_commit_exception(struct dm_exception_store *store,
1285 +- struct dm_exception *e,
1286 ++ struct dm_exception *e, int valid,
1287 + void (*callback) (void *, int success),
1288 + void *callback_context)
1289 + {
1290 +@@ -709,6 +709,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
1291 + struct core_exception ce;
1292 + struct commit_callback *cb;
1293 +
1294 ++ if (!valid)
1295 ++ ps->valid = 0;
1296 ++
1297 + ce.old_chunk = e->old_chunk;
1298 + ce.new_chunk = e->new_chunk;
1299 + write_exception(ps, ps->current_committed++, &ce);
1300 +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
1301 +index 1ce9a2586e41..31439d53cf7e 100644
1302 +--- a/drivers/md/dm-snap-transient.c
1303 ++++ b/drivers/md/dm-snap-transient.c
1304 +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
1305 + }
1306 +
1307 + static void transient_commit_exception(struct dm_exception_store *store,
1308 +- struct dm_exception *e,
1309 ++ struct dm_exception *e, int valid,
1310 + void (*callback) (void *, int success),
1311 + void *callback_context)
1312 + {
1313 + /* Just succeed */
1314 +- callback(callback_context, 1);
1315 ++ callback(callback_context, valid);
1316 + }
1317 +
1318 + static void transient_usage(struct dm_exception_store *store,
1319 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
1320 +index c356a10b9ba5..2e9117630dbe 100644
1321 +--- a/drivers/md/dm-snap.c
1322 ++++ b/drivers/md/dm-snap.c
1323 +@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
1324 + dm_table_event(s->ti->table);
1325 + }
1326 +
1327 +-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
1328 ++static void pending_complete(void *context, int success)
1329 + {
1330 ++ struct dm_snap_pending_exception *pe = context;
1331 + struct dm_exception *e;
1332 + struct dm_snapshot *s = pe->snap;
1333 + struct bio *origin_bios = NULL;
1334 +@@ -1460,24 +1461,13 @@ out:
1335 + free_pending_exception(pe);
1336 + }
1337 +
1338 +-static void commit_callback(void *context, int success)
1339 +-{
1340 +- struct dm_snap_pending_exception *pe = context;
1341 +-
1342 +- pending_complete(pe, success);
1343 +-}
1344 +-
1345 + static void complete_exception(struct dm_snap_pending_exception *pe)
1346 + {
1347 + struct dm_snapshot *s = pe->snap;
1348 +
1349 +- if (unlikely(pe->copy_error))
1350 +- pending_complete(pe, 0);
1351 +-
1352 +- else
1353 +- /* Update the metadata if we are persistent */
1354 +- s->store->type->commit_exception(s->store, &pe->e,
1355 +- commit_callback, pe);
1356 ++ /* Update the metadata if we are persistent */
1357 ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
1358 ++ pending_complete, pe);
1359 + }
1360 +
1361 + /*
1362 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1363 +index 3412b86e79fd..7768de60f699 100644
1364 +--- a/drivers/md/dm-thin-metadata.c
1365 ++++ b/drivers/md/dm-thin-metadata.c
1366 +@@ -1205,6 +1205,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1367 + dm_block_t held_root;
1368 +
1369 + /*
1370 ++ * We commit to ensure the btree roots which we increment in a
1371 ++ * moment are up to date.
1372 ++ */
1373 ++ __commit_transaction(pmd);
1374 ++
1375 ++ /*
1376 + * Copy the superblock.
1377 + */
1378 + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1379 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1380 +index b94e4648c199..d633a3921b3c 100644
1381 +--- a/drivers/md/dm-thin.c
1382 ++++ b/drivers/md/dm-thin.c
1383 +@@ -1619,6 +1619,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1384 + case PM_WRITE:
1385 + if (old_mode != new_mode)
1386 + notify_of_pool_mode_change(pool, "write");
1387 ++ pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
1388 + dm_pool_metadata_read_write(pool->pmd);
1389 + pool->process_bio = process_bio;
1390 + pool->process_discard = process_discard;
1391 +@@ -2567,8 +2568,8 @@ static void pool_postsuspend(struct dm_target *ti)
1392 + struct pool_c *pt = ti->private;
1393 + struct pool *pool = pt->pool;
1394 +
1395 +- cancel_delayed_work(&pool->waker);
1396 +- cancel_delayed_work(&pool->no_space_timeout);
1397 ++ cancel_delayed_work_sync(&pool->waker);
1398 ++ cancel_delayed_work_sync(&pool->no_space_timeout);
1399 + flush_workqueue(pool->wq);
1400 + (void) commit(pool);
1401 + }
1402 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1403 +index 7ba85e2b146b..7b4bb1f09b01 100644
1404 +--- a/drivers/md/persistent-data/dm-btree.c
1405 ++++ b/drivers/md/persistent-data/dm-btree.c
1406 +@@ -250,6 +250,16 @@ static void pop_frame(struct del_stack *s)
1407 + dm_tm_unlock(s->tm, f->b);
1408 + }
1409 +
1410 ++static void unlock_all_frames(struct del_stack *s)
1411 ++{
1412 ++ struct frame *f;
1413 ++
1414 ++ while (unprocessed_frames(s)) {
1415 ++ f = s->spine + s->top--;
1416 ++ dm_tm_unlock(s->tm, f->b);
1417 ++ }
1418 ++}
1419 ++
1420 + int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
1421 + {
1422 + int r;
1423 +@@ -306,9 +316,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
1424 + pop_frame(s);
1425 + }
1426 + }
1427 +-
1428 + out:
1429 ++ if (r) {
1430 ++ /* cleanup all frames of del_stack */
1431 ++ unlock_all_frames(s);
1432 ++ }
1433 + kfree(s);
1434 ++
1435 + return r;
1436 + }
1437 + EXPORT_SYMBOL_GPL(dm_btree_del);
1438 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1439 +index 199c9ccd1f5d..032ee39a0e9b 100644
1440 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1441 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1442 +@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
1443 + return 0;
1444 + }
1445 +
1446 +-static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
1447 ++static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
1448 + {
1449 + struct block_op *bop;
1450 +
1451 +@@ -147,6 +147,14 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
1452 + result->type = bop->type;
1453 + result->block = bop->block;
1454 +
1455 ++ return 0;
1456 ++}
1457 ++
1458 ++static int brb_pop(struct bop_ring_buffer *brb)
1459 ++{
1460 ++ if (brb_empty(brb))
1461 ++ return -ENODATA;
1462 ++
1463 + brb->begin = brb_next(brb, brb->begin);
1464 +
1465 + return 0;
1466 +@@ -211,7 +219,7 @@ static int apply_bops(struct sm_metadata *smm)
1467 + while (!brb_empty(&smm->uncommitted)) {
1468 + struct block_op bop;
1469 +
1470 +- r = brb_pop(&smm->uncommitted, &bop);
1471 ++ r = brb_peek(&smm->uncommitted, &bop);
1472 + if (r) {
1473 + DMERR("bug in bop ring buffer");
1474 + break;
1475 +@@ -220,6 +228,8 @@ static int apply_bops(struct sm_metadata *smm)
1476 + r = commit_bop(smm, &bop);
1477 + if (r)
1478 + break;
1479 ++
1480 ++ brb_pop(&smm->uncommitted);
1481 + }
1482 +
1483 + return r;
1484 +@@ -681,7 +691,6 @@ static struct dm_space_map bootstrap_ops = {
1485 + static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
1486 + {
1487 + int r, i;
1488 +- enum allocation_event ev;
1489 + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
1490 + dm_block_t old_len = smm->ll.nr_blocks;
1491 +
1492 +@@ -703,11 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
1493 + * allocate any new blocks.
1494 + */
1495 + do {
1496 +- for (i = old_len; !r && i < smm->begin; i++) {
1497 +- r = sm_ll_inc(&smm->ll, i, &ev);
1498 +- if (r)
1499 +- goto out;
1500 +- }
1501 ++ for (i = old_len; !r && i < smm->begin; i++)
1502 ++ r = add_bop(smm, BOP_INC, i);
1503 ++
1504 ++ if (r)
1505 ++ goto out;
1506 ++
1507 + old_len = smm->begin;
1508 +
1509 + r = apply_bops(smm);
1510 +@@ -752,7 +762,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
1511 + {
1512 + int r;
1513 + dm_block_t i;
1514 +- enum allocation_event ev;
1515 + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
1516 +
1517 + smm->begin = superblock + 1;
1518 +@@ -780,7 +789,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
1519 + * allocated blocks that they were built from.
1520 + */
1521 + for (i = superblock; !r && i < smm->begin; i++)
1522 +- r = sm_ll_inc(&smm->ll, i, &ev);
1523 ++ r = add_bop(smm, BOP_INC, i);
1524 +
1525 + if (r)
1526 + return r;
1527 +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
1528 +index 1f925e856974..46a984291b7d 100644
1529 +--- a/drivers/media/dvb-core/dvb_frontend.c
1530 ++++ b/drivers/media/dvb-core/dvb_frontend.c
1531 +@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
1532 + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
1533 + __func__, c->delivery_system, fe->ops.info.type);
1534 +
1535 +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
1536 +- * do it, it is done for it. */
1537 +- info->caps |= FE_CAN_INVERSION_AUTO;
1538 ++ /* Set the CAN_INVERSION_AUTO bit unless in oneshot mode */
1539 ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
1540 ++ info->caps |= FE_CAN_INVERSION_AUTO;
1541 + err = 0;
1542 + break;
1543 + }
1544 +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
1545 +index a2631be7ffac..08e0f0dd8728 100644
1546 +--- a/drivers/media/dvb-frontends/tda1004x.c
1547 ++++ b/drivers/media/dvb-frontends/tda1004x.c
1548 +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
1549 + {
1550 + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
1551 + struct tda1004x_state* state = fe->demodulator_priv;
1552 ++ int status;
1553 +
1554 + dprintk("%s\n", __func__);
1555 +
1556 ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
1557 ++ if (status == -1)
1558 ++ return -EIO;
1559 ++
1560 ++ /* Only update the properties cache if device is locked */
1561 ++ if (!(status & 8))
1562 ++ return 0;
1563 ++
1564 + // inversion status
1565 + fe_params->inversion = INVERSION_OFF;
1566 + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
1567 +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
1568 +index 90f0d637cd9d..cd05840abc91 100644
1569 +--- a/drivers/media/usb/gspca/ov534.c
1570 ++++ b/drivers/media/usb/gspca/ov534.c
1571 +@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
1572 + struct v4l2_fract *tpf = &cp->timeperframe;
1573 + struct sd *sd = (struct sd *) gspca_dev;
1574 +
1575 +- /* Set requested framerate */
1576 +- sd->frame_rate = tpf->denominator / tpf->numerator;
1577 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
1578 ++ /* Set default framerate */
1579 ++ sd->frame_rate = 30;
1580 ++ else
1581 ++ /* Set requested framerate */
1582 ++ sd->frame_rate = tpf->denominator / tpf->numerator;
1583 ++
1584 + if (gspca_dev->streaming)
1585 + set_frame_rate(gspca_dev);
1586 +
1587 +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
1588 +index 640c2fe760b3..a6fbb2a07979 100644
1589 +--- a/drivers/media/usb/gspca/topro.c
1590 ++++ b/drivers/media/usb/gspca/topro.c
1591 +@@ -4792,7 +4792,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
1592 + struct v4l2_fract *tpf = &cp->timeperframe;
1593 + int fr, i;
1594 +
1595 +- sd->framerate = tpf->denominator / tpf->numerator;
1596 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
1597 ++ sd->framerate = 30;
1598 ++ else
1599 ++ sd->framerate = tpf->denominator / tpf->numerator;
1600 ++
1601 + if (gspca_dev->streaming)
1602 + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
1603 +
1604 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1605 +index d71f5ef036e0..92aeb1d2b41b 100644
1606 +--- a/drivers/mmc/card/block.c
1607 ++++ b/drivers/mmc/card/block.c
1608 +@@ -62,8 +62,7 @@ MODULE_ALIAS("mmc:block");
1609 + #define MMC_SANITIZE_REQ_TIMEOUT 240000
1610 + #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
1611 +
1612 +-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
1613 +- (req->cmd_flags & REQ_META)) && \
1614 ++#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
1615 + (rq_data_dir(req) == WRITE))
1616 + #define PACKED_CMD_VER 0x01
1617 + #define PACKED_CMD_WR 0x02
1618 +@@ -1328,13 +1327,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1619 +
1620 + /*
1621 + * Reliable writes are used to implement Forced Unit Access and
1622 +- * REQ_META accesses, and are supported only on MMCs.
1623 +- *
1624 +- * XXX: this really needs a good explanation of why REQ_META
1625 +- * is treated special.
1626 ++ * are supported only on MMCs.
1627 + */
1628 +- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1629 +- (req->cmd_flags & REQ_META)) &&
1630 ++ bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1631 + (rq_data_dir(req) == WRITE) &&
1632 + (md->flags & MMC_BLK_REL_WR);
1633 +
1634 +diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
1635 +index 4d721c6e2af0..ae360b3b4fda 100644
1636 +--- a/drivers/mmc/core/sdio.c
1637 ++++ b/drivers/mmc/core/sdio.c
1638 +@@ -670,7 +670,7 @@ try_again:
1639 + */
1640 + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
1641 + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
1642 +- ocr);
1643 ++ ocr_card);
1644 + if (err == -EAGAIN) {
1645 + sdio_reset(host);
1646 + mmc_go_idle(host);
1647 +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
1648 +index b93122636531..8103db25db69 100644
1649 +--- a/drivers/mmc/host/mmci.c
1650 ++++ b/drivers/mmc/host/mmci.c
1651 +@@ -1860,7 +1860,7 @@ static struct amba_id mmci_ids[] = {
1652 + {
1653 + .id = 0x00280180,
1654 + .mask = 0x00ffffff,
1655 +- .data = &variant_u300,
1656 ++ .data = &variant_nomadik,
1657 + },
1658 + {
1659 + .id = 0x00480180,
1660 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1661 +index 881bf89acfcc..75d3c28940f1 100644
1662 +--- a/drivers/mmc/host/sdhci.c
1663 ++++ b/drivers/mmc/host/sdhci.c
1664 +@@ -2663,7 +2663,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
1665 +
1666 + static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
1667 + {
1668 +- if (host->runtime_suspended || host->bus_on)
1669 ++ if (host->bus_on)
1670 + return;
1671 + host->bus_on = true;
1672 + pm_runtime_get_noresume(host->mmc->parent);
1673 +@@ -2671,7 +2671,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
1674 +
1675 + static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
1676 + {
1677 +- if (host->runtime_suspended || !host->bus_on)
1678 ++ if (!host->bus_on)
1679 + return;
1680 + host->bus_on = false;
1681 + pm_runtime_put_noidle(host->mmc->parent);
1682 +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
1683 +index c2d0559115d3..732a8ed571c2 100644
1684 +--- a/drivers/net/can/sja1000/sja1000.c
1685 ++++ b/drivers/net/can/sja1000/sja1000.c
1686 +@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev)
1687 + /* clear interrupt flags */
1688 + priv->read_reg(priv, SJA1000_IR);
1689 +
1690 ++ /* clear interrupt flags */
1691 ++ priv->read_reg(priv, SJA1000_IR);
1692 ++
1693 + /* leave reset mode */
1694 + set_normal_mode(dev);
1695 + }
1696 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
1697 +index 52c42fd49510..a5735a7797f8 100644
1698 +--- a/drivers/net/can/usb/ems_usb.c
1699 ++++ b/drivers/net/can/usb/ems_usb.c
1700 +@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
1701 + */
1702 + #define EMS_USB_ARM7_CLOCK 8000000
1703 +
1704 ++#define CPC_TX_QUEUE_TRIGGER_LOW 25
1705 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
1706 ++
1707 + /*
1708 + * CAN-Message representation in a CPC_MSG. Message object type is
1709 + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
1710 +@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
1711 + switch (urb->status) {
1712 + case 0:
1713 + dev->free_slots = dev->intr_in_buffer[1];
1714 ++ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
1715 ++ if (netif_queue_stopped(netdev)) {
1716 ++ netif_wake_queue(netdev);
1717 ++ }
1718 ++ }
1719 + break;
1720 +
1721 + case -ECONNRESET: /* unlink */
1722 +@@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
1723 + /* Release context */
1724 + context->echo_index = MAX_TX_URBS;
1725 +
1726 +- if (netif_queue_stopped(netdev))
1727 +- netif_wake_queue(netdev);
1728 + }
1729 +
1730 + /*
1731 +@@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev)
1732 + int err, i;
1733 +
1734 + dev->intr_in_buffer[0] = 0;
1735 +- dev->free_slots = 15; /* initial size */
1736 ++ dev->free_slots = 50; /* initial size */
1737 +
1738 + for (i = 0; i < MAX_RX_URBS; i++) {
1739 + struct urb *urb = NULL;
1740 +@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
1741 +
1742 + /* Slow down tx path */
1743 + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
1744 +- dev->free_slots < 5) {
1745 ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
1746 + netif_stop_queue(netdev);
1747 + }
1748 + }
1749 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1750 +index 1fbeaa9dd202..6ef93562c6b7 100644
1751 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1752 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
1753 +@@ -2401,10 +2401,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
1754 + AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1755 + AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1756 +
1757 +-#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1758 +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1759 +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1760 +- AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1761 ++#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \
1762 ++ (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1763 ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1764 ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
1765 ++
1766 ++#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \
1767 ++ AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1768 +
1769 + #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
1770 + AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
1771 +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1772 +index 242874041ba4..e157adb85b2a 100644
1773 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1774 ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1775 +@@ -4631,9 +4631,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
1776 + res |= true;
1777 + break;
1778 + case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
1779 +- if (print)
1780 +- _print_next_block((*par_num)++,
1781 +- "MCP SCPAD");
1782 ++ (*par_num)++;
1783 + /* clear latched SCPAD PARITY from MCP */
1784 + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
1785 + 1UL << 10);
1786 +@@ -4695,6 +4693,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
1787 + (sig[3] & HW_PRTY_ASSERT_SET_3) ||
1788 + (sig[4] & HW_PRTY_ASSERT_SET_4)) {
1789 + int par_num = 0;
1790 ++
1791 + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
1792 + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
1793 + sig[0] & HW_PRTY_ASSERT_SET_0,
1794 +@@ -4702,9 +4701,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
1795 + sig[2] & HW_PRTY_ASSERT_SET_2,
1796 + sig[3] & HW_PRTY_ASSERT_SET_3,
1797 + sig[4] & HW_PRTY_ASSERT_SET_4);
1798 +- if (print)
1799 +- netdev_err(bp->dev,
1800 +- "Parity errors detected in blocks: ");
1801 ++ if (print) {
1802 ++ if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
1803 ++ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
1804 ++ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
1805 ++ (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
1806 ++ (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
1807 ++ netdev_err(bp->dev,
1808 ++ "Parity errors detected in blocks: ");
1809 ++ } else {
1810 ++ print = false;
1811 ++ }
1812 ++ }
1813 + res |= bnx2x_check_blocks_with_parity0(bp,
1814 + sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
1815 + res |= bnx2x_check_blocks_with_parity1(bp,
1816 +diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
1817 +index 988f9fec0bff..d8c1b69d0f66 100644
1818 +--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
1819 ++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
1820 +@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1821 + {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
1822 + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
1823 + {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
1824 ++ {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
1825 + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
1826 + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
1827 + {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
1828 +@@ -383,10 +384,10 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1829 + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
1830 + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
1831 + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
1832 +- {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
1833 ++ {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
1834 + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
1835 + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
1836 +- {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
1837 ++ {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
1838 + {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
1839 + {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
1840 + {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
1841 +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
1842 +index 7c7a388c85ab..126f641a9582 100644
1843 +--- a/drivers/pci/hotplug/acpiphp_glue.c
1844 ++++ b/drivers/pci/hotplug/acpiphp_glue.c
1845 +@@ -1133,8 +1133,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
1846 + {
1847 + pci_lock_rescan_remove();
1848 +
1849 +- if (slot->flags & SLOT_IS_GOING_AWAY)
1850 ++ if (slot->flags & SLOT_IS_GOING_AWAY) {
1851 ++ pci_unlock_rescan_remove();
1852 + return -ENODEV;
1853 ++ }
1854 +
1855 + mutex_lock(&slot->crit_sect);
1856 + /* configure all functions */
1857 +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
1858 +index 0bf82a20a0fb..48d21e0edd56 100644
1859 +--- a/drivers/pci/pcie/aer/aerdrv.c
1860 ++++ b/drivers/pci/pcie/aer/aerdrv.c
1861 +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
1862 + rpc->rpd = dev;
1863 + INIT_WORK(&rpc->dpc_handler, aer_isr);
1864 + mutex_init(&rpc->rpc_mutex);
1865 +- init_waitqueue_head(&rpc->wait_release);
1866 +
1867 + /* Use PCIe bus function to store rpc into PCIe device */
1868 + set_service_data(dev, rpc);
1869 +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
1870 + if (rpc->isr)
1871 + free_irq(dev->irq, dev);
1872 +
1873 +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
1874 +-
1875 ++ flush_work(&rpc->dpc_handler);
1876 + aer_disable_rootport(rpc);
1877 + kfree(rpc);
1878 + set_service_data(dev, NULL);
1879 +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
1880 +index 84420b7c9456..945c939a86c5 100644
1881 +--- a/drivers/pci/pcie/aer/aerdrv.h
1882 ++++ b/drivers/pci/pcie/aer/aerdrv.h
1883 +@@ -72,7 +72,6 @@ struct aer_rpc {
1884 + * recovery on the same
1885 + * root port hierarchy
1886 + */
1887 +- wait_queue_head_t wait_release;
1888 + };
1889 +
1890 + struct aer_broadcast_data {
1891 +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
1892 +index b2c8881da764..777edcc4aab6 100644
1893 +--- a/drivers/pci/pcie/aer/aerdrv_core.c
1894 ++++ b/drivers/pci/pcie/aer/aerdrv_core.c
1895 +@@ -785,8 +785,6 @@ void aer_isr(struct work_struct *work)
1896 + while (get_e_source(rpc, &e_src))
1897 + aer_isr_one_error(p_device, &e_src);
1898 + mutex_unlock(&rpc->rpc_mutex);
1899 +-
1900 +- wake_up(&rpc->wait_release);
1901 + }
1902 +
1903 + /**
1904 +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
1905 +index 179b8edc2262..318d535e337d 100644
1906 +--- a/drivers/pci/xen-pcifront.c
1907 ++++ b/drivers/pci/xen-pcifront.c
1908 +@@ -52,7 +52,7 @@ struct pcifront_device {
1909 + };
1910 +
1911 + struct pcifront_sd {
1912 +- int domain;
1913 ++ struct pci_sysdata sd;
1914 + struct pcifront_device *pdev;
1915 + };
1916 +
1917 +@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
1918 + unsigned int domain, unsigned int bus,
1919 + struct pcifront_device *pdev)
1920 + {
1921 +- sd->domain = domain;
1922 ++ /* Because we do not expose that information via XenBus. */
1923 ++ sd->sd.node = first_online_node;
1924 ++ sd->sd.domain = domain;
1925 + sd->pdev = pdev;
1926 + }
1927 +
1928 +@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
1929 + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
1930 + domain, bus);
1931 +
1932 +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
1933 +- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
1934 ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
1935 ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
1936 + if (!bus_entry || !sd) {
1937 + err = -ENOMEM;
1938 + goto err_out;
1939 +diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
1940 +index 3bed2f55cf7d..3ccadf631d45 100644
1941 +--- a/drivers/power/wm831x_power.c
1942 ++++ b/drivers/power/wm831x_power.c
1943 +@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1944 +
1945 + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
1946 + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
1947 +- IRQF_TRIGGER_RISING, "System power low",
1948 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
1949 + power);
1950 + if (ret != 0) {
1951 + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
1952 +@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1953 +
1954 + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
1955 + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
1956 +- IRQF_TRIGGER_RISING, "Power source",
1957 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
1958 + power);
1959 + if (ret != 0) {
1960 + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
1961 +@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1962 + platform_get_irq_byname(pdev,
1963 + wm831x_bat_irqs[i]));
1964 + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
1965 +- IRQF_TRIGGER_RISING,
1966 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
1967 + wm831x_bat_irqs[i],
1968 + power);
1969 + if (ret != 0) {
1970 +diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
1971 +index 3c6768378a94..4048d7f5babd 100644
1972 +--- a/drivers/powercap/intel_rapl.c
1973 ++++ b/drivers/powercap/intel_rapl.c
1974 +@@ -1194,10 +1194,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
1975 +
1976 + for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
1977 + /* check if the domain is locked by BIOS */
1978 +- if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
1979 ++ ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
1980 ++ if (ret)
1981 ++ return ret;
1982 ++ if (locked) {
1983 + pr_info("RAPL package %d domain %s locked by BIOS\n",
1984 + rp->id, rd->name);
1985 +- rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1986 ++ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1987 + }
1988 + }
1989 +
1990 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
1991 +index a2597e683e79..6a64e86e8ccd 100644
1992 +--- a/drivers/s390/block/dasd_alias.c
1993 ++++ b/drivers/s390/block/dasd_alias.c
1994 +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
1995 + spin_unlock_irqrestore(&lcu->lock, flags);
1996 + cancel_work_sync(&lcu->suc_data.worker);
1997 + spin_lock_irqsave(&lcu->lock, flags);
1998 +- if (device == lcu->suc_data.device)
1999 ++ if (device == lcu->suc_data.device) {
2000 ++ dasd_put_device(device);
2001 + lcu->suc_data.device = NULL;
2002 ++ }
2003 + }
2004 + was_pending = 0;
2005 + if (device == lcu->ruac_data.device) {
2006 +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
2007 + was_pending = 1;
2008 + cancel_delayed_work_sync(&lcu->ruac_data.dwork);
2009 + spin_lock_irqsave(&lcu->lock, flags);
2010 +- if (device == lcu->ruac_data.device)
2011 ++ if (device == lcu->ruac_data.device) {
2012 ++ dasd_put_device(device);
2013 + lcu->ruac_data.device = NULL;
2014 ++ }
2015 + }
2016 + private->lcu = NULL;
2017 + spin_unlock_irqrestore(&lcu->lock, flags);
2018 +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
2019 + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
2020 + DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
2021 + " alias data in lcu (rc = %d), retry later", rc);
2022 +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
2023 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
2024 ++ dasd_put_device(device);
2025 + } else {
2026 ++ dasd_put_device(device);
2027 + lcu->ruac_data.device = NULL;
2028 + lcu->flags &= ~UPDATE_PENDING;
2029 + }
2030 +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
2031 + */
2032 + if (!usedev)
2033 + return -EINVAL;
2034 ++ dasd_get_device(usedev);
2035 + lcu->ruac_data.device = usedev;
2036 +- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
2037 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
2038 ++ dasd_put_device(usedev);
2039 + return 0;
2040 + }
2041 +
2042 +@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
2043 + ASCEBC((char *) &cqr->magic, 4);
2044 + ccw = cqr->cpaddr;
2045 + ccw->cmd_code = DASD_ECKD_CCW_RSCK;
2046 +- ccw->flags = 0 ;
2047 ++ ccw->flags = CCW_FLAG_SLI;
2048 + ccw->count = 16;
2049 + ccw->cda = (__u32)(addr_t) cqr->data;
2050 + ((char *)cqr->data)[0] = reason;
2051 +@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
2052 + /* 3. read new alias configuration */
2053 + _schedule_lcu_update(lcu, device);
2054 + lcu->suc_data.device = NULL;
2055 ++ dasd_put_device(device);
2056 + spin_unlock_irqrestore(&lcu->lock, flags);
2057 + }
2058 +
2059 +@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
2060 + }
2061 + lcu->suc_data.reason = reason;
2062 + lcu->suc_data.device = device;
2063 ++ dasd_get_device(device);
2064 + spin_unlock(&lcu->lock);
2065 +- schedule_work(&lcu->suc_data.worker);
2066 ++ if (!schedule_work(&lcu->suc_data.worker))
2067 ++ dasd_put_device(device);
2068 + };
2069 +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
2070 +index 34452ea386ac..52636cfbab8f 100644
2071 +--- a/drivers/scsi/megaraid/megaraid_sas.h
2072 ++++ b/drivers/scsi/megaraid/megaraid_sas.h
2073 +@@ -334,6 +334,8 @@ enum MR_EVT_ARGS {
2074 + MR_EVT_ARGS_GENERIC,
2075 + };
2076 +
2077 ++
2078 ++#define SGE_BUFFER_SIZE 4096
2079 + /*
2080 + * define constants for device list query options
2081 + */
2082 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
2083 +index c80afde97e96..9f833f1504cc 100644
2084 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
2085 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
2086 +@@ -3821,7 +3821,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
2087 + }
2088 + }
2089 + instance->max_sectors_per_req = instance->max_num_sge *
2090 +- PAGE_SIZE / 512;
2091 ++ SGE_BUFFER_SIZE / 512;
2092 + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
2093 + instance->max_sectors_per_req = tmp_sectors;
2094 +
2095 +@@ -5281,6 +5281,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2096 + int i;
2097 + int error = 0;
2098 + compat_uptr_t ptr;
2099 ++ unsigned long local_raw_ptr;
2100 ++ u32 local_sense_off;
2101 ++ u32 local_sense_len;
2102 +
2103 + if (clear_user(ioc, sizeof(*ioc)))
2104 + return -EFAULT;
2105 +@@ -5298,9 +5301,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2106 + * sense_len is not null, so prepare the 64bit value under
2107 + * the same condition.
2108 + */
2109 +- if (ioc->sense_len) {
2110 ++ if (get_user(local_raw_ptr, ioc->frame.raw) ||
2111 ++ get_user(local_sense_off, &ioc->sense_off) ||
2112 ++ get_user(local_sense_len, &ioc->sense_len))
2113 ++ return -EFAULT;
2114 ++
2115 ++
2116 ++ if (local_sense_len) {
2117 + void __user **sense_ioc_ptr =
2118 +- (void __user **)(ioc->frame.raw + ioc->sense_off);
2119 ++ (void __user **)((u8*)local_raw_ptr + local_sense_off);
2120 + compat_uptr_t *sense_cioc_ptr =
2121 + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
2122 + if (get_user(ptr, sense_cioc_ptr) ||
2123 +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
2124 +index eba183c428cf..3643bbf5456d 100644
2125 +--- a/drivers/scsi/ses.c
2126 ++++ b/drivers/scsi/ses.c
2127 +@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev)
2128 + static int ses_recv_diag(struct scsi_device *sdev, int page_code,
2129 + void *buf, int bufflen)
2130 + {
2131 ++ int ret;
2132 + unsigned char cmd[] = {
2133 + RECEIVE_DIAGNOSTIC,
2134 + 1, /* Set PCV bit */
2135 +@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
2136 + bufflen & 0xff,
2137 + 0
2138 + };
2139 ++ unsigned char recv_page_code;
2140 +
2141 +- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
2142 ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
2143 + NULL, SES_TIMEOUT, SES_RETRIES, NULL);
2144 ++ if (unlikely(!ret))
2145 ++ return ret;
2146 ++
2147 ++ recv_page_code = ((unsigned char *)buf)[0];
2148 ++
2149 ++ if (likely(recv_page_code == page_code))
2150 ++ return ret;
2151 ++
2152 ++ /* successful diagnostic but wrong page code. This happens to some
2153 ++ * USB devices; just print a message and pretend there was an error */
2154 ++
2155 ++ sdev_printk(KERN_ERR, sdev,
2156 ++ "Wrong diagnostic page; asked for %d got %u\n",
2157 ++ page_code, recv_page_code);
2158 ++
2159 ++ return -EINVAL;
2160 + }
2161 +
2162 + static int ses_send_diag(struct scsi_device *sdev, int page_code,
2163 +@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
2164 + if (desc_ptr)
2165 + desc_ptr += len;
2166 +
2167 +- if (addl_desc_ptr)
2168 ++ if (addl_desc_ptr &&
2169 ++ /* only find additional descriptions for specific devices */
2170 ++ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
2171 ++ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
2172 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
2173 ++ /* these elements are optional */
2174 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
2175 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
2176 ++ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
2177 + addl_desc_ptr += addl_desc_ptr[1] + 2;
2178 +
2179 + }
2180 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
2181 +index 3bb6646bb406..f9da66fa850b 100644
2182 +--- a/drivers/scsi/storvsc_drv.c
2183 ++++ b/drivers/scsi/storvsc_drv.c
2184 +@@ -1610,8 +1610,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
2185 + vm_srb->win8_extension.time_out_value = 60;
2186 +
2187 + vm_srb->win8_extension.srb_flags |=
2188 +- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
2189 +- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
2190 ++ SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
2191 +
2192 + /* Build the SRB */
2193 + switch (scmnd->sc_data_direction) {
2194 +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
2195 +index f89b24a09b19..231d63caa663 100644
2196 +--- a/drivers/target/target_core_sbc.c
2197 ++++ b/drivers/target/target_core_sbc.c
2198 +@@ -314,7 +314,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
2199 + return 0;
2200 + }
2201 +
2202 +-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
2203 ++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
2204 ++ int *post_ret)
2205 + {
2206 + unsigned char *buf, *addr;
2207 + struct scatterlist *sg;
2208 +@@ -378,7 +379,8 @@ sbc_execute_rw(struct se_cmd *cmd)
2209 + cmd->data_direction);
2210 + }
2211 +
2212 +-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
2213 ++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
2214 ++ int *post_ret)
2215 + {
2216 + struct se_device *dev = cmd->se_dev;
2217 +
2218 +@@ -388,8 +390,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
2219 + * sent to the backend driver.
2220 + */
2221 + spin_lock_irq(&cmd->t_state_lock);
2222 +- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
2223 ++ if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
2224 + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
2225 ++ *post_ret = 1;
2226 ++ }
2227 + spin_unlock_irq(&cmd->t_state_lock);
2228 +
2229 + /*
2230 +@@ -401,7 +405,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
2231 + return TCM_NO_SENSE;
2232 + }
2233 +
2234 +-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
2235 ++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
2236 ++ int *post_ret)
2237 + {
2238 + struct se_device *dev = cmd->se_dev;
2239 + struct scatterlist *write_sg = NULL, *sg;
2240 +@@ -497,11 +502,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
2241 +
2242 + if (block_size < PAGE_SIZE) {
2243 + sg_set_page(&write_sg[i], m.page, block_size,
2244 +- block_size);
2245 ++ m.piter.sg->offset + block_size);
2246 + } else {
2247 + sg_miter_next(&m);
2248 + sg_set_page(&write_sg[i], m.page, block_size,
2249 +- 0);
2250 ++ m.piter.sg->offset);
2251 + }
2252 + len -= block_size;
2253 + i++;
2254 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2255 +index 6fc38903046c..7afea9b59e2c 100644
2256 +--- a/drivers/target/target_core_transport.c
2257 ++++ b/drivers/target/target_core_transport.c
2258 +@@ -1581,7 +1581,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
2259 + void transport_generic_request_failure(struct se_cmd *cmd,
2260 + sense_reason_t sense_reason)
2261 + {
2262 +- int ret = 0;
2263 ++ int ret = 0, post_ret = 0;
2264 +
2265 + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
2266 + " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
2267 +@@ -1604,7 +1604,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
2268 + */
2269 + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2270 + cmd->transport_complete_callback)
2271 +- cmd->transport_complete_callback(cmd, false);
2272 ++ cmd->transport_complete_callback(cmd, false, &post_ret);
2273 +
2274 + switch (sense_reason) {
2275 + case TCM_NON_EXISTENT_LUN:
2276 +@@ -1940,11 +1940,13 @@ static void target_complete_ok_work(struct work_struct *work)
2277 + */
2278 + if (cmd->transport_complete_callback) {
2279 + sense_reason_t rc;
2280 ++ bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2281 ++ bool zero_dl = !(cmd->data_length);
2282 ++ int post_ret = 0;
2283 +
2284 +- rc = cmd->transport_complete_callback(cmd, true);
2285 +- if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
2286 +- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2287 +- !cmd->data_length)
2288 ++ rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2289 ++ if (!rc && !post_ret) {
2290 ++ if (caw && zero_dl)
2291 + goto queue_rsp;
2292 +
2293 + return;
2294 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2295 +index 2aca88715632..584514c7ed1f 100644
2296 +--- a/drivers/usb/class/cdc-acm.c
2297 ++++ b/drivers/usb/class/cdc-acm.c
2298 +@@ -1810,6 +1810,11 @@ static const struct usb_device_id acm_ids[] = {
2299 + },
2300 + #endif
2301 +
2302 ++ /* Samsung phone in firmware update mode */
2303 ++ { USB_DEVICE(0x04e8, 0x685d),
2304 ++ .driver_info = IGNORE_DEVICE,
2305 ++ },
2306 ++
2307 + /* Exclude Infineon Flash Loader utility */
2308 + { USB_DEVICE(0x058b, 0x0041),
2309 + .driver_info = IGNORE_DEVICE,
2310 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2311 +index 02e6fe228a63..21bf168981f9 100644
2312 +--- a/drivers/usb/serial/cp210x.c
2313 ++++ b/drivers/usb/serial/cp210x.c
2314 +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
2315 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
2316 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
2317 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
2318 ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
2319 ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
2320 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
2321 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
2322 + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
2323 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2324 +index 81f6a572f016..9bab34cf01d4 100644
2325 +--- a/drivers/usb/serial/option.c
2326 ++++ b/drivers/usb/serial/option.c
2327 +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
2328 + #define TOSHIBA_PRODUCT_G450 0x0d45
2329 +
2330 + #define ALINK_VENDOR_ID 0x1e0e
2331 ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
2332 + #define ALINK_PRODUCT_PH300 0x9100
2333 + #define ALINK_PRODUCT_3GU 0x9200
2334 +
2335 +@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
2336 + .reserved = BIT(3) | BIT(4),
2337 + };
2338 +
2339 ++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
2340 ++ .reserved = BIT(5) | BIT(6),
2341 ++};
2342 ++
2343 + static const struct option_blacklist_info telit_le910_blacklist = {
2344 + .sendsetup = BIT(0),
2345 + .reserved = BIT(1) | BIT(2),
2346 +@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = {
2347 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
2348 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2349 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2350 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
2351 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
2352 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2353 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
2354 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2355 +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
2356 + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
2357 + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
2358 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
2359 ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
2360 ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
2361 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
2362 + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
2363 + },
2364 +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
2365 +index fed0ce198ae3..64eba4f51f71 100644
2366 +--- a/drivers/virtio/virtio.c
2367 ++++ b/drivers/virtio/virtio.c
2368 +@@ -249,6 +249,7 @@ static int virtio_init(void)
2369 + static void __exit virtio_exit(void)
2370 + {
2371 + bus_unregister(&virtio_bus);
2372 ++ ida_destroy(&virtio_index_ida);
2373 + }
2374 + core_initcall(virtio_init);
2375 + module_exit(virtio_exit);
2376 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2377 +index f48d5fc352a9..469051b01fbf 100644
2378 +--- a/fs/btrfs/disk-io.c
2379 ++++ b/fs/btrfs/disk-io.c
2380 +@@ -2336,6 +2336,7 @@ int open_ctree(struct super_block *sb,
2381 + if (btrfs_check_super_csum(bh->b_data)) {
2382 + printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
2383 + err = -EINVAL;
2384 ++ brelse(bh);
2385 + goto fail_alloc;
2386 + }
2387 +
2388 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2389 +index 08824fe6ef44..fb37441a592f 100644
2390 +--- a/fs/btrfs/inode.c
2391 ++++ b/fs/btrfs/inode.c
2392 +@@ -7511,15 +7511,28 @@ int btrfs_readpage(struct file *file, struct page *page)
2393 + static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2394 + {
2395 + struct extent_io_tree *tree;
2396 +-
2397 ++ struct inode *inode = page->mapping->host;
2398 ++ int ret;
2399 +
2400 + if (current->flags & PF_MEMALLOC) {
2401 + redirty_page_for_writepage(wbc, page);
2402 + unlock_page(page);
2403 + return 0;
2404 + }
2405 ++
2406 ++ /*
2407 ++ * If we are under memory pressure we will call this directly from the
2408 ++ * VM, we need to make sure we have the inode referenced for the ordered
2409 ++ * extent. If not just return like we didn't do anything.
2410 ++ */
2411 ++ if (!igrab(inode)) {
2412 ++ redirty_page_for_writepage(wbc, page);
2413 ++ return AOP_WRITEPAGE_ACTIVATE;
2414 ++ }
2415 + tree = &BTRFS_I(page->mapping->host)->io_tree;
2416 +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2417 ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2418 ++ btrfs_add_delayed_iput(inode);
2419 ++ return ret;
2420 + }
2421 +
2422 + static int btrfs_writepages(struct address_space *mapping,
2423 +@@ -8612,9 +8625,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
2424 + /*
2425 + * 2 items for inode item and ref
2426 + * 2 items for dir items
2427 ++ * 1 item for updating parent inode item
2428 ++ * 1 item for the inline extent item
2429 + * 1 item for xattr if selinux is on
2430 + */
2431 +- trans = btrfs_start_transaction(root, 5);
2432 ++ trans = btrfs_start_transaction(root, 7);
2433 + if (IS_ERR(trans))
2434 + return PTR_ERR(trans);
2435 +
2436 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
2437 +index 20d793542096..0fd23ab3b4ad 100644
2438 +--- a/fs/btrfs/send.c
2439 ++++ b/fs/btrfs/send.c
2440 +@@ -1377,7 +1377,21 @@ static int read_symlink(struct btrfs_root *root,
2441 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2442 + if (ret < 0)
2443 + goto out;
2444 +- BUG_ON(ret);
2445 ++ if (ret) {
2446 ++ /*
2447 ++ * An empty symlink inode. Can happen in rare error paths when
2448 ++ * creating a symlink (transaction committed before the inode
2449 ++ * eviction handler removed the symlink inode items and a crash
2450 ++ * happened in between or the subvol was snapshotted in between).
2451 ++ * Print an informative message to dmesg/syslog so that the user
2452 ++ * can delete the symlink.
2453 ++ */
2454 ++ btrfs_err(root->fs_info,
2455 ++ "Found empty symlink inode %llu at root %llu",
2456 ++ ino, root->root_key.objectid);
2457 ++ ret = -EIO;
2458 ++ goto out;
2459 ++ }
2460 +
2461 + ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2462 + struct btrfs_file_extent_item);
2463 +diff --git a/fs/dcache.c b/fs/dcache.c
2464 +index 65ccdf0e2854..9b235362efcd 100644
2465 +--- a/fs/dcache.c
2466 ++++ b/fs/dcache.c
2467 +@@ -439,42 +439,12 @@ void d_drop(struct dentry *dentry)
2468 + }
2469 + EXPORT_SYMBOL(d_drop);
2470 +
2471 +-/*
2472 +- * Finish off a dentry we've decided to kill.
2473 +- * dentry->d_lock must be held, returns with it unlocked.
2474 +- * If ref is non-zero, then decrement the refcount too.
2475 +- * Returns dentry requiring refcount drop, or NULL if we're done.
2476 +- */
2477 +-static struct dentry *
2478 +-dentry_kill(struct dentry *dentry, int unlock_on_failure)
2479 +- __releases(dentry->d_lock)
2480 ++static void __dentry_kill(struct dentry *dentry)
2481 + {
2482 +- struct inode *inode;
2483 + struct dentry *parent = NULL;
2484 + bool can_free = true;
2485 +-
2486 +- if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
2487 +- can_free = dentry->d_flags & DCACHE_MAY_FREE;
2488 +- spin_unlock(&dentry->d_lock);
2489 +- goto out;
2490 +- }
2491 +-
2492 +- inode = dentry->d_inode;
2493 +- if (inode && !spin_trylock(&inode->i_lock)) {
2494 +-relock:
2495 +- if (unlock_on_failure) {
2496 +- spin_unlock(&dentry->d_lock);
2497 +- cpu_relax();
2498 +- }
2499 +- return dentry; /* try again with same dentry */
2500 +- }
2501 + if (!IS_ROOT(dentry))
2502 + parent = dentry->d_parent;
2503 +- if (parent && !spin_trylock(&parent->d_lock)) {
2504 +- if (inode)
2505 +- spin_unlock(&inode->i_lock);
2506 +- goto relock;
2507 +- }
2508 +
2509 + /*
2510 + * The dentry is now unrecoverably dead to the world.
2511 +@@ -518,9 +488,74 @@ relock:
2512 + can_free = false;
2513 + }
2514 + spin_unlock(&dentry->d_lock);
2515 +-out:
2516 + if (likely(can_free))
2517 + dentry_free(dentry);
2518 ++}
2519 ++
2520 ++/*
2521 ++ * Finish off a dentry we've decided to kill.
2522 ++ * dentry->d_lock must be held, returns with it unlocked.
2523 ++ * If ref is non-zero, then decrement the refcount too.
2524 ++ * Returns dentry requiring refcount drop, or NULL if we're done.
2525 ++ */
2526 ++static struct dentry *dentry_kill(struct dentry *dentry)
2527 ++ __releases(dentry->d_lock)
2528 ++{
2529 ++ struct inode *inode = dentry->d_inode;
2530 ++ struct dentry *parent = NULL;
2531 ++
2532 ++ if (inode && unlikely(!spin_trylock(&inode->i_lock)))
2533 ++ goto failed;
2534 ++
2535 ++ if (!IS_ROOT(dentry)) {
2536 ++ parent = dentry->d_parent;
2537 ++ if (unlikely(!spin_trylock(&parent->d_lock))) {
2538 ++ if (inode)
2539 ++ spin_unlock(&inode->i_lock);
2540 ++ goto failed;
2541 ++ }
2542 ++ }
2543 ++
2544 ++ __dentry_kill(dentry);
2545 ++ return parent;
2546 ++
2547 ++failed:
2548 ++ spin_unlock(&dentry->d_lock);
2549 ++ cpu_relax();
2550 ++ return dentry; /* try again with same dentry */
2551 ++}
2552 ++
2553 ++static inline struct dentry *lock_parent(struct dentry *dentry)
2554 ++{
2555 ++ struct dentry *parent = dentry->d_parent;
2556 ++ if (IS_ROOT(dentry))
2557 ++ return NULL;
2558 ++ if (unlikely((int)dentry->d_lockref.count < 0))
2559 ++ return NULL;
2560 ++ if (likely(spin_trylock(&parent->d_lock)))
2561 ++ return parent;
2562 ++ rcu_read_lock();
2563 ++ spin_unlock(&dentry->d_lock);
2564 ++again:
2565 ++ parent = ACCESS_ONCE(dentry->d_parent);
2566 ++ spin_lock(&parent->d_lock);
2567 ++ /*
2568 ++ * We can't blindly lock dentry until we are sure
2569 ++ * that we won't violate the locking order.
2570 ++ * Any changes of dentry->d_parent must have
2571 ++ * been done with parent->d_lock held, so
2572 ++ * spin_lock() above is enough of a barrier
2573 ++ * for checking if it's still our child.
2574 ++ */
2575 ++ if (unlikely(parent != dentry->d_parent)) {
2576 ++ spin_unlock(&parent->d_lock);
2577 ++ goto again;
2578 ++ }
2579 ++ rcu_read_unlock();
2580 ++ if (parent != dentry)
2581 ++ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2582 ++ else
2583 ++ parent = NULL;
2584 + return parent;
2585 + }
2586 +
2587 +@@ -580,7 +615,7 @@ repeat:
2588 + return;
2589 +
2590 + kill_it:
2591 +- dentry = dentry_kill(dentry, 1);
2592 ++ dentry = dentry_kill(dentry);
2593 + if (dentry)
2594 + goto repeat;
2595 + }
2596 +@@ -798,8 +833,11 @@ static void shrink_dentry_list(struct list_head *list)
2597 + struct dentry *dentry, *parent;
2598 +
2599 + while (!list_empty(list)) {
2600 ++ struct inode *inode;
2601 + dentry = list_entry(list->prev, struct dentry, d_lru);
2602 + spin_lock(&dentry->d_lock);
2603 ++ parent = lock_parent(dentry);
2604 ++
2605 + /*
2606 + * The dispose list is isolated and dentries are not accounted
2607 + * to the LRU here, so we can simply remove it from the list
2608 +@@ -813,26 +851,33 @@ static void shrink_dentry_list(struct list_head *list)
2609 + */
2610 + if ((int)dentry->d_lockref.count > 0) {
2611 + spin_unlock(&dentry->d_lock);
2612 ++ if (parent)
2613 ++ spin_unlock(&parent->d_lock);
2614 + continue;
2615 + }
2616 +
2617 +- parent = dentry_kill(dentry, 0);
2618 +- /*
2619 +- * If dentry_kill returns NULL, we have nothing more to do.
2620 +- */
2621 +- if (!parent)
2622 ++
2623 ++ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
2624 ++ bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
2625 ++ spin_unlock(&dentry->d_lock);
2626 ++ if (parent)
2627 ++ spin_unlock(&parent->d_lock);
2628 ++ if (can_free)
2629 ++ dentry_free(dentry);
2630 + continue;
2631 ++ }
2632 +
2633 +- if (unlikely(parent == dentry)) {
2634 +- /*
2635 +- * trylocks have failed and d_lock has been held the
2636 +- * whole time, so it could not have been added to any
2637 +- * other lists. Just add it back to the shrink list.
2638 +- */
2639 ++ inode = dentry->d_inode;
2640 ++ if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
2641 + d_shrink_add(dentry, list);
2642 + spin_unlock(&dentry->d_lock);
2643 ++ if (parent)
2644 ++ spin_unlock(&parent->d_lock);
2645 + continue;
2646 + }
2647 ++
2648 ++ __dentry_kill(dentry);
2649 ++
2650 + /*
2651 + * We need to prune ancestors too. This is necessary to prevent
2652 + * quadratic behavior of shrink_dcache_parent(), but is also
2653 +@@ -840,8 +885,26 @@ static void shrink_dentry_list(struct list_head *list)
2654 + * fragmentation.
2655 + */
2656 + dentry = parent;
2657 +- while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
2658 +- dentry = dentry_kill(dentry, 1);
2659 ++ while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
2660 ++ parent = lock_parent(dentry);
2661 ++ if (dentry->d_lockref.count != 1) {
2662 ++ dentry->d_lockref.count--;
2663 ++ spin_unlock(&dentry->d_lock);
2664 ++ if (parent)
2665 ++ spin_unlock(&parent->d_lock);
2666 ++ break;
2667 ++ }
2668 ++ inode = dentry->d_inode; /* can't be NULL */
2669 ++ if (unlikely(!spin_trylock(&inode->i_lock))) {
2670 ++ spin_unlock(&dentry->d_lock);
2671 ++ if (parent)
2672 ++ spin_unlock(&parent->d_lock);
2673 ++ cpu_relax();
2674 ++ continue;
2675 ++ }
2676 ++ __dentry_kill(dentry);
2677 ++ dentry = parent;
2678 ++ }
2679 + }
2680 + }
2681 +
2682 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
2683 +index fe649d325b1f..ce653dfb0ae3 100644
2684 +--- a/fs/hostfs/hostfs_kern.c
2685 ++++ b/fs/hostfs/hostfs_kern.c
2686 +@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
2687 +
2688 + init_special_inode(inode, mode, dev);
2689 + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
2690 +- if (!err)
2691 ++ if (err)
2692 + goto out_free;
2693 +
2694 + err = read_name(inode, name);
2695 + __putname(name);
2696 + if (err)
2697 + goto out_put;
2698 +- if (err)
2699 +- goto out_put;
2700 +
2701 + d_instantiate(dentry, inode);
2702 + return 0;
2703 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
2704 +index 969d589c848d..b5f3c3ab0d5f 100644
2705 +--- a/fs/lockd/host.c
2706 ++++ b/fs/lockd/host.c
2707 +@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
2708 + atomic_inc(&nsm->sm_count);
2709 + else {
2710 + host = NULL;
2711 +- nsm = nsm_get_handle(ni->sap, ni->salen,
2712 ++ nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
2713 + ni->hostname, ni->hostname_len);
2714 + if (unlikely(nsm == NULL)) {
2715 + dprintk("lockd: %s failed; no nsm handle\n",
2716 +@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
2717 +
2718 + /**
2719 + * nlm_host_rebooted - Release all resources held by rebooted host
2720 ++ * @net: network namespace
2721 + * @info: pointer to decoded results of NLM_SM_NOTIFY call
2722 + *
2723 + * We were notified that the specified host has rebooted. Release
2724 + * all resources held by that peer.
2725 + */
2726 +-void nlm_host_rebooted(const struct nlm_reboot *info)
2727 ++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
2728 + {
2729 + struct nsm_handle *nsm;
2730 + struct nlm_host *host;
2731 +
2732 +- nsm = nsm_reboot_lookup(info);
2733 ++ nsm = nsm_reboot_lookup(net, info);
2734 + if (unlikely(nsm == NULL))
2735 + return;
2736 +
2737 +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
2738 +index 6ae664b489af..13fac49aff7f 100644
2739 +--- a/fs/lockd/mon.c
2740 ++++ b/fs/lockd/mon.c
2741 +@@ -51,7 +51,6 @@ struct nsm_res {
2742 + };
2743 +
2744 + static const struct rpc_program nsm_program;
2745 +-static LIST_HEAD(nsm_handles);
2746 + static DEFINE_SPINLOCK(nsm_lock);
2747 +
2748 + /*
2749 +@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host)
2750 + }
2751 + }
2752 +
2753 +-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
2754 +- const size_t len)
2755 ++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
2756 ++ const char *hostname, const size_t len)
2757 + {
2758 + struct nsm_handle *nsm;
2759 +
2760 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
2761 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
2762 + if (strlen(nsm->sm_name) == len &&
2763 + memcmp(nsm->sm_name, hostname, len) == 0)
2764 + return nsm;
2765 + return NULL;
2766 + }
2767 +
2768 +-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
2769 ++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
2770 ++ const struct sockaddr *sap)
2771 + {
2772 + struct nsm_handle *nsm;
2773 +
2774 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
2775 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
2776 + if (rpc_cmp_addr(nsm_addr(nsm), sap))
2777 + return nsm;
2778 + return NULL;
2779 + }
2780 +
2781 +-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
2782 ++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
2783 ++ const struct nsm_private *priv)
2784 + {
2785 + struct nsm_handle *nsm;
2786 +
2787 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
2788 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
2789 + if (memcmp(nsm->sm_priv.data, priv->data,
2790 + sizeof(priv->data)) == 0)
2791 + return nsm;
2792 +@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
2793 +
2794 + /**
2795 + * nsm_get_handle - Find or create a cached nsm_handle
2796 ++ * @net: network namespace
2797 + * @sap: pointer to socket address of handle to find
2798 + * @salen: length of socket address
2799 + * @hostname: pointer to C string containing hostname to find
2800 +@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
2801 + * @hostname cannot be found in the handle cache. Returns NULL if
2802 + * an error occurs.
2803 + */
2804 +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
2805 ++struct nsm_handle *nsm_get_handle(const struct net *net,
2806 ++ const struct sockaddr *sap,
2807 + const size_t salen, const char *hostname,
2808 + const size_t hostname_len)
2809 + {
2810 + struct nsm_handle *cached, *new = NULL;
2811 ++ struct lockd_net *ln = net_generic(net, lockd_net_id);
2812 +
2813 + if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
2814 + if (printk_ratelimit()) {
2815 +@@ -381,9 +385,10 @@ retry:
2816 + spin_lock(&nsm_lock);
2817 +
2818 + if (nsm_use_hostnames && hostname != NULL)
2819 +- cached = nsm_lookup_hostname(hostname, hostname_len);
2820 ++ cached = nsm_lookup_hostname(&ln->nsm_handles,
2821 ++ hostname, hostname_len);
2822 + else
2823 +- cached = nsm_lookup_addr(sap);
2824 ++ cached = nsm_lookup_addr(&ln->nsm_handles, sap);
2825 +
2826 + if (cached != NULL) {
2827 + atomic_inc(&cached->sm_count);
2828 +@@ -397,7 +402,7 @@ retry:
2829 + }
2830 +
2831 + if (new != NULL) {
2832 +- list_add(&new->sm_link, &nsm_handles);
2833 ++ list_add(&new->sm_link, &ln->nsm_handles);
2834 + spin_unlock(&nsm_lock);
2835 + dprintk("lockd: created nsm_handle for %s (%s)\n",
2836 + new->sm_name, new->sm_addrbuf);
2837 +@@ -414,19 +419,22 @@ retry:
2838 +
2839 + /**
2840 + * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
2841 ++ * @net: network namespace
2842 + * @info: pointer to NLMPROC_SM_NOTIFY arguments
2843 + *
2844 + * Returns a matching nsm_handle if found in the nsm cache. The returned
2845 + * nsm_handle's reference count is bumped. Otherwise returns NULL if some
2846 + * error occurred.
2847 + */
2848 +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
2849 ++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
2850 ++ const struct nlm_reboot *info)
2851 + {
2852 + struct nsm_handle *cached;
2853 ++ struct lockd_net *ln = net_generic(net, lockd_net_id);
2854 +
2855 + spin_lock(&nsm_lock);
2856 +
2857 +- cached = nsm_lookup_priv(&info->priv);
2858 ++ cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
2859 + if (unlikely(cached == NULL)) {
2860 + spin_unlock(&nsm_lock);
2861 + dprintk("lockd: never saw rebooted peer '%.*s' before\n",
2862 +diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
2863 +index 5010b55628b4..414da99744e9 100644
2864 +--- a/fs/lockd/netns.h
2865 ++++ b/fs/lockd/netns.h
2866 +@@ -16,6 +16,7 @@ struct lockd_net {
2867 + spinlock_t nsm_clnt_lock;
2868 + unsigned int nsm_users;
2869 + struct rpc_clnt *nsm_clnt;
2870 ++ struct list_head nsm_handles;
2871 + };
2872 +
2873 + extern int lockd_net_id;
2874 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
2875 +index 59a53f664005..bb1ad4df024d 100644
2876 +--- a/fs/lockd/svc.c
2877 ++++ b/fs/lockd/svc.c
2878 +@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net)
2879 + INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
2880 + INIT_LIST_HEAD(&ln->grace_list);
2881 + spin_lock_init(&ln->nsm_clnt_lock);
2882 ++ INIT_LIST_HEAD(&ln->nsm_handles);
2883 + return 0;
2884 + }
2885 +
2886 +diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
2887 +index b147d1ae71fd..09c576f26c7b 100644
2888 +--- a/fs/lockd/svc4proc.c
2889 ++++ b/fs/lockd/svc4proc.c
2890 +@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
2891 + return rpc_system_err;
2892 + }
2893 +
2894 +- nlm_host_rebooted(argp);
2895 ++ nlm_host_rebooted(SVC_NET(rqstp), argp);
2896 + return rpc_success;
2897 + }
2898 +
2899 +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
2900 +index 21171f0c6477..fb26b9f522e7 100644
2901 +--- a/fs/lockd/svcproc.c
2902 ++++ b/fs/lockd/svcproc.c
2903 +@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
2904 + return rpc_system_err;
2905 + }
2906 +
2907 +- nlm_host_rebooted(argp);
2908 ++ nlm_host_rebooted(SVC_NET(rqstp), argp);
2909 + return rpc_success;
2910 + }
2911 +
2912 +diff --git a/fs/namei.c b/fs/namei.c
2913 +index f4f6460b6958..c24781f07cf3 100644
2914 +--- a/fs/namei.c
2915 ++++ b/fs/namei.c
2916 +@@ -3085,6 +3085,10 @@ opened:
2917 + goto exit_fput;
2918 + }
2919 + out:
2920 ++ if (unlikely(error > 0)) {
2921 ++ WARN_ON(1);
2922 ++ error = -EINVAL;
2923 ++ }
2924 + if (got_write)
2925 + mnt_drop_write(nd->path.mnt);
2926 + path_put(&save_parent);
2927 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2928 +index 45a7dd36b4a6..3b5e86fd2800 100644
2929 +--- a/fs/nfs/nfs4proc.c
2930 ++++ b/fs/nfs/nfs4proc.c
2931 +@@ -2187,9 +2187,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2932 + dentry = d_add_unique(dentry, igrab(state->inode));
2933 + if (dentry == NULL) {
2934 + dentry = opendata->dentry;
2935 +- } else if (dentry != ctx->dentry) {
2936 ++ } else {
2937 + dput(ctx->dentry);
2938 +- ctx->dentry = dget(dentry);
2939 ++ ctx->dentry = dentry;
2940 + }
2941 + nfs_set_verifier(dentry,
2942 + nfs_save_change_attribute(opendata->dir->d_inode));
2943 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2944 +index c402b672a474..1c02b300dc5d 100644
2945 +--- a/fs/nfs/nfs4state.c
2946 ++++ b/fs/nfs/nfs4state.c
2947 +@@ -1482,7 +1482,7 @@ restart:
2948 + spin_unlock(&state->state_lock);
2949 + }
2950 + nfs4_put_open_state(state);
2951 +- clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
2952 ++ clear_bit(NFS_STATE_RECLAIM_NOGRACE,
2953 + &state->flags);
2954 + spin_lock(&sp->so_lock);
2955 + goto restart;
2956 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2957 +index eaa7374305a3..6b1d8498d208 100644
2958 +--- a/fs/proc/task_mmu.c
2959 ++++ b/fs/proc/task_mmu.c
2960 +@@ -165,7 +165,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
2961 + if (!priv->task)
2962 + return ERR_PTR(-ESRCH);
2963 +
2964 +- mm = mm_access(priv->task, PTRACE_MODE_READ);
2965 ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
2966 + if (!mm || IS_ERR(mm))
2967 + return mm;
2968 + down_read(&mm->mmap_sem);
2969 +@@ -1182,7 +1182,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
2970 + if (!pm.buffer)
2971 + goto out_task;
2972 +
2973 +- mm = mm_access(task, PTRACE_MODE_READ);
2974 ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
2975 + ret = PTR_ERR(mm);
2976 + if (!mm || IS_ERR(mm))
2977 + goto out_free;
2978 +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
2979 +index 678455d2d683..f9db7e9f6966 100644
2980 +--- a/fs/proc/task_nommu.c
2981 ++++ b/fs/proc/task_nommu.c
2982 +@@ -216,7 +216,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
2983 + if (!priv->task)
2984 + return ERR_PTR(-ESRCH);
2985 +
2986 +- mm = mm_access(priv->task, PTRACE_MODE_READ);
2987 ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
2988 + if (!mm || IS_ERR(mm)) {
2989 + put_task_struct(priv->task);
2990 + priv->task = NULL;
2991 +diff --git a/fs/splice.c b/fs/splice.c
2992 +index f345d53f94da..e64f59960ec5 100644
2993 +--- a/fs/splice.c
2994 ++++ b/fs/splice.c
2995 +@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
2996 +
2997 + splice_from_pipe_begin(sd);
2998 + do {
2999 ++ cond_resched();
3000 + ret = splice_from_pipe_next(pipe, sd);
3001 + if (ret > 0)
3002 + ret = splice_from_pipe_feed(pipe, sd, actor);
3003 +@@ -1175,7 +1176,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
3004 + long ret, bytes;
3005 + umode_t i_mode;
3006 + size_t len;
3007 +- int i, flags;
3008 ++ int i, flags, more;
3009 +
3010 + /*
3011 + * We require the input to be a regular file, as we don't want to
3012 +@@ -1218,6 +1219,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
3013 + * Don't block on output, we have to drain the direct pipe.
3014 + */
3015 + sd->flags &= ~SPLICE_F_NONBLOCK;
3016 ++ more = sd->flags & SPLICE_F_MORE;
3017 +
3018 + while (len) {
3019 + size_t read_len;
3020 +@@ -1231,6 +1233,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
3021 + sd->total_len = read_len;
3022 +
3023 + /*
3024 ++ * If more data is pending, set SPLICE_F_MORE.
3025 ++ * If this is the last data and SPLICE_F_MORE was not set
3026 ++ * initially, clear it.
3027 ++ */
3028 ++ if (read_len < len)
3029 ++ sd->flags |= SPLICE_F_MORE;
3030 ++ else if (!more)
3031 ++ sd->flags &= ~SPLICE_F_MORE;
3032 ++ /*
3033 + * NOTE: nonblocking mode only applies to the input. We
3034 + * must not do the output in nonblocking mode as then we
3035 + * could get stuck data in the internal pipe:
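Two things change in fs/splice.c above: a cond_resched() keeps long pipe-drain loops preemptible, and splice_direct_to_actor() now manages SPLICE_F_MORE per chunk, restoring the caller's original flag on the last one. A self-contained model of the flag bookkeeping (send_chunks is a made-up driver; the flag value matches the uapi one):

    #include <stdbool.h>
    #include <stdio.h>

    #define SPLICE_F_MORE 0x4

    /* While read_len < len more chunks will follow, so MORE is set; on
     * the final chunk it is cleared unless the caller asked for it. */
    static void send_chunks(size_t len, size_t chunk, unsigned flags)
    {
        bool more = flags & SPLICE_F_MORE;   /* caller's original wish */

        while (len) {
            size_t read_len = len < chunk ? len : chunk;

            if (read_len < len)
                flags |= SPLICE_F_MORE;      /* more data pending      */
            else if (!more)
                flags &= ~SPLICE_F_MORE;     /* last chunk, restore    */

            printf("chunk=%zu MORE=%d\n", read_len,
                   !!(flags & SPLICE_F_MORE));
            len -= read_len;
        }
    }

    int main(void)
    {
        send_chunks(10, 4, 0);               /* MORE on all but last   */
        send_chunks(10, 4, SPLICE_F_MORE);   /* caller keeps MORE set  */
        return 0;
    }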
3036 +diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
3037 +index 2c9e62c2bfd0..f55fb04501ec 100644
3038 +--- a/include/asm-generic/cputime_nsecs.h
3039 ++++ b/include/asm-generic/cputime_nsecs.h
3040 +@@ -70,7 +70,7 @@ typedef u64 __nocast cputime64_t;
3041 + */
3042 + static inline cputime_t timespec_to_cputime(const struct timespec *val)
3043 + {
3044 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
3045 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
3046 + return (__force cputime_t) ret;
3047 + }
3048 + static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
3049 +@@ -86,7 +86,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
3050 + */
3051 + static inline cputime_t timeval_to_cputime(const struct timeval *val)
3052 + {
3053 +- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
3054 ++ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
3055 ++ val->tv_usec * NSEC_PER_USEC;
3056 + return (__force cputime_t) ret;
3057 + }
3058 + static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
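The casts above matter on 32-bit builds, where tv_sec is a 32-bit long: the multiplication by NSEC_PER_SEC was done in 32 bits and wrapped before the result was widened to 64 bits. A compilable demonstration using fixed-width types:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t sec = 5;                    /* stand-in for 32-bit tv_sec */
        uint32_t nsec_per_sec = 1000000000u;

        /* 32-bit multiply wraps modulo 2^32 before the widening cast. */
        uint64_t wrong = (uint64_t)(sec * nsec_per_sec);
        /* Casting one operand first forces a 64-bit multiply. */
        uint64_t right = (uint64_t)sec * nsec_per_sec;

        printf("32-bit multiply: %llu\n", (unsigned long long)wrong);
        printf("64-bit multiply: %llu\n", (unsigned long long)right);
        return 0;
    }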
3059 +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
3060 +index 9a33c5f7e126..f6c229e2bffa 100644
3061 +--- a/include/linux/enclosure.h
3062 ++++ b/include/linux/enclosure.h
3063 +@@ -29,7 +29,11 @@
3064 + /* A few generic types ... taken from ses-2 */
3065 + enum enclosure_component_type {
3066 + ENCLOSURE_COMPONENT_DEVICE = 0x01,
3067 ++ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
3068 ++ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
3069 ++ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
3070 + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
3071 ++ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
3072 + };
3073 +
3074 + /* ses-2 common element status */
3075 +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
3076 +index dcaad79f54ed..0adf073f13b3 100644
3077 +--- a/include/linux/lockd/lockd.h
3078 ++++ b/include/linux/lockd/lockd.h
3079 +@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
3080 + struct nlm_host * nlm_get_host(struct nlm_host *);
3081 + void nlm_shutdown_hosts(void);
3082 + void nlm_shutdown_hosts_net(struct net *net);
3083 +-void nlm_host_rebooted(const struct nlm_reboot *);
3084 ++void nlm_host_rebooted(const struct net *net,
3085 ++ const struct nlm_reboot *);
3086 +
3087 + /*
3088 + * Host monitoring
3089 +@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
3090 + int nsm_monitor(const struct nlm_host *host);
3091 + void nsm_unmonitor(const struct nlm_host *host);
3092 +
3093 +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
3094 ++struct nsm_handle *nsm_get_handle(const struct net *net,
3095 ++ const struct sockaddr *sap,
3096 + const size_t salen,
3097 + const char *hostname,
3098 + const size_t hostname_len);
3099 +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
3100 ++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
3101 ++ const struct nlm_reboot *info);
3102 + void nsm_release(struct nsm_handle *nsm);
3103 +
3104 + /*
3105 +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
3106 +index 0ae5807480f4..1e122cc9ea3e 100644
3107 +--- a/include/linux/nfs_fs.h
3108 ++++ b/include/linux/nfs_fs.h
3109 +@@ -580,9 +580,7 @@ static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
3110 +
3111 + static inline loff_t nfs_size_to_loff_t(__u64 size)
3112 + {
3113 +- if (size > (__u64) OFFSET_MAX - 1)
3114 +- return OFFSET_MAX - 1;
3115 +- return (loff_t) size;
3116 ++ return min_t(u64, size, OFFSET_MAX);
3117 + }
3118 +
3119 + static inline ino_t
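nfs_size_to_loff_t() above becomes a plain clamp, and its upper bound moves from OFFSET_MAX - 1 to OFFSET_MAX. A standalone sketch of the same clamp (min_u64 and OFF_MAX are local stand-ins for the kernel's min_t and OFFSET_MAX):

    #include <stdint.h>
    #include <stdio.h>

    #define OFF_MAX INT64_MAX

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    /* A server may report a u64 size larger than the client's signed
     * loff_t can hold, so clamp instead of letting it go negative. */
    static int64_t size_to_loff(uint64_t size)
    {
        return (int64_t)min_u64(size, OFF_MAX);
    }

    int main(void)
    {
        printf("%lld\n", (long long)size_to_loff(4096));        /* unchanged */
        printf("%lld\n", (long long)size_to_loff(UINT64_MAX));  /* clamped   */
        return 0;
    }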
3120 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
3121 +index 7159a0a933df..97c8689c7e51 100644
3122 +--- a/include/linux/tracepoint.h
3123 ++++ b/include/linux/tracepoint.h
3124 +@@ -14,8 +14,11 @@
3125 + * See the file COPYING for more details.
3126 + */
3127 +
3128 ++#include <linux/smp.h>
3129 + #include <linux/errno.h>
3130 + #include <linux/types.h>
3131 ++#include <linux/percpu.h>
3132 ++#include <linux/cpumask.h>
3133 + #include <linux/rcupdate.h>
3134 + #include <linux/static_key.h>
3135 +
3136 +@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void)
3137 + void *it_func; \
3138 + void *__data; \
3139 + \
3140 ++ if (!cpu_online(raw_smp_processor_id())) \
3141 ++ return; \
3142 ++ \
3143 + if (!(cond)) \
3144 + return; \
3145 + prercu; \
3146 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
3147 +index e830c3dff61a..7bb69c9c3c43 100644
3148 +--- a/include/net/af_unix.h
3149 ++++ b/include/net/af_unix.h
3150 +@@ -6,8 +6,8 @@
3151 + #include <linux/mutex.h>
3152 + #include <net/sock.h>
3153 +
3154 +-void unix_inflight(struct file *fp);
3155 +-void unix_notinflight(struct file *fp);
3156 ++void unix_inflight(struct user_struct *user, struct file *fp);
3157 ++void unix_notinflight(struct user_struct *user, struct file *fp);
3158 + void unix_gc(void);
3159 + void wait_for_unix_gc(void);
3160 + struct sock *unix_get_socket(struct file *filp);
3161 +diff --git a/include/net/scm.h b/include/net/scm.h
3162 +index 262532d111f5..59fa93c01d2a 100644
3163 +--- a/include/net/scm.h
3164 ++++ b/include/net/scm.h
3165 +@@ -21,6 +21,7 @@ struct scm_creds {
3166 + struct scm_fp_list {
3167 + short count;
3168 + short max;
3169 ++ struct user_struct *user;
3170 + struct file *fp[SCM_MAX_FD];
3171 + };
3172 +
3173 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
3174 +index e4b9e011d2a1..42606764d830 100644
3175 +--- a/include/target/target_core_base.h
3176 ++++ b/include/target/target_core_base.h
3177 +@@ -513,7 +513,7 @@ struct se_cmd {
3178 + sense_reason_t (*execute_cmd)(struct se_cmd *);
3179 + sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
3180 + u32, enum dma_data_direction);
3181 +- sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
3182 ++ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
3183 +
3184 + unsigned char *t_task_cdb;
3185 + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
3186 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3187 +index ebb8a9e937fa..2c2e5e70e4f3 100644
3188 +--- a/kernel/irq/manage.c
3189 ++++ b/kernel/irq/manage.c
3190 +@@ -1230,6 +1230,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
3191 + if (!desc)
3192 + return NULL;
3193 +
3194 ++ chip_bus_lock(desc);
3195 + raw_spin_lock_irqsave(&desc->lock, flags);
3196 +
3197 + /*
3198 +@@ -1243,7 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
3199 + if (!action) {
3200 + WARN(1, "Trying to free already-free IRQ %d\n", irq);
3201 + raw_spin_unlock_irqrestore(&desc->lock, flags);
3202 +-
3203 ++ chip_bus_sync_unlock(desc);
3204 + return NULL;
3205 + }
3206 +
3207 +@@ -1266,6 +1267,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
3208 + #endif
3209 +
3210 + raw_spin_unlock_irqrestore(&desc->lock, flags);
3211 ++ chip_bus_sync_unlock(desc);
3212 +
3213 + unregister_handler_proc(irq, action);
3214 +
3215 +@@ -1339,9 +1341,7 @@ void free_irq(unsigned int irq, void *dev_id)
3216 + desc->affinity_notify = NULL;
3217 + #endif
3218 +
3219 +- chip_bus_lock(desc);
3220 + kfree(__free_irq(irq, dev_id));
3221 +- chip_bus_sync_unlock(desc);
3222 + }
3223 + EXPORT_SYMBOL(free_irq);
3224 +
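The __free_irq() hunks above move chip_bus_lock()/chip_bus_sync_unlock() inside the function so the sleepable bus lock is always taken before desc->lock and released after it, on the "already-free" error path as well as on success. A small pthread sketch of that acquire/release discipline (free_action is an illustrative stand-in):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t bus_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;

    static int free_action(int present)
    {
        int ret = -1;

        pthread_mutex_lock(&bus_lock);     /* outer lock first         */
        pthread_mutex_lock(&desc_lock);    /* inner lock second        */
        if (present)
            ret = 0;                       /* ... unhook the action    */
        pthread_mutex_unlock(&desc_lock);  /* release in reverse order */
        pthread_mutex_unlock(&bus_lock);   /* even when nothing found  */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", free_action(1), free_action(0));
        return 0;
    }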
3225 +diff --git a/kernel/resource.c b/kernel/resource.c
3226 +index 3f285dce9347..449282e48bb1 100644
3227 +--- a/kernel/resource.c
3228 ++++ b/kernel/resource.c
3229 +@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent,
3230 + if (!conflict)
3231 + break;
3232 + if (conflict != parent) {
3233 +- parent = conflict;
3234 +- if (!(conflict->flags & IORESOURCE_BUSY))
3235 ++ if (!(conflict->flags & IORESOURCE_BUSY)) {
3236 ++ parent = conflict;
3237 + continue;
3238 ++ }
3239 + }
3240 + if (conflict->flags & flags & IORESOURCE_MUXED) {
3241 + add_wait_queue(&muxed_resource_wait, &wait);
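The __request_region() fix above only descends into a conflicting resource when that conflict is not BUSY; previously parent was reassigned before the check, so the later wait/failure handling examined the wrong node. A toy walk showing the corrected rule (struct res and find_slot are simplified stand-ins, not the kernel types):

    #include <stddef.h>
    #include <stdio.h>

    #define BUSY 0x1

    struct res { const char *name; unsigned flags; struct res *child; };

    static struct res *find_slot(struct res *parent)
    {
        for (;;) {
            struct res *conflict = parent->child;   /* stand-in lookup */

            if (!conflict)
                return parent;         /* free slot under parent       */
            if (!(conflict->flags & BUSY)) {
                parent = conflict;     /* descend only into non-busy   */
                continue;
            }
            printf("conflict with busy %s\n", conflict->name);
            return NULL;               /* fall through to wait/fail    */
        }
    }

    int main(void)
    {
        struct res leaf = { "leaf-busy", BUSY, NULL };
        struct res mid  = { "bridge", 0, &leaf };
        struct res root = { "root", 0, &mid };

        if (!find_slot(&root))
            printf("request denied\n");
        return 0;
    }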
3242 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3243 +index bbe957762ace..46afc8cd69dd 100644
3244 +--- a/kernel/sched/core.c
3245 ++++ b/kernel/sched/core.c
3246 +@@ -937,6 +937,13 @@ inline int task_curr(const struct task_struct *p)
3247 + return cpu_curr(task_cpu(p)) == p;
3248 + }
3249 +
3250 ++/*
3251 ++ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
3252 ++ * use the balance_callback list if you want balancing.
3253 ++ *
3254 ++ * this means any call to check_class_changed() must be followed by a call to
3255 ++ * balance_callback().
3256 ++ */
3257 + static inline void check_class_changed(struct rq *rq, struct task_struct *p,
3258 + const struct sched_class *prev_class,
3259 + int oldprio)
3260 +@@ -1423,8 +1430,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
3261 +
3262 + p->state = TASK_RUNNING;
3263 + #ifdef CONFIG_SMP
3264 +- if (p->sched_class->task_woken)
3265 ++ if (p->sched_class->task_woken) {
3266 ++ /*
3267 ++ * XXX can drop rq->lock; most likely ok.
3268 ++ */
3269 + p->sched_class->task_woken(rq, p);
3270 ++ }
3271 +
3272 + if (rq->idle_stamp) {
3273 + u64 delta = rq_clock(rq) - rq->idle_stamp;
3274 +@@ -1685,7 +1696,6 @@ out:
3275 + */
3276 + int wake_up_process(struct task_struct *p)
3277 + {
3278 +- WARN_ON(task_is_stopped_or_traced(p));
3279 + return try_to_wake_up(p, TASK_NORMAL, 0);
3280 + }
3281 + EXPORT_SYMBOL(wake_up_process);
3282 +@@ -2179,18 +2189,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
3283 + }
3284 +
3285 + /* rq->lock is NOT held, but preemption is disabled */
3286 +-static inline void post_schedule(struct rq *rq)
3287 ++static void __balance_callback(struct rq *rq)
3288 + {
3289 +- if (rq->post_schedule) {
3290 +- unsigned long flags;
3291 ++ struct callback_head *head, *next;
3292 ++ void (*func)(struct rq *rq);
3293 ++ unsigned long flags;
3294 +
3295 +- raw_spin_lock_irqsave(&rq->lock, flags);
3296 +- if (rq->curr->sched_class->post_schedule)
3297 +- rq->curr->sched_class->post_schedule(rq);
3298 +- raw_spin_unlock_irqrestore(&rq->lock, flags);
3299 ++ raw_spin_lock_irqsave(&rq->lock, flags);
3300 ++ head = rq->balance_callback;
3301 ++ rq->balance_callback = NULL;
3302 ++ while (head) {
3303 ++ func = (void (*)(struct rq *))head->func;
3304 ++ next = head->next;
3305 ++ head->next = NULL;
3306 ++ head = next;
3307 +
3308 +- rq->post_schedule = 0;
3309 ++ func(rq);
3310 + }
3311 ++ raw_spin_unlock_irqrestore(&rq->lock, flags);
3312 ++}
3313 ++
3314 ++static inline void balance_callback(struct rq *rq)
3315 ++{
3316 ++ if (unlikely(rq->balance_callback))
3317 ++ __balance_callback(rq);
3318 + }
3319 +
3320 + #else
3321 +@@ -2199,7 +2221,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p)
3322 + {
3323 + }
3324 +
3325 +-static inline void post_schedule(struct rq *rq)
3326 ++static inline void balance_callback(struct rq *rq)
3327 + {
3328 + }
3329 +
3330 +@@ -2220,7 +2242,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
3331 + * FIXME: do we need to worry about rq being invalidated by the
3332 + * task_switch?
3333 + */
3334 +- post_schedule(rq);
3335 ++ balance_callback(rq);
3336 +
3337 + #ifdef __ARCH_WANT_UNLOCKED_CTXSW
3338 + /* In this case, finish_task_switch does not reenable preemption */
3339 +@@ -2732,7 +2754,7 @@ need_resched:
3340 + } else
3341 + raw_spin_unlock_irq(&rq->lock);
3342 +
3343 +- post_schedule(rq);
3344 ++ balance_callback(rq);
3345 +
3346 + sched_preempt_enable_no_resched();
3347 + if (need_resched())
3348 +@@ -2994,7 +3016,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
3349 +
3350 + check_class_changed(rq, p, prev_class, oldprio);
3351 + out_unlock:
3352 ++ preempt_disable(); /* avoid rq from going away on us */
3353 + __task_rq_unlock(rq);
3354 ++
3355 ++ balance_callback(rq);
3356 ++ preempt_enable();
3357 + }
3358 + #endif
3359 +
3360 +@@ -3500,10 +3526,17 @@ change:
3361 + enqueue_task(rq, p, 0);
3362 +
3363 + check_class_changed(rq, p, prev_class, oldprio);
3364 ++ preempt_disable(); /* avoid rq from going away on us */
3365 + task_rq_unlock(rq, p, &flags);
3366 +
3367 + rt_mutex_adjust_pi(p);
3368 +
3369 ++ /*
3370 ++ * Run balance callbacks after we've adjusted the PI chain.
3371 ++ */
3372 ++ balance_callback(rq);
3373 ++ preempt_enable();
3374 ++
3375 + return 0;
3376 + }
3377 +
3378 +@@ -5386,13 +5419,13 @@ static int init_rootdomain(struct root_domain *rd)
3379 + {
3380 + memset(rd, 0, sizeof(*rd));
3381 +
3382 +- if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
3383 ++ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
3384 + goto out;
3385 +- if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
3386 ++ if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
3387 + goto free_span;
3388 +- if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
3389 ++ if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
3390 + goto free_online;
3391 +- if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
3392 ++ if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
3393 + goto free_dlo_mask;
3394 +
3395 + init_dl_bw(&rd->dl_bw);
3396 +@@ -6902,7 +6935,7 @@ void __init sched_init(void)
3397 + rq->sd = NULL;
3398 + rq->rd = NULL;
3399 + rq->cpu_power = SCHED_POWER_SCALE;
3400 +- rq->post_schedule = 0;
3401 ++ rq->balance_callback = NULL;
3402 + rq->active_balance = 0;
3403 + rq->next_balance = jiffies;
3404 + rq->push_cpu = 0;
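The core of this scheduler backport replaces the rq->post_schedule flag with a per-runqueue list of callback_head nodes: class hooks queue a node under rq->lock and the scheduler drains the list after the hooks return, so switched_to()/prio_changed() never drop rq->lock themselves. A compilable userspace model of the queue-then-drain pattern (locking omitted; names mirror the patch where possible):

    #include <stddef.h>
    #include <stdio.h>

    struct callback_head {
        struct callback_head *next;
        void (*func)(struct callback_head *);
    };

    struct rq { struct callback_head *balance_callback; };

    static void queue_balance_callback(struct rq *rq,
                                       struct callback_head *head,
                                       void (*func)(struct callback_head *))
    {
        if (head->next)                /* same re-queue guard as the hunk */
            return;
        head->func = func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
    }

    static void balance_callback(struct rq *rq)
    {
        struct callback_head *head = rq->balance_callback;

        rq->balance_callback = NULL;
        while (head) {                 /* drain in LIFO order */
            struct callback_head *next = head->next;

            head->next = NULL;         /* mark "not queued" again */
            head->func(head);
            head = next;
        }
    }

    static void push_tasks(struct callback_head *h) { (void)h; printf("push\n"); }
    static void pull_task(struct callback_head *h)  { (void)h; printf("pull\n"); }

    int main(void)
    {
        struct rq rq = { NULL };
        struct callback_head push = { NULL }, pull = { NULL };

        queue_balance_callback(&rq, &push, push_tasks);
        queue_balance_callback(&rq, &pull, pull_task);
        balance_callback(&rq);         /* prints: pull, then push */
        return 0;
    }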
3405 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
3406 +index 8d3c5ddfdfdd..6ab59bb2947b 100644
3407 +--- a/kernel/sched/deadline.c
3408 ++++ b/kernel/sched/deadline.c
3409 +@@ -210,6 +210,25 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
3410 +
3411 + static int push_dl_task(struct rq *rq);
3412 +
3413 ++static DEFINE_PER_CPU(struct callback_head, dl_push_head);
3414 ++static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
3415 ++
3416 ++static void push_dl_tasks(struct rq *);
3417 ++static void pull_dl_task(struct rq *);
3418 ++
3419 ++static inline void queue_push_tasks(struct rq *rq)
3420 ++{
3421 ++ if (!has_pushable_dl_tasks(rq))
3422 ++ return;
3423 ++
3424 ++ queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
3425 ++}
3426 ++
3427 ++static inline void queue_pull_task(struct rq *rq)
3428 ++{
3429 ++ queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
3430 ++}
3431 ++
3432 + #else
3433 +
3434 + static inline
3435 +@@ -232,6 +251,13 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
3436 + {
3437 + }
3438 +
3439 ++static inline void queue_push_tasks(struct rq *rq)
3440 ++{
3441 ++}
3442 ++
3443 ++static inline void queue_pull_task(struct rq *rq)
3444 ++{
3445 ++}
3446 + #endif /* CONFIG_SMP */
3447 +
3448 + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
3449 +@@ -1005,7 +1031,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
3450 + #endif
3451 +
3452 + #ifdef CONFIG_SMP
3453 +- rq->post_schedule = has_pushable_dl_tasks(rq);
3454 ++ queue_push_tasks(rq);
3455 + #endif /* CONFIG_SMP */
3456 +
3457 + return p;
3458 +@@ -1336,15 +1362,16 @@ static void push_dl_tasks(struct rq *rq)
3459 + ;
3460 + }
3461 +
3462 +-static int pull_dl_task(struct rq *this_rq)
3463 ++static void pull_dl_task(struct rq *this_rq)
3464 + {
3465 +- int this_cpu = this_rq->cpu, ret = 0, cpu;
3466 ++ int this_cpu = this_rq->cpu, cpu;
3467 + struct task_struct *p;
3468 ++ bool resched = false;
3469 + struct rq *src_rq;
3470 + u64 dmin = LONG_MAX;
3471 +
3472 + if (likely(!dl_overloaded(this_rq)))
3473 +- return 0;
3474 ++ return;
3475 +
3476 + /*
3477 + * Match the barrier from dl_set_overloaded; this guarantees that if we
3478 +@@ -1399,7 +1426,7 @@ static int pull_dl_task(struct rq *this_rq)
3479 + src_rq->curr->dl.deadline))
3480 + goto skip;
3481 +
3482 +- ret = 1;
3483 ++ resched = true;
3484 +
3485 + deactivate_task(src_rq, p, 0);
3486 + set_task_cpu(p, this_cpu);
3487 +@@ -1412,7 +1439,8 @@ skip:
3488 + double_unlock_balance(this_rq, src_rq);
3489 + }
3490 +
3491 +- return ret;
3492 ++ if (resched)
3493 ++ resched_task(this_rq->curr);
3494 + }
3495 +
3496 + static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
3497 +@@ -1422,11 +1450,6 @@ static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
3498 + pull_dl_task(rq);
3499 + }
3500 +
3501 +-static void post_schedule_dl(struct rq *rq)
3502 +-{
3503 +- push_dl_tasks(rq);
3504 +-}
3505 +-
3506 + /*
3507 + * Since the task is not running and a reschedule is not going to happen
3508 + * anytime soon on its runqueue, we try pushing it away now.
3509 +@@ -1529,7 +1552,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
3510 + * from an overloaded cpu, if any.
3511 + */
3512 + if (!rq->dl.dl_nr_running)
3513 +- pull_dl_task(rq);
3514 ++ queue_pull_task(rq);
3515 + #endif
3516 + }
3517 +
3518 +@@ -1539,8 +1562,6 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
3519 + */
3520 + static void switched_to_dl(struct rq *rq, struct task_struct *p)
3521 + {
3522 +- int check_resched = 1;
3523 +-
3524 + /*
3525 + * If p is throttled, don't consider the possibility
3526 + * of preempting rq->curr, the check will be done right
3527 +@@ -1551,12 +1572,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
3528 +
3529 + if (p->on_rq || rq->curr != p) {
3530 + #ifdef CONFIG_SMP
3531 +- if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
3532 +- /* Only reschedule if pushing failed */
3533 +- check_resched = 0;
3534 +-#endif /* CONFIG_SMP */
3535 +- if (check_resched && task_has_dl_policy(rq->curr))
3536 ++ if (rq->dl.overloaded)
3537 ++ queue_push_tasks(rq);
3538 ++#else
3539 ++ if (task_has_dl_policy(rq->curr))
3540 + check_preempt_curr_dl(rq, p, 0);
3541 ++#endif /* CONFIG_SMP */
3542 + }
3543 + }
3544 +
3545 +@@ -1576,15 +1597,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
3546 + * or lowering its prio, so...
3547 + */
3548 + if (!rq->dl.overloaded)
3549 +- pull_dl_task(rq);
3550 ++ queue_pull_task(rq);
3551 +
3552 + /*
3553 + * If we now have an earlier deadline task than p,
3554 + * then reschedule, provided p is still on this
3555 + * runqueue.
3556 + */
3557 +- if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
3558 +- rq->curr == p)
3559 ++ if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3560 + resched_task(p);
3561 + #else
3562 + /*
3563 +@@ -1615,7 +1635,6 @@ const struct sched_class dl_sched_class = {
3564 + .rq_online = rq_online_dl,
3565 + .rq_offline = rq_offline_dl,
3566 + .pre_schedule = pre_schedule_dl,
3567 +- .post_schedule = post_schedule_dl,
3568 + .task_woken = task_woken_dl,
3569 + #endif
3570 +
3571 +diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
3572 +index 516c3d9ceea1..d08678d38d12 100644
3573 +--- a/kernel/sched/idle_task.c
3574 ++++ b/kernel/sched/idle_task.c
3575 +@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
3576 + idle_exit_fair(rq);
3577 + rq_last_tick_reset(rq);
3578 + }
3579 +-
3580 +-static void post_schedule_idle(struct rq *rq)
3581 +-{
3582 +- idle_enter_fair(rq);
3583 +-}
3584 + #endif /* CONFIG_SMP */
3585 + /*
3586 + * Idle tasks are unconditionally rescheduled:
3587 +@@ -37,8 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
3588 + {
3589 + schedstat_inc(rq, sched_goidle);
3590 + #ifdef CONFIG_SMP
3591 +- /* Trigger the post schedule to do an idle_enter for CFS */
3592 +- rq->post_schedule = 1;
3593 ++ idle_enter_fair(rq);
3594 + #endif
3595 + return rq->idle;
3596 + }
3597 +@@ -102,7 +96,6 @@ const struct sched_class idle_sched_class = {
3598 + #ifdef CONFIG_SMP
3599 + .select_task_rq = select_task_rq_idle,
3600 + .pre_schedule = pre_schedule_idle,
3601 +- .post_schedule = post_schedule_idle,
3602 + #endif
3603 +
3604 + .set_curr_task = set_curr_task_idle,
3605 +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
3606 +index 27b8e836307f..0fb72ae876e7 100644
3607 +--- a/kernel/sched/rt.c
3608 ++++ b/kernel/sched/rt.c
3609 +@@ -315,6 +315,25 @@ static inline int has_pushable_tasks(struct rq *rq)
3610 + return !plist_head_empty(&rq->rt.pushable_tasks);
3611 + }
3612 +
3613 ++static DEFINE_PER_CPU(struct callback_head, rt_push_head);
3614 ++static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
3615 ++
3616 ++static void push_rt_tasks(struct rq *);
3617 ++static void pull_rt_task(struct rq *);
3618 ++
3619 ++static inline void queue_push_tasks(struct rq *rq)
3620 ++{
3621 ++ if (!has_pushable_tasks(rq))
3622 ++ return;
3623 ++
3624 ++ queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
3625 ++}
3626 ++
3627 ++static inline void queue_pull_task(struct rq *rq)
3628 ++{
3629 ++ queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
3630 ++}
3631 ++
3632 + static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
3633 + {
3634 + plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
3635 +@@ -359,6 +378,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
3636 + {
3637 + }
3638 +
3639 ++static inline void queue_push_tasks(struct rq *rq)
3640 ++{
3641 ++}
3642 + #endif /* CONFIG_SMP */
3643 +
3644 + static inline int on_rt_rq(struct sched_rt_entity *rt_se)
3645 +@@ -1349,11 +1371,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
3646 + dequeue_pushable_task(rq, p);
3647 +
3648 + #ifdef CONFIG_SMP
3649 +- /*
3650 +- * We detect this state here so that we can avoid taking the RQ
3651 +- * lock again later if there is no need to push
3652 +- */
3653 +- rq->post_schedule = has_pushable_tasks(rq);
3654 ++ queue_push_tasks(rq);
3655 + #endif
3656 +
3657 + return p;
3658 +@@ -1641,14 +1659,15 @@ static void push_rt_tasks(struct rq *rq)
3659 + ;
3660 + }
3661 +
3662 +-static int pull_rt_task(struct rq *this_rq)
3663 ++static void pull_rt_task(struct rq *this_rq)
3664 + {
3665 +- int this_cpu = this_rq->cpu, ret = 0, cpu;
3666 ++ int this_cpu = this_rq->cpu, cpu;
3667 ++ bool resched = false;
3668 + struct task_struct *p;
3669 + struct rq *src_rq;
3670 +
3671 + if (likely(!rt_overloaded(this_rq)))
3672 +- return 0;
3673 ++ return;
3674 +
3675 + /*
3676 + * Match the barrier from rt_set_overloaded; this guarantees that if we
3677 +@@ -1705,7 +1724,7 @@ static int pull_rt_task(struct rq *this_rq)
3678 + if (p->prio < src_rq->curr->prio)
3679 + goto skip;
3680 +
3681 +- ret = 1;
3682 ++ resched = true;
3683 +
3684 + deactivate_task(src_rq, p, 0);
3685 + set_task_cpu(p, this_cpu);
3686 +@@ -1721,7 +1740,8 @@ skip:
3687 + double_unlock_balance(this_rq, src_rq);
3688 + }
3689 +
3690 +- return ret;
3691 ++ if (resched)
3692 ++ resched_task(this_rq->curr);
3693 + }
3694 +
3695 + static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
3696 +@@ -1731,11 +1751,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
3697 + pull_rt_task(rq);
3698 + }
3699 +
3700 +-static void post_schedule_rt(struct rq *rq)
3701 +-{
3702 +- push_rt_tasks(rq);
3703 +-}
3704 +-
3705 + /*
3706 + * If we are not running and we are not going to reschedule soon, we should
3707 + * try to push tasks away now
3708 +@@ -1829,8 +1844,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
3709 + if (!p->on_rq || rq->rt.rt_nr_running)
3710 + return;
3711 +
3712 +- if (pull_rt_task(rq))
3713 +- resched_task(rq->curr);
3714 ++ queue_pull_task(rq);
3715 + }
3716 +
3717 + void init_sched_rt_class(void)
3718 +@@ -1851,8 +1865,6 @@ void init_sched_rt_class(void)
3719 + */
3720 + static void switched_to_rt(struct rq *rq, struct task_struct *p)
3721 + {
3722 +- int check_resched = 1;
3723 +-
3724 + /*
3725 + * If we are already running, then there's nothing
3726 + * that needs to be done. But if we are not running
3727 +@@ -1862,13 +1874,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
3728 + */
3729 + if (p->on_rq && rq->curr != p) {
3730 + #ifdef CONFIG_SMP
3731 +- if (rq->rt.overloaded && push_rt_task(rq) &&
3732 +- /* Don't resched if we changed runqueues */
3733 +- rq != task_rq(p))
3734 +- check_resched = 0;
3735 +-#endif /* CONFIG_SMP */
3736 +- if (check_resched && p->prio < rq->curr->prio)
3737 ++ if (rq->rt.overloaded)
3738 ++ queue_push_tasks(rq);
3739 ++#else
3740 ++ if (p->prio < rq->curr->prio)
3741 + resched_task(rq->curr);
3742 ++#endif /* CONFIG_SMP */
3743 + }
3744 + }
3745 +
3746 +@@ -1889,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
3747 + * may need to pull tasks to this runqueue.
3748 + */
3749 + if (oldprio < p->prio)
3750 +- pull_rt_task(rq);
3751 ++ queue_pull_task(rq);
3752 ++
3753 + /*
3754 + * If there's a higher priority task waiting to run
3755 +- * then reschedule. Note, the above pull_rt_task
3756 +- * can release the rq lock and p could migrate.
3757 +- * Only reschedule if p is still on the same runqueue.
3758 ++ * then reschedule.
3759 + */
3760 +- if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
3761 ++ if (p->prio > rq->rt.highest_prio.curr)
3762 + resched_task(p);
3763 + #else
3764 + /* For UP simply resched on drop of prio */
3765 +@@ -2008,7 +2018,6 @@ const struct sched_class rt_sched_class = {
3766 + .rq_online = rq_online_rt,
3767 + .rq_offline = rq_offline_rt,
3768 + .pre_schedule = pre_schedule_rt,
3769 +- .post_schedule = post_schedule_rt,
3770 + .task_woken = task_woken_rt,
3771 + .switched_from = switched_from_rt,
3772 + #endif
3773 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
3774 +index 835b6efa8bd6..675e147a86f2 100644
3775 +--- a/kernel/sched/sched.h
3776 ++++ b/kernel/sched/sched.h
3777 +@@ -587,9 +587,10 @@ struct rq {
3778 +
3779 + unsigned long cpu_power;
3780 +
3781 ++ struct callback_head *balance_callback;
3782 ++
3783 + unsigned char idle_balance;
3784 + /* For active balancing */
3785 +- int post_schedule;
3786 + int active_balance;
3787 + int push_cpu;
3788 + struct cpu_stop_work active_balance_work;
3789 +@@ -690,6 +691,21 @@ extern int migrate_swap(struct task_struct *, struct task_struct *);
3790 +
3791 + #ifdef CONFIG_SMP
3792 +
3793 ++static inline void
3794 ++queue_balance_callback(struct rq *rq,
3795 ++ struct callback_head *head,
3796 ++ void (*func)(struct rq *rq))
3797 ++{
3798 ++ lockdep_assert_held(&rq->lock);
3799 ++
3800 ++ if (unlikely(head->next))
3801 ++ return;
3802 ++
3803 ++ head->func = (void (*)(struct callback_head *))func;
3804 ++ head->next = rq->balance_callback;
3805 ++ rq->balance_callback = head;
3806 ++}
3807 ++
3808 + #define rcu_dereference_check_sched_domain(p) \
3809 + rcu_dereference_check((p), \
3810 + lockdep_is_held(&sched_domains_mutex))
3811 +@@ -1131,7 +1147,6 @@ struct sched_class {
3812 + void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
3813 +
3814 + void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
3815 +- void (*post_schedule) (struct rq *this_rq);
3816 + void (*task_waking) (struct task_struct *task);
3817 + void (*task_woken) (struct rq *this_rq, struct task_struct *task);
3818 +
3819 +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
3820 +index ce033c7aa2e8..9cff0ab82b63 100644
3821 +--- a/kernel/time/posix-clock.c
3822 ++++ b/kernel/time/posix-clock.c
3823 +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
3824 + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
3825 + {
3826 + struct posix_clock *clk = get_posix_clock(fp);
3827 +- int result = 0;
3828 ++ unsigned int result = 0;
3829 +
3830 + if (!clk)
3831 +- return -ENODEV;
3832 ++ return POLLERR;
3833 +
3834 + if (clk->ops.poll)
3835 + result = clk->ops.poll(clk, fp, wait);
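The posix_clock_poll() fix above works because poll handlers return an unsigned event mask: a negative errno such as -ENODEV, once stored in an unsigned int, looks like a mask with nearly every event bit set. A small demonstration (constants copied from the Linux uapi values):

    #include <stdio.h>

    #define POLLERR 0x0008
    #define ENODEV  19

    int main(void)
    {
        unsigned int bad  = (unsigned int)-ENODEV; /* what the old code did */
        unsigned int good = POLLERR;               /* what a dead clock should say */

        printf("-ENODEV as mask: %#x\n", bad);     /* 0xffffffed: bogus events */
        printf("POLLERR mask:    %#x\n", good);
        return 0;
    }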
3836 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
3837 +index da41de9dc319..c798ed2fc281 100644
3838 +--- a/kernel/trace/ring_buffer.c
3839 ++++ b/kernel/trace/ring_buffer.c
3840 +@@ -1949,12 +1949,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3841 + goto again;
3842 + }
3843 +
3844 +-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3845 +-{
3846 +- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
3847 +- cpu_buffer->reader_page->read = 0;
3848 +-}
3849 +-
3850 + static void rb_inc_iter(struct ring_buffer_iter *iter)
3851 + {
3852 + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3853 +@@ -3592,7 +3586,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3854 +
3855 + /* Finally update the reader page to the new head */
3856 + cpu_buffer->reader_page = reader;
3857 +- rb_reset_reader_page(cpu_buffer);
3858 ++ cpu_buffer->reader_page->read = 0;
3859 +
3860 + if (overwrite != cpu_buffer->last_overrun) {
3861 + cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3862 +@@ -3602,6 +3596,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3863 + goto again;
3864 +
3865 + out:
3866 ++ /* Update the read_stamp on the first event */
3867 ++ if (reader && reader->read == 0)
3868 ++ cpu_buffer->read_stamp = reader->page->time_stamp;
3869 ++
3870 + arch_spin_unlock(&cpu_buffer->lock);
3871 + local_irq_restore(flags);
3872 +
3873 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3874 +index c6646a58d23e..bb1ac9cbe30a 100644
3875 +--- a/kernel/trace/trace_events.c
3876 ++++ b/kernel/trace/trace_events.c
3877 +@@ -606,7 +606,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
3878 + * The ftrace subsystem is for showing formats only.
3879 + * They can not be enabled or disabled via the event files.
3880 + */
3881 +- if (call->class && call->class->reg)
3882 ++ if (call->class && call->class->reg &&
3883 ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
3884 + return file;
3885 + }
3886 +
3887 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3888 +index 646a8b81bee1..423c9e37a9e7 100644
3889 +--- a/kernel/workqueue.c
3890 ++++ b/kernel/workqueue.c
3891 +@@ -1475,13 +1475,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
3892 + timer_stats_timer_set_start_info(&dwork->timer);
3893 +
3894 + dwork->wq = wq;
3895 +- /* timer isn't guaranteed to run in this cpu, record earlier */
3896 +- if (cpu == WORK_CPU_UNBOUND)
3897 +- cpu = raw_smp_processor_id();
3898 + dwork->cpu = cpu;
3899 + timer->expires = jiffies + delay;
3900 +
3901 +- add_timer_on(timer, cpu);
3902 ++ if (unlikely(cpu != WORK_CPU_UNBOUND))
3903 ++ add_timer_on(timer, cpu);
3904 ++ else
3905 ++ add_timer(timer);
3906 + }
3907 +
3908 + /**
3909 +diff --git a/lib/devres.c b/lib/devres.c
3910 +index 823533138fa0..20afaf181b27 100644
3911 +--- a/lib/devres.c
3912 ++++ b/lib/devres.c
3913 +@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
3914 + if (!iomap)
3915 + return;
3916 +
3917 +- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3918 ++ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
3919 + if (!(mask & (1 << i)))
3920 + continue;
3921 +
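The pcim_iounmap_regions() change above shrinks the loop bound to the size of the iomap table: with the old bound, mask bits at or above PCIM_IOMAP_MAX indexed past the end of the array. A sketch of the mismatch (the constants approximate the kernel's six-entry BAR table versus the larger resource count):

    #include <stdio.h>

    #define PCIM_IOMAP_MAX        6
    #define DEVICE_COUNT_RESOURCE 17

    int main(void)
    {
        void *table[PCIM_IOMAP_MAX] = { 0 };
        int mask = 1 << 10;            /* a bit above the table size */
        int i;

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)   /* old, broken bound */
            if (mask & (1 << i))
                printf("index %d is outside table (size %zu)!\n",
                       i, sizeof table / sizeof table[0]);

        for (i = 0; i < PCIM_IOMAP_MAX; i++)          /* fixed bound */
            if (mask & (1 << i))
                printf("never reached for high bits\n");
        return 0;
    }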
3922 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
3923 +index 2e87eecec8f6..04dd542697a7 100644
3924 +--- a/net/ceph/messenger.c
3925 ++++ b/net/ceph/messenger.c
3926 +@@ -2279,7 +2279,7 @@ static int read_partial_message(struct ceph_connection *con)
3927 + con->in_base_pos = -front_len - middle_len - data_len -
3928 + sizeof(m->footer);
3929 + con->in_tag = CEPH_MSGR_TAG_READY;
3930 +- return 0;
3931 ++ return 1;
3932 + } else if ((s64)seq - (s64)con->in_seq > 1) {
3933 + pr_err("read_partial_message bad seq %lld expected %lld\n",
3934 + seq, con->in_seq + 1);
3935 +@@ -2312,7 +2312,7 @@ static int read_partial_message(struct ceph_connection *con)
3936 + sizeof(m->footer);
3937 + con->in_tag = CEPH_MSGR_TAG_READY;
3938 + con->in_seq++;
3939 +- return 0;
3940 ++ return 1;
3941 + }
3942 +
3943 + BUG_ON(!con->in_msg);
3944 +diff --git a/net/core/scm.c b/net/core/scm.c
3945 +index d30eb057fa7b..cad57a1390dd 100644
3946 +--- a/net/core/scm.c
3947 ++++ b/net/core/scm.c
3948 +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
3949 + *fplp = fpl;
3950 + fpl->count = 0;
3951 + fpl->max = SCM_MAX_FD;
3952 ++ fpl->user = NULL;
3953 + }
3954 + fpp = &fpl->fp[fpl->count];
3955 +
3956 +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
3957 + *fpp++ = file;
3958 + fpl->count++;
3959 + }
3960 ++
3961 ++ if (!fpl->user)
3962 ++ fpl->user = get_uid(current_user());
3963 ++
3964 + return num;
3965 + }
3966 +
3967 +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
3968 + scm->fp = NULL;
3969 + for (i=fpl->count-1; i>=0; i--)
3970 + fput(fpl->fp[i]);
3971 ++ free_uid(fpl->user);
3972 + kfree(fpl);
3973 + }
3974 + }
3975 +@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
3976 + for (i = 0; i < fpl->count; i++)
3977 + get_file(fpl->fp[i]);
3978 + new_fpl->max = new_fpl->count;
3979 ++ new_fpl->user = get_uid(fpl->user);
3980 + }
3981 + return new_fpl;
3982 + }
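Together with the af_unix changes later in this patch, scm_fp_copy() now pins the sending task's user_struct in the fp list, so in-flight descriptor accounting is charged to, and released from, the same user that attached the descriptors. A minimal model of that pairing (structures heavily simplified; the real code counts per descriptor, not per list):

    #include <stddef.h>
    #include <stdio.h>

    struct user_struct { int refcount; long unix_inflight; };

    static struct user_struct *get_uid(struct user_struct *u)
    { u->refcount++; return u; }
    static void free_uid(struct user_struct *u) { u->refcount--; }

    struct scm_fp_list { int count; struct user_struct *user; };

    static void attach_fds(struct scm_fp_list *fpl,
                           struct user_struct *sender, int nfds)
    {
        fpl->count = nfds;
        if (!fpl->user)
            fpl->user = get_uid(sender);    /* pin the sender once */
    }

    static void unix_inflight(struct scm_fp_list *fpl)
    { fpl->user->unix_inflight += fpl->count; }

    static void unix_notinflight(struct scm_fp_list *fpl)
    { fpl->user->unix_inflight -= fpl->count; }

    int main(void)
    {
        struct user_struct alice = { 1, 0 };
        struct scm_fp_list fpl = { 0, NULL };

        attach_fds(&fpl, &alice, 3);
        unix_inflight(&fpl);
        printf("alice in flight: %ld\n", alice.unix_inflight);
        unix_notinflight(&fpl);
        free_uid(fpl.user);                 /* __scm_destroy() side */
        printf("alice in flight: %ld, refs: %d\n",
               alice.unix_inflight, alice.refcount);
        return 0;
    }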
3983 +diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
3984 +index c49dcd0284a0..56dd8ac6d28b 100644
3985 +--- a/net/ipv4/netfilter/ipt_rpfilter.c
3986 ++++ b/net/ipv4/netfilter/ipt_rpfilter.c
3987 +@@ -61,9 +61,7 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
3988 + if (FIB_RES_DEV(res) == dev)
3989 + dev_match = true;
3990 + #endif
3991 +- if (dev_match || flags & XT_RPFILTER_LOOSE)
3992 +- return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
3993 +- return dev_match;
3994 ++ return dev_match || flags & XT_RPFILTER_LOOSE;
3995 + }
3996 +
3997 + static bool rpfilter_is_local(const struct sk_buff *skb)
3998 +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
3999 +index 3f0ec063d7f8..7b74fca4d850 100644
4000 +--- a/net/ipv6/addrconf.c
4001 ++++ b/net/ipv6/addrconf.c
4002 +@@ -4793,6 +4793,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
4003 + return ret;
4004 + }
4005 +
4006 ++static
4007 ++int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
4008 ++ void __user *buffer, size_t *lenp, loff_t *ppos)
4009 ++{
4010 ++ struct inet6_dev *idev = ctl->extra1;
4011 ++ int min_mtu = IPV6_MIN_MTU;
4012 ++ struct ctl_table lctl;
4013 ++
4014 ++ lctl = *ctl;
4015 ++ lctl.extra1 = &min_mtu;
4016 ++ lctl.extra2 = idev ? &idev->dev->mtu : NULL;
4017 ++
4018 ++ return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
4019 ++}
4020 ++
4021 + static void dev_disable_change(struct inet6_dev *idev)
4022 + {
4023 + struct netdev_notifier_info info;
4024 +@@ -4944,7 +4959,7 @@ static struct addrconf_sysctl_table
4025 + .data = &ipv6_devconf.mtu6,
4026 + .maxlen = sizeof(int),
4027 + .mode = 0644,
4028 +- .proc_handler = proc_dointvec,
4029 ++ .proc_handler = addrconf_sysctl_mtu,
4030 + },
4031 + {
4032 + .procname = "accept_ra",
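The new addrconf_sysctl_mtu handler above copies the ctl_table and points extra1/extra2 at IPV6_MIN_MTU and the device MTU, so proc_dointvec_minmax() rejects out-of-range writes that plain proc_dointvec used to accept. A standalone sketch of that clamped write (dointvec_minmax is a simplified stand-in for the kernel helper):

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280

    static int dointvec_minmax(int *val, int newval,
                               const int *min, const int *max)
    {
        if ((min && newval < *min) || (max && newval > *max))
            return -1;                  /* -EINVAL in the kernel */
        *val = newval;
        return 0;
    }

    int main(void)
    {
        int min = IPV6_MIN_MTU, dev_mtu = 1500, mtu6 = 1500;

        printf("%d\n", dointvec_minmax(&mtu6, 1000, &min, &dev_mtu)); /* too small */
        printf("%d\n", dointvec_minmax(&mtu6, 9000, &min, &dev_mtu)); /* too large */
        printf("%d mtu6=%d\n",
               dointvec_minmax(&mtu6, 1400, &min, &dev_mtu), mtu6);   /* accepted  */
        return 0;
    }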
4033 +diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
4034 +index a0d17270117c..bd174540eb21 100644
4035 +--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
4036 ++++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
4037 +@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
4038 + }
4039 +
4040 + static void
4041 +-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
4042 ++synproxy_send_tcp(const struct synproxy_net *snet,
4043 ++ const struct sk_buff *skb, struct sk_buff *nskb,
4044 + struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
4045 + struct ipv6hdr *niph, struct tcphdr *nth,
4046 + unsigned int tcp_hdr_size)
4047 + {
4048 +- struct net *net = nf_ct_net((struct nf_conn *)nfct);
4049 ++ struct net *net = nf_ct_net(snet->tmpl);
4050 + struct dst_entry *dst;
4051 + struct flowi6 fl6;
4052 +
4053 +@@ -83,7 +84,8 @@ free_nskb:
4054 + }
4055 +
4056 + static void
4057 +-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
4058 ++synproxy_send_client_synack(const struct synproxy_net *snet,
4059 ++ const struct sk_buff *skb, const struct tcphdr *th,
4060 + const struct synproxy_options *opts)
4061 + {
4062 + struct sk_buff *nskb;
4063 +@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
4064 +
4065 + synproxy_build_options(nth, opts);
4066 +
4067 +- synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
4068 ++ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
4069 + niph, nth, tcp_hdr_size);
4070 + }
4071 +
4072 +@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
4073 +
4074 + synproxy_build_options(nth, opts);
4075 +
4076 +- synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
4077 ++ synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
4078 + niph, nth, tcp_hdr_size);
4079 + }
4080 +
4081 +@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
4082 +
4083 + synproxy_build_options(nth, opts);
4084 +
4085 +- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
4086 ++ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
4087 + }
4088 +
4089 + static void
4090 +@@ -241,7 +243,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
4091 +
4092 + synproxy_build_options(nth, opts);
4093 +
4094 +- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
4095 ++ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
4096 + }
4097 +
4098 + static bool
4099 +@@ -301,7 +303,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
4100 + XT_SYNPROXY_OPT_SACK_PERM |
4101 + XT_SYNPROXY_OPT_ECN);
4102 +
4103 +- synproxy_send_client_synack(skb, th, &opts);
4104 ++ synproxy_send_client_synack(snet, skb, th, &opts);
4105 + return NF_DROP;
4106 +
4107 + } else if (th->ack && !(th->fin || th->rst || th->syn)) {
4108 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
4109 +index 7d050ed6fe5a..6d28bd434ce8 100644
4110 +--- a/net/mac80211/mesh_pathtbl.c
4111 ++++ b/net/mac80211/mesh_pathtbl.c
4112 +@@ -746,10 +746,8 @@ void mesh_plink_broken(struct sta_info *sta)
4113 + static void mesh_path_node_reclaim(struct rcu_head *rp)
4114 + {
4115 + struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
4116 +- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
4117 +
4118 + del_timer_sync(&node->mpath->timer);
4119 +- atomic_dec(&sdata->u.mesh.mpaths);
4120 + kfree(node->mpath);
4121 + kfree(node);
4122 + }
4123 +@@ -757,8 +755,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
4124 + /* needs to be called with the corresponding hashwlock taken */
4125 + static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
4126 + {
4127 +- struct mesh_path *mpath;
4128 +- mpath = node->mpath;
4129 ++ struct mesh_path *mpath = node->mpath;
4130 ++ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
4131 ++
4132 + spin_lock(&mpath->state_lock);
4133 + mpath->flags |= MESH_PATH_RESOLVING;
4134 + if (mpath->is_gate)
4135 +@@ -766,6 +765,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
4136 + hlist_del_rcu(&node->list);
4137 + call_rcu(&node->rcu, mesh_path_node_reclaim);
4138 + spin_unlock(&mpath->state_lock);
4139 ++ atomic_dec(&sdata->u.mesh.mpaths);
4140 + atomic_dec(&tbl->entries);
4141 + }
4142 +
4143 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4144 +index 99de2409f731..4e8d90b8fc01 100644
4145 +--- a/net/netfilter/nf_tables_api.c
4146 ++++ b/net/netfilter/nf_tables_api.c
4147 +@@ -3316,9 +3316,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
4148 + */
4149 + void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
4150 + {
4151 +- switch (type) {
4152 +- case NFT_DATA_VALUE:
4153 ++ if (type < NFT_DATA_VERDICT)
4154 + return;
4155 ++ switch (type) {
4156 + case NFT_DATA_VERDICT:
4157 + return nft_verdict_uninit(data);
4158 + default:
4159 +diff --git a/net/rds/send.c b/net/rds/send.c
4160 +index a82fb660ec00..44222c0607c7 100644
4161 +--- a/net/rds/send.c
4162 ++++ b/net/rds/send.c
4163 +@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
4164 + release_sock(sk);
4165 + }
4166 +
4167 +- /* racing with another thread binding seems ok here */
4168 ++ lock_sock(sk);
4169 + if (daddr == 0 || rs->rs_bound_addr == 0) {
4170 ++ release_sock(sk);
4171 + ret = -ENOTCONN; /* XXX not a great errno */
4172 + goto out;
4173 + }
4174 ++ release_sock(sk);
4175 +
4176 + /* size of rm including all sgs */
4177 + ret = rds_rm_size(msg, payload_len);
4178 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
4179 +index ed7e0b4e7f90..4b9dc2460772 100644
4180 +--- a/net/rfkill/core.c
4181 ++++ b/net/rfkill/core.c
4182 +@@ -49,7 +49,6 @@
4183 + struct rfkill {
4184 + spinlock_t lock;
4185 +
4186 +- const char *name;
4187 + enum rfkill_type type;
4188 +
4189 + unsigned long state;
4190 +@@ -73,6 +72,7 @@ struct rfkill {
4191 + struct delayed_work poll_work;
4192 + struct work_struct uevent_work;
4193 + struct work_struct sync_work;
4194 ++ char name[];
4195 + };
4196 + #define to_rfkill(d) container_of(d, struct rfkill, dev)
4197 +
4198 +@@ -861,14 +861,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
4199 + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
4200 + return NULL;
4201 +
4202 +- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
4203 ++ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
4204 + if (!rfkill)
4205 + return NULL;
4206 +
4207 + spin_lock_init(&rfkill->lock);
4208 + INIT_LIST_HEAD(&rfkill->node);
4209 + rfkill->type = type;
4210 +- rfkill->name = name;
4211 ++ strcpy(rfkill->name, name);
4212 + rfkill->ops = ops;
4213 + rfkill->data = ops_data;
4214 +
4215 +@@ -1078,17 +1078,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
4216 + return res;
4217 + }
4218 +
4219 +-static bool rfkill_readable(struct rfkill_data *data)
4220 +-{
4221 +- bool r;
4222 +-
4223 +- mutex_lock(&data->mtx);
4224 +- r = !list_empty(&data->events);
4225 +- mutex_unlock(&data->mtx);
4226 +-
4227 +- return r;
4228 +-}
4229 +-
4230 + static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
4231 + size_t count, loff_t *pos)
4232 + {
4233 +@@ -1105,8 +1094,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
4234 + goto out;
4235 + }
4236 + mutex_unlock(&data->mtx);
4237 ++ /* since we re-check and it just compares pointers,
4238 ++ * using !list_empty() without locking isn't a problem
4239 ++ */
4240 + ret = wait_event_interruptible(data->read_wait,
4241 +- rfkill_readable(data));
4242 ++ !list_empty(&data->events));
4243 + mutex_lock(&data->mtx);
4244 +
4245 + if (ret)
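The rfkill change above stops borrowing the caller's name pointer and instead copies the string into trailing storage allocated together with the object, the usual flexible-array-member pattern. A compilable illustration (rfkill_like and alloc_named are made-up names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct rfkill_like {
        int type;
        char name[];            /* flexible array member, must be last */
    };

    static struct rfkill_like *alloc_named(int type, const char *name)
    {
        /* One allocation sized for the struct plus the string and NUL. */
        struct rfkill_like *r = calloc(1, sizeof(*r) + strlen(name) + 1);

        if (!r)
            return NULL;
        r->type = type;
        strcpy(r->name, name);  /* safe: sized above for this string */
        return r;
    }

    int main(void)
    {
        char volatile_name[] = "wlan0-rfkill";
        struct rfkill_like *r = alloc_named(1, volatile_name);

        memset(volatile_name, 0, sizeof(volatile_name)); /* caller's copy dies */
        printf("still have: %s\n", r->name);             /* our copy lives on  */
        free(r);
        return 0;
    }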
4246 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
4247 +index 0adc66caae2f..07edbcd8697e 100644
4248 +--- a/net/sunrpc/cache.c
4249 ++++ b/net/sunrpc/cache.c
4250 +@@ -1230,7 +1230,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
4251 + if (bp[0] == '\\' && bp[1] == 'x') {
4252 + /* HEX STRING */
4253 + bp += 2;
4254 +- while (len < bufsize) {
4255 ++ while (len < bufsize - 1) {
4256 + int h, l;
4257 +
4258 + h = hex_to_bin(bp[0]);
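The qword_get() bound above fixes an off-by-one: the hex-decode loop could fill the whole destination buffer, leaving no room for the terminating NUL written afterwards. A self-contained model (decode_hex and hexval are illustrative stand-ins for the kernel helpers):

    #include <stdio.h>

    static int hexval(char c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return -1;
    }

    static int decode_hex(const char *bp, char *dest, int bufsize)
    {
        int len = 0;

        while (len < bufsize - 1) {          /* the fixed bound */
            int h = hexval(bp[0]), l = hexval(bp[1]);

            if (h < 0 || l < 0)
                break;
            dest[len++] = (char)((h << 4) | l);
            bp += 2;
        }
        dest[len] = '\0';                    /* always fits now */
        return len;
    }

    int main(void)
    {
        char buf[4];
        int n = decode_hex("41424344", buf, sizeof(buf));

        printf("decoded %d bytes: %s\n", n, buf);  /* "ABC", no overflow */
        return 0;
    }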
4259 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
4260 +index 0cd18c240250..ab2eeb1cb32c 100644
4261 +--- a/net/unix/af_unix.c
4262 ++++ b/net/unix/af_unix.c
4263 +@@ -1469,7 +1469,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
4264 + UNIXCB(skb).fp = NULL;
4265 +
4266 + for (i = scm->fp->count-1; i >= 0; i--)
4267 +- unix_notinflight(scm->fp->fp[i]);
4268 ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
4269 + }
4270 +
4271 + static void unix_destruct_scm(struct sk_buff *skb)
4272 +@@ -1534,7 +1534,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
4273 + return -ENOMEM;
4274 +
4275 + for (i = scm->fp->count - 1; i >= 0; i--)
4276 +- unix_inflight(scm->fp->fp[i]);
4277 ++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
4278 + return max_level;
4279 + }
4280 +
4281 +diff --git a/net/unix/garbage.c b/net/unix/garbage.c
4282 +index 06730fe6ad9d..a72182d6750f 100644
4283 +--- a/net/unix/garbage.c
4284 ++++ b/net/unix/garbage.c
4285 +@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp)
4286 + * descriptor if it is for an AF_UNIX socket.
4287 + */
4288 +
4289 +-void unix_inflight(struct file *fp)
4290 ++void unix_inflight(struct user_struct *user, struct file *fp)
4291 + {
4292 + struct sock *s = unix_get_socket(fp);
4293 +
4294 +@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp)
4295 + }
4296 + unix_tot_inflight++;
4297 + }
4298 +- fp->f_cred->user->unix_inflight++;
4299 ++ user->unix_inflight++;
4300 + spin_unlock(&unix_gc_lock);
4301 + }
4302 +
4303 +-void unix_notinflight(struct file *fp)
4304 ++void unix_notinflight(struct user_struct *user, struct file *fp)
4305 + {
4306 + struct sock *s = unix_get_socket(fp);
4307 +
4308 +@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp)
4309 + list_del_init(&u->link);
4310 + unix_tot_inflight--;
4311 + }
4312 +- fp->f_cred->user->unix_inflight--;
4313 ++ user->unix_inflight--;
4314 + spin_unlock(&unix_gc_lock);
4315 + }
4316 +
4317 +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
4318 +index 9c22317778eb..ee625e3a56ba 100644
4319 +--- a/scripts/recordmcount.c
4320 ++++ b/scripts/recordmcount.c
4321 +@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname)
4322 + addr = umalloc(sb.st_size);
4323 + uread(fd_map, addr, sb.st_size);
4324 + }
4325 ++ if (sb.st_nlink != 1) {
4326 ++ /* file is hard-linked, break the hard link */
4327 ++ close(fd_map);
4328 ++ if (unlink(fname) < 0) {
4329 ++ perror(fname);
4330 ++ fail_file();
4331 ++ }
4332 ++ fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
4333 ++ if (fd_map < 0) {
4334 ++ perror(fname);
4335 ++ fail_file();
4336 ++ }
4337 ++ uwrite(fd_map, addr, sb.st_size);
4338 ++ }
4339 + return addr;
4340 + }
4341 +
4342 +diff --git a/tools/Makefile b/tools/Makefile
4343 +index feec3ad5fd09..6e8ac8982149 100644
4344 +--- a/tools/Makefile
4345 ++++ b/tools/Makefile
4346 +@@ -24,6 +24,10 @@ help:
4347 + @echo ' from the kernel command line to build and install one of'
4348 + @echo ' the tools above'
4349 + @echo ''
4350 ++ @echo ' $$ make tools/all'
4351 ++ @echo ''
4352 ++ @echo ' builds all tools.'
4353 ++ @echo ''
4354 + @echo ' $$ make tools/install'
4355 + @echo ''
4356 + @echo ' installs all tools.'
4357 +@@ -58,6 +62,11 @@ turbostat x86_energy_perf_policy: FORCE
4358 + tmon: FORCE
4359 + $(call descend,thermal/$@)
4360 +
4361 ++all: acpi cgroup cpupower firewire lguest \
4362 ++ perf selftests turbostat usb \
4363 ++ virtio vm net x86_energy_perf_policy \
4364 ++ tmon
4365 ++
4366 + acpi_install:
4367 + $(call descend,power/$(@:_install=),install)
4368 +
4369 +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
4370 +index f2c80d5451c3..919095029528 100644
4371 +--- a/virt/kvm/async_pf.c
4372 ++++ b/virt/kvm/async_pf.c
4373 +@@ -152,7 +152,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
4374 + * do alloc nowait since if we are going to sleep anyway we
4375 + * may as well sleep faulting in page
4376 + */
4377 +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
4378 ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
4379 + if (!work)
4380 + return 0;
4381 +