From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:3.10 commit in: /
Date: Fri, 04 Mar 2016 00:10:25 +0000
Message-Id: 1457050194.7873784e8132df42248ac3796965e243062b8a6d.mpagano@gentoo
1 commit: 7873784e8132df42248ac3796965e243062b8a6d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Fri Mar 4 00:09:54 2016 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Fri Mar 4 00:09:54 2016 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7873784e8132df42248ac3796965e243062b8a6d
7
8 Linux patch 3.10.99
9
10 0000_README | 4 +
11 1098_linux-3.10.99.patch | 2573 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2577 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 180e0a0..44fb3ac 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -434,6 +434,10 @@ Patch: 1097_linux-3.10.98.patch
19 From: http://www.kernel.org
20 Desc: Linux 3.10.98
21
22 +Patch: 1098_linux-3.10.99.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 3.10.99
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1098_linux-3.10.99.patch b/1098_linux-3.10.99.patch
31 new file mode 100644
32 index 0000000..e405e5a
33 --- /dev/null
34 +++ b/1098_linux-3.10.99.patch
35 @@ -0,0 +1,2573 @@
36 +diff --git a/Makefile b/Makefile
37 +index dadd1edc6f84..f1e6491fd7d8 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 3
42 + PATCHLEVEL = 10
43 +-SUBLEVEL = 98
44 ++SUBLEVEL = 99
45 + EXTRAVERSION =
46 + NAME = TOSSUG Baby Fish
47 +
48 +diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
49 +index a8d02223da44..14558a9fa3b3 100644
50 +--- a/arch/arc/kernel/unwind.c
51 ++++ b/arch/arc/kernel/unwind.c
52 +@@ -984,42 +984,13 @@ int arc_unwind(struct unwind_frame_info *frame)
53 + (const u8 *)(fde +
54 + 1) +
55 + *fde, ptrType);
56 +- if (pc >= endLoc)
57 ++ if (pc >= endLoc) {
58 + fde = NULL;
59 +- } else
60 +- fde = NULL;
61 +- }
62 +- if (fde == NULL) {
63 +- for (fde = table->address, tableSize = table->size;
64 +- cie = NULL, tableSize > sizeof(*fde)
65 +- && tableSize - sizeof(*fde) >= *fde;
66 +- tableSize -= sizeof(*fde) + *fde,
67 +- fde += 1 + *fde / sizeof(*fde)) {
68 +- cie = cie_for_fde(fde, table);
69 +- if (cie == &bad_cie) {
70 + cie = NULL;
71 +- break;
72 + }
73 +- if (cie == NULL
74 +- || cie == &not_fde
75 +- || (ptrType = fde_pointer_type(cie)) < 0)
76 +- continue;
77 +- ptr = (const u8 *)(fde + 2);
78 +- startLoc = read_pointer(&ptr,
79 +- (const u8 *)(fde + 1) +
80 +- *fde, ptrType);
81 +- if (!startLoc)
82 +- continue;
83 +- if (!(ptrType & DW_EH_PE_indirect))
84 +- ptrType &=
85 +- DW_EH_PE_FORM | DW_EH_PE_signed;
86 +- endLoc =
87 +- startLoc + read_pointer(&ptr,
88 +- (const u8 *)(fde +
89 +- 1) +
90 +- *fde, ptrType);
91 +- if (pc >= startLoc && pc < endLoc)
92 +- break;
93 ++ } else {
94 ++ fde = NULL;
95 ++ cie = NULL;
96 + }
97 + }
98 + }
99 +diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
100 +index 920b63210806..34c35f0e3290 100644
101 +--- a/arch/mips/kvm/kvm_locore.S
102 ++++ b/arch/mips/kvm/kvm_locore.S
103 +@@ -156,9 +156,11 @@ FEXPORT(__kvm_mips_vcpu_run)
104 +
105 + FEXPORT(__kvm_mips_load_asid)
106 + /* Set the ASID for the Guest Kernel */
107 +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
108 +- /* addresses shift to 0x80000000 */
109 +- bltz t0, 1f /* If kernel */
110 ++ PTR_L t0, VCPU_COP0(k1)
111 ++ LONG_L t0, COP0_STATUS(t0)
112 ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL
113 ++ xori t0, KSU_USER
114 ++ bnez t0, 1f /* If kernel */
115 + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
116 + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
117 + 1:
118 +@@ -442,9 +444,11 @@ __kvm_mips_return_to_guest:
119 + mtc0 t0, CP0_EPC
120 +
121 + /* Set the ASID for the Guest Kernel */
122 +- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
123 +- /* addresses shift to 0x80000000 */
124 +- bltz t0, 1f /* If kernel */
125 ++ PTR_L t0, VCPU_COP0(k1)
126 ++ LONG_L t0, COP0_STATUS(t0)
127 ++ andi t0, KSU_USER | ST0_ERL | ST0_EXL
128 ++ xori t0, KSU_USER
129 ++ bnez t0, 1f /* If kernel */
130 + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
131 + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
132 + 1:
133 +diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
134 +index 843ec38fec7b..8aa5f30d8579 100644
135 +--- a/arch/mips/kvm/kvm_mips.c
136 ++++ b/arch/mips/kvm/kvm_mips.c
137 +@@ -308,7 +308,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
138 +
139 + if (!gebase) {
140 + err = -ENOMEM;
141 +- goto out_free_cpu;
142 ++ goto out_uninit_cpu;
143 + }
144 + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
145 + ALIGN(size, PAGE_SIZE), gebase);
146 +@@ -368,6 +368,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
147 + out_free_gebase:
148 + kfree(gebase);
149 +
150 ++out_uninit_cpu:
151 ++ kvm_vcpu_uninit(vcpu);
152 ++
153 + out_free_cpu:
154 + kfree(vcpu);
155 +
156 +diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
157 +index c76f297b7149..33085819cd89 100644
158 +--- a/arch/mips/kvm/kvm_mips_emul.c
159 ++++ b/arch/mips/kvm/kvm_mips_emul.c
160 +@@ -935,7 +935,7 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
161 +
162 + base = (inst >> 21) & 0x1f;
163 + op_inst = (inst >> 16) & 0x1f;
164 +- offset = inst & 0xffff;
165 ++ offset = (int16_t)inst;
166 + cache = (inst >> 16) & 0x3;
167 + op = (inst >> 18) & 0x7;
168 +
169 +diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
170 +index 4d1ee88864e8..18c8b819b0aa 100644
171 +--- a/arch/s390/mm/extable.c
172 ++++ b/arch/s390/mm/extable.c
173 +@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
174 + int i;
175 +
176 + /* Normalize entries to being relative to the start of the section */
177 +- for (p = start, i = 0; p < finish; p++, i += 8)
178 ++ for (p = start, i = 0; p < finish; p++, i += 8) {
179 + p->insn += i;
180 ++ p->fixup += i + 4;
181 ++ }
182 + sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
183 + /* Denormalize all entries */
184 +- for (p = start, i = 0; p < finish; p++, i += 8)
185 ++ for (p = start, i = 0; p < finish; p++, i += 8) {
186 + p->insn -= i;
187 ++ p->fixup -= i + 4;
188 ++ }
189 + }
190 +
191 + #ifdef CONFIG_MODULES
192 +diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
193 +index be8db9bb7878..666510b39870 100644
194 +--- a/arch/sparc/kernel/sys_sparc_64.c
195 ++++ b/arch/sparc/kernel/sys_sparc_64.c
196 +@@ -416,7 +416,7 @@ out:
197 +
198 + SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
199 + {
200 +- int ret;
201 ++ long ret;
202 +
203 + if (personality(current->personality) == PER_LINUX32 &&
204 + personality(personality) == PER_LINUX)
205 +diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
206 +index 337518c5042a..b412c62486f0 100644
207 +--- a/arch/um/os-Linux/start_up.c
208 ++++ b/arch/um/os-Linux/start_up.c
209 +@@ -95,6 +95,8 @@ static int start_ptraced_child(void)
210 + {
211 + int pid, n, status;
212 +
213 ++ fflush(stdout);
214 ++
215 + pid = fork();
216 + if (pid == 0)
217 + ptrace_child();
218 +diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
219 +index 6033be9ff81a..3c8bffdc71c8 100644
220 +--- a/arch/x86/platform/efi/efi.c
221 ++++ b/arch/x86/platform/efi/efi.c
222 +@@ -250,12 +250,19 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
223 + efi_memory_desc_t *virtual_map)
224 + {
225 + efi_status_t status;
226 ++ unsigned long flags;
227 +
228 + efi_call_phys_prelog();
229 ++
230 ++ /* Disable interrupts around EFI calls: */
231 ++ local_irq_save(flags);
232 + status = efi_call_phys4(efi_phys.set_virtual_address_map,
233 + memory_map_size, descriptor_size,
234 + descriptor_version, virtual_map);
235 ++ local_irq_restore(flags);
236 ++
237 + efi_call_phys_epilog();
238 ++
239 + return status;
240 + }
241 +
242 +diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
243 +index 40e446941dd7..bebbee05e331 100644
244 +--- a/arch/x86/platform/efi/efi_32.c
245 ++++ b/arch/x86/platform/efi/efi_32.c
246 +@@ -33,19 +33,16 @@
247 +
248 + /*
249 + * To make EFI call EFI runtime service in physical addressing mode we need
250 +- * prelog/epilog before/after the invocation to disable interrupt, to
251 +- * claim EFI runtime service handler exclusively and to duplicate a memory in
252 +- * low memory space say 0 - 3G.
253 ++ * prolog/epilog before/after the invocation to claim the EFI runtime service
254 ++ * handler exclusively and to duplicate a memory mapping in low memory space,
255 ++ * say 0 - 3G.
256 + */
257 +
258 +-static unsigned long efi_rt_eflags;
259 +
260 + void efi_call_phys_prelog(void)
261 + {
262 + struct desc_ptr gdt_descr;
263 +
264 +- local_irq_save(efi_rt_eflags);
265 +-
266 + load_cr3(initial_page_table);
267 + __flush_tlb_all();
268 +
269 +@@ -64,6 +61,4 @@ void efi_call_phys_epilog(void)
270 +
271 + load_cr3(swapper_pg_dir);
272 + __flush_tlb_all();
273 +-
274 +- local_irq_restore(efi_rt_eflags);
275 + }
276 +diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
277 +index 39a0e7f1f0a3..2f6c1a9734c8 100644
278 +--- a/arch/x86/platform/efi/efi_64.c
279 ++++ b/arch/x86/platform/efi/efi_64.c
280 +@@ -40,7 +40,6 @@
281 + #include <asm/fixmap.h>
282 +
283 + static pgd_t *save_pgd __initdata;
284 +-static unsigned long efi_flags __initdata;
285 +
286 + static void __init early_code_mapping_set_exec(int executable)
287 + {
288 +@@ -66,7 +65,6 @@ void __init efi_call_phys_prelog(void)
289 + int n_pgds;
290 +
291 + early_code_mapping_set_exec(1);
292 +- local_irq_save(efi_flags);
293 +
294 + n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
295 + save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
296 +@@ -90,7 +88,6 @@ void __init efi_call_phys_epilog(void)
297 + set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
298 + kfree(save_pgd);
299 + __flush_tlb_all();
300 +- local_irq_restore(efi_flags);
301 + early_code_mapping_set_exec(0);
302 + }
303 +
304 +diff --git a/block/partitions/mac.c b/block/partitions/mac.c
305 +index 76d8ba6379a9..bd5b91465230 100644
306 +--- a/block/partitions/mac.c
307 ++++ b/block/partitions/mac.c
308 +@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
309 + Sector sect;
310 + unsigned char *data;
311 + int slot, blocks_in_map;
312 +- unsigned secsize;
313 ++ unsigned secsize, datasize, partoffset;
314 + #ifdef CONFIG_PPC_PMAC
315 + int found_root = 0;
316 + int found_root_goodness = 0;
317 +@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
318 + }
319 + secsize = be16_to_cpu(md->block_size);
320 + put_dev_sector(sect);
321 +- data = read_part_sector(state, secsize/512, &sect);
322 ++ datasize = round_down(secsize, 512);
323 ++ data = read_part_sector(state, datasize / 512, &sect);
324 + if (!data)
325 + return -1;
326 +- part = (struct mac_partition *) (data + secsize%512);
327 ++ partoffset = secsize % 512;
328 ++ if (partoffset + sizeof(*part) > datasize)
329 ++ return -1;
330 ++ part = (struct mac_partition *) (data + partoffset);
331 + if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
332 + put_dev_sector(sect);
333 + return 0; /* not a MacOS disk */
334 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
335 +index 136803c47cdb..96e5ed188636 100644
336 +--- a/drivers/ata/libata-sff.c
337 ++++ b/drivers/ata/libata-sff.c
338 +@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
339 + static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
340 + {
341 + struct ata_port *ap = qc->ap;
342 +- unsigned long flags;
343 +
344 + if (ap->ops->error_handler) {
345 + if (in_wq) {
346 +- spin_lock_irqsave(ap->lock, flags);
347 +-
348 + /* EH might have kicked in while host lock is
349 + * released.
350 + */
351 +@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
352 + } else
353 + ata_port_freeze(ap);
354 + }
355 +-
356 +- spin_unlock_irqrestore(ap->lock, flags);
357 + } else {
358 + if (likely(!(qc->err_mask & AC_ERR_HSM)))
359 + ata_qc_complete(qc);
360 +@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
361 + }
362 + } else {
363 + if (in_wq) {
364 +- spin_lock_irqsave(ap->lock, flags);
365 + ata_sff_irq_on(ap);
366 + ata_qc_complete(qc);
367 +- spin_unlock_irqrestore(ap->lock, flags);
368 + } else
369 + ata_qc_complete(qc);
370 + }
371 +@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
372 + {
373 + struct ata_link *link = qc->dev->link;
374 + struct ata_eh_info *ehi = &link->eh_info;
375 +- unsigned long flags = 0;
376 + int poll_next;
377 +
378 ++ lockdep_assert_held(ap->lock);
379 ++
380 + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
381 +
382 + /* Make sure ata_sff_qc_issue() does not throw things
383 +@@ -1112,14 +1106,6 @@ fsm_start:
384 + }
385 + }
386 +
387 +- /* Send the CDB (atapi) or the first data block (ata pio out).
388 +- * During the state transition, interrupt handler shouldn't
389 +- * be invoked before the data transfer is complete and
390 +- * hsm_task_state is changed. Hence, the following locking.
391 +- */
392 +- if (in_wq)
393 +- spin_lock_irqsave(ap->lock, flags);
394 +-
395 + if (qc->tf.protocol == ATA_PROT_PIO) {
396 + /* PIO data out protocol.
397 + * send first data block.
398 +@@ -1135,9 +1121,6 @@ fsm_start:
399 + /* send CDB */
400 + atapi_send_cdb(ap, qc);
401 +
402 +- if (in_wq)
403 +- spin_unlock_irqrestore(ap->lock, flags);
404 +-
405 + /* if polling, ata_sff_pio_task() handles the rest.
406 + * otherwise, interrupt handler takes over from here.
407 + */
408 +@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
409 + u8 status;
410 + int poll_next;
411 +
412 ++ spin_lock_irq(ap->lock);
413 ++
414 + BUG_ON(ap->sff_pio_task_link == NULL);
415 + /* qc can be NULL if timeout occurred */
416 + qc = ata_qc_from_tag(ap, link->active_tag);
417 + if (!qc) {
418 + ap->sff_pio_task_link = NULL;
419 +- return;
420 ++ goto out_unlock;
421 + }
422 +
423 + fsm_start:
424 +@@ -1381,11 +1366,14 @@ fsm_start:
425 + */
426 + status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
427 + if (status & ATA_BUSY) {
428 ++ spin_unlock_irq(ap->lock);
429 + ata_msleep(ap, 2);
430 ++ spin_lock_irq(ap->lock);
431 ++
432 + status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
433 + if (status & ATA_BUSY) {
434 + ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
435 +- return;
436 ++ goto out_unlock;
437 + }
438 + }
439 +
440 +@@ -1402,6 +1390,8 @@ fsm_start:
441 + */
442 + if (poll_next)
443 + goto fsm_start;
444 ++out_unlock:
445 ++ spin_unlock_irq(ap->lock);
446 + }
447 +
448 + /**
449 +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
450 +index dd1faa564eb2..cdfb98e70cfd 100644
451 +--- a/drivers/ata/sata_sil.c
452 ++++ b/drivers/ata/sata_sil.c
453 +@@ -631,6 +631,9 @@ static void sil_dev_config(struct ata_device *dev)
454 + unsigned int n, quirks = 0;
455 + unsigned char model_num[ATA_ID_PROD_LEN + 1];
456 +
457 ++ /* This controller doesn't support trim */
458 ++ dev->horkage |= ATA_HORKAGE_NOTRIM;
459 ++
460 + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
461 +
462 + for (n = 0; sil_blacklist[n].product; n++)
463 +diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
464 +index 64f553f04fa4..5874ebf9dced 100644
465 +--- a/drivers/clocksource/vt8500_timer.c
466 ++++ b/drivers/clocksource/vt8500_timer.c
467 +@@ -50,6 +50,8 @@
468 +
469 + #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
470 +
471 ++#define MIN_OSCR_DELTA 16
472 ++
473 + static void __iomem *regbase;
474 +
475 + static cycle_t vt8500_timer_read(struct clocksource *cs)
476 +@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
477 + cpu_relax();
478 + writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
479 +
480 +- if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
481 ++ if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
482 + return -ETIME;
483 +
484 + writel(1, regbase + TIMER_IER_VAL);
485 +@@ -162,7 +164,7 @@ static void __init vt8500_timer_init(struct device_node *np)
486 + pr_err("%s: setup_irq failed for %s\n", __func__,
487 + clockevent.name);
488 + clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
489 +- 4, 0xf0000000);
490 ++ MIN_OSCR_DELTA * 2, 0xf0000000);
491 + }
492 +
493 + CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
494 +diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
495 +index b6b7d70f2832..5cfc1765af74 100644
496 +--- a/drivers/gpu/drm/ast/ast_drv.h
497 ++++ b/drivers/gpu/drm/ast/ast_drv.h
498 +@@ -296,6 +296,7 @@ int ast_framebuffer_init(struct drm_device *dev,
499 + int ast_fbdev_init(struct drm_device *dev);
500 + void ast_fbdev_fini(struct drm_device *dev);
501 + void ast_fbdev_set_suspend(struct drm_device *dev, int state);
502 ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
503 +
504 + struct ast_bo {
505 + struct ttm_buffer_object bo;
506 +diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
507 +index fbc0823cfa18..a298d8f72225 100644
508 +--- a/drivers/gpu/drm/ast/ast_fb.c
509 ++++ b/drivers/gpu/drm/ast/ast_fb.c
510 +@@ -366,3 +366,10 @@ void ast_fbdev_set_suspend(struct drm_device *dev, int state)
511 +
512 + fb_set_suspend(ast->fbdev->helper.fbdev, state);
513 + }
514 ++
515 ++void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
516 ++{
517 ++ ast->fbdev->helper.fbdev->fix.smem_start =
518 ++ ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
519 ++ ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
520 ++}
521 +diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
522 +index 96f874a508e2..313ccaf25f49 100644
523 +--- a/drivers/gpu/drm/ast/ast_main.c
524 ++++ b/drivers/gpu/drm/ast/ast_main.c
525 +@@ -359,6 +359,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
526 + dev->mode_config.min_height = 0;
527 + dev->mode_config.preferred_depth = 24;
528 + dev->mode_config.prefer_shadow = 1;
529 ++ dev->mode_config.fb_base = pci_resource_start(ast->dev->pdev, 0);
530 +
531 + if (ast->chip == AST2100 ||
532 + ast->chip == AST2200 ||
533 +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
534 +index e8f6418b6dec..f3a54ad77e3f 100644
535 +--- a/drivers/gpu/drm/ast/ast_mode.c
536 ++++ b/drivers/gpu/drm/ast/ast_mode.c
537 +@@ -509,6 +509,8 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
538 + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
539 + if (ret)
540 + DRM_ERROR("failed to kmap fbcon\n");
541 ++ else
542 ++ ast_fbdev_set_base(ast, gpu_addr);
543 + }
544 + ast_bo_unreserve(bo);
545 +
546 +diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
547 +index ba2ab9a9b988..f3cce23f4a62 100644
548 +--- a/drivers/gpu/drm/radeon/radeon_atombios.c
549 ++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
550 +@@ -452,7 +452,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
551 + }
552 +
553 + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
554 +- if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
555 ++ if (((dev->pdev->device == 0x9802) ||
556 ++ (dev->pdev->device == 0x9805) ||
557 ++ (dev->pdev->device == 0x9806)) &&
558 + (dev->pdev->subsystem_vendor == 0x1734) &&
559 + (dev->pdev->subsystem_device == 0x11bd)) {
560 + if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
561 +@@ -463,14 +465,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
562 + }
563 + }
564 +
565 +- /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
566 +- if ((dev->pdev->device == 0x9805) &&
567 +- (dev->pdev->subsystem_vendor == 0x1734) &&
568 +- (dev->pdev->subsystem_device == 0x11bd)) {
569 +- if (*connector_type == DRM_MODE_CONNECTOR_VGA)
570 +- return false;
571 +- }
572 +-
573 + return true;
574 + }
575 +
576 +diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
577 +index db83d075606e..6acd3646ac08 100644
578 +--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
579 ++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
580 +@@ -73,6 +73,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
581 + struct drm_mode_config *mode_config = &dev->mode_config;
582 + struct drm_connector *connector;
583 +
584 ++ /* we can race here at startup, some boards seem to trigger
585 ++ * hotplug irqs when they shouldn't. */
586 ++ if (!rdev->mode_info.mode_config_initialized)
587 ++ return;
588 ++
589 + mutex_lock(&mode_config->mutex);
590 + if (mode_config->num_connector) {
591 + list_for_each_entry(connector, &mode_config->connector_list, head)
592 +diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
593 +index f0bac68254b7..bb166849aa6e 100644
594 +--- a/drivers/gpu/drm/radeon/radeon_sa.c
595 ++++ b/drivers/gpu/drm/radeon/radeon_sa.c
596 +@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
597 + /* see if we can skip over some allocations */
598 + } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
599 +
600 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
601 ++ radeon_fence_ref(fences[i]);
602 ++
603 + spin_unlock(&sa_manager->wq.lock);
604 + r = radeon_fence_wait_any(rdev, fences, false);
605 ++ for (i = 0; i < RADEON_NUM_RINGS; ++i)
606 ++ radeon_fence_unref(&fences[i]);
607 + spin_lock(&sa_manager->wq.lock);
608 + /* if we have nothing to wait for block */
609 + if (r == -ENOENT && block) {
610 +diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
611 +index 4a14e113369d..f7015592544f 100644
612 +--- a/drivers/gpu/drm/radeon/radeon_ttm.c
613 ++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
614 +@@ -619,7 +619,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
615 + 0, PAGE_SIZE,
616 + PCI_DMA_BIDIRECTIONAL);
617 + if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
618 +- while (--i) {
619 ++ while (i--) {
620 + pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
621 + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
622 + gtt->ttm.dma_address[i] = 0;
623 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
624 +index 6c44c69a5ba4..94a0baac93dd 100644
625 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
626 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
627 +@@ -25,6 +25,7 @@
628 + *
629 + **************************************************************************/
630 + #include <linux/module.h>
631 ++#include <linux/console.h>
632 +
633 + #include <drm/drmP.h>
634 + #include "vmwgfx_drv.h"
635 +@@ -1192,6 +1193,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
636 + static int __init vmwgfx_init(void)
637 + {
638 + int ret;
639 ++
640 ++#ifdef CONFIG_VGA_CONSOLE
641 ++ if (vgacon_text_force())
642 ++ return -EINVAL;
643 ++#endif
644 ++
645 + ret = drm_pci_init(&driver, &vmw_pci_driver);
646 + if (ret)
647 + DRM_ERROR("Failed initializing DRM.\n");
648 +diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
649 +index e893f6e1937d..3c84e96a485a 100644
650 +--- a/drivers/gpu/vga/vgaarb.c
651 ++++ b/drivers/gpu/vga/vgaarb.c
652 +@@ -392,8 +392,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
653 + set_current_state(interruptible ?
654 + TASK_INTERRUPTIBLE :
655 + TASK_UNINTERRUPTIBLE);
656 +- if (signal_pending(current)) {
657 +- rc = -EINTR;
658 ++ if (interruptible && signal_pending(current)) {
659 ++ __set_current_state(TASK_RUNNING);
660 ++ remove_wait_queue(&vga_wait_queue, &wait);
661 ++ rc = -ERESTARTSYS;
662 + break;
663 + }
664 + schedule();
665 +diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
666 +index 3e094cd6a0e3..a9194ef626cd 100644
667 +--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
668 ++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
669 +@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
670 + error = l2t_send(tdev, skb, l2e);
671 + if (error < 0)
672 + kfree_skb(skb);
673 +- return error;
674 ++ return error < 0 ? error : 0;
675 + }
676 +
677 + int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
678 +@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
679 + error = cxgb3_ofld_send(tdev, skb);
680 + if (error < 0)
681 + kfree_skb(skb);
682 +- return error;
683 ++ return error < 0 ? error : 0;
684 + }
685 +
686 + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
687 +diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
688 +index dabb697b1c2a..48ba1c3e945a 100644
689 +--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
690 ++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
691 +@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
692 + struct qib_ibdev *dev = to_idev(ibqp->device);
693 + struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
694 + struct qib_mcast *mcast = NULL;
695 +- struct qib_mcast_qp *p, *tmp;
696 ++ struct qib_mcast_qp *p, *tmp, *delp = NULL;
697 + struct rb_node *n;
698 + int last = 0;
699 + int ret;
700 +
701 +- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
702 +- ret = -EINVAL;
703 +- goto bail;
704 +- }
705 ++ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
706 ++ return -EINVAL;
707 +
708 + spin_lock_irq(&ibp->lock);
709 +
710 +@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
711 + while (1) {
712 + if (n == NULL) {
713 + spin_unlock_irq(&ibp->lock);
714 +- ret = -EINVAL;
715 +- goto bail;
716 ++ return -EINVAL;
717 + }
718 +
719 + mcast = rb_entry(n, struct qib_mcast, rb_node);
720 +@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
721 + */
722 + list_del_rcu(&p->list);
723 + mcast->n_attached--;
724 ++ delp = p;
725 +
726 + /* If this was the last attached QP, remove the GID too. */
727 + if (list_empty(&mcast->qp_list)) {
728 +@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
729 + }
730 +
731 + spin_unlock_irq(&ibp->lock);
732 ++ /* QP not attached */
733 ++ if (!delp)
734 ++ return -EINVAL;
735 ++ /*
736 ++ * Wait for any list walkers to finish before freeing the
737 ++ * list element.
738 ++ */
739 ++ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
740 ++ qib_mcast_qp_free(delp);
741 +
742 +- if (p) {
743 +- /*
744 +- * Wait for any list walkers to finish before freeing the
745 +- * list element.
746 +- */
747 +- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
748 +- qib_mcast_qp_free(p);
749 +- }
750 + if (last) {
751 + atomic_dec(&mcast->refcount);
752 + wait_event(mcast->wait, !atomic_read(&mcast->refcount));
753 +@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
754 + dev->n_mcast_grps_allocated--;
755 + spin_unlock_irq(&dev->n_mcast_grps_lock);
756 + }
757 +-
758 +- ret = 0;
759 +-
760 +-bail:
761 +- return ret;
762 ++ return 0;
763 + }
764 +
765 + int qib_mcast_tree_empty(struct qib_ibport *ibp)
766 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
767 +index b4713cea1913..2d2915fdbf02 100644
768 +--- a/drivers/md/bcache/super.c
769 ++++ b/drivers/md/bcache/super.c
770 +@@ -1959,8 +1959,10 @@ static int __init bcache_init(void)
771 + closure_debug_init();
772 +
773 + bcache_major = register_blkdev(0, "bcache");
774 +- if (bcache_major < 0)
775 ++ if (bcache_major < 0) {
776 ++ unregister_reboot_notifier(&reboot);
777 + return bcache_major;
778 ++ }
779 +
780 + if (!(bcache_wq = create_workqueue("bcache")) ||
781 + !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
782 +diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
783 +index 0b2536247cf5..84e27708ad97 100644
784 +--- a/drivers/md/dm-exception-store.h
785 ++++ b/drivers/md/dm-exception-store.h
786 +@@ -70,7 +70,7 @@ struct dm_exception_store_type {
787 + * Update the metadata with this exception.
788 + */
789 + void (*commit_exception) (struct dm_exception_store *store,
790 +- struct dm_exception *e,
791 ++ struct dm_exception *e, int valid,
792 + void (*callback) (void *, int success),
793 + void *callback_context);
794 +
795 +diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
796 +index 2d2b1b7588d7..8f6d3ea55401 100644
797 +--- a/drivers/md/dm-snap-persistent.c
798 ++++ b/drivers/md/dm-snap-persistent.c
799 +@@ -646,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
800 + }
801 +
802 + static void persistent_commit_exception(struct dm_exception_store *store,
803 +- struct dm_exception *e,
804 ++ struct dm_exception *e, int valid,
805 + void (*callback) (void *, int success),
806 + void *callback_context)
807 + {
808 +@@ -655,6 +655,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
809 + struct core_exception ce;
810 + struct commit_callback *cb;
811 +
812 ++ if (!valid)
813 ++ ps->valid = 0;
814 ++
815 + ce.old_chunk = e->old_chunk;
816 + ce.new_chunk = e->new_chunk;
817 + write_exception(ps, ps->current_committed++, &ce);
818 +diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
819 +index 1ce9a2586e41..31439d53cf7e 100644
820 +--- a/drivers/md/dm-snap-transient.c
821 ++++ b/drivers/md/dm-snap-transient.c
822 +@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
823 + }
824 +
825 + static void transient_commit_exception(struct dm_exception_store *store,
826 +- struct dm_exception *e,
827 ++ struct dm_exception *e, int valid,
828 + void (*callback) (void *, int success),
829 + void *callback_context)
830 + {
831 + /* Just succeed */
832 +- callback(callback_context, 1);
833 ++ callback(callback_context, valid);
834 + }
835 +
836 + static void transient_usage(struct dm_exception_store *store,
837 +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
838 +index d892a05c84f4..dbd0f00f7395 100644
839 +--- a/drivers/md/dm-snap.c
840 ++++ b/drivers/md/dm-snap.c
841 +@@ -1388,8 +1388,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
842 + dm_table_event(s->ti->table);
843 + }
844 +
845 +-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
846 ++static void pending_complete(void *context, int success)
847 + {
848 ++ struct dm_snap_pending_exception *pe = context;
849 + struct dm_exception *e;
850 + struct dm_snapshot *s = pe->snap;
851 + struct bio *origin_bios = NULL;
852 +@@ -1459,24 +1460,13 @@ out:
853 + free_pending_exception(pe);
854 + }
855 +
856 +-static void commit_callback(void *context, int success)
857 +-{
858 +- struct dm_snap_pending_exception *pe = context;
859 +-
860 +- pending_complete(pe, success);
861 +-}
862 +-
863 + static void complete_exception(struct dm_snap_pending_exception *pe)
864 + {
865 + struct dm_snapshot *s = pe->snap;
866 +
867 +- if (unlikely(pe->copy_error))
868 +- pending_complete(pe, 0);
869 +-
870 +- else
871 +- /* Update the metadata if we are persistent */
872 +- s->store->type->commit_exception(s->store, &pe->e,
873 +- commit_callback, pe);
874 ++ /* Update the metadata if we are persistent */
875 ++ s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
876 ++ pending_complete, pe);
877 + }
878 +
879 + /*
880 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
881 +index 43f6250baadd..4bf9211b2740 100644
882 +--- a/drivers/md/dm-thin-metadata.c
883 ++++ b/drivers/md/dm-thin-metadata.c
884 +@@ -1191,6 +1191,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
885 + dm_block_t held_root;
886 +
887 + /*
888 ++ * We commit to ensure the btree roots which we increment in a
889 ++ * moment are up to date.
890 ++ */
891 ++ __commit_transaction(pmd);
892 ++
893 ++ /*
894 + * Copy the superblock.
895 + */
896 + dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
897 +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
898 +index ec56072c6326..295f74d4f0ab 100644
899 +--- a/drivers/md/dm-thin.c
900 ++++ b/drivers/md/dm-thin.c
901 +@@ -2281,7 +2281,7 @@ static void pool_postsuspend(struct dm_target *ti)
902 + struct pool_c *pt = ti->private;
903 + struct pool *pool = pt->pool;
904 +
905 +- cancel_delayed_work(&pool->waker);
906 ++ cancel_delayed_work_sync(&pool->waker);
907 + flush_workqueue(pool->wq);
908 + (void) commit_or_fallback(pool);
909 + }
910 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
911 +index 6d7f4d950b8f..b07fcda9ca71 100644
912 +--- a/drivers/md/persistent-data/dm-btree.c
913 ++++ b/drivers/md/persistent-data/dm-btree.c
914 +@@ -235,6 +235,16 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
915 + return f->level < (info->levels - 1);
916 + }
917 +
918 ++static void unlock_all_frames(struct del_stack *s)
919 ++{
920 ++ struct frame *f;
921 ++
922 ++ while (unprocessed_frames(s)) {
923 ++ f = s->spine + s->top--;
924 ++ dm_tm_unlock(s->tm, f->b);
925 ++ }
926 ++}
927 ++
928 + int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
929 + {
930 + int r;
931 +@@ -290,9 +300,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
932 + f->current_child = f->nr_children;
933 + }
934 + }
935 +-
936 + out:
937 ++ if (r) {
938 ++ /* cleanup all frames of del_stack */
939 ++ unlock_all_frames(s);
940 ++ }
941 + kfree(s);
942 ++
943 + return r;
944 + }
945 + EXPORT_SYMBOL_GPL(dm_btree_del);
946 +diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
947 +index 1f925e856974..46a984291b7d 100644
948 +--- a/drivers/media/dvb-core/dvb_frontend.c
949 ++++ b/drivers/media/dvb-core/dvb_frontend.c
950 +@@ -2195,9 +2195,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
951 + dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
952 + __func__, c->delivery_system, fe->ops.info.type);
953 +
954 +- /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
955 +- * do it, it is done for it. */
956 +- info->caps |= FE_CAN_INVERSION_AUTO;
957 ++ /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
958 ++ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
959 ++ info->caps |= FE_CAN_INVERSION_AUTO;
960 + err = 0;
961 + break;
962 + }
963 +diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
964 +index a2631be7ffac..08e0f0dd8728 100644
965 +--- a/drivers/media/dvb-frontends/tda1004x.c
966 ++++ b/drivers/media/dvb-frontends/tda1004x.c
967 +@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
968 + {
969 + struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
970 + struct tda1004x_state* state = fe->demodulator_priv;
971 ++ int status;
972 +
973 + dprintk("%s\n", __func__);
974 +
975 ++ status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
976 ++ if (status == -1)
977 ++ return -EIO;
978 ++
979 ++ /* Only update the properties cache if device is locked */
980 ++ if (!(status & 8))
981 ++ return 0;
982 ++
983 + // inversion status
984 + fe_params->inversion = INVERSION_OFF;
985 + if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
986 +diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
987 +index 2e28c81a03ab..a5bee0d0d686 100644
988 +--- a/drivers/media/usb/gspca/ov534.c
989 ++++ b/drivers/media/usb/gspca/ov534.c
990 +@@ -1490,8 +1490,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
991 + struct v4l2_fract *tpf = &cp->timeperframe;
992 + struct sd *sd = (struct sd *) gspca_dev;
993 +
994 +- /* Set requested framerate */
995 +- sd->frame_rate = tpf->denominator / tpf->numerator;
996 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
997 ++ /* Set default framerate */
998 ++ sd->frame_rate = 30;
999 ++ else
1000 ++ /* Set requested framerate */
1001 ++ sd->frame_rate = tpf->denominator / tpf->numerator;
1002 ++
1003 + if (gspca_dev->streaming)
1004 + set_frame_rate(gspca_dev);
1005 +
1006 +diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
1007 +index 4cb511ccc5f6..22ea6aefd22f 100644
1008 +--- a/drivers/media/usb/gspca/topro.c
1009 ++++ b/drivers/media/usb/gspca/topro.c
1010 +@@ -4791,7 +4791,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
1011 + struct v4l2_fract *tpf = &cp->timeperframe;
1012 + int fr, i;
1013 +
1014 +- sd->framerate = tpf->denominator / tpf->numerator;
1015 ++ if (tpf->numerator == 0 || tpf->denominator == 0)
1016 ++ sd->framerate = 30;
1017 ++ else
1018 ++ sd->framerate = tpf->denominator / tpf->numerator;
1019 ++
1020 + if (gspca_dev->streaming)
1021 + setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
1022 +
1023 +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1024 +index 885ba4a19a6c..ebb40a292d67 100644
1025 +--- a/drivers/mmc/card/block.c
1026 ++++ b/drivers/mmc/card/block.c
1027 +@@ -59,8 +59,7 @@ MODULE_ALIAS("mmc:block");
1028 + #define INAND_CMD38_ARG_SECTRIM2 0x88
1029 + #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
1030 +
1031 +-#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
1032 +- (req->cmd_flags & REQ_META)) && \
1033 ++#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
1034 + (rq_data_dir(req) == WRITE))
1035 + #define PACKED_CMD_VER 0x01
1036 + #define PACKED_CMD_WR 0x02
1037 +@@ -1300,13 +1299,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1038 +
1039 + /*
1040 + * Reliable writes are used to implement Forced Unit Access and
1041 +- * REQ_META accesses, and are supported only on MMCs.
1042 +- *
1043 +- * XXX: this really needs a good explanation of why REQ_META
1044 +- * is treated special.
1045 ++ * are supported only on MMCs.
1046 + */
1047 +- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
1048 +- (req->cmd_flags & REQ_META)) &&
1049 ++ bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1050 + (rq_data_dir(req) == WRITE) &&
1051 + (md->flags & MMC_BLK_REL_WR);
1052 +
1053 +diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
1054 +index f4f3038c1df0..faeda85e78fa 100644
1055 +--- a/drivers/mmc/host/mmci.c
1056 ++++ b/drivers/mmc/host/mmci.c
1057 +@@ -1740,7 +1740,7 @@ static struct amba_id mmci_ids[] = {
1058 + {
1059 + .id = 0x00280180,
1060 + .mask = 0x00ffffff,
1061 +- .data = &variant_u300,
1062 ++ .data = &variant_nomadik,
1063 + },
1064 + {
1065 + .id = 0x00480180,
1066 +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
1067 +index c2d0559115d3..732a8ed571c2 100644
1068 +--- a/drivers/net/can/sja1000/sja1000.c
1069 ++++ b/drivers/net/can/sja1000/sja1000.c
1070 +@@ -187,6 +187,9 @@ static void sja1000_start(struct net_device *dev)
1071 + /* clear interrupt flags */
1072 + priv->read_reg(priv, SJA1000_IR);
1073 +
1074 ++ /* clear interrupt flags */
1075 ++ priv->read_reg(priv, SJA1000_IR);
1076 ++
1077 + /* leave reset mode */
1078 + set_normal_mode(dev);
1079 + }
1080 +diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
1081 +index 5f9a7ad9b964..d921416295ce 100644
1082 +--- a/drivers/net/can/usb/ems_usb.c
1083 ++++ b/drivers/net/can/usb/ems_usb.c
1084 +@@ -118,6 +118,9 @@ MODULE_LICENSE("GPL v2");
1085 + */
1086 + #define EMS_USB_ARM7_CLOCK 8000000
1087 +
1088 ++#define CPC_TX_QUEUE_TRIGGER_LOW 25
1089 ++#define CPC_TX_QUEUE_TRIGGER_HIGH 35
1090 ++
1091 + /*
1092 + * CAN-Message representation in a CPC_MSG. Message object type is
1093 + * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
1094 +@@ -279,6 +282,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
1095 + switch (urb->status) {
1096 + case 0:
1097 + dev->free_slots = dev->intr_in_buffer[1];
1098 ++ if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
1099 ++ if (netif_queue_stopped(netdev)){
1100 ++ netif_wake_queue(netdev);
1101 ++ }
1102 ++ }
1103 + break;
1104 +
1105 + case -ECONNRESET: /* unlink */
1106 +@@ -530,8 +538,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
1107 + /* Release context */
1108 + context->echo_index = MAX_TX_URBS;
1109 +
1110 +- if (netif_queue_stopped(netdev))
1111 +- netif_wake_queue(netdev);
1112 + }
1113 +
1114 + /*
1115 +@@ -591,7 +597,7 @@ static int ems_usb_start(struct ems_usb *dev)
1116 + int err, i;
1117 +
1118 + dev->intr_in_buffer[0] = 0;
1119 +- dev->free_slots = 15; /* initial size */
1120 ++ dev->free_slots = 50; /* initial size */
1121 +
1122 + for (i = 0; i < MAX_RX_URBS; i++) {
1123 + struct urb *urb = NULL;
1124 +@@ -841,7 +847,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
1125 +
1126 + /* Slow down tx path */
1127 + if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
1128 +- dev->free_slots < 5) {
1129 ++ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
1130 + netif_stop_queue(netdev);
1131 + }
1132 + }
1133 +diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
1134 +index 76ef634caf6f..b84e713445d0 100644
1135 +--- a/drivers/pci/pcie/aer/aerdrv.c
1136 ++++ b/drivers/pci/pcie/aer/aerdrv.c
1137 +@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
1138 + rpc->rpd = dev;
1139 + INIT_WORK(&rpc->dpc_handler, aer_isr);
1140 + mutex_init(&rpc->rpc_mutex);
1141 +- init_waitqueue_head(&rpc->wait_release);
1142 +
1143 + /* Use PCIe bus function to store rpc into PCIe device */
1144 + set_service_data(dev, rpc);
1145 +@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
1146 + if (rpc->isr)
1147 + free_irq(dev->irq, dev);
1148 +
1149 +- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
1150 +-
1151 ++ flush_work(&rpc->dpc_handler);
1152 + aer_disable_rootport(rpc);
1153 + kfree(rpc);
1154 + set_service_data(dev, NULL);
1155 +diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
1156 +index d12c77cd6991..3b8766e1e51b 100644
1157 +--- a/drivers/pci/pcie/aer/aerdrv.h
1158 ++++ b/drivers/pci/pcie/aer/aerdrv.h
1159 +@@ -76,7 +76,6 @@ struct aer_rpc {
1160 + * recovery on the same
1161 + * root port hierarchy
1162 + */
1163 +- wait_queue_head_t wait_release;
1164 + };
1165 +
1166 + struct aer_broadcast_data {
1167 +diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
1168 +index 0f4554e48cc5..a017aac0d7ed 100644
1169 +--- a/drivers/pci/pcie/aer/aerdrv_core.c
1170 ++++ b/drivers/pci/pcie/aer/aerdrv_core.c
1171 +@@ -817,8 +817,6 @@ void aer_isr(struct work_struct *work)
1172 + while (get_e_source(rpc, &e_src))
1173 + aer_isr_one_error(p_device, &e_src);
1174 + mutex_unlock(&rpc->rpc_mutex);
1175 +-
1176 +- wake_up(&rpc->wait_release);
1177 + }
1178 +
1179 + /**
1180 +diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
1181 +index f7197a790341..eb402f4f0e2f 100644
1182 +--- a/drivers/pci/xen-pcifront.c
1183 ++++ b/drivers/pci/xen-pcifront.c
1184 +@@ -51,7 +51,7 @@ struct pcifront_device {
1185 + };
1186 +
1187 + struct pcifront_sd {
1188 +- int domain;
1189 ++ struct pci_sysdata sd;
1190 + struct pcifront_device *pdev;
1191 + };
1192 +
1193 +@@ -65,7 +65,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
1194 + unsigned int domain, unsigned int bus,
1195 + struct pcifront_device *pdev)
1196 + {
1197 +- sd->domain = domain;
1198 ++ /* Because we do not expose that information via XenBus. */
1199 ++ sd->sd.node = first_online_node;
1200 ++ sd->sd.domain = domain;
1201 + sd->pdev = pdev;
1202 + }
1203 +
1204 +@@ -463,8 +465,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
1205 + dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
1206 + domain, bus);
1207 +
1208 +- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
1209 +- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
1210 ++ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
1211 ++ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
1212 + if (!bus_entry || !sd) {
1213 + err = -ENOMEM;
1214 + goto err_out;
1215 +diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
1216 +index 3bed2f55cf7d..3ccadf631d45 100644
1217 +--- a/drivers/power/wm831x_power.c
1218 ++++ b/drivers/power/wm831x_power.c
1219 +@@ -567,7 +567,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1220 +
1221 + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
1222 + ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
1223 +- IRQF_TRIGGER_RISING, "System power low",
1224 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "System power low",
1225 + power);
1226 + if (ret != 0) {
1227 + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
1228 +@@ -577,7 +577,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1229 +
1230 + irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
1231 + ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
1232 +- IRQF_TRIGGER_RISING, "Power source",
1233 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT, "Power source",
1234 + power);
1235 + if (ret != 0) {
1236 + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n",
1237 +@@ -590,7 +590,7 @@ static int wm831x_power_probe(struct platform_device *pdev)
1238 + platform_get_irq_byname(pdev,
1239 + wm831x_bat_irqs[i]));
1240 + ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
1241 +- IRQF_TRIGGER_RISING,
1242 ++ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
1243 + wm831x_bat_irqs[i],
1244 + power);
1245 + if (ret != 0) {
1246 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
1247 +index a2597e683e79..6a64e86e8ccd 100644
1248 +--- a/drivers/s390/block/dasd_alias.c
1249 ++++ b/drivers/s390/block/dasd_alias.c
1250 +@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
1251 + spin_unlock_irqrestore(&lcu->lock, flags);
1252 + cancel_work_sync(&lcu->suc_data.worker);
1253 + spin_lock_irqsave(&lcu->lock, flags);
1254 +- if (device == lcu->suc_data.device)
1255 ++ if (device == lcu->suc_data.device) {
1256 ++ dasd_put_device(device);
1257 + lcu->suc_data.device = NULL;
1258 ++ }
1259 + }
1260 + was_pending = 0;
1261 + if (device == lcu->ruac_data.device) {
1262 +@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
1263 + was_pending = 1;
1264 + cancel_delayed_work_sync(&lcu->ruac_data.dwork);
1265 + spin_lock_irqsave(&lcu->lock, flags);
1266 +- if (device == lcu->ruac_data.device)
1267 ++ if (device == lcu->ruac_data.device) {
1268 ++ dasd_put_device(device);
1269 + lcu->ruac_data.device = NULL;
1270 ++ }
1271 + }
1272 + private->lcu = NULL;
1273 + spin_unlock_irqrestore(&lcu->lock, flags);
1274 +@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
1275 + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
1276 + DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
1277 + " alias data in lcu (rc = %d), retry later", rc);
1278 +- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
1279 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
1280 ++ dasd_put_device(device);
1281 + } else {
1282 ++ dasd_put_device(device);
1283 + lcu->ruac_data.device = NULL;
1284 + lcu->flags &= ~UPDATE_PENDING;
1285 + }
1286 +@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
1287 + */
1288 + if (!usedev)
1289 + return -EINVAL;
1290 ++ dasd_get_device(usedev);
1291 + lcu->ruac_data.device = usedev;
1292 +- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
1293 ++ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
1294 ++ dasd_put_device(usedev);
1295 + return 0;
1296 + }
1297 +
1298 +@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
1299 + ASCEBC((char *) &cqr->magic, 4);
1300 + ccw = cqr->cpaddr;
1301 + ccw->cmd_code = DASD_ECKD_CCW_RSCK;
1302 +- ccw->flags = 0 ;
1303 ++ ccw->flags = CCW_FLAG_SLI;
1304 + ccw->count = 16;
1305 + ccw->cda = (__u32)(addr_t) cqr->data;
1306 + ((char *)cqr->data)[0] = reason;
1307 +@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
1308 + /* 3. read new alias configuration */
1309 + _schedule_lcu_update(lcu, device);
1310 + lcu->suc_data.device = NULL;
1311 ++ dasd_put_device(device);
1312 + spin_unlock_irqrestore(&lcu->lock, flags);
1313 + }
1314 +
1315 +@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
1316 + }
1317 + lcu->suc_data.reason = reason;
1318 + lcu->suc_data.device = device;
1319 ++ dasd_get_device(device);
1320 + spin_unlock(&lcu->lock);
1321 +- schedule_work(&lcu->suc_data.worker);
1322 ++ if (!schedule_work(&lcu->suc_data.worker))
1323 ++ dasd_put_device(device);
1324 + };
1325 +diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
1326 +index b52121358385..280e769a1686 100644
1327 +--- a/drivers/scsi/megaraid/megaraid_sas.h
1328 ++++ b/drivers/scsi/megaraid/megaraid_sas.h
1329 +@@ -300,6 +300,8 @@ enum MR_EVT_ARGS {
1330 + MR_EVT_ARGS_GENERIC,
1331 + };
1332 +
1333 ++
1334 ++#define SGE_BUFFER_SIZE 4096
1335 + /*
1336 + * define constants for device list query options
1337 + */
1338 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
1339 +index 78b4fe845245..e6dfa8108301 100644
1340 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
1341 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
1342 +@@ -3602,7 +3602,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
1343 + }
1344 +
1345 + instance->max_sectors_per_req = instance->max_num_sge *
1346 +- PAGE_SIZE / 512;
1347 ++ SGE_BUFFER_SIZE / 512;
1348 + if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
1349 + instance->max_sectors_per_req = tmp_sectors;
1350 +
1351 +@@ -5051,6 +5051,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
1352 + int i;
1353 + int error = 0;
1354 + compat_uptr_t ptr;
1355 ++ unsigned long local_raw_ptr;
1356 ++ u32 local_sense_off;
1357 ++ u32 local_sense_len;
1358 +
1359 + if (clear_user(ioc, sizeof(*ioc)))
1360 + return -EFAULT;
1361 +@@ -5068,9 +5071,15 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
1362 + * sense_len is not null, so prepare the 64bit value under
1363 + * the same condition.
1364 + */
1365 +- if (ioc->sense_len) {
1366 ++ if (get_user(local_raw_ptr, ioc->frame.raw) ||
1367 ++ get_user(local_sense_off, &ioc->sense_off) ||
1368 ++ get_user(local_sense_len, &ioc->sense_len))
1369 ++ return -EFAULT;
1370 ++
1371 ++
1372 ++ if (local_sense_len) {
1373 + void __user **sense_ioc_ptr =
1374 +- (void __user **)(ioc->frame.raw + ioc->sense_off);
1375 ++ (void __user **)((u8*)local_raw_ptr + local_sense_off);
1376 + compat_uptr_t *sense_cioc_ptr =
1377 + (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
1378 + if (get_user(ptr, sense_cioc_ptr) ||
1379 +diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
1380 +index eba183c428cf..3643bbf5456d 100644
1381 +--- a/drivers/scsi/ses.c
1382 ++++ b/drivers/scsi/ses.c
1383 +@@ -70,6 +70,7 @@ static int ses_probe(struct device *dev)
1384 + static int ses_recv_diag(struct scsi_device *sdev, int page_code,
1385 + void *buf, int bufflen)
1386 + {
1387 ++ int ret;
1388 + unsigned char cmd[] = {
1389 + RECEIVE_DIAGNOSTIC,
1390 + 1, /* Set PCV bit */
1391 +@@ -78,9 +79,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
1392 + bufflen & 0xff,
1393 + 0
1394 + };
1395 ++ unsigned char recv_page_code;
1396 +
1397 +- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
1398 ++ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
1399 + NULL, SES_TIMEOUT, SES_RETRIES, NULL);
1400 ++ if (unlikely(!ret))
1401 ++ return ret;
1402 ++
1403 ++ recv_page_code = ((unsigned char *)buf)[0];
1404 ++
1405 ++ if (likely(recv_page_code == page_code))
1406 ++ return ret;
1407 ++
1408 ++ /* successful diagnostic but wrong page code. This happens to some
1409 ++ * USB devices, just print a message and pretend there was an error */
1410 ++
1411 ++ sdev_printk(KERN_ERR, sdev,
1412 ++ "Wrong diagnostic page; asked for %d got %u\n",
1413 ++ page_code, recv_page_code);
1414 ++
1415 ++ return -EINVAL;
1416 + }
1417 +
1418 + static int ses_send_diag(struct scsi_device *sdev, int page_code,
1419 +@@ -436,7 +454,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
1420 + if (desc_ptr)
1421 + desc_ptr += len;
1422 +
1423 +- if (addl_desc_ptr)
1424 ++ if (addl_desc_ptr &&
1425 ++ /* only find additional descriptions for specific devices */
1426 ++ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
1427 ++ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
1428 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
1429 ++ /* these elements are optional */
1430 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
1431 ++ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
1432 ++ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
1433 + addl_desc_ptr += addl_desc_ptr[1] + 2;
1434 +
1435 + }
1436 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1437 +index d2ea64de92df..d6dab8adf60e 100644
1438 +--- a/drivers/usb/class/cdc-acm.c
1439 ++++ b/drivers/usb/class/cdc-acm.c
1440 +@@ -1726,6 +1726,11 @@ static const struct usb_device_id acm_ids[] = {
1441 + },
1442 + #endif
1443 +
1444 ++ /*Samsung phone in firmware update mode */
1445 ++ { USB_DEVICE(0x04e8, 0x685d),
1446 ++ .driver_info = IGNORE_DEVICE,
1447 ++ },
1448 ++
1449 + /* Exclude Infineon Flash Loader utility */
1450 + { USB_DEVICE(0x058b, 0x0041),
1451 + .driver_info = IGNORE_DEVICE,
1452 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1453 +index 89ba7cfba5bc..303f3b3fb65f 100644
1454 +--- a/drivers/usb/serial/cp210x.c
1455 ++++ b/drivers/usb/serial/cp210x.c
1456 +@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
1457 + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
1458 + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
1459 + { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
1460 ++ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
1461 ++ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
1462 + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
1463 + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
1464 + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
1465 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1466 +index 81f6a572f016..9bab34cf01d4 100644
1467 +--- a/drivers/usb/serial/option.c
1468 ++++ b/drivers/usb/serial/option.c
1469 +@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
1470 + #define TOSHIBA_PRODUCT_G450 0x0d45
1471 +
1472 + #define ALINK_VENDOR_ID 0x1e0e
1473 ++#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
1474 + #define ALINK_PRODUCT_PH300 0x9100
1475 + #define ALINK_PRODUCT_3GU 0x9200
1476 +
1477 +@@ -615,6 +616,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
1478 + .reserved = BIT(3) | BIT(4),
1479 + };
1480 +
1481 ++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
1482 ++ .reserved = BIT(5) | BIT(6),
1483 ++};
1484 ++
1485 + static const struct option_blacklist_info telit_le910_blacklist = {
1486 + .sendsetup = BIT(0),
1487 + .reserved = BIT(1) | BIT(2),
1488 +@@ -1130,6 +1135,8 @@ static const struct usb_device_id option_ids[] = {
1489 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1490 + { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1491 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1492 ++ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1493 ++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1494 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1495 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1496 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1497 +@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
1498 + { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1499 + { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1500 + { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1501 ++ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1502 ++ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1503 + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1504 + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1505 + },
1506 +diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
1507 +index ee59b74768d9..beaa7cc4e857 100644
1508 +--- a/drivers/virtio/virtio.c
1509 ++++ b/drivers/virtio/virtio.c
1510 +@@ -238,6 +238,7 @@ static int virtio_init(void)
1511 + static void __exit virtio_exit(void)
1512 + {
1513 + bus_unregister(&virtio_bus);
1514 ++ ida_destroy(&virtio_index_ida);
1515 + }
1516 + core_initcall(virtio_init);
1517 + module_exit(virtio_exit);
1518 +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1519 +index 7360f03ddbe1..9612a01198df 100644
1520 +--- a/fs/btrfs/disk-io.c
1521 ++++ b/fs/btrfs/disk-io.c
1522 +@@ -2437,6 +2437,7 @@ int open_ctree(struct super_block *sb,
1523 + "unsupported option features (%Lx).\n",
1524 + (unsigned long long)features);
1525 + err = -EINVAL;
1526 ++ brelse(bh);
1527 + goto fail_alloc;
1528 + }
1529 +
1530 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1531 +index ae29b403a7e2..b5d13c4eea00 100644
1532 +--- a/fs/btrfs/inode.c
1533 ++++ b/fs/btrfs/inode.c
1534 +@@ -7477,15 +7477,28 @@ int btrfs_readpage(struct file *file, struct page *page)
1535 + static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
1536 + {
1537 + struct extent_io_tree *tree;
1538 +-
1539 ++ struct inode *inode = page->mapping->host;
1540 ++ int ret;
1541 +
1542 + if (current->flags & PF_MEMALLOC) {
1543 + redirty_page_for_writepage(wbc, page);
1544 + unlock_page(page);
1545 + return 0;
1546 + }
1547 ++
1548 ++ /*
1549 ++ * If we are under memory pressure we will call this directly from the
1550 ++ * VM, we need to make sure we have the inode referenced for the ordered
1551 ++ * extent. If not just return like we didn't do anything.
1552 ++ */
1553 ++ if (!igrab(inode)) {
1554 ++ redirty_page_for_writepage(wbc, page);
1555 ++ return AOP_WRITEPAGE_ACTIVATE;
1556 ++ }
1557 + tree = &BTRFS_I(page->mapping->host)->io_tree;
1558 +- return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
1559 ++ ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
1560 ++ btrfs_add_delayed_iput(inode);
1561 ++ return ret;
1562 + }
1563 +
1564 + static int btrfs_writepages(struct address_space *mapping,
1565 +@@ -8474,9 +8487,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
1566 + /*
1567 + * 2 items for inode item and ref
1568 + * 2 items for dir items
1569 ++ * 1 item for updating parent inode item
1570 ++ * 1 item for the inline extent item
1571 + * 1 item for xattr if selinux is on
1572 + */
1573 +- trans = btrfs_start_transaction(root, 5);
1574 ++ trans = btrfs_start_transaction(root, 7);
1575 + if (IS_ERR(trans))
1576 + return PTR_ERR(trans);
1577 +
1578 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
1579 +index 414c1b9eb896..3104e0eec816 100644
1580 +--- a/fs/btrfs/send.c
1581 ++++ b/fs/btrfs/send.c
1582 +@@ -1338,7 +1338,21 @@ static int read_symlink(struct send_ctx *sctx,
1583 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1584 + if (ret < 0)
1585 + goto out;
1586 +- BUG_ON(ret);
1587 ++ if (ret) {
1588 ++ /*
1589 ++ * An empty symlink inode. Can happen in rare error paths when
1590 ++ * creating a symlink (transaction committed before the inode
1591 ++ * eviction handler removed the symlink inode items and a crash
1592 ++ * happened in between or the subvol was snapshoted in between).
1593 ++ * Print an informative message to dmesg/syslog so that the user
1594 ++ * can delete the symlink.
1595 ++ */
1596 ++ btrfs_err(root->fs_info,
1597 ++ "Found empty symlink inode %llu at root %llu",
1598 ++ ino, root->root_key.objectid);
1599 ++ ret = -EIO;
1600 ++ goto out;
1601 ++ }
1602 +
1603 + ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1604 + struct btrfs_file_extent_item);
1605 +diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
1606 +index 32f35f187989..b58a9cbb9695 100644
1607 +--- a/fs/hostfs/hostfs_kern.c
1608 ++++ b/fs/hostfs/hostfs_kern.c
1609 +@@ -720,15 +720,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
1610 +
1611 + init_special_inode(inode, mode, dev);
1612 + err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
1613 +- if (!err)
1614 ++ if (err)
1615 + goto out_free;
1616 +
1617 + err = read_name(inode, name);
1618 + __putname(name);
1619 + if (err)
1620 + goto out_put;
1621 +- if (err)
1622 +- goto out_put;
1623 +
1624 + d_instantiate(dentry, inode);
1625 + return 0;
1626 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
1627 +index 969d589c848d..b5f3c3ab0d5f 100644
1628 +--- a/fs/lockd/host.c
1629 ++++ b/fs/lockd/host.c
1630 +@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
1631 + atomic_inc(&nsm->sm_count);
1632 + else {
1633 + host = NULL;
1634 +- nsm = nsm_get_handle(ni->sap, ni->salen,
1635 ++ nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
1636 + ni->hostname, ni->hostname_len);
1637 + if (unlikely(nsm == NULL)) {
1638 + dprintk("lockd: %s failed; no nsm handle\n",
1639 +@@ -534,17 +534,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
1640 +
1641 + /**
1642 + * nlm_host_rebooted - Release all resources held by rebooted host
1643 ++ * @net: network namespace
1644 + * @info: pointer to decoded results of NLM_SM_NOTIFY call
1645 + *
1646 + * We were notified that the specified host has rebooted. Release
1647 + * all resources held by that peer.
1648 + */
1649 +-void nlm_host_rebooted(const struct nlm_reboot *info)
1650 ++void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
1651 + {
1652 + struct nsm_handle *nsm;
1653 + struct nlm_host *host;
1654 +
1655 +- nsm = nsm_reboot_lookup(info);
1656 ++ nsm = nsm_reboot_lookup(net, info);
1657 + if (unlikely(nsm == NULL))
1658 + return;
1659 +
1660 +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
1661 +index 6ae664b489af..13fac49aff7f 100644
1662 +--- a/fs/lockd/mon.c
1663 ++++ b/fs/lockd/mon.c
1664 +@@ -51,7 +51,6 @@ struct nsm_res {
1665 + };
1666 +
1667 + static const struct rpc_program nsm_program;
1668 +-static LIST_HEAD(nsm_handles);
1669 + static DEFINE_SPINLOCK(nsm_lock);
1670 +
1671 + /*
1672 +@@ -259,33 +258,35 @@ void nsm_unmonitor(const struct nlm_host *host)
1673 + }
1674 + }
1675 +
1676 +-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
1677 +- const size_t len)
1678 ++static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
1679 ++ const char *hostname, const size_t len)
1680 + {
1681 + struct nsm_handle *nsm;
1682 +
1683 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
1684 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
1685 + if (strlen(nsm->sm_name) == len &&
1686 + memcmp(nsm->sm_name, hostname, len) == 0)
1687 + return nsm;
1688 + return NULL;
1689 + }
1690 +
1691 +-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
1692 ++static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
1693 ++ const struct sockaddr *sap)
1694 + {
1695 + struct nsm_handle *nsm;
1696 +
1697 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
1698 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
1699 + if (rpc_cmp_addr(nsm_addr(nsm), sap))
1700 + return nsm;
1701 + return NULL;
1702 + }
1703 +
1704 +-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
1705 ++static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
1706 ++ const struct nsm_private *priv)
1707 + {
1708 + struct nsm_handle *nsm;
1709 +
1710 +- list_for_each_entry(nsm, &nsm_handles, sm_link)
1711 ++ list_for_each_entry(nsm, nsm_handles, sm_link)
1712 + if (memcmp(nsm->sm_priv.data, priv->data,
1713 + sizeof(priv->data)) == 0)
1714 + return nsm;
1715 +@@ -350,6 +351,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
1716 +
1717 + /**
1718 + * nsm_get_handle - Find or create a cached nsm_handle
1719 ++ * @net: network namespace
1720 + * @sap: pointer to socket address of handle to find
1721 + * @salen: length of socket address
1722 + * @hostname: pointer to C string containing hostname to find
1723 +@@ -362,11 +364,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
1724 + * @hostname cannot be found in the handle cache. Returns NULL if
1725 + * an error occurs.
1726 + */
1727 +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
1728 ++struct nsm_handle *nsm_get_handle(const struct net *net,
1729 ++ const struct sockaddr *sap,
1730 + const size_t salen, const char *hostname,
1731 + const size_t hostname_len)
1732 + {
1733 + struct nsm_handle *cached, *new = NULL;
1734 ++ struct lockd_net *ln = net_generic(net, lockd_net_id);
1735 +
1736 + if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
1737 + if (printk_ratelimit()) {
1738 +@@ -381,9 +385,10 @@ retry:
1739 + spin_lock(&nsm_lock);
1740 +
1741 + if (nsm_use_hostnames && hostname != NULL)
1742 +- cached = nsm_lookup_hostname(hostname, hostname_len);
1743 ++ cached = nsm_lookup_hostname(&ln->nsm_handles,
1744 ++ hostname, hostname_len);
1745 + else
1746 +- cached = nsm_lookup_addr(sap);
1747 ++ cached = nsm_lookup_addr(&ln->nsm_handles, sap);
1748 +
1749 + if (cached != NULL) {
1750 + atomic_inc(&cached->sm_count);
1751 +@@ -397,7 +402,7 @@ retry:
1752 + }
1753 +
1754 + if (new != NULL) {
1755 +- list_add(&new->sm_link, &nsm_handles);
1756 ++ list_add(&new->sm_link, &ln->nsm_handles);
1757 + spin_unlock(&nsm_lock);
1758 + dprintk("lockd: created nsm_handle for %s (%s)\n",
1759 + new->sm_name, new->sm_addrbuf);
1760 +@@ -414,19 +419,22 @@ retry:
1761 +
1762 + /**
1763 + * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
1764 ++ * @net: network namespace
1765 + * @info: pointer to NLMPROC_SM_NOTIFY arguments
1766 + *
1767 + * Returns a matching nsm_handle if found in the nsm cache. The returned
1768 + * nsm_handle's reference count is bumped. Otherwise returns NULL if some
1769 + * error occurred.
1770 + */
1771 +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
1772 ++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
1773 ++ const struct nlm_reboot *info)
1774 + {
1775 + struct nsm_handle *cached;
1776 ++ struct lockd_net *ln = net_generic(net, lockd_net_id);
1777 +
1778 + spin_lock(&nsm_lock);
1779 +
1780 +- cached = nsm_lookup_priv(&info->priv);
1781 ++ cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
1782 + if (unlikely(cached == NULL)) {
1783 + spin_unlock(&nsm_lock);
1784 + dprintk("lockd: never saw rebooted peer '%.*s' before\n",
1785 +diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
1786 +index 5010b55628b4..414da99744e9 100644
1787 +--- a/fs/lockd/netns.h
1788 ++++ b/fs/lockd/netns.h
1789 +@@ -16,6 +16,7 @@ struct lockd_net {
1790 + spinlock_t nsm_clnt_lock;
1791 + unsigned int nsm_users;
1792 + struct rpc_clnt *nsm_clnt;
1793 ++ struct list_head nsm_handles;
1794 + };
1795 +
1796 + extern int lockd_net_id;
1797 +diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
1798 +index 7a318480ab7a..ce05c60ff06d 100644
1799 +--- a/fs/lockd/svc.c
1800 ++++ b/fs/lockd/svc.c
1801 +@@ -583,6 +583,7 @@ static int lockd_init_net(struct net *net)
1802 + INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
1803 + INIT_LIST_HEAD(&ln->grace_list);
1804 + spin_lock_init(&ln->nsm_clnt_lock);
1805 ++ INIT_LIST_HEAD(&ln->nsm_handles);
1806 + return 0;
1807 + }
1808 +
1809 +diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
1810 +index b147d1ae71fd..09c576f26c7b 100644
1811 +--- a/fs/lockd/svc4proc.c
1812 ++++ b/fs/lockd/svc4proc.c
1813 +@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
1814 + return rpc_system_err;
1815 + }
1816 +
1817 +- nlm_host_rebooted(argp);
1818 ++ nlm_host_rebooted(SVC_NET(rqstp), argp);
1819 + return rpc_success;
1820 + }
1821 +
1822 +diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
1823 +index 21171f0c6477..fb26b9f522e7 100644
1824 +--- a/fs/lockd/svcproc.c
1825 ++++ b/fs/lockd/svcproc.c
1826 +@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
1827 + return rpc_system_err;
1828 + }
1829 +
1830 +- nlm_host_rebooted(argp);
1831 ++ nlm_host_rebooted(SVC_NET(rqstp), argp);
1832 + return rpc_success;
1833 + }
1834 +
1835 +diff --git a/fs/namei.c b/fs/namei.c
1836 +index 157c3dbacf6c..c87e15ee9255 100644
1837 +--- a/fs/namei.c
1838 ++++ b/fs/namei.c
1839 +@@ -2917,6 +2917,10 @@ opened:
1840 + goto exit_fput;
1841 + }
1842 + out:
1843 ++ if (unlikely(error > 0)) {
1844 ++ WARN_ON(1);
1845 ++ error = -EINVAL;
1846 ++ }
1847 + if (got_write)
1848 + mnt_drop_write(nd->path.mnt);
1849 + path_put(&save_parent);
1850 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1851 +index cfa9163b3bb7..2bdaf57c82d0 100644
1852 +--- a/fs/nfs/nfs4state.c
1853 ++++ b/fs/nfs/nfs4state.c
1854 +@@ -1452,7 +1452,7 @@ restart:
1855 + }
1856 + spin_unlock(&state->state_lock);
1857 + nfs4_put_open_state(state);
1858 +- clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
1859 ++ clear_bit(NFS_STATE_RECLAIM_NOGRACE,
1860 + &state->flags);
1861 + spin_lock(&sp->so_lock);
1862 + goto restart;
1863 +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
1864 +index 9f285fb9bab3..b86db1236c7c 100644
1865 +--- a/fs/proc/task_mmu.c
1866 ++++ b/fs/proc/task_mmu.c
1867 +@@ -170,7 +170,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
1868 + if (!priv->task)
1869 + return ERR_PTR(-ESRCH);
1870 +
1871 +- mm = mm_access(priv->task, PTRACE_MODE_READ);
1872 ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
1873 + if (!mm || IS_ERR(mm))
1874 + return mm;
1875 + down_read(&mm->mmap_sem);
1876 +@@ -1044,7 +1044,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
1877 + if (!pm.buffer)
1878 + goto out_task;
1879 +
1880 +- mm = mm_access(task, PTRACE_MODE_READ);
1881 ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1882 + ret = PTR_ERR(mm);
1883 + if (!mm || IS_ERR(mm))
1884 + goto out_free;
1885 +diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
1886 +index 56123a6f462e..123c19890b14 100644
1887 +--- a/fs/proc/task_nommu.c
1888 ++++ b/fs/proc/task_nommu.c
1889 +@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
1890 + if (!priv->task)
1891 + return ERR_PTR(-ESRCH);
1892 +
1893 +- mm = mm_access(priv->task, PTRACE_MODE_READ);
1894 ++ mm = mm_access(priv->task, PTRACE_MODE_READ_FSCREDS);
1895 + if (!mm || IS_ERR(mm)) {
1896 + put_task_struct(priv->task);
1897 + priv->task = NULL;
1898 +diff --git a/fs/splice.c b/fs/splice.c
1899 +index f183f1342c01..3b94a6bba29f 100644
1900 +--- a/fs/splice.c
1901 ++++ b/fs/splice.c
1902 +@@ -949,6 +949,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
1903 +
1904 + splice_from_pipe_begin(sd);
1905 + do {
1906 ++ cond_resched();
1907 + ret = splice_from_pipe_next(pipe, sd);
1908 + if (ret > 0)
1909 + ret = splice_from_pipe_feed(pipe, sd, actor);
1910 +@@ -1189,7 +1190,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1911 + long ret, bytes;
1912 + umode_t i_mode;
1913 + size_t len;
1914 +- int i, flags;
1915 ++ int i, flags, more;
1916 +
1917 + /*
1918 + * We require the input being a regular file, as we don't want to
1919 +@@ -1232,6 +1233,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1920 + * Don't block on output, we have to drain the direct pipe.
1921 + */
1922 + sd->flags &= ~SPLICE_F_NONBLOCK;
1923 ++ more = sd->flags & SPLICE_F_MORE;
1924 +
1925 + while (len) {
1926 + size_t read_len;
1927 +@@ -1245,6 +1247,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
1928 + sd->total_len = read_len;
1929 +
1930 + /*
1931 ++ * If more data is pending, set SPLICE_F_MORE
1932 ++ * If this is the last data and SPLICE_F_MORE was not set
1933 ++ * initially, clears it.
1934 ++ */
1935 ++ if (read_len < len)
1936 ++ sd->flags |= SPLICE_F_MORE;
1937 ++ else if (!more)
1938 ++ sd->flags &= ~SPLICE_F_MORE;
1939 ++ /*
1940 + * NOTE: nonblocking mode only applies to the input. We
1941 + * must not do the output in nonblocking mode as then we
1942 + * could get stuck data in the internal pipe:
1943 +diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
1944 +index 9a33c5f7e126..f6c229e2bffa 100644
1945 +--- a/include/linux/enclosure.h
1946 ++++ b/include/linux/enclosure.h
1947 +@@ -29,7 +29,11 @@
1948 + /* A few generic types ... taken from ses-2 */
1949 + enum enclosure_component_type {
1950 + ENCLOSURE_COMPONENT_DEVICE = 0x01,
1951 ++ ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
1952 ++ ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
1953 ++ ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
1954 + ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
1955 ++ ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
1956 + };
1957 +
1958 + /* ses-2 common element status */
1959 +diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
1960 +index dcaad79f54ed..0adf073f13b3 100644
1961 +--- a/include/linux/lockd/lockd.h
1962 ++++ b/include/linux/lockd/lockd.h
1963 +@@ -236,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
1964 + struct nlm_host * nlm_get_host(struct nlm_host *);
1965 + void nlm_shutdown_hosts(void);
1966 + void nlm_shutdown_hosts_net(struct net *net);
1967 +-void nlm_host_rebooted(const struct nlm_reboot *);
1968 ++void nlm_host_rebooted(const struct net *net,
1969 ++ const struct nlm_reboot *);
1970 +
1971 + /*
1972 + * Host monitoring
1973 +@@ -244,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
1974 + int nsm_monitor(const struct nlm_host *host);
1975 + void nsm_unmonitor(const struct nlm_host *host);
1976 +
1977 +-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
1978 ++struct nsm_handle *nsm_get_handle(const struct net *net,
1979 ++ const struct sockaddr *sap,
1980 + const size_t salen,
1981 + const char *hostname,
1982 + const size_t hostname_len);
1983 +-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
1984 ++struct nsm_handle *nsm_reboot_lookup(const struct net *net,
1985 ++ const struct nlm_reboot *info);
1986 + void nsm_release(struct nsm_handle *nsm);
1987 +
1988 + /*
1989 +diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
1990 +index fc01d5cb4cf1..7d2021d3ee08 100644
1991 +--- a/include/linux/nfs_fs.h
1992 ++++ b/include/linux/nfs_fs.h
1993 +@@ -578,9 +578,7 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
1994 +
1995 + static inline loff_t nfs_size_to_loff_t(__u64 size)
1996 + {
1997 +- if (size > (__u64) OFFSET_MAX - 1)
1998 +- return OFFSET_MAX - 1;
1999 +- return (loff_t) size;
2000 ++ return min_t(u64, size, OFFSET_MAX);
2001 + }
2002 +
2003 + static inline ino_t
2004 +diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
2005 +index ba605015c4d8..0b2d0cbe0bab 100644
2006 +--- a/include/linux/tracepoint.h
2007 ++++ b/include/linux/tracepoint.h
2008 +@@ -14,8 +14,11 @@
2009 + * See the file COPYING for more details.
2010 + */
2011 +
2012 ++#include <linux/smp.h>
2013 + #include <linux/errno.h>
2014 + #include <linux/types.h>
2015 ++#include <linux/percpu.h>
2016 ++#include <linux/cpumask.h>
2017 + #include <linux/rcupdate.h>
2018 + #include <linux/static_key.h>
2019 +
2020 +@@ -126,6 +129,9 @@ static inline void tracepoint_synchronize_unregister(void)
2021 + void *it_func; \
2022 + void *__data; \
2023 + \
2024 ++ if (!cpu_online(raw_smp_processor_id())) \
2025 ++ return; \
2026 ++ \
2027 + if (!(cond)) \
2028 + return; \
2029 + prercu; \
2030 +diff --git a/include/net/af_unix.h b/include/net/af_unix.h
2031 +index 686760024572..6278e4d32612 100644
2032 +--- a/include/net/af_unix.h
2033 ++++ b/include/net/af_unix.h
2034 +@@ -6,8 +6,8 @@
2035 + #include <linux/mutex.h>
2036 + #include <net/sock.h>
2037 +
2038 +-extern void unix_inflight(struct file *fp);
2039 +-extern void unix_notinflight(struct file *fp);
2040 ++extern void unix_inflight(struct user_struct *user, struct file *fp);
2041 ++extern void unix_notinflight(struct user_struct *user, struct file *fp);
2042 + extern void unix_gc(void);
2043 + extern void wait_for_unix_gc(void);
2044 + extern struct sock *unix_get_socket(struct file *filp);
2045 +diff --git a/include/net/scm.h b/include/net/scm.h
2046 +index 8de2d37d2077..d00cd43a990c 100644
2047 +--- a/include/net/scm.h
2048 ++++ b/include/net/scm.h
2049 +@@ -21,6 +21,7 @@ struct scm_creds {
2050 + struct scm_fp_list {
2051 + short count;
2052 + short max;
2053 ++ struct user_struct *user;
2054 + struct file *fp[SCM_MAX_FD];
2055 + };
2056 +
2057 +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
2058 +index a79d267b64ec..7b0d31b67f6a 100644
2059 +--- a/kernel/irq/manage.c
2060 ++++ b/kernel/irq/manage.c
2061 +@@ -1229,6 +1229,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
2062 + if (!desc)
2063 + return NULL;
2064 +
2065 ++ chip_bus_lock(desc);
2066 + raw_spin_lock_irqsave(&desc->lock, flags);
2067 +
2068 + /*
2069 +@@ -1242,7 +1243,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
2070 + if (!action) {
2071 + WARN(1, "Trying to free already-free IRQ %d\n", irq);
2072 + raw_spin_unlock_irqrestore(&desc->lock, flags);
2073 +-
2074 ++ chip_bus_sync_unlock(desc);
2075 + return NULL;
2076 + }
2077 +
2078 +@@ -1265,6 +1266,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
2079 + #endif
2080 +
2081 + raw_spin_unlock_irqrestore(&desc->lock, flags);
2082 ++ chip_bus_sync_unlock(desc);
2083 +
2084 + unregister_handler_proc(irq, action);
2085 +
2086 +@@ -1338,9 +1340,7 @@ void free_irq(unsigned int irq, void *dev_id)
2087 + desc->affinity_notify = NULL;
2088 + #endif
2089 +
2090 +- chip_bus_lock(desc);
2091 + kfree(__free_irq(irq, dev_id));
2092 +- chip_bus_sync_unlock(desc);
2093 + }
2094 + EXPORT_SYMBOL(free_irq);
2095 +
2096 +diff --git a/kernel/resource.c b/kernel/resource.c
2097 +index d7386986e10e..b8422b135b68 100644
2098 +--- a/kernel/resource.c
2099 ++++ b/kernel/resource.c
2100 +@@ -961,9 +961,10 @@ struct resource * __request_region(struct resource *parent,
2101 + if (!conflict)
2102 + break;
2103 + if (conflict != parent) {
2104 +- parent = conflict;
2105 +- if (!(conflict->flags & IORESOURCE_BUSY))
2106 ++ if (!(conflict->flags & IORESOURCE_BUSY)) {
2107 ++ parent = conflict;
2108 + continue;
2109 ++ }
2110 + }
2111 + if (conflict->flags & flags & IORESOURCE_MUXED) {
2112 + add_wait_queue(&muxed_resource_wait, &wait);
2113 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2114 +index c771f2547bef..f234c84d36c8 100644
2115 +--- a/kernel/sched/core.c
2116 ++++ b/kernel/sched/core.c
2117 +@@ -1587,7 +1587,6 @@ out:
2118 + */
2119 + int wake_up_process(struct task_struct *p)
2120 + {
2121 +- WARN_ON(task_is_stopped_or_traced(p));
2122 + return try_to_wake_up(p, TASK_NORMAL, 0);
2123 + }
2124 + EXPORT_SYMBOL(wake_up_process);
2125 +diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
2126 +index ce033c7aa2e8..9cff0ab82b63 100644
2127 +--- a/kernel/time/posix-clock.c
2128 ++++ b/kernel/time/posix-clock.c
2129 +@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
2130 + static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
2131 + {
2132 + struct posix_clock *clk = get_posix_clock(fp);
2133 +- int result = 0;
2134 ++ unsigned int result = 0;
2135 +
2136 + if (!clk)
2137 +- return -ENODEV;
2138 ++ return POLLERR;
2139 +
2140 + if (clk->ops.poll)
2141 + result = clk->ops.poll(clk, fp, wait);
2142 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2143 +index ab21b8c66535..cb73c4e0741e 100644
2144 +--- a/kernel/trace/ring_buffer.c
2145 ++++ b/kernel/trace/ring_buffer.c
2146 +@@ -1948,12 +1948,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2147 + goto again;
2148 + }
2149 +
2150 +-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2151 +-{
2152 +- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
2153 +- cpu_buffer->reader_page->read = 0;
2154 +-}
2155 +-
2156 + static void rb_inc_iter(struct ring_buffer_iter *iter)
2157 + {
2158 + struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2159 +@@ -3591,7 +3585,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2160 +
2161 + /* Finally update the reader page to the new head */
2162 + cpu_buffer->reader_page = reader;
2163 +- rb_reset_reader_page(cpu_buffer);
2164 ++ cpu_buffer->reader_page->read = 0;
2165 +
2166 + if (overwrite != cpu_buffer->last_overrun) {
2167 + cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
2168 +@@ -3601,6 +3595,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2169 + goto again;
2170 +
2171 + out:
2172 ++ /* Update the read_stamp on the first event */
2173 ++ if (reader && reader->read == 0)
2174 ++ cpu_buffer->read_stamp = reader->page->time_stamp;
2175 ++
2176 + arch_spin_unlock(&cpu_buffer->lock);
2177 + local_irq_restore(flags);
2178 +
2179 +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
2180 +index 5a898f15bfc6..7d054b7671ec 100644
2181 +--- a/kernel/trace/trace_events.c
2182 ++++ b/kernel/trace/trace_events.c
2183 +@@ -602,7 +602,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
2184 + * The ftrace subsystem is for showing formats only.
2185 + * They can not be enabled or disabled via the event files.
2186 + */
2187 +- if (call->class && call->class->reg)
2188 ++ if (call->class && call->class->reg &&
2189 ++ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2190 + return file;
2191 + }
2192 +
2193 +diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2194 +index fa927fd5778d..fe7c4b91d2e7 100644
2195 +--- a/kernel/workqueue.c
2196 ++++ b/kernel/workqueue.c
2197 +@@ -1450,13 +1450,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2198 + timer_stats_timer_set_start_info(&dwork->timer);
2199 +
2200 + dwork->wq = wq;
2201 +- /* timer isn't guaranteed to run in this cpu, record earlier */
2202 +- if (cpu == WORK_CPU_UNBOUND)
2203 +- cpu = raw_smp_processor_id();
2204 + dwork->cpu = cpu;
2205 + timer->expires = jiffies + delay;
2206 +
2207 +- add_timer_on(timer, cpu);
2208 ++ if (unlikely(cpu != WORK_CPU_UNBOUND))
2209 ++ add_timer_on(timer, cpu);
2210 ++ else
2211 ++ add_timer(timer);
2212 + }
2213 +
2214 + /**
2215 +diff --git a/lib/devres.c b/lib/devres.c
2216 +index 823533138fa0..20afaf181b27 100644
2217 +--- a/lib/devres.c
2218 ++++ b/lib/devres.c
2219 +@@ -423,7 +423,7 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
2220 + if (!iomap)
2221 + return;
2222 +
2223 +- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2224 ++ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
2225 + if (!(mask & (1 << i)))
2226 + continue;
2227 +
2228 +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
2229 +index e3bea2e0821a..025ced8fbb57 100644
2230 +--- a/net/ceph/messenger.c
2231 ++++ b/net/ceph/messenger.c
2232 +@@ -2277,7 +2277,7 @@ static int read_partial_message(struct ceph_connection *con)
2233 + con->in_base_pos = -front_len - middle_len - data_len -
2234 + sizeof(m->footer);
2235 + con->in_tag = CEPH_MSGR_TAG_READY;
2236 +- return 0;
2237 ++ return 1;
2238 + } else if ((s64)seq - (s64)con->in_seq > 1) {
2239 + pr_err("read_partial_message bad seq %lld expected %lld\n",
2240 + seq, con->in_seq + 1);
2241 +@@ -2310,7 +2310,7 @@ static int read_partial_message(struct ceph_connection *con)
2242 + sizeof(m->footer);
2243 + con->in_tag = CEPH_MSGR_TAG_READY;
2244 + con->in_seq++;
2245 +- return 0;
2246 ++ return 1;
2247 + }
2248 +
2249 + BUG_ON(!con->in_msg);
2250 +diff --git a/net/core/scm.c b/net/core/scm.c
2251 +index dbc6bfcdf446..7a6cf8351cde 100644
2252 +--- a/net/core/scm.c
2253 ++++ b/net/core/scm.c
2254 +@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
2255 + *fplp = fpl;
2256 + fpl->count = 0;
2257 + fpl->max = SCM_MAX_FD;
2258 ++ fpl->user = NULL;
2259 + }
2260 + fpp = &fpl->fp[fpl->count];
2261 +
2262 +@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
2263 + *fpp++ = file;
2264 + fpl->count++;
2265 + }
2266 ++
2267 ++ if (!fpl->user)
2268 ++ fpl->user = get_uid(current_user());
2269 ++
2270 + return num;
2271 + }
2272 +
2273 +@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
2274 + scm->fp = NULL;
2275 + for (i=fpl->count-1; i>=0; i--)
2276 + fput(fpl->fp[i]);
2277 ++ free_uid(fpl->user);
2278 + kfree(fpl);
2279 + }
2280 + }
2281 +@@ -337,6 +343,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
2282 + for (i = 0; i < fpl->count; i++)
2283 + get_file(fpl->fp[i]);
2284 + new_fpl->max = new_fpl->count;
2285 ++ new_fpl->user = get_uid(fpl->user);
2286 + }
2287 + return new_fpl;
2288 + }
2289 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
2290 +index 89aacfd2756d..9ba6d8c7c793 100644
2291 +--- a/net/mac80211/mesh_pathtbl.c
2292 ++++ b/net/mac80211/mesh_pathtbl.c
2293 +@@ -747,10 +747,8 @@ void mesh_plink_broken(struct sta_info *sta)
2294 + static void mesh_path_node_reclaim(struct rcu_head *rp)
2295 + {
2296 + struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
2297 +- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
2298 +
2299 + del_timer_sync(&node->mpath->timer);
2300 +- atomic_dec(&sdata->u.mesh.mpaths);
2301 + kfree(node->mpath);
2302 + kfree(node);
2303 + }
2304 +@@ -758,8 +756,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
2305 + /* needs to be called with the corresponding hashwlock taken */
2306 + static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
2307 + {
2308 +- struct mesh_path *mpath;
2309 +- mpath = node->mpath;
2310 ++ struct mesh_path *mpath = node->mpath;
2311 ++ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
2312 ++
2313 + spin_lock(&mpath->state_lock);
2314 + mpath->flags |= MESH_PATH_RESOLVING;
2315 + if (mpath->is_gate)
2316 +@@ -767,6 +766,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
2317 + hlist_del_rcu(&node->list);
2318 + call_rcu(&node->rcu, mesh_path_node_reclaim);
2319 + spin_unlock(&mpath->state_lock);
2320 ++ atomic_dec(&sdata->u.mesh.mpaths);
2321 + atomic_dec(&tbl->entries);
2322 + }
2323 +
2324 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2325 +index 0283baedcdfb..9dc979abb461 100644
2326 +--- a/net/netfilter/nf_conntrack_core.c
2327 ++++ b/net/netfilter/nf_conntrack_core.c
2328 +@@ -311,6 +311,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
2329 + nf_ct_put(ct);
2330 + }
2331 +
2332 ++static inline bool
2333 ++nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
2334 ++ const struct nf_conntrack_tuple *tuple,
2335 ++ u16 zone)
2336 ++{
2337 ++ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
2338 ++
2339 ++ /* A conntrack can be recreated with the equal tuple,
2340 ++ * so we need to check that the conntrack is confirmed
2341 ++ */
2342 ++ return nf_ct_tuple_equal(tuple, &h->tuple) &&
2343 ++ nf_ct_zone(ct) == zone &&
2344 ++ nf_ct_is_confirmed(ct);
2345 ++}
2346 ++
2347 + /*
2348 + * Warning :
2349 + * - Caller must take a reference on returned object
2350 +@@ -332,8 +347,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
2351 + local_bh_disable();
2352 + begin:
2353 + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
2354 +- if (nf_ct_tuple_equal(tuple, &h->tuple) &&
2355 +- nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
2356 ++ if (nf_ct_key_equal(h, tuple, zone)) {
2357 + NF_CT_STAT_INC(net, found);
2358 + local_bh_enable();
2359 + return h;
2360 +@@ -380,8 +394,7 @@ begin:
2361 + !atomic_inc_not_zero(&ct->ct_general.use)))
2362 + h = NULL;
2363 + else {
2364 +- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
2365 +- nf_ct_zone(ct) != zone)) {
2366 ++ if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
2367 + nf_ct_put(ct);
2368 + goto begin;
2369 + }
2370 +diff --git a/net/rds/connection.c b/net/rds/connection.c
2371 +index e88bf3976e54..642ad42c416b 100644
2372 +--- a/net/rds/connection.c
2373 ++++ b/net/rds/connection.c
2374 +@@ -177,12 +177,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
2375 + }
2376 + }
2377 +
2378 +- if (trans == NULL) {
2379 +- kmem_cache_free(rds_conn_slab, conn);
2380 +- conn = ERR_PTR(-ENODEV);
2381 +- goto out;
2382 +- }
2383 +-
2384 + conn->c_trans = trans;
2385 +
2386 + ret = trans->conn_alloc(conn, gfp);
2387 +diff --git a/net/rds/send.c b/net/rds/send.c
2388 +index 88eace57dd6b..31c9fa464b11 100644
2389 +--- a/net/rds/send.c
2390 ++++ b/net/rds/send.c
2391 +@@ -955,11 +955,13 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
2392 + release_sock(sk);
2393 + }
2394 +
2395 +- /* racing with another thread binding seems ok here */
2396 ++ lock_sock(sk);
2397 + if (daddr == 0 || rs->rs_bound_addr == 0) {
2398 ++ release_sock(sk);
2399 + ret = -ENOTCONN; /* XXX not a great errno */
2400 + goto out;
2401 + }
2402 ++ release_sock(sk);
2403 +
2404 + /* size of rm including all sgs */
2405 + ret = rds_rm_size(msg, payload_len);
2406 +diff --git a/net/rfkill/core.c b/net/rfkill/core.c
2407 +index 1cec5e4f3a5e..6563cc04c578 100644
2408 +--- a/net/rfkill/core.c
2409 ++++ b/net/rfkill/core.c
2410 +@@ -51,7 +51,6 @@
2411 + struct rfkill {
2412 + spinlock_t lock;
2413 +
2414 +- const char *name;
2415 + enum rfkill_type type;
2416 +
2417 + unsigned long state;
2418 +@@ -75,6 +74,7 @@ struct rfkill {
2419 + struct delayed_work poll_work;
2420 + struct work_struct uevent_work;
2421 + struct work_struct sync_work;
2422 ++ char name[];
2423 + };
2424 + #define to_rfkill(d) container_of(d, struct rfkill, dev)
2425 +
2426 +@@ -871,14 +871,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
2427 + if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
2428 + return NULL;
2429 +
2430 +- rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
2431 ++ rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
2432 + if (!rfkill)
2433 + return NULL;
2434 +
2435 + spin_lock_init(&rfkill->lock);
2436 + INIT_LIST_HEAD(&rfkill->node);
2437 + rfkill->type = type;
2438 +- rfkill->name = name;
2439 ++ strcpy(rfkill->name, name);
2440 + rfkill->ops = ops;
2441 + rfkill->data = ops_data;
2442 +
2443 +@@ -1088,17 +1088,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
2444 + return res;
2445 + }
2446 +
2447 +-static bool rfkill_readable(struct rfkill_data *data)
2448 +-{
2449 +- bool r;
2450 +-
2451 +- mutex_lock(&data->mtx);
2452 +- r = !list_empty(&data->events);
2453 +- mutex_unlock(&data->mtx);
2454 +-
2455 +- return r;
2456 +-}
2457 +-
2458 + static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
2459 + size_t count, loff_t *pos)
2460 + {
2461 +@@ -1115,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
2462 + goto out;
2463 + }
2464 + mutex_unlock(&data->mtx);
2465 ++ /* since we re-check and it just compares pointers,
2466 ++ * using !list_empty() without locking isn't a problem
2467 ++ */
2468 + ret = wait_event_interruptible(data->read_wait,
2469 +- rfkill_readable(data));
2470 ++ !list_empty(&data->events));
2471 + mutex_lock(&data->mtx);
2472 +
2473 + if (ret)
2474 +diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
2475 +index 231b71944c52..a4266b9b2429 100644
2476 +--- a/net/sunrpc/cache.c
2477 ++++ b/net/sunrpc/cache.c
2478 +@@ -1221,7 +1221,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
2479 + if (bp[0] == '\\' && bp[1] == 'x') {
2480 + /* HEX STRING */
2481 + bp += 2;
2482 +- while (len < bufsize) {
2483 ++ while (len < bufsize - 1) {
2484 + int h, l;
2485 +
2486 + h = hex_to_bin(bp[0]);
2487 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2488 +index a673c1f4f638..8f118c7c19e1 100644
2489 +--- a/net/unix/af_unix.c
2490 ++++ b/net/unix/af_unix.c
2491 +@@ -1466,7 +1466,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
2492 + UNIXCB(skb).fp = NULL;
2493 +
2494 + for (i = scm->fp->count-1; i >= 0; i--)
2495 +- unix_notinflight(scm->fp->fp[i]);
2496 ++ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
2497 + }
2498 +
2499 + static void unix_destruct_scm(struct sk_buff *skb)
2500 +@@ -1531,7 +1531,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
2501 + return -ENOMEM;
2502 +
2503 + for (i = scm->fp->count - 1; i >= 0; i--)
2504 +- unix_inflight(scm->fp->fp[i]);
2505 ++ unix_inflight(scm->fp->user, scm->fp->fp[i]);
2506 + return max_level;
2507 + }
2508 +
2509 +diff --git a/net/unix/garbage.c b/net/unix/garbage.c
2510 +index 06730fe6ad9d..a72182d6750f 100644
2511 +--- a/net/unix/garbage.c
2512 ++++ b/net/unix/garbage.c
2513 +@@ -122,7 +122,7 @@ struct sock *unix_get_socket(struct file *filp)
2514 + * descriptor if it is for an AF_UNIX socket.
2515 + */
2516 +
2517 +-void unix_inflight(struct file *fp)
2518 ++void unix_inflight(struct user_struct *user, struct file *fp)
2519 + {
2520 + struct sock *s = unix_get_socket(fp);
2521 +
2522 +@@ -139,11 +139,11 @@ void unix_inflight(struct file *fp)
2523 + }
2524 + unix_tot_inflight++;
2525 + }
2526 +- fp->f_cred->user->unix_inflight++;
2527 ++ user->unix_inflight++;
2528 + spin_unlock(&unix_gc_lock);
2529 + }
2530 +
2531 +-void unix_notinflight(struct file *fp)
2532 ++void unix_notinflight(struct user_struct *user, struct file *fp)
2533 + {
2534 + struct sock *s = unix_get_socket(fp);
2535 +
2536 +@@ -157,7 +157,7 @@ void unix_notinflight(struct file *fp)
2537 + list_del_init(&u->link);
2538 + unix_tot_inflight--;
2539 + }
2540 +- fp->f_cred->user->unix_inflight--;
2541 ++ user->unix_inflight--;
2542 + spin_unlock(&unix_gc_lock);
2543 + }
2544 +
2545 +diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
2546 +index 9c22317778eb..ee625e3a56ba 100644
2547 +--- a/scripts/recordmcount.c
2548 ++++ b/scripts/recordmcount.c
2549 +@@ -189,6 +189,20 @@ static void *mmap_file(char const *fname)
2550 + addr = umalloc(sb.st_size);
2551 + uread(fd_map, addr, sb.st_size);
2552 + }
2553 ++ if (sb.st_nlink != 1) {
2554 ++ /* file is hard-linked, break the hard link */
2555 ++ close(fd_map);
2556 ++ if (unlink(fname) < 0) {
2557 ++ perror(fname);
2558 ++ fail_file();
2559 ++ }
2560 ++ fd_map = open(fname, O_RDWR | O_CREAT, sb.st_mode);
2561 ++ if (fd_map < 0) {
2562 ++ perror(fname);
2563 ++ fail_file();
2564 ++ }
2565 ++ uwrite(fd_map, addr, sb.st_size);
2566 ++ }
2567 + return addr;
2568 + }
2569 +
2570 +diff --git a/tools/Makefile b/tools/Makefile
2571 +index 41067f304215..b82a15b92b1c 100644
2572 +--- a/tools/Makefile
2573 ++++ b/tools/Makefile
2574 +@@ -22,6 +22,10 @@ help:
2575 + @echo ' from the kernel command line to build and install one of'
2576 + @echo ' the tools above'
2577 + @echo ''
2578 ++ @echo ' $$ make tools/all'
2579 ++ @echo ''
2580 ++ @echo ' builds all tools.'
2581 ++ @echo ''
2582 + @echo ' $$ make tools/install'
2583 + @echo ''
2584 + @echo ' installs all tools.'
2585 +@@ -50,6 +54,10 @@ selftests: FORCE
2586 + turbostat x86_energy_perf_policy: FORCE
2587 + $(call descend,power/x86/$@)
2588 +
2589 ++all: cgroup cpupower firewire lguest \
2590 ++ perf selftests turbostat usb \
2591 ++ virtio vm net x86_energy_perf_policy
2592 ++
2593 + cpupower_install:
2594 + $(call descend,power/$(@:_install=),install)
2595 +
2596 +diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
2597 +index ea475cd03511..ca2d05a07b57 100644
2598 +--- a/virt/kvm/async_pf.c
2599 ++++ b/virt/kvm/async_pf.c
2600 +@@ -158,7 +158,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2601 + * do alloc nowait since if we are going to sleep anyway we
2602 + * may as well sleep faulting in page
2603 + */
2604 +- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
2605 ++ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
2606 + if (!work)
2607 + return 0;
2608 +