From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.0 commit in: /
Date: Tue, 23 Jun 2015 14:01:49
Message-Id: 1435067550.bac443972d6de3c565d4d103ca34dda24d258876.mpagano@gentoo
commit: bac443972d6de3c565d4d103ca34dda24d258876
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 23 13:52:30 2015 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun 23 13:52:30 2015 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bac44397

Linux patch 4.0.6

0000_README | 4 +
1005_linux-4.0.6.patch | 3730 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3734 insertions(+)

diff --git a/0000_README b/0000_README
index 0f63559..8761846 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-4.0.5.patch
From: http://www.kernel.org
Desc: Linux 4.0.5

+Patch: 1005_linux-4.0.6.patch
+From: http://www.kernel.org
+Desc: Linux 4.0.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.0.6.patch b/1005_linux-4.0.6.patch
new file mode 100644
index 0000000..15519e7
--- /dev/null
+++ b/1005_linux-4.0.6.patch
@@ -0,0 +1,3730 @@
+diff --git a/Makefile b/Makefile
+index 1880cf77059b..af6da040b952 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 0
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma sheep
+
+diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
+index c3255e0c90aa..dbb3f4d2bf84 100644
+--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
++++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
+@@ -223,6 +223,25 @@
+ /include/ "tps65217.dtsi"
+
+ &tps {
++ /*
++ * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
++ * mode") at poweroff. Most BeagleBone versions do not support RTC-only
++ * mode and risk hardware damage if this mode is entered.
++ *
++ * For details, see linux-omap mailing list May 2015 thread
++ * [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
++ * In particular, messages:
++ * http://www.spinics.net/lists/linux-omap/msg118585.html
++ * http://www.spinics.net/lists/linux-omap/msg118615.html
++ *
++ * You can override this later with
++ * &tps { /delete-property/ ti,pmic-shutdown-controller; }
++ * if you want to use RTC-only mode and made sure you are not affected
++ * by the hardware problems. (Tip: double-check by performing a current
++ * measurement after shutdown: it should be less than 1 mA.)
++ */
++ ti,pmic-shutdown-controller;
++
+ regulators {
+ dcdc1_reg: regulator@0 {
+ regulator-name = "vdds_dpr";
+diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+index 43d54017b779..d0ab012fa379 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
++++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+@@ -16,7 +16,8 @@
+ #include "mt8173.dtsi"
+
+ / {
+- model = "mediatek,mt8173-evb";
++ model = "MediaTek MT8173 evaluation board";
++ compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
+
+ aliases {
+ serial0 = &uart0;
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index d2bfbc2e8995..be15e52a47a0 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -109,7 +109,7 @@ void __init init_IRQ(void)
+ #endif
+ }
+
+-#ifdef DEBUG_STACKOVERFLOW
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ static inline void check_stack_overflow(void)
+ {
+ unsigned long sp;
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index 838d3a6a5b7d..cea02968a908 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -2101,7 +2101,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(int16_t *) run->mmio.data;
+ else
+- *gpr = *(int16_t *) run->mmio.data;
++ *gpr = *(uint16_t *)run->mmio.data;
+
+ break;
+ case 1:
+diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
+index e20b02e3ae28..e10d10b9e82a 100644
+--- a/arch/mips/ralink/ill_acc.c
++++ b/arch/mips/ralink/ill_acc.c
+@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
+ addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
+ type & ILL_ACC_LEN_M);
+
+- rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
++ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index db257a58571f..e657b7ba3292 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -200,10 +200,21 @@
+ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+ #ifdef __KERNEL__
++
++/*
++ * early_idt_handler_array is an array of entry points referenced in the
++ * early IDT. For simplicity, it's a real array with one entry point
++ * every nine bytes. That leaves room for an optional 'push $0' if the
++ * vector has no error code (two bytes), a 'push $vector_number' (two
++ * bytes), and a jump to the common entry code (up to five bytes).
++ */
++#define EARLY_IDT_HANDLER_SIZE 9
++
+ #ifndef __ASSEMBLY__
+-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
++
++extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
+ #ifdef CONFIG_TRACING
+-#define trace_early_idt_handlers early_idt_handlers
++# define trace_early_idt_handler_array early_idt_handler_array
+ #endif
+
+ /*
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index c4f8d4659070..b111ab5c4509 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+ clear_bss();
+
+ for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+- set_intr_gate(i, early_idt_handlers[i]);
++ set_intr_gate(i, early_idt_handler_array[i]);
+ load_idt((const struct desc_ptr *)&idt_descr);
+
+ copy_bootdata(__va(real_mode_data));
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index f36bd42d6f0c..30a2aa3782fa 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -477,21 +477,22 @@ is486:
+ __INIT
+ setup_once:
+ /*
+- * Set up a idt with 256 entries pointing to ignore_int,
+- * interrupt gates. It doesn't actually load idt - that needs
+- * to be done on each CPU. Interrupts are enabled elsewhere,
+- * when we can be relatively sure everything is ok.
++ * Set up a idt with 256 interrupt gates that push zero if there
++ * is no error code and then jump to early_idt_handler_common.
++ * It doesn't actually load the idt - that needs to be done on
++ * each CPU. Interrupts are enabled elsewhere, when we can be
++ * relatively sure everything is ok.
+ */
+
+ movl $idt_table,%edi
+- movl $early_idt_handlers,%eax
++ movl $early_idt_handler_array,%eax
+ movl $NUM_EXCEPTION_VECTORS,%ecx
+ 1:
+ movl %eax,(%edi)
+ movl %eax,4(%edi)
+ /* interrupt gate, dpl=0, present */
+ movl $(0x8E000000 + __KERNEL_CS),2(%edi)
+- addl $9,%eax
++ addl $EARLY_IDT_HANDLER_SIZE,%eax
+ addl $8,%edi
+ loop 1b
+
+@@ -523,26 +524,28 @@ setup_once:
+ andl $0,setup_once_ref /* Once is enough, thanks */
+ ret
+
+-ENTRY(early_idt_handlers)
++ENTRY(early_idt_handler_array)
+ # 36(%esp) %eflags
+ # 32(%esp) %cs
+ # 28(%esp) %eip
+ # 24(%rsp) error code
+ i = 0
+ .rept NUM_EXCEPTION_VECTORS
+- .if (EXCEPTION_ERRCODE_MASK >> i) & 1
+- ASM_NOP2
+- .else
++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ pushl $0 # Dummy error code, to make stack frame uniform
+ .endif
+ pushl $i # 20(%esp) Vector number
+- jmp early_idt_handler
++ jmp early_idt_handler_common
+ i = i + 1
++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ .endr
+-ENDPROC(early_idt_handlers)
++ENDPROC(early_idt_handler_array)
+
+- /* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++ /*
++ * The stack is the hardware frame, an error code or zero, and the
++ * vector number.
++ */
+ cld
+
+ cmpl $2,(%esp) # X86_TRAP_NMI
+@@ -602,7 +605,7 @@ ex_entry:
+ is_nmi:
+ addl $8,%esp /* drop vector number and error code */
+ iret
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+
+ /* This is the default interrupt "handler" :-) */
+ ALIGN
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index 6fd514d9f69a..f8a8406033c3 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -321,26 +321,28 @@ bad_address:
+ jmp bad_address
+
+ __INIT
+- .globl early_idt_handlers
+-early_idt_handlers:
++ENTRY(early_idt_handler_array)
+ # 104(%rsp) %rflags
+ # 96(%rsp) %cs
+ # 88(%rsp) %rip
+ # 80(%rsp) error code
+ i = 0
+ .rept NUM_EXCEPTION_VECTORS
+- .if (EXCEPTION_ERRCODE_MASK >> i) & 1
+- ASM_NOP2
+- .else
++ .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
+ pushq $0 # Dummy error code, to make stack frame uniform
+ .endif
+ pushq $i # 72(%rsp) Vector number
+- jmp early_idt_handler
++ jmp early_idt_handler_common
+ i = i + 1
++ .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ .endr
++ENDPROC(early_idt_handler_array)
+
+-/* This is global to keep gas from relaxing the jumps */
+-ENTRY(early_idt_handler)
++early_idt_handler_common:
++ /*
++ * The stack is the hardware frame, an error code or zero, and the
++ * vector number.
++ */
+ cld
+
+ cmpl $2,(%rsp) # X86_TRAP_NMI
+@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
+ is_nmi:
+ addq $16,%rsp # drop vector number and error code
+ INTERRUPT_RETURN
+-ENDPROC(early_idt_handler)
++ENDPROC(early_idt_handler_common)
+
+ __INITDATA
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 987514396c1e..ddeff4844a10 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ if (is_ereg(dst_reg))
+ EMIT1(0x41);
+ EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
++
++ /* emit 'movzwl eax, ax' */
++ if (is_ereg(dst_reg))
++ EMIT3(0x45, 0x0F, 0xB7);
++ else
++ EMIT2(0x0F, 0xB7);
++ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+ break;
+ case 32:
+ /* emit 'bswap eax' to swap lower 4 bytes */
+@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
++ switch (imm32) {
++ case 16:
++ /* emit 'movzwl eax, ax' to zero extend 16-bit
++ * into 64 bit
++ */
++ if (is_ereg(dst_reg))
++ EMIT3(0x45, 0x0F, 0xB7);
++ else
++ EMIT2(0x0F, 0xB7);
++ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
++ break;
++ case 32:
++ /* emit 'mov eax, eax' to clear upper 32-bits */
++ if (is_ereg(dst_reg))
++ EMIT1(0x45);
++ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
++ break;
++ case 64:
++ /* nop */
++ break;
++ }
+ break;
+
+ /* ST: *(u8*)(dst_reg + off) = imm */
+@@ -938,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+ }
+ ctx.cleanup_addr = proglen;
+
+- for (pass = 0; pass < 10; pass++) {
++ /* JITed image shrinks with every pass and the loop iterates
++ * until the image stops shrinking. Very large bpf programs
++ * may converge on the last pass. In such case do one more
++ * pass to emit the final image
++ */
++ for (pass = 0; pass < 10 || image; pass++) {
+ proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ if (proglen <= 0) {
+ image = NULL;
+diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
+index 7b9be9822724..8533c96bab13 100644
+--- a/arch/x86/vdso/Makefile
++++ b/arch/x86/vdso/Makefile
+@@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+
+-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
+ hostprogs-y += vdso2c
+
+ quiet_cmd_vdso2c = VDSO2C $@
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 5c39703e644f..b2e73e1ef8a4 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1589,6 +1589,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
+ return NOTIFY_OK;
+ }
+
++/* hctx->ctxs will be freed in queue's release handler */
+ static void blk_mq_exit_hctx(struct request_queue *q,
+ struct blk_mq_tag_set *set,
+ struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+@@ -1607,7 +1608,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+
+ blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
+ blk_free_flush_queue(hctx->fq);
+- kfree(hctx->ctxs);
+ blk_mq_free_bitmap(&hctx->ctx_map);
+ }
+
+@@ -1873,8 +1873,12 @@ void blk_mq_release(struct request_queue *q)
+ unsigned int i;
+
+ /* hctx kobj stays in hctx */
+- queue_for_each_hw_ctx(q, hctx, i)
++ queue_for_each_hw_ctx(q, hctx, i) {
++ if (!hctx)
++ continue;
++ kfree(hctx->ctxs);
+ kfree(hctx);
++ }
+
+ kfree(q->queue_hw_ctx);
+
+diff --git a/block/genhd.c b/block/genhd.c
+index 0a536dc05f3b..ea982eadaf63 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ /* allocate ext devt */
+ idr_preload(GFP_KERNEL);
+
+- spin_lock(&ext_devt_lock);
++ spin_lock_bh(&ext_devt_lock);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+- spin_unlock(&ext_devt_lock);
++ spin_unlock_bh(&ext_devt_lock);
+
+ idr_preload_end();
+ if (idx < 0)
+@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
+ return;
+
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+- spin_lock(&ext_devt_lock);
++ spin_lock_bh(&ext_devt_lock);
+ idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+- spin_unlock(&ext_devt_lock);
++ spin_unlock_bh(&ext_devt_lock);
+ }
+ }
+
+@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
+ disk->flags &= ~GENHD_FL_UP;
+
+ sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
+- bdi_unregister(&disk->queue->backing_dev_info);
+ blk_unregister_queue(disk);
+ blk_unregister_region(disk_devt(disk), disk->minors);
+
+@@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
+ } else {
+ struct hd_struct *part;
+
+- spin_lock(&ext_devt_lock);
++ spin_lock_bh(&ext_devt_lock);
+ part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
+ if (part && get_disk(part_to_disk(part))) {
+ *partno = part->partno;
+ disk = part_to_disk(part);
+ }
+- spin_unlock(&ext_devt_lock);
++ spin_unlock_bh(&ext_devt_lock);
+ }
+
+ return disk;
+diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
+index 23716dd8a7ec..5928d0746a27 100644
+--- a/drivers/ata/ahci_mvebu.c
++++ b/drivers/ata/ahci_mvebu.c
+@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
+ writel((cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ hpriv->mmio + AHCI_WINDOW_CTRL(i));
+- writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
++ writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
+ writel(((cs->size - 1) & 0xffff0000),
+ hpriv->mmio + AHCI_WINDOW_SIZE(i));
+ }
+diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
+index 80a80548ad0a..27245957eee3 100644
+--- a/drivers/ata/pata_octeon_cf.c
++++ b/drivers/ata/pata_octeon_cf.c
+@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
+ },
+ {},
+ };
+-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
++MODULE_DEVICE_TABLE(of, octeon_cf_match);
+
+ static struct platform_driver octeon_cf_driver = {
+ .probe = octeon_cf_probe,
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 9c2ba1c97c42..df0c66cb7ad3 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
+ {
+ int ret;
+
+- if (init_cache_level(cpu))
++ if (init_cache_level(cpu) || !cache_leaves(cpu))
+ return -ENOENT;
+
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index fb9ec6221730..6f047dcb94c2 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -58,7 +58,6 @@
+ #include <linux/debugfs.h>
+ #include <linux/log2.h>
+ #include <linux/syscore_ops.h>
+-#include <linux/memblock.h>
+
+ /*
+ * DDR target is the same on all platforms.
+@@ -70,6 +69,7 @@
+ */
+ #define WIN_CTRL_OFF 0x0000
+ #define WIN_CTRL_ENABLE BIT(0)
++/* Only on HW I/O coherency capable platforms */
+ #define WIN_CTRL_SYNCBARRIER BIT(1)
+ #define WIN_CTRL_TGT_MASK 0xf0
+ #define WIN_CTRL_TGT_SHIFT 4
+@@ -102,9 +102,7 @@
+
+ /* Relative to mbusbridge_base */
+ #define MBUS_BRIDGE_CTRL_OFF 0x0
+-#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
+ #define MBUS_BRIDGE_BASE_OFF 0x4
+-#define MBUS_BRIDGE_BASE_MASK 0xffff0000
+
+ /* Maximum number of windows, for all known platforms */
+ #define MBUS_WINS_MAX 20
+@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
+ ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
+ (attr << WIN_CTRL_ATTR_SHIFT) |
+ (target << WIN_CTRL_TGT_SHIFT) |
+- WIN_CTRL_SYNCBARRIER |
+ WIN_CTRL_ENABLE;
++ if (mbus->hw_io_coherency)
++ ctrl |= WIN_CTRL_SYNCBARRIER;
+
+ writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
+ writel(ctrl, addr + WIN_CTRL_OFF);
+@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
+ return MVEBU_MBUS_NO_REMAP;
+ }
+
+-/*
+- * Use the memblock information to find the MBus bridge hole in the
+- * physical address space.
+- */
+-static void __init
+-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
+-{
+- struct memblock_region *r;
+- uint64_t s = 0;
+-
+- for_each_memblock(memory, r) {
+- /*
+- * This part of the memory is above 4 GB, so we don't
+- * care for the MBus bridge hole.
+- */
+- if (r->base >= 0x100000000)
+- continue;
+-
+- /*
+- * The MBus bridge hole is at the end of the RAM under
+- * the 4 GB limit.
+- */
+- if (r->base + r->size > s)
+- s = r->base + r->size;
+- }
+-
+- *start = s;
+- *end = 0x100000000;
+-}
+-
+ static void __init
+ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+ {
+ int i;
+ int cs;
+- uint64_t mbus_bridge_base, mbus_bridge_end;
+
+ mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
+
+- mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
+-
+ for (i = 0, cs = 0; i < 4; i++) {
+- u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+- u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+- u64 end;
+- struct mbus_dram_window *w;
+-
+- /* Ignore entries that are not enabled */
+- if (!(size & DDR_SIZE_ENABLED))
+- continue;
+-
+- /*
+- * Ignore entries whose base address is above 2^32,
+- * since devices cannot DMA to such high addresses
+- */
+- if (base & DDR_BASE_CS_HIGH_MASK)
+- continue;
+-
+- base = base & DDR_BASE_CS_LOW_MASK;
+- size = (size | ~DDR_SIZE_MASK) + 1;
+- end = base + size;
+-
+- /*
+- * Adjust base/size of the current CS to make sure it
+- * doesn't overlap with the MBus bridge hole. This is
+- * particularly important for devices that do DMA from
+- * DRAM to a SRAM mapped in a MBus window, such as the
+- * CESA cryptographic engine.
+- */
++ u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
++ u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+ /*
+- * The CS is fully enclosed inside the MBus bridge
+- * area, so ignore it.
++ * We only take care of entries for which the chip
++ * select is enabled, and that don't have high base
++ * address bits set (devices can only access the first
++ * 32 bits of the memory).
+ */
+- if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+- continue;
++ if ((size & DDR_SIZE_ENABLED) &&
++ !(base & DDR_BASE_CS_HIGH_MASK)) {
++ struct mbus_dram_window *w;
+
+- /*
+- * Beginning of CS overlaps with end of MBus, raise CS
+- * base address, and shrink its size.
+- */
+- if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+- size -= mbus_bridge_end - base;
+- base = mbus_bridge_end;
++ w = &mvebu_mbus_dram_info.cs[cs++];
++ w->cs_index = i;
++ w->mbus_attr = 0xf & ~(1 << i);
++ if (mbus->hw_io_coherency)
++ w->mbus_attr |= ATTR_HW_COHERENCY;
++ w->base = base & DDR_BASE_CS_LOW_MASK;
++ w->size = (size | ~DDR_SIZE_MASK) + 1;
+ }
+-
+- /*
+- * End of CS overlaps with beginning of MBus, shrink
+- * CS size.
+- */
+- if (base < mbus_bridge_base && end > mbus_bridge_base)
+- size -= end - mbus_bridge_base;
+-
+- w = &mvebu_mbus_dram_info.cs[cs++];
+- w->cs_index = i;
+- w->mbus_attr = 0xf & ~(1 << i);
+- if (mbus->hw_io_coherency)
+- w->mbus_attr |= ATTR_HW_COHERENCY;
+- w->base = base;
+- w->size = size;
+ }
+ mvebu_mbus_dram_info.num_cs = cs;
+ }
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index d9891d3461f6..7992164ea9ec 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -174,6 +174,8 @@
+ #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
+
+ #define AT_XDMAC_MAX_CHAN 0x20
++#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
++#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
+
+ #define AT_XDMAC_DMA_BUSWIDTHS\
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+@@ -192,20 +194,17 @@ struct at_xdmac_chan {
+ struct dma_chan chan;
+ void __iomem *ch_regs;
+ u32 mask; /* Channel Mask */
+- u32 cfg[2]; /* Channel Configuration Register */
+- #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
+- #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
++ u32 cfg; /* Channel Configuration Register */
+ u8 perid; /* Peripheral ID */
+ u8 perif; /* Peripheral Interface */
+ u8 memif; /* Memory Interface */
+- u32 per_src_addr;
+- u32 per_dst_addr;
+ u32 save_cc;
+ u32 save_cim;
+ u32 save_cnda;
+ u32 save_cndc;
+ unsigned long status;
+ struct tasklet_struct tasklet;
++ struct dma_slave_config sconfig;
+
+ spinlock_t lock;
+
+@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+ struct at_xdmac_desc *desc = txd_to_at_desc(tx);
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
+ dma_cookie_t cookie;
++ unsigned long irqflags;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, irqflags);
+ cookie = dma_cookie_assign(tx);
+
+ dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
+@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
+ if (list_is_singular(&atchan->xfers_list))
+ at_xdmac_start_xfer(atchan, desc);
+
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, irqflags);
+ return cookie;
+ }
+
+@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
+ return chan;
+ }
+
++static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
++ enum dma_transfer_direction direction)
++{
++ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
++ int csize, dwidth;
++
++ if (direction == DMA_DEV_TO_MEM) {
++ atchan->cfg =
++ AT91_XDMAC_DT_PERID(atchan->perid)
++ | AT_XDMAC_CC_DAM_INCREMENTED_AM
++ | AT_XDMAC_CC_SAM_FIXED_AM
++ | AT_XDMAC_CC_DIF(atchan->memif)
++ | AT_XDMAC_CC_SIF(atchan->perif)
++ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
++ | AT_XDMAC_CC_DSYNC_PER2MEM
++ | AT_XDMAC_CC_MBSIZE_SIXTEEN
++ | AT_XDMAC_CC_TYPE_PER_TRAN;
++ csize = ffs(atchan->sconfig.src_maxburst) - 1;
++ if (csize < 0) {
++ dev_err(chan2dev(chan), "invalid src maxburst value\n");
++ return -EINVAL;
++ }
++ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
++ dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
++ if (dwidth < 0) {
++ dev_err(chan2dev(chan), "invalid src addr width value\n");
++ return -EINVAL;
++ }
++ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
++ } else if (direction == DMA_MEM_TO_DEV) {
++ atchan->cfg =
++ AT91_XDMAC_DT_PERID(atchan->perid)
++ | AT_XDMAC_CC_DAM_FIXED_AM
++ | AT_XDMAC_CC_SAM_INCREMENTED_AM
++ | AT_XDMAC_CC_DIF(atchan->perif)
++ | AT_XDMAC_CC_SIF(atchan->memif)
++ | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
++ | AT_XDMAC_CC_DSYNC_MEM2PER
++ | AT_XDMAC_CC_MBSIZE_SIXTEEN
++ | AT_XDMAC_CC_TYPE_PER_TRAN;
++ csize = ffs(atchan->sconfig.dst_maxburst) - 1;
++ if (csize < 0) {
++ dev_err(chan2dev(chan), "invalid src maxburst value\n");
++ return -EINVAL;
++ }
++ atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
++ dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
++ if (dwidth < 0) {
++ dev_err(chan2dev(chan), "invalid dst addr width value\n");
++ return -EINVAL;
++ }
++ atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
++ }
++
++ dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
++
++ return 0;
++}
++
++/*
++ * Only check that maxburst and addr width values are supported by the
++ * the controller but not that the configuration is good to perform the
++ * transfer since we don't know the direction at this stage.
++ */
++static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
++{
++ if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
++ || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
++ return -EINVAL;
++
++ if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
++ || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
++ return -EINVAL;
++
++ return 0;
++}
++
+ static int at_xdmac_set_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *sconfig)
+ {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+- u8 dwidth;
+- int csize;
+
+- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
+- AT91_XDMAC_DT_PERID(atchan->perid)
+- | AT_XDMAC_CC_DAM_INCREMENTED_AM
+- | AT_XDMAC_CC_SAM_FIXED_AM
+- | AT_XDMAC_CC_DIF(atchan->memif)
+- | AT_XDMAC_CC_SIF(atchan->perif)
+- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+- | AT_XDMAC_CC_DSYNC_PER2MEM
+- | AT_XDMAC_CC_MBSIZE_SIXTEEN
+- | AT_XDMAC_CC_TYPE_PER_TRAN;
+- csize = at_xdmac_csize(sconfig->src_maxburst);
+- if (csize < 0) {
+- dev_err(chan2dev(chan), "invalid src maxburst value\n");
++ if (at_xdmac_check_slave_config(sconfig)) {
++ dev_err(chan2dev(chan), "invalid slave configuration\n");
+ return -EINVAL;
+ }
+- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+- dwidth = ffs(sconfig->src_addr_width) - 1;
+- atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+-
+-
+- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
+- AT91_XDMAC_DT_PERID(atchan->perid)
+- | AT_XDMAC_CC_DAM_FIXED_AM
+- | AT_XDMAC_CC_SAM_INCREMENTED_AM
+- | AT_XDMAC_CC_DIF(atchan->perif)
+- | AT_XDMAC_CC_SIF(atchan->memif)
+- | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+- | AT_XDMAC_CC_DSYNC_MEM2PER
+- | AT_XDMAC_CC_MBSIZE_SIXTEEN
+- | AT_XDMAC_CC_TYPE_PER_TRAN;
+- csize = at_xdmac_csize(sconfig->dst_maxburst);
+- if (csize < 0) {
+- dev_err(chan2dev(chan), "invalid src maxburst value\n");
+- return -EINVAL;
+- }
+- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
+- dwidth = ffs(sconfig->dst_addr_width) - 1;
+- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
+-
+- /* Src and dst addr are needed to configure the link list descriptor. */
+- atchan->per_src_addr = sconfig->src_addr;
+- atchan->per_dst_addr = sconfig->dst_addr;
+
+- dev_dbg(chan2dev(chan),
+- "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
+- __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
+- atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
+- atchan->per_src_addr, atchan->per_dst_addr);
++ memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
+
+ return 0;
+ }
+@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct scatterlist *sg;
+ int i;
+ unsigned int xfer_size = 0;
++ unsigned long irqflags;
++ struct dma_async_tx_descriptor *ret = NULL;
+
+ if (!sgl)
+ return NULL;
+@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ flags);
+
+ /* Protect dma_sconfig field that can be modified by set_slave_conf. */
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, irqflags);
++
++ if (at_xdmac_compute_chan_conf(chan, direction))
++ goto spin_unlock;
+
+ /* Prepare descriptors. */
+ for_each_sg(sgl, sg, sg_len, i) {
+@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ mem = sg_dma_address(sg);
+ if (unlikely(!len)) {
+ dev_err(chan2dev(chan), "sg data length is zero\n");
+- spin_unlock_bh(&atchan->lock);
+- return NULL;
++ goto spin_unlock;
+ }
+ dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
+ __func__, i, len, mem);
+@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+- spin_unlock_bh(&atchan->lock);
+- return NULL;
++ goto spin_unlock;
+ }
+
+ /* Linked list descriptor setup. */
+ if (direction == DMA_DEV_TO_MEM) {
+- desc->lld.mbr_sa = atchan->per_src_addr;
++ desc->lld.mbr_sa = atchan->sconfig.src_addr;
+ desc->lld.mbr_da = mem;
+- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = mem;
+- desc->lld.mbr_da = atchan->per_dst_addr;
+- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
++ desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ }
++ desc->lld.mbr_cfg = atchan->cfg;
+ dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
+ fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+ ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
+@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ xfer_size += len;
+ }
+
+- spin_unlock_bh(&atchan->lock);
+
+ first->tx_dma_desc.flags = flags;
+ first->xfer_size = xfer_size;
+ first->direction = direction;
++ ret = &first->tx_dma_desc;
+
+- return &first->tx_dma_desc;
++spin_unlock:
++ spin_unlock_irqrestore(&atchan->lock, irqflags);
++ return ret;
+ }
+ static struct dma_async_tx_descriptor *
+@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ struct at_xdmac_desc *first = NULL, *prev = NULL;
+ unsigned int periods = buf_len / period_len;
+ int i;
++ unsigned long irqflags;
+
+ dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
+ __func__, &buf_addr, buf_len, period_len,
+@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+ return NULL;
+ }
+
++ if (at_xdmac_compute_chan_conf(chan, direction))
++ return NULL;
++
+ for (i = 0; i < periods; i++) {
+ struct at_xdmac_desc *desc = NULL;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, irqflags);
+ desc = at_xdmac_get_desc(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+ list_splice_init(&first->descs_list, &atchan->free_descs_list);
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, irqflags);
+ return NULL;
+ }
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, irqflags);
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
+ __func__, desc, &desc->tx_dma_desc.phys);
+
+ if (direction == DMA_DEV_TO_MEM) {
+- desc->lld.mbr_sa = atchan->per_src_addr;
++ desc->lld.mbr_sa = atchan->sconfig.src_addr;
+ desc->lld.mbr_da = buf_addr + i * period_len;
+- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
+ } else {
+ desc->lld.mbr_sa = buf_addr + i * period_len;
+- desc->lld.mbr_da = atchan->per_dst_addr;
+- desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
++ desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ }
++ desc->lld.mbr_cfg = atchan->cfg;
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
+ | AT_XDMAC_MBR_UBC_NDEN
+ | AT_XDMAC_MBR_UBC_NSEN
+@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ | AT_XDMAC_CC_SIF(0)
+ | AT_XDMAC_CC_MBSIZE_SIXTEEN
+ | AT_XDMAC_CC_TYPE_MEM_TRAN;
++ unsigned long irqflags;
+
+ dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
+ __func__, &src, &dest, len, flags);
+@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+
+ dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, irqflags);
+ desc = at_xdmac_get_desc(atchan);
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, irqflags);
+ if (!desc) {
+ dev_err(chan2dev(chan), "can't get descriptor\n");
+ if (first)
+@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ int residue;
+ u32 cur_nda, mask, value;
+ u8 dwidth = 0;
++ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ if (!txstate)
+ return ret;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
+
+@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ */
+ if (!desc->active_xfer) {
+ dma_set_residue(txstate, desc->xfer_size);
+- spin_unlock_bh(&atchan->lock);
+- return ret;
++ goto spin_unlock;
+ }
+
+ residue = desc->xfer_size;
+@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ }
+ residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
+
+- spin_unlock_bh(&atchan->lock);
+-
+ dma_set_residue(txstate, residue);
+
+ dev_dbg(chan2dev(chan),
+ "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
+ __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
+
++spin_unlock:
++ spin_unlock_irqrestore(&atchan->lock, flags);
+ return ret;
+ }
+
+@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
+ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+ {
+ struct at_xdmac_desc *desc;
++ unsigned long flags;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+
+ /*
+ * If channel is enabled, do nothing, advance_work will be triggered
+@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
+ at_xdmac_start_xfer(atchan, desc);
+ }
+
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+ }
+
+ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
+@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
+ {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ int ret;
++ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+ ret = at_xdmac_set_slave_config(chan, config);
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+
+ return ret;
+ }
+@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
+ {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
++ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+ if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
+ return 0;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
+ & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
+ cpu_relax();
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+
+ return 0;
+ }
+@@ -1150,16 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
+ {
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
++ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+- spin_lock_bh(&atchan->lock);
+- if (!at_xdmac_chan_is_paused(atchan))
++ spin_lock_irqsave(&atchan->lock, flags);
++ if (!at_xdmac_chan_is_paused(atchan)) {
++ spin_unlock_irqrestore(&atchan->lock, flags);
+ return 0;
++ }
+
+ at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+
+ return 0;
+ }
+@@ -1169,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
++ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "%s\n", __func__);
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ cpu_relax();
+@@ -1182,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ at_xdmac_remove_xfer(atchan, desc);
+
+ clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+
+ return 0;
+ }
+@@ -1192,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac_desc *desc;
+ int i;
++ unsigned long flags;
+
+- spin_lock_bh(&atchan->lock);
++ spin_lock_irqsave(&atchan->lock, flags);
+
+ if (at_xdmac_chan_is_enabled(atchan)) {
+ dev_err(chan2dev(chan),
+@@ -1224,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
+
+ spin_unlock:
+- spin_unlock_bh(&atchan->lock);
++ spin_unlock_irqrestore(&atchan->lock, flags);
+ return i;
+ }
+
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index ac336a961dea..8e70e580c98a 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -505,7 +505,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+ caps->directions = device->directions;
+ caps->residue_granularity = device->residue_granularity;
+
+- caps->cmd_pause = !!device->device_pause;
++ /*
++ * Some devices implement only pause (e.g. to get residuum) but no
++ * resume. However cmd_pause is advertised as pause AND resume.
++ */
++ caps->cmd_pause = !!(device->device_pause && device->device_resume);
+ caps->cmd_terminate = !!device->device_terminate_all;
+
+ return 0;
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 0e1f56772855..a2771a8d4377 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ struct pl330_dmac *pl330 = pch->dmac;
+ LIST_HEAD(list);
+
++ pm_runtime_get_sync(pl330->ddma.dev);
+ spin_lock_irqsave(&pch->lock, flags);
+ spin_lock(&pl330->lock);
+ _stop(pch->thread);
+@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
+ list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
+ spin_unlock_irqrestore(&pch->lock, flags);
++ pm_runtime_mark_last_busy(pl330->ddma.dev);
++ pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index 406624a0b201..340e21918f33 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ dev->node_props.cpu_core_id_base);
+ sysfs_show_32bit_prop(buffer, "simd_id_base",
+ dev->node_props.simd_id_base);
+- sysfs_show_32bit_prop(buffer, "capability",
+- dev->node_props.capability);
+ sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
+ dev->node_props.max_waves_per_simd);
+ sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
+@@ -735,6 +733,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
++ sysfs_show_32bit_prop(buffer, "capability",
++ dev->node_props.capability);
+ }
+
+ return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 27ea6bdebce7..7a628e4cb27a 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2732,9 +2732,6 @@ void i915_gem_reset(struct drm_device *dev)
+ void
+ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+ {
+- if (list_empty(&ring->request_list))
+- return;
+-
+ WARN_ON(i915_verify_lists(ring->dev));
+
+ /* Retire requests first as we use it above for the early return.
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 88b36a9173c9..336e8b63ca08 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -881,10 +881,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+- break;
++ goto done;
+ }
+- if (status & DP_AUX_CH_CTL_DONE)
+- break;
+ }
+
+ if ((status & DP_AUX_CH_CTL_DONE) == 0) {
+@@ -893,6 +891,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ goto out;
+ }
+
++done:
+ /* Check for timeout or receive error.
+ * Timeouts occur when the sink is not connected
+ */
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index 56e437e31580..ae628001fd97 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+- int i, reg_offset;
++ int i = 0, inc, try = 0, reg_offset;
+ int ret = 0;
+
+ intel_aux_display_runtime_get(dev_priv);
+@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
+
+ reg_offset = dev_priv->gpio_mmio_base;
+
++retry:
+ I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+- for (i = 0; i < num; i++) {
++ for (; i < num; i += inc) {
++ inc = 1;
+ if (gmbus_is_index_read(msgs, i, num)) {
+ ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+- i += 1; /* set i to the index of the read xfer */
++ inc = 2; /* an index read is two msgs */
+ } else if (msgs[i].flags & I2C_M_RD) {
+ ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+ } else {
+@@ -525,6 +527,18 @@ clear_err:
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
++ /*
++ * Passive adapters sometimes NAK the first probe. Retry the first
++ * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
++ * has retries internally. See also the retry loop in
++ * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
++ */
++ if (ret == -ENXIO && i == 0 && try++ == 0) {
++ DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
++ adapter->name);
++ goto retry;
++ }
++
+ goto out;
+
+ timeout:
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 965a45619f6b..9bd56116fd5a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ else
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+- /* if there is no audio, set MINM_OVER_MAXP */
+- if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
+- radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ if (rdev->family < CHIP_RV770)
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ /* use frac fb div on APUs */
+@@ -1789,9 +1786,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ if ((crtc->mode.clock == test_crtc->mode.clock) &&
+ (adjusted_clock == test_adjusted_clock) &&
+ (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
+- (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
+- (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
+- drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
++ (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+ return test_radeon_crtc->pll_id;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+index f04205170b8a..cfa3a84a2af0 100644
+--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
++++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
+@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+- WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
++ WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_SOURCE | /* select SW CTS value */
+ HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
+
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index bd7519fdd3f4..aa232fd25992 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1458,6 +1458,21 @@ int radeon_device_init(struct radeon_device *rdev,
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
++ /*
++ * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
++ * after the CP ring have chew one packet at least. Hence here we stop
++ * and restart DPM after the radeon_ib_ring_tests().
++ */
++ if (rdev->pm.dpm_enabled &&
++ (rdev->pm.pm_method == PM_METHOD_DPM) &&
++ (rdev->family == CHIP_TURKS) &&
++ (rdev->flags & RADEON_IS_MOBILITY)) {
++ mutex_lock(&rdev->pm.mutex);
++ radeon_dpm_disable(rdev);
++ radeon_dpm_enable(rdev);
++ mutex_unlock(&rdev->pm.mutex);
++ }
++
+ if ((radeon_testing & 1)) {
+ if (rdev->accel_working)
+ radeon_test_moves(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index de42fc4a22b8..9c3377ca17b7 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ /* make sure object fit at this offset */
+ eoffset = soffset + size;
+ if (soffset >= eoffset) {
+- return -EINVAL;
++ r = -EINVAL;
++ goto error_unreserve;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+- return -EINVAL;
++ r = -EINVAL;
++ goto error_unreserve;
+ }
+
+ } else {
+@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
+ mutex_unlock(&vm->mutex);
+- return -EINVAL;
++ r = -EINVAL;
++ goto error_unreserve;
+ }
+ }
+
+@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+- return -ENOMEM;
++ r = -ENOMEM;
++ goto error_unreserve;
+ }
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ r = radeon_vm_clear_bo(rdev, pt);
+ if (r) {
+ radeon_bo_unref(&pt);
+- radeon_bo_reserve(bo_va->bo, false);
+ return r;
+ }
+
+@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+
+ mutex_unlock(&vm->mutex);
+ return 0;
++
++error_unreserve:
++ radeon_bo_unreserve(bo_va->bo);
++ return r;
+ }
+
+ /**
+diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
+index 8fe78d08e01c..7c6966434ee7 100644
+--- a/drivers/i2c/busses/i2c-hix5hd2.c
++++ b/drivers/i2c/busses/i2c-hix5hd2.c
+@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
+ MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
+ MODULE_AUTHOR("Wei Yan <sledge.yanwei@××××××.com>");
+ MODULE_LICENSE("GPL");
+-MODULE_ALIAS("platform:i2c-hix5hd2");
++MODULE_ALIAS("platform:hix5hd2-i2c");
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 958c8db4ec30..297e9c9ac943 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ i2c->quirks = s3c24xx_get_device_quirks(pdev);
++ i2c->sysreg = ERR_PTR(-ENOENT);
+ if (pdata)
+ memcpy(i2c->pdata, pdata, sizeof(*pdata));
+ else
+diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
+index 89d8aa1d2818..df12c57e6ce0 100644
+--- a/drivers/iio/adc/twl6030-gpadc.c
++++ b/drivers/iio/adc/twl6030-gpadc.c
+@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
+
+ module_platform_driver(twl6030_gpadc_driver);
+
+-MODULE_ALIAS("platform: " DRIVER_NAME);
++MODULE_ALIAS("platform:" DRIVER_NAME);
+ MODULE_AUTHOR("Balaji T K <balajitk@××.com>");
+ MODULE_AUTHOR("Graeme Gregory <gg@××××××××××××.uk>");
+ MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@××.com");
+diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
+index 0916bf6b6c31..73b189c1c0fb 100644
+--- a/drivers/iio/imu/adis16400.h
++++ b/drivers/iio/imu/adis16400.h
+@@ -139,6 +139,7 @@
+ #define ADIS16400_NO_BURST BIT(1)
+ #define ADIS16400_HAS_SLOW_MODE BIT(2)
+ #define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
++#define ADIS16400_BURST_DIAG_STAT BIT(4)
+
+ struct adis16400_state;
+
+@@ -165,6 +166,7 @@ struct adis16400_state {
+ int filt_int;
+
+ struct adis adis;
++ unsigned long avail_scan_mask[2];
+ };
+
+ /* At the moment triggers are only used for ring buffer
1500 +diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
1501 +index 6e727ffe5262..90c24a23c679 100644
1502 +--- a/drivers/iio/imu/adis16400_buffer.c
1503 ++++ b/drivers/iio/imu/adis16400_buffer.c
1504 +@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
1505 + {
1506 + struct adis16400_state *st = iio_priv(indio_dev);
1507 + struct adis *adis = &st->adis;
1508 +- uint16_t *tx;
1509 ++ unsigned int burst_length;
1510 ++ u8 *tx;
1511 +
1512 + if (st->variant->flags & ADIS16400_NO_BURST)
1513 + return adis_update_scan_mode(indio_dev, scan_mask);
1514 +@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
1515 + kfree(adis->xfer);
1516 + kfree(adis->buffer);
1517 +
1518 ++ /* All but the timestamp channel */
1519 ++ burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
1520 ++ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
1521 ++ burst_length += sizeof(u16);
1522 ++
1523 + adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
1524 + if (!adis->xfer)
1525 + return -ENOMEM;
1526 +
1527 +- adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
1528 +- GFP_KERNEL);
1529 ++ adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
1530 + if (!adis->buffer)
1531 + return -ENOMEM;
1532 +
1533 +- tx = adis->buffer + indio_dev->scan_bytes;
1534 +-
1535 ++ tx = adis->buffer + burst_length;
1536 + tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
1537 + tx[1] = 0;
1538 +
1539 + adis->xfer[0].tx_buf = tx;
1540 + adis->xfer[0].bits_per_word = 8;
1541 + adis->xfer[0].len = 2;
1542 +- adis->xfer[1].tx_buf = tx;
1543 ++ adis->xfer[1].rx_buf = adis->buffer;
1544 + adis->xfer[1].bits_per_word = 8;
1545 +- adis->xfer[1].len = indio_dev->scan_bytes;
1546 ++ adis->xfer[1].len = burst_length;
1547 +
1548 + spi_message_init(&adis->msg);
1549 + spi_message_add_tail(&adis->xfer[0], &adis->msg);
1550 +@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
1551 + struct adis16400_state *st = iio_priv(indio_dev);
1552 + struct adis *adis = &st->adis;
1553 + u32 old_speed_hz = st->adis.spi->max_speed_hz;
1554 ++ void *buffer;
1555 + int ret;
1556 +
1557 + if (!adis->buffer)
1558 +@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
1559 + spi_setup(st->adis.spi);
1560 + }
1561 +
1562 +- iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
1563 ++ if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
1564 ++ buffer = adis->buffer + sizeof(u16);
1565 ++ else
1566 ++ buffer = adis->buffer;
1567 ++
1568 ++ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
1569 + pf->timestamp);
1570 +
1571 + iio_trigger_notify_done(indio_dev->trig);
1572 +diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
1573 +index fa795dcd5f75..2fd68f2219a7 100644
1574 +--- a/drivers/iio/imu/adis16400_core.c
1575 ++++ b/drivers/iio/imu/adis16400_core.c
1576 +@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1577 + *val = st->variant->temp_scale_nano / 1000000;
1578 + *val2 = (st->variant->temp_scale_nano % 1000000);
1579 + return IIO_VAL_INT_PLUS_MICRO;
1580 ++ case IIO_PRESSURE:
1581 ++ /* 20 uBar = 0.002 kPascal */
1582 ++ *val = 0;
1583 ++ *val2 = 2000;
1584 ++ return IIO_VAL_INT_PLUS_MICRO;
1585 + default:
1586 + return -EINVAL;
1587 + }
1588 +@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1589 + }
1590 + }
1591 +
1592 +-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
1593 ++#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
1594 + .type = IIO_VOLTAGE, \
1595 + .indexed = 1, \
1596 +- .channel = 0, \
1597 ++ .channel = chn, \
1598 + .extend_name = name, \
1599 + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
1600 + BIT(IIO_CHAN_INFO_SCALE), \
1601 +@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1602 + }
1603 +
1604 + #define ADIS16400_SUPPLY_CHAN(addr, bits) \
1605 +- ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
1606 ++ ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
1607 +
1608 + #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
1609 +- ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
1610 ++ ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
1611 +
1612 + #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
1613 + .type = IIO_ANGL_VEL, \
1614 +@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
1615 + .channels = adis16448_channels,
1616 + .num_channels = ARRAY_SIZE(adis16448_channels),
1617 + .flags = ADIS16400_HAS_PROD_ID |
1618 +- ADIS16400_HAS_SERIAL_NUMBER,
1619 ++ ADIS16400_HAS_SERIAL_NUMBER |
1620 ++ ADIS16400_BURST_DIAG_STAT,
1621 + .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
1622 + .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
1623 + .temp_scale_nano = 73860000, /* 0.07386 C */
1624 +@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
1625 + .debugfs_reg_access = adis_debugfs_reg_access,
1626 + };
1627 +
1628 +-static const unsigned long adis16400_burst_scan_mask[] = {
1629 +- ~0UL,
1630 +- 0,
1631 +-};
1632 +-
1633 + static const char * const adis16400_status_error_msgs[] = {
1634 + [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
1635 + [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
1636 +@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
1637 + BIT(ADIS16400_DIAG_STAT_POWER_LOW),
1638 + };
1639 +
1640 ++static void adis16400_setup_chan_mask(struct adis16400_state *st)
1641 ++{
1642 ++ const struct adis16400_chip_info *chip_info = st->variant;
1643 ++ unsigned i;
1644 ++
1645 ++ for (i = 0; i < chip_info->num_channels; i++) {
1646 ++ const struct iio_chan_spec *ch = &chip_info->channels[i];
1647 ++
1648 ++ if (ch->scan_index >= 0 &&
1649 ++ ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
1650 ++ st->avail_scan_mask[0] |= BIT(ch->scan_index);
1651 ++ }
1652 ++}
1653 ++
1654 + static int adis16400_probe(struct spi_device *spi)
1655 + {
1656 + struct adis16400_state *st;
1657 +@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
1658 + indio_dev->info = &adis16400_info;
1659 + indio_dev->modes = INDIO_DIRECT_MODE;
1660 +
1661 +- if (!(st->variant->flags & ADIS16400_NO_BURST))
1662 +- indio_dev->available_scan_masks = adis16400_burst_scan_mask;
1663 ++ if (!(st->variant->flags & ADIS16400_NO_BURST)) {
1664 ++ adis16400_setup_chan_mask(st);
1665 ++ indio_dev->available_scan_masks = st->avail_scan_mask;
1666 ++ }
1667 +
1668 + ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
1669 + if (ret)
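
Taken together, the adis16400 hunks above replace the old catch-all burst configuration with one derived from the actual channel list: the burst length is now computed from num_channels (all but the timestamp) instead of indio_dev->scan_bytes, the second SPI transfer finally points rx_buf at the receive buffer instead of re-using tx_buf, parts flagged ADIS16400_BURST_DIAG_STAT reserve and then skip a leading DIAG_STAT word, and the fixed { ~0UL, 0 } available_scan_masks is replaced by a per-chip mask built in adis16400_setup_chan_mask(). A small standalone sketch of that mask construction (simplified channel type; the real code walks struct iio_chan_spec):

    #include <stdio.h>

    struct chan { int scan_index; };

    int main(void)
    {
        /* hypothetical channel set: indices 0, 1, 3; one non-scannable */
        struct chan chans[] = { {0}, {1}, {3}, {-1} };
        const int timestamp_idx = 3; /* stand-in for ADIS16400_SCAN_TIMESTAMP */
        unsigned long mask = 0;

        for (unsigned int i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
            if (chans[i].scan_index >= 0 &&
                chans[i].scan_index != timestamp_idx)
                mask |= 1UL << chans[i].scan_index;

        printf("avail_scan_mask[0] = 0x%lx\n", mask); /* 0x3 */
        return 0;
    }
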
1670 +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1671 +index ea6cb64dfb28..d5335e664240 100644
1672 +--- a/drivers/input/mouse/alps.c
1673 ++++ b/drivers/input/mouse/alps.c
1674 +@@ -1042,9 +1042,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1675 + right = (packet[1] & 0x02) >> 1;
1676 + middle = (packet[1] & 0x04) >> 2;
1677 +
1678 +- /* Divide 2 since trackpoint's speed is too fast */
1679 +- input_report_rel(dev2, REL_X, (char)x / 2);
1680 +- input_report_rel(dev2, REL_Y, -((char)y / 2));
1681 ++ input_report_rel(dev2, REL_X, (char)x);
1682 ++ input_report_rel(dev2, REL_Y, -((char)y));
1683 +
1684 + input_report_key(dev2, BTN_LEFT, left);
1685 + input_report_key(dev2, BTN_RIGHT, right);
1686 +diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1687 +index 79363b687195..ce3d40004458 100644
1688 +--- a/drivers/input/mouse/elantech.c
1689 ++++ b/drivers/input/mouse/elantech.c
1690 +@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
1691 + return true;
1692 +
1693 + /*
1694 +- * Some models have a revision higher then 20. Meaning param[2] may
1695 +- * be 10 or 20, skip the rates check for these.
1696 ++ * Some hw_version >= 4 models have a revision higher than 20. Meaning
1697 ++ * that param[2] may be 10 or 20, skip the rates check for these.
1698 + */
1699 +- if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
1700 ++ if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
1701 ++ param[2] < 40)
1702 + return true;
1703 +
1704 + for (i = 0; i < ARRAY_SIZE(rates); i++)
1705 +@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1706 + case 9:
1707 + case 10:
1708 + case 13:
1709 ++ case 14:
1710 + etd->hw_version = 4;
1711 + break;
1712 + default:
1713 +diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1714 +index 3b06c8a360b6..907ac9bdd763 100644
1715 +--- a/drivers/input/mouse/synaptics.c
1716 ++++ b/drivers/input/mouse/synaptics.c
1717 +@@ -148,6 +148,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1718 + 1024, 5112, 2024, 4832
1719 + },
1720 + {
1721 ++ (const char * const []){"LEN2000", NULL},
1722 ++ {ANY_BOARD_ID, ANY_BOARD_ID},
1723 ++ 1024, 5113, 2021, 4832
1724 ++ },
1725 ++ {
1726 + (const char * const []){"LEN2001", NULL},
1727 + {ANY_BOARD_ID, ANY_BOARD_ID},
1728 + 1024, 5022, 2508, 4832
1729 +@@ -188,7 +193,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
1730 + "LEN0045",
1731 + "LEN0047",
1732 + "LEN0049",
1733 +- "LEN2000",
1734 ++ "LEN2000", /* S540 */
1735 + "LEN2001", /* Edge E431 */
1736 + "LEN2002", /* Edge E531 */
1737 + "LEN2003",
1738 +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1739 +index 2d1e05bdbb53..272149d66f5b 100644
1740 +--- a/drivers/iommu/intel-iommu.c
1741 ++++ b/drivers/iommu/intel-iommu.c
1742 +@@ -50,6 +50,7 @@
1743 + #define CONTEXT_SIZE VTD_PAGE_SIZE
1744 +
1745 + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
1746 ++#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
1747 + #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
1748 + #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
1749 +
1750 +@@ -672,6 +673,11 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
1751 + domain->iommu_superpage = domain_update_iommu_superpage(NULL);
1752 + }
1753 +
1754 ++static int iommu_dummy(struct device *dev)
1755 ++{
1756 ++ return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
1757 ++}
1758 ++
1759 + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
1760 + {
1761 + struct dmar_drhd_unit *drhd = NULL;
1762 +@@ -681,6 +687,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
1763 + u16 segment = 0;
1764 + int i;
1765 +
1766 ++ if (iommu_dummy(dev))
1767 ++ return NULL;
1768 ++
1769 + if (dev_is_pci(dev)) {
1770 + pdev = to_pci_dev(dev);
1771 + segment = pci_domain_nr(pdev->bus);
1772 +@@ -2554,6 +2563,10 @@ static bool device_has_rmrr(struct device *dev)
1773 + * In both cases we assume that PCI USB devices with RMRRs have them largely
1774 + * for historical reasons and that the RMRR space is not actively used post
1775 + * boot. This exclusion may change if vendors begin to abuse it.
1776 ++ *
1777 ++ * The same exception is made for graphics devices, with the requirement that
1778 ++ * any use of the RMRR regions will be torn down before assigning the device
1779 ++ * to a guest.
1780 + */
1781 + static bool device_is_rmrr_locked(struct device *dev)
1782 + {
1783 +@@ -2563,7 +2576,7 @@ static bool device_is_rmrr_locked(struct device *dev)
1784 + if (dev_is_pci(dev)) {
1785 + struct pci_dev *pdev = to_pci_dev(dev);
1786 +
1787 +- if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
1788 ++ if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
1789 + return false;
1790 + }
1791 +
1792 +@@ -2969,11 +2982,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
1793 + return __get_valid_domain_for_dev(dev);
1794 + }
1795 +
1796 +-static int iommu_dummy(struct device *dev)
1797 +-{
1798 +- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
1799 +-}
1800 +-
1801 + /* Check if the dev needs to go through non-identity map and unmap process.*/
1802 + static int iommu_no_mapping(struct device *dev)
1803 + {
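
Two independent intel-iommu changes above: iommu_dummy() moves ahead of device_to_iommu() so devices whose archdata carries DUMMY_DEVICE_DOMAIN_INFO bail out before any DRHD lookup, and device_is_rmrr_locked() now exempts graphics devices as well as USB (per the added comment, on the condition that RMRR use is torn down before guest assignment). Both class macros decode the PCI class dword, whose layout is base class in bits 23:16, subclass in bits 15:8, programming interface in bits 7:0; a quick check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int class = 0x0c0330; /* example: a USB xHCI controller */

        printf("base class 0x%02x\n", class >> 16); /* 0x0c, serial bus */
        printf("base+sub   0x%04x\n", class >> 8);  /* 0x0c03 = PCI_CLASS_SERIAL_USB */
        return 0;
    }
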
1804 +diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
1805 +index 4a9ce5b50c5b..6b2b582433bd 100644
1806 +--- a/drivers/irqchip/irq-sunxi-nmi.c
1807 ++++ b/drivers/irqchip/irq-sunxi-nmi.c
1808 +@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
1809 + irqd_set_trigger_type(data, flow_type);
1810 + irq_setup_alt_chip(data, flow_type);
1811 +
1812 +- for (i = 0; i <= gc->num_ct; i++, ct++)
1813 ++ for (i = 0; i < gc->num_ct; i++, ct++)
1814 + if (ct->type & flow_type)
1815 + ctrl_off = ct->regs.type;
1816 +
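
The sunxi-nmi change is a plain off-by-one: with `i <= gc->num_ct` the loop dereferenced one chip type past the end of the array. The pattern, reduced to standalone C:

    #include <stdio.h>

    int main(void)
    {
        int type_regs[2] = { 0x10, 0x14 }; /* num_ct == 2 entries */
        int num_ct = 2, last = 0;

        for (int i = 0; i < num_ct; i++)   /* "<=" would read type_regs[2] */
            last = type_regs[i];

        printf("last = 0x%x\n", last);
        return 0;
    }
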
1817 +diff --git a/drivers/md/md.c b/drivers/md/md.c
1818 +index 907534b7f40d..b7bf8ee857fa 100644
1819 +--- a/drivers/md/md.c
1820 ++++ b/drivers/md/md.c
1821 +@@ -3765,7 +3765,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
1822 + err = -EBUSY;
1823 + }
1824 + spin_unlock(&mddev->lock);
1825 +- return err;
1826 ++ return err ?: len;
1827 + }
1828 + err = mddev_lock(mddev);
1829 + if (err)
1830 +@@ -4144,13 +4144,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
1831 + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1832 + else
1833 + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1834 +- flush_workqueue(md_misc_wq);
1835 +- if (mddev->sync_thread) {
1836 +- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1837 +- if (mddev_lock(mddev) == 0) {
1838 ++ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
1839 ++ mddev_lock(mddev) == 0) {
1840 ++ flush_workqueue(md_misc_wq);
1841 ++ if (mddev->sync_thread) {
1842 ++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1843 + md_reap_sync_thread(mddev);
1844 +- mddev_unlock(mddev);
1845 + }
1846 ++ mddev_unlock(mddev);
1847 + }
1848 + } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1849 + test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
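
Two md fixes above. First, array_state_store() now returns `err ?: len` on the early-unlock path: a sysfs store callback that returns 0 makes the caller's write() report zero bytes consumed, which many writers treat as a reason to retry, so success must return the length. `a ?: b` is the GNU C conditional with omitted middle operand, evaluating `a` once (builds with gcc or clang):

    #include <stdio.h>

    static long store_result(int err, long len)
    {
        return err ?: len; /* GNU C: err ? err : len, err evaluated once */
    }

    int main(void)
    {
        printf("%ld\n", store_result(0, 5));   /* 5: success consumes buffer */
        printf("%ld\n", store_result(-16, 5)); /* -16: error passed through */
        return 0;
    }

Second, action_store() now only flushes the workqueue and reaps the sync thread when MD_RECOVERY_RUNNING is set and mddev_lock() succeeds, holding the lock around the whole sequence instead of only around md_reap_sync_thread().
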
1850 +diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1851 +index 4df28943d222..e8d3c1d35453 100644
1852 +--- a/drivers/net/bonding/bond_options.c
1853 ++++ b/drivers/net/bonding/bond_options.c
1854 +@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
1855 + out:
1856 + if (ret)
1857 + bond_opt_error_interpret(bond, opt, ret, val);
1858 +- else
1859 ++ else if (bond->dev->reg_state == NETREG_REGISTERED)
1860 + call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
1861 +
1862 + return ret;
1863 +diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
1864 +index 7f05f309e935..da36bcf32404 100644
1865 +--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
1866 ++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
1867 +@@ -1773,9 +1773,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1868 + total_size = buf_len;
1869 +
1870 + get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1871 +- get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1872 +- get_fat_cmd.size,
1873 +- &get_fat_cmd.dma);
1874 ++ get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1875 ++ get_fat_cmd.size,
1876 ++ &get_fat_cmd.dma, GFP_ATOMIC);
1877 + if (!get_fat_cmd.va) {
1878 + dev_err(&adapter->pdev->dev,
1879 + "Memory allocation failure while reading FAT data\n");
1880 +@@ -1820,8 +1820,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1881 + log_offset += buf_size;
1882 + }
1883 + err:
1884 +- pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1885 +- get_fat_cmd.va, get_fat_cmd.dma);
1886 ++ dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1887 ++ get_fat_cmd.va, get_fat_cmd.dma);
1888 + spin_unlock_bh(&adapter->mcc_lock);
1889 + return status;
1890 + }
1891 +@@ -2272,12 +2272,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
1892 + return -EINVAL;
1893 +
1894 + cmd.size = sizeof(struct be_cmd_resp_port_type);
1895 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1896 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1897 ++ GFP_ATOMIC);
1898 + if (!cmd.va) {
1899 + dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
1900 + return -ENOMEM;
1901 + }
1902 +- memset(cmd.va, 0, cmd.size);
1903 +
1904 + spin_lock_bh(&adapter->mcc_lock);
1905 +
1906 +@@ -2302,7 +2302,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
1907 + }
1908 + err:
1909 + spin_unlock_bh(&adapter->mcc_lock);
1910 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1911 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
1912 + return status;
1913 + }
1914 +
1915 +@@ -2777,7 +2777,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
1916 + goto err;
1917 + }
1918 + cmd.size = sizeof(struct be_cmd_req_get_phy_info);
1919 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1920 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1921 ++ GFP_ATOMIC);
1922 + if (!cmd.va) {
1923 + dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
1924 + status = -ENOMEM;
1925 +@@ -2811,7 +2812,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
1926 + BE_SUPPORTED_SPEED_1GBPS;
1927 + }
1928 + }
1929 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1930 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
1931 + err:
1932 + spin_unlock_bh(&adapter->mcc_lock);
1933 + return status;
1934 +@@ -2862,8 +2863,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1935 +
1936 + memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
1937 + attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
1938 +- attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
1939 +- &attribs_cmd.dma);
1940 ++ attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1941 ++ attribs_cmd.size,
1942 ++ &attribs_cmd.dma, GFP_ATOMIC);
1943 + if (!attribs_cmd.va) {
1944 + dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
1945 + status = -ENOMEM;
1946 +@@ -2890,8 +2892,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1947 + err:
1948 + mutex_unlock(&adapter->mbox_lock);
1949 + if (attribs_cmd.va)
1950 +- pci_free_consistent(adapter->pdev, attribs_cmd.size,
1951 +- attribs_cmd.va, attribs_cmd.dma);
1952 ++ dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
1953 ++ attribs_cmd.va, attribs_cmd.dma);
1954 + return status;
1955 + }
1956 +
1957 +@@ -3029,9 +3031,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1958 +
1959 + memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
1960 + get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
1961 +- get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
1962 +- get_mac_list_cmd.size,
1963 +- &get_mac_list_cmd.dma);
1964 ++ get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1965 ++ get_mac_list_cmd.size,
1966 ++ &get_mac_list_cmd.dma,
1967 ++ GFP_ATOMIC);
1968 +
1969 + if (!get_mac_list_cmd.va) {
1970 + dev_err(&adapter->pdev->dev,
1971 +@@ -3104,8 +3107,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1972 +
1973 + out:
1974 + spin_unlock_bh(&adapter->mcc_lock);
1975 +- pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
1976 +- get_mac_list_cmd.va, get_mac_list_cmd.dma);
1977 ++ dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
1978 ++ get_mac_list_cmd.va, get_mac_list_cmd.dma);
1979 + return status;
1980 + }
1981 +
1982 +@@ -3158,8 +3161,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1983 +
1984 + memset(&cmd, 0, sizeof(struct be_dma_mem));
1985 + cmd.size = sizeof(struct be_cmd_req_set_mac_list);
1986 +- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
1987 +- &cmd.dma, GFP_KERNEL);
1988 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1989 ++ GFP_KERNEL);
1990 + if (!cmd.va)
1991 + return -ENOMEM;
1992 +
1993 +@@ -3348,7 +3351,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
1994 +
1995 + memset(&cmd, 0, sizeof(struct be_dma_mem));
1996 + cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
1997 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1998 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1999 ++ GFP_ATOMIC);
2000 + if (!cmd.va) {
2001 + dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2002 + status = -ENOMEM;
2003 +@@ -3383,7 +3387,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2004 + err:
2005 + mutex_unlock(&adapter->mbox_lock);
2006 + if (cmd.va)
2007 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2008 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2009 ++ cmd.dma);
2010 + return status;
2011 +
2012 + }
2013 +@@ -3397,8 +3402,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
2014 +
2015 + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
2016 + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
2017 +- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
2018 +- &extfat_cmd.dma);
2019 ++ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2020 ++ extfat_cmd.size, &extfat_cmd.dma,
2021 ++ GFP_ATOMIC);
2022 + if (!extfat_cmd.va)
2023 + return -ENOMEM;
2024 +
2025 +@@ -3420,8 +3426,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
2026 +
2027 + status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
2028 + err:
2029 +- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
2030 +- extfat_cmd.dma);
2031 ++ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
2032 ++ extfat_cmd.dma);
2033 + return status;
2034 + }
2035 +
2036 +@@ -3434,8 +3440,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
2037 +
2038 + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
2039 + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
2040 +- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
2041 +- &extfat_cmd.dma);
2042 ++ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2043 ++ extfat_cmd.size, &extfat_cmd.dma,
2044 ++ GFP_ATOMIC);
2045 +
2046 + if (!extfat_cmd.va) {
2047 + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
2048 +@@ -3453,8 +3460,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
2049 + level = cfgs->module[0].trace_lvl[j].dbg_lvl;
2050 + }
2051 + }
2052 +- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
2053 +- extfat_cmd.dma);
2054 ++ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
2055 ++ extfat_cmd.dma);
2056 + err:
2057 + return level;
2058 + }
2059 +@@ -3652,7 +3659,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
2060 +
2061 + memset(&cmd, 0, sizeof(struct be_dma_mem));
2062 + cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2063 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2064 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2065 ++ GFP_ATOMIC);
2066 + if (!cmd.va) {
2067 + dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2068 + status = -ENOMEM;
2069 +@@ -3692,7 +3700,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
2070 + err:
2071 + mutex_unlock(&adapter->mbox_lock);
2072 + if (cmd.va)
2073 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2074 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2075 ++ cmd.dma);
2076 + return status;
2077 + }
2078 +
2079 +@@ -3713,7 +3722,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
2080 +
2081 + memset(&cmd, 0, sizeof(struct be_dma_mem));
2082 + cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
2083 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2084 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2085 ++ GFP_ATOMIC);
2086 + if (!cmd.va)
2087 + return -ENOMEM;
2088 +
2089 +@@ -3752,7 +3762,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
2090 + res->vf_if_cap_flags = vf_res->cap_flags;
2091 + err:
2092 + if (cmd.va)
2093 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2094 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2095 ++ cmd.dma);
2096 + return status;
2097 + }
2098 +
2099 +@@ -3767,7 +3778,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
2100 +
2101 + memset(&cmd, 0, sizeof(struct be_dma_mem));
2102 + cmd.size = sizeof(struct be_cmd_req_set_profile_config);
2103 +- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2104 ++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2105 ++ GFP_ATOMIC);
2106 + if (!cmd.va)
2107 + return -ENOMEM;
2108 +
2109 +@@ -3783,7 +3795,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
2110 + status = be_cmd_notify_wait(adapter, &wrb);
2111 +
2112 + if (cmd.va)
2113 +- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2114 ++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2115 ++ cmd.dma);
2116 + return status;
2117 + }
2118 +
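
The long series of be_cmds.c conversions above (continued in the be_ethtool.c and be_main.c hunks below) swaps the legacy pci_alloc_consistent()/pci_free_consistent() pair for dma_zalloc_coherent()/dma_free_coherent(). The legacy wrapper hard-codes GFP_ATOMIC and returns unzeroed memory, so the conversion makes the gfp explicit, GFP_ATOMIC where the adapter's mcc_lock spinlock is held and GFP_KERNEL elsewhere, and lets __GFP_ZERO replace the manual memset() that the transceiver-data path used to need. Roughly what the compat wrapper did (simplified from include/asm-generic/pci-dma-compat.h, ignoring the NULL-hwdev case):

    static inline void *
    pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
    {
            return dma_alloc_coherent(&hwdev->dev, size, handle, GFP_ATOMIC);
    }
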
2119 +diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
2120 +index 4d2de4700769..22ffcd81a6b5 100644
2121 +--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
2122 ++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
2123 +@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
2124 + int status = 0;
2125 +
2126 + read_cmd.size = LANCER_READ_FILE_CHUNK;
2127 +- read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
2128 +- &read_cmd.dma);
2129 ++ read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
2130 ++ &read_cmd.dma, GFP_ATOMIC);
2131 +
2132 + if (!read_cmd.va) {
2133 + dev_err(&adapter->pdev->dev,
2134 +@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
2135 + break;
2136 + }
2137 + }
2138 +- pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
2139 +- read_cmd.dma);
2140 ++ dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
2141 ++ read_cmd.dma);
2142 +
2143 + return status;
2144 + }
2145 +@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
2146 + };
2147 +
2148 + ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
2149 +- ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
2150 +- &ddrdma_cmd.dma, GFP_KERNEL);
2151 ++ ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2152 ++ ddrdma_cmd.size, &ddrdma_cmd.dma,
2153 ++ GFP_KERNEL);
2154 + if (!ddrdma_cmd.va)
2155 + return -ENOMEM;
2156 +
2157 +@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
2158 +
2159 + memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
2160 + eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
2161 +- eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
2162 +- &eeprom_cmd.dma, GFP_KERNEL);
2163 ++ eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2164 ++ eeprom_cmd.size, &eeprom_cmd.dma,
2165 ++ GFP_KERNEL);
2166 +
2167 + if (!eeprom_cmd.va)
2168 + return -ENOMEM;
2169 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
2170 +index e6b790f0d9dc..893753f18098 100644
2171 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
2172 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
2173 +@@ -4392,8 +4392,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
2174 +
2175 + flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2176 + + LANCER_FW_DOWNLOAD_CHUNK;
2177 +- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
2178 +- &flash_cmd.dma, GFP_KERNEL);
2179 ++ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
2180 ++ &flash_cmd.dma, GFP_KERNEL);
2181 + if (!flash_cmd.va)
2182 + return -ENOMEM;
2183 +
2184 +@@ -4526,8 +4526,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2185 + }
2186 +
2187 + flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2188 +- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
2189 +- GFP_KERNEL);
2190 ++ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
2191 ++ GFP_KERNEL);
2192 + if (!flash_cmd.va)
2193 + return -ENOMEM;
2194 +
2195 +@@ -4941,10 +4941,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
2196 + goto done;
2197 +
2198 + mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2199 +- mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2200 +- mbox_mem_alloc->size,
2201 +- &mbox_mem_alloc->dma,
2202 +- GFP_KERNEL);
2203 ++ mbox_mem_alloc->va = dma_zalloc_coherent(&adapter->pdev->dev,
2204 ++ mbox_mem_alloc->size,
2205 ++ &mbox_mem_alloc->dma,
2206 ++ GFP_KERNEL);
2207 + if (!mbox_mem_alloc->va) {
2208 + status = -ENOMEM;
2209 + goto unmap_pci_bars;
2210 +diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
2211 +index e22e602beef3..c5789cdf7778 100644
2212 +--- a/drivers/net/phy/dp83640.c
2213 ++++ b/drivers/net/phy/dp83640.c
2214 +@@ -47,7 +47,7 @@
2215 + #define PSF_TX 0x1000
2216 + #define EXT_EVENT 1
2217 + #define CAL_EVENT 7
2218 +-#define CAL_TRIGGER 7
2219 ++#define CAL_TRIGGER 1
2220 + #define DP83640_N_PINS 12
2221 +
2222 + #define MII_DP83640_MICR 0x11
2223 +@@ -495,7 +495,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
2224 + else
2225 + evnt |= EVNT_RISE;
2226 + }
2227 ++ mutex_lock(&clock->extreg_lock);
2228 + ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
2229 ++ mutex_unlock(&clock->extreg_lock);
2230 + return 0;
2231 +
2232 + case PTP_CLK_REQ_PEROUT:
2233 +@@ -531,6 +533,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
2234 +
2235 + static void enable_status_frames(struct phy_device *phydev, bool on)
2236 + {
2237 ++ struct dp83640_private *dp83640 = phydev->priv;
2238 ++ struct dp83640_clock *clock = dp83640->clock;
2239 + u16 cfg0 = 0, ver;
2240 +
2241 + if (on)
2242 +@@ -538,9 +542,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
2243 +
2244 + ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
2245 +
2246 ++ mutex_lock(&clock->extreg_lock);
2247 ++
2248 + ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
2249 + ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
2250 +
2251 ++ mutex_unlock(&clock->extreg_lock);
2252 ++
2253 + if (!phydev->attached_dev) {
2254 + pr_warn("expected to find an attached netdevice\n");
2255 + return;
2256 +@@ -837,7 +845,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
2257 + list_del_init(&rxts->list);
2258 + phy2rxts(phy_rxts, rxts);
2259 +
2260 +- spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
2261 ++ spin_lock(&dp83640->rx_queue.lock);
2262 + skb_queue_walk(&dp83640->rx_queue, skb) {
2263 + struct dp83640_skb_info *skb_info;
2264 +
2265 +@@ -852,7 +860,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
2266 + break;
2267 + }
2268 + }
2269 +- spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
2270 ++ spin_unlock(&dp83640->rx_queue.lock);
2271 +
2272 + if (!shhwtstamps)
2273 + list_add_tail(&rxts->list, &dp83640->rxts);
2274 +@@ -1172,11 +1180,18 @@ static int dp83640_config_init(struct phy_device *phydev)
2275 +
2276 + if (clock->chosen && !list_empty(&clock->phylist))
2277 + recalibrate(clock);
2278 +- else
2279 ++ else {
2280 ++ mutex_lock(&clock->extreg_lock);
2281 + enable_broadcast(phydev, clock->page, 1);
2282 ++ mutex_unlock(&clock->extreg_lock);
2283 ++ }
2284 +
2285 + enable_status_frames(phydev, true);
2286 ++
2287 ++ mutex_lock(&clock->extreg_lock);
2288 + ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
2289 ++ mutex_unlock(&clock->extreg_lock);
2290 ++
2291 + return 0;
2292 + }
2293 +
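
The dp83640 hunks do three things: CAL_TRIGGER moves from 7 to 1, so the calibration trigger no longer shares index 7 with CAL_EVENT; the ext_write() calls in ptp_dp83640_enable(), enable_status_frames() and dp83640_config_init() are now wrapped in clock->extreg_lock; and decode_rxts() switches to plain spin_lock(), presumably because the receive path never takes the queue lock from hard-irq context. The extended PHY registers sit behind a page-select register, so selecting the page and writing the register must be one atomic unit; a sketch with invented names (struct dp_priv, PAGESEL stand-ins):

    /* Hypothetical sketch: another thread must not switch the page
     * between the two writes. */
    static void ext_write_locked(struct dp_priv *dp, int page, int reg, int val)
    {
            mutex_lock(&dp->extreg_lock);
            phy_write(dp->phydev, PAGESEL, page); /* step 1: select page */
            phy_write(dp->phydev, reg, val);      /* step 2: write in page */
            mutex_unlock(&dp->extreg_lock);
    }
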
2294 +diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2295 +index 52cd8db2c57d..757f28a4284c 100644
2296 +--- a/drivers/net/phy/phy.c
2297 ++++ b/drivers/net/phy/phy.c
2298 +@@ -1053,13 +1053,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
2299 + {
2300 + /* According to 802.3az,the EEE is supported only in full duplex-mode.
2301 + * Also EEE feature is active when core is operating with MII, GMII
2302 +- * or RGMII. Internal PHYs are also allowed to proceed and should
2303 +- * return an error if they do not support EEE.
2304 ++ * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
2305 ++ * should return an error if they do not support EEE.
2306 + */
2307 + if ((phydev->duplex == DUPLEX_FULL) &&
2308 + ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
2309 + (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
2310 +- (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
2311 ++ (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
2312 ++ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
2313 + phy_is_internal(phydev))) {
2314 + int eee_lp, eee_cap, eee_adv;
2315 + u32 lp, cap, adv;
2316 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2317 +index c3e4da9e79ca..8067b8fbb0ee 100644
2318 +--- a/drivers/net/usb/cdc_ncm.c
2319 ++++ b/drivers/net/usb/cdc_ncm.c
2320 +@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
2321 + * payload data instead.
2322 + */
2323 + usbnet_set_skb_tx_stats(skb_out, n,
2324 +- ctx->tx_curr_frame_payload - skb_out->len);
2325 ++ (long)ctx->tx_curr_frame_payload - skb_out->len);
2326 +
2327 + return skb_out;
2328 +
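
The cdc_ncm one-liner casts the payload counter before the subtraction. The third argument of usbnet_set_skb_tx_stats() is a signed byte delta (hence the cast to long), and when padding makes skb_out->len exceed the unsigned payload count, an all-unsigned subtraction wraps to a huge positive number instead of going negative. Standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int payload = 100, framelen = 112; /* frame padded past payload */

        printf("unsigned: %u\n", payload - framelen);              /* 4294967284 */
        printf("signed:   %ld\n", (long)payload - (long)framelen); /* -12 */
        return 0;
    }
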
2329 +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
2330 +index 794204e34fba..152131a10047 100644
2331 +--- a/drivers/net/xen-netback/xenbus.c
2332 ++++ b/drivers/net/xen-netback/xenbus.c
2333 +@@ -34,6 +34,8 @@ struct backend_info {
2334 + enum xenbus_state frontend_state;
2335 + struct xenbus_watch hotplug_status_watch;
2336 + u8 have_hotplug_status_watch:1;
2337 ++
2338 ++ const char *hotplug_script;
2339 + };
2340 +
2341 + static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
2342 +@@ -236,6 +238,7 @@ static int netback_remove(struct xenbus_device *dev)
2343 + xenvif_free(be->vif);
2344 + be->vif = NULL;
2345 + }
2346 ++ kfree(be->hotplug_script);
2347 + kfree(be);
2348 + dev_set_drvdata(&dev->dev, NULL);
2349 + return 0;
2350 +@@ -253,6 +256,7 @@ static int netback_probe(struct xenbus_device *dev,
2351 + struct xenbus_transaction xbt;
2352 + int err;
2353 + int sg;
2354 ++ const char *script;
2355 + struct backend_info *be = kzalloc(sizeof(struct backend_info),
2356 + GFP_KERNEL);
2357 + if (!be) {
2358 +@@ -345,6 +349,15 @@ static int netback_probe(struct xenbus_device *dev,
2359 + if (err)
2360 + pr_debug("Error writing multi-queue-max-queues\n");
2361 +
2362 ++ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
2363 ++ if (IS_ERR(script)) {
2364 ++ err = PTR_ERR(script);
2365 ++ xenbus_dev_fatal(dev, err, "reading script");
2366 ++ goto fail;
2367 ++ }
2368 ++
2369 ++ be->hotplug_script = script;
2370 ++
2371 + err = xenbus_switch_state(dev, XenbusStateInitWait);
2372 + if (err)
2373 + goto fail;
2374 +@@ -377,22 +390,14 @@ static int netback_uevent(struct xenbus_device *xdev,
2375 + struct kobj_uevent_env *env)
2376 + {
2377 + struct backend_info *be = dev_get_drvdata(&xdev->dev);
2378 +- char *val;
2379 +
2380 +- val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
2381 +- if (IS_ERR(val)) {
2382 +- int err = PTR_ERR(val);
2383 +- xenbus_dev_fatal(xdev, err, "reading script");
2384 +- return err;
2385 +- } else {
2386 +- if (add_uevent_var(env, "script=%s", val)) {
2387 +- kfree(val);
2388 +- return -ENOMEM;
2389 +- }
2390 +- kfree(val);
2391 +- }
2392 ++ if (!be)
2393 ++ return 0;
2394 ++
2395 ++ if (add_uevent_var(env, "script=%s", be->hotplug_script))
2396 ++ return -ENOMEM;
2397 +
2398 +- if (!be || !be->vif)
2399 ++ if (!be->vif)
2400 + return 0;
2401 +
2402 + return add_uevent_var(env, "vif=%s", be->vif->dev->name);
2403 +@@ -736,6 +741,7 @@ static void connect(struct backend_info *be)
2404 + goto err;
2405 + }
2406 +
2407 ++ queue->credit_bytes = credit_bytes;
2408 + queue->remaining_credit = credit_bytes;
2409 + queue->credit_usec = credit_usec;
2410 +
2411 +diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
2412 +index 3351ef408125..53826b84e0ec 100644
2413 +--- a/drivers/of/dynamic.c
2414 ++++ b/drivers/of/dynamic.c
2415 +@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
2416 + phandle = __of_get_property(np, "phandle", &sz);
2417 + if (!phandle)
2418 + phandle = __of_get_property(np, "linux,phandle", &sz);
2419 +- if (IS_ENABLED(PPC_PSERIES) && !phandle)
2420 ++ if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
2421 + phandle = __of_get_property(np, "ibm,phandle", &sz);
2422 + np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
2423 +
2424 +diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
2425 +index 8543bb29a138..9737a979b8db 100644
2426 +--- a/drivers/staging/ozwpan/ozhcd.c
2427 ++++ b/drivers/staging/ozwpan/ozhcd.c
2428 +@@ -743,8 +743,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
2429 + /*
2430 + * Context: softirq
2431 + */
2432 +-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
2433 +- int length, int offset, int total_size)
2434 ++void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
2435 ++ u8 length, u16 offset, u16 total_size)
2436 + {
2437 + struct oz_port *port = hport;
2438 + struct urb *urb;
2439 +@@ -756,8 +756,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
2440 + if (!urb)
2441 + return;
2442 + if (status == 0) {
2443 +- int copy_len;
2444 +- int required_size = urb->transfer_buffer_length;
2445 ++ unsigned int copy_len;
2446 ++ unsigned int required_size = urb->transfer_buffer_length;
2447 +
2448 + if (required_size > total_size)
2449 + required_size = total_size;
2450 +diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
2451 +index 4249fa374012..d2a6085345be 100644
2452 +--- a/drivers/staging/ozwpan/ozusbif.h
2453 ++++ b/drivers/staging/ozwpan/ozusbif.h
2454 +@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
2455 +
2456 + /* Confirmation functions.
2457 + */
2458 +-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
2459 +- const u8 *desc, int length, int offset, int total_size);
2460 ++void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
2461 ++ const u8 *desc, u8 length, u16 offset, u16 total_size);
2462 + void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
2463 + const u8 *data, int data_len);
2464 +
2465 +diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
2466 +index d434d8c6fff6..f660bb198c65 100644
2467 +--- a/drivers/staging/ozwpan/ozusbsvc1.c
2468 ++++ b/drivers/staging/ozwpan/ozusbsvc1.c
2469 +@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
2470 + struct oz_multiple_fixed *body =
2471 + (struct oz_multiple_fixed *)data_hdr;
2472 + u8 *data = body->data;
2473 +- int n = (len - sizeof(struct oz_multiple_fixed)+1)
2474 ++ unsigned int n;
2475 ++ if (!body->unit_size ||
2476 ++ len < sizeof(struct oz_multiple_fixed) - 1)
2477 ++ break;
2478 ++ n = (len - (sizeof(struct oz_multiple_fixed) - 1))
2479 + / body->unit_size;
2480 + while (n--) {
2481 + oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
2482 +@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
2483 + case OZ_GET_DESC_RSP: {
2484 + struct oz_get_desc_rsp *body =
2485 + (struct oz_get_desc_rsp *)usb_hdr;
2486 +- int data_len = elt->length -
2487 +- sizeof(struct oz_get_desc_rsp) + 1;
2488 +- u16 offs = le16_to_cpu(get_unaligned(&body->offset));
2489 +- u16 total_size =
2490 ++ u16 offs, total_size;
2491 ++ u8 data_len;
2492 ++
2493 ++ if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
2494 ++ break;
2495 ++ data_len = elt->length -
2496 ++ (sizeof(struct oz_get_desc_rsp) - 1);
2497 ++ offs = le16_to_cpu(get_unaligned(&body->offset));
2498 ++ total_size =
2499 + le16_to_cpu(get_unaligned(&body->total_size));
2500 + oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
2501 + oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
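
Both ozwpan hunks guard length arithmetic on element sizes taken from the network peer: previously `len - sizeof(hdr) + 1` could underflow for a short element, and body->unit_size could be zero, dividing by zero. The fix validates first, then subtracts, and narrows the confirmation-path parameters to the on-wire field widths (u8/u16). The guard pattern in miniature:

    #include <stdio.h>

    #define HDR_SIZE 4u /* stand-in for sizeof(struct ...) - 1 */

    static int payload_units(unsigned int len, unsigned int unit_size)
    {
        if (!unit_size || len < HDR_SIZE) /* validate before subtracting */
            return -1;
        return (int)((len - HDR_SIZE) / unit_size);
    }

    int main(void)
    {
        printf("%d\n", payload_units(12, 4)); /* 2 */
        printf("%d\n", payload_units(2, 4));  /* -1: rejected, no underflow */
        return 0;
    }
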
2502 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2503 +index cc57a3a6b02b..eee40b5cb025 100644
2504 +--- a/drivers/tty/n_tty.c
2505 ++++ b/drivers/tty/n_tty.c
2506 +@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
2507 + return put_user(x, ptr);
2508 + }
2509 +
2510 ++static inline int tty_copy_to_user(struct tty_struct *tty,
2511 ++ void __user *to,
2512 ++ const void *from,
2513 ++ unsigned long n)
2514 ++{
2515 ++ struct n_tty_data *ldata = tty->disc_data;
2516 ++
2517 ++ tty_audit_add_data(tty, to, n, ldata->icanon);
2518 ++ return copy_to_user(to, from, n);
2519 ++}
2520 ++
2521 + /**
2522 + * n_tty_kick_worker - start input worker (if required)
2523 + * @tty: terminal
2524 +@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2525 + __func__, eol, found, n, c, size, more);
2526 +
2527 + if (n > size) {
2528 +- ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
2529 ++ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
2530 + if (ret)
2531 + return -EFAULT;
2532 +- ret = copy_to_user(*b + size, ldata->read_buf, n - size);
2533 ++ ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
2534 + } else
2535 +- ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
2536 ++ ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
2537 +
2538 + if (ret)
2539 + return -EFAULT;
2540 +diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
2541 +index 23061918b0e4..f74f400fcb57 100644
2542 +--- a/drivers/tty/serial/imx.c
2543 ++++ b/drivers/tty/serial/imx.c
2544 +@@ -959,6 +959,14 @@ static void dma_rx_callback(void *data)
2545 +
2546 + status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
2547 + count = RX_BUF_SIZE - state.residue;
2548 ++
2549 ++ if (readl(sport->port.membase + USR2) & USR2_IDLE) {
2550 ++ /* In condition [3] the SDMA counted up too early */
2551 ++ count--;
2552 ++
2553 ++ writel(USR2_IDLE, sport->port.membase + USR2);
2554 ++ }
2555 ++
2556 + dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
2557 +
2558 + if (count) {
2559 +diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
2560 +index d201910b892f..f176941a92dd 100644
2561 +--- a/drivers/usb/dwc3/core.h
2562 ++++ b/drivers/usb/dwc3/core.h
2563 +@@ -339,7 +339,7 @@
2564 + #define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
2565 + #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
2566 +
2567 +-#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1)
2568 ++#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
2569 + #define DWC3_DGCMD_CMDACT (1 << 10)
2570 + #define DWC3_DGCMD_CMDIOC (1 << 8)
2571 +
2572 +@@ -355,7 +355,7 @@
2573 + #define DWC3_DEPCMD_PARAM_SHIFT 16
2574 + #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
2575 + #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
2576 +-#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1)
2577 ++#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
2578 + #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
2579 + #define DWC3_DEPCMD_CMDACT (1 << 10)
2580 + #define DWC3_DEPCMD_CMDIOC (1 << 8)
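
The dwc3 register fix widens the command-status read from a single bit at position 15 to the full four-bit field in bits 15:12, so a non-zero completion code whose top bit happens to be clear is no longer misread as success:

    #include <stdio.h>

    #define STATUS_BIT15(x) (((x) >> 15) & 1)    /* old: top bit only */
    #define STATUS_FIELD(x) (((x) >> 12) & 0x0F) /* new: bits 15:12 */

    int main(void)
    {
        unsigned int reg = 0x2000; /* completion code 2 in bits 15:12 */

        printf("old: %u\n", STATUS_BIT15(reg)); /* 0, looks like success */
        printf("new: %u\n", STATUS_FIELD(reg)); /* 2, the real status */
        return 0;
    }
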
2581 +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2582 +index ec8ac1674854..36bf089b708f 100644
2583 +--- a/drivers/usb/host/xhci.c
2584 ++++ b/drivers/usb/host/xhci.c
2585 +@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2586 + {
2587 + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2588 + unsigned long flags;
2589 +- int ret;
2590 ++ int ret, slot_id;
2591 + struct xhci_command *command;
2592 +
2593 + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
2594 + if (!command)
2595 + return 0;
2596 +
2597 ++ /* xhci->slot_id and xhci->addr_dev are not thread-safe */
2598 ++ mutex_lock(&xhci->mutex);
2599 + spin_lock_irqsave(&xhci->lock, flags);
2600 + command->completion = &xhci->addr_dev;
2601 + ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
2602 + if (ret) {
2603 + spin_unlock_irqrestore(&xhci->lock, flags);
2604 ++ mutex_unlock(&xhci->mutex);
2605 + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2606 + kfree(command);
2607 + return 0;
2608 +@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2609 + spin_unlock_irqrestore(&xhci->lock, flags);
2610 +
2611 + wait_for_completion(command->completion);
2612 ++ slot_id = xhci->slot_id;
2613 ++ mutex_unlock(&xhci->mutex);
2614 +
2615 +- if (!xhci->slot_id || command->status != COMP_SUCCESS) {
2616 ++ if (!slot_id || command->status != COMP_SUCCESS) {
2617 + xhci_err(xhci, "Error while assigning device slot ID\n");
2618 + xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
2619 + HCS_MAX_SLOTS(
2620 +@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2621 + * xhci_discover_or_reset_device(), which may be called as part of
2622 + * mass storage driver error handling.
2623 + */
2624 +- if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2625 ++ if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
2626 + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2627 + goto disable_slot;
2628 + }
2629 +- udev->slot_id = xhci->slot_id;
2630 ++ udev->slot_id = slot_id;
2631 +
2632 + #ifndef CONFIG_USB_DEFAULT_PERSIST
2633 + /*
2634 +@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2635 + struct xhci_slot_ctx *slot_ctx;
2636 + struct xhci_input_control_ctx *ctrl_ctx;
2637 + u64 temp_64;
2638 +- struct xhci_command *command;
2639 ++ struct xhci_command *command = NULL;
2640 ++
2641 ++ mutex_lock(&xhci->mutex);
2642 +
2643 + if (!udev->slot_id) {
2644 + xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2645 + "Bad Slot ID %d", udev->slot_id);
2646 +- return -EINVAL;
2647 ++ ret = -EINVAL;
2648 ++ goto out;
2649 + }
2650 +
2651 + virt_dev = xhci->devs[udev->slot_id];
2652 +@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2653 + */
2654 + xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
2655 + udev->slot_id);
2656 +- return -EINVAL;
2657 ++ ret = -EINVAL;
2658 ++ goto out;
2659 + }
2660 +
2661 + if (setup == SETUP_CONTEXT_ONLY) {
2662 +@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2663 + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
2664 + SLOT_STATE_DEFAULT) {
2665 + xhci_dbg(xhci, "Slot already in default state\n");
2666 +- return 0;
2667 ++ goto out;
2668 + }
2669 + }
2670 +
2671 + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
2672 +- if (!command)
2673 +- return -ENOMEM;
2674 ++ if (!command) {
2675 ++ ret = -ENOMEM;
2676 ++ goto out;
2677 ++ }
2678 +
2679 + command->in_ctx = virt_dev->in_ctx;
2680 + command->completion = &xhci->addr_dev;
2681 +@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2682 + if (!ctrl_ctx) {
2683 + xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2684 + __func__);
2685 +- kfree(command);
2686 +- return -EINVAL;
2687 ++ ret = -EINVAL;
2688 ++ goto out;
2689 + }
2690 + /*
2691 + * If this is the first Set Address since device plug-in or
2692 +@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2693 + spin_unlock_irqrestore(&xhci->lock, flags);
2694 + xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2695 + "FIXME: allocate a command ring segment");
2696 +- kfree(command);
2697 +- return ret;
2698 ++ goto out;
2699 + }
2700 + xhci_ring_cmd_db(xhci);
2701 + spin_unlock_irqrestore(&xhci->lock, flags);
2702 +@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2703 + ret = -EINVAL;
2704 + break;
2705 + }
2706 +- if (ret) {
2707 +- kfree(command);
2708 +- return ret;
2709 +- }
2710 ++ if (ret)
2711 ++ goto out;
2712 + temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2713 + xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2714 + "Op regs DCBAA ptr = %#016llx", temp_64);
2715 +@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2716 + xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2717 + "Internal device address = %d",
2718 + le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
2719 ++out:
2720 ++ mutex_unlock(&xhci->mutex);
2721 + kfree(command);
2722 +- return 0;
2723 ++ return ret;
2724 + }
2725 +
2726 + int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2727 +@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
2728 + return 0;
2729 + }
2730 +
2731 ++ mutex_init(&xhci->mutex);
2732 + xhci->cap_regs = hcd->regs;
2733 + xhci->op_regs = hcd->regs +
2734 + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
2735 +@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
2736 + BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
2737 + return 0;
2738 + }
2739 ++
2740 ++/*
2741 ++ * If an init function is provided, an exit function must also be provided
2742 ++ * to allow module unload.
2743 ++ */
2744 ++static void __exit xhci_hcd_fini(void) { }
2745 ++
2746 + module_init(xhci_hcd_init);
2747 ++module_exit(xhci_hcd_fini);
2748 +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2749 +index ea75e8ccd3c1..6977f8491fa7 100644
2750 +--- a/drivers/usb/host/xhci.h
2751 ++++ b/drivers/usb/host/xhci.h
2752 +@@ -1497,6 +1497,8 @@ struct xhci_hcd {
2753 + struct list_head lpm_failed_devs;
2754 +
2755 + /* slot enabling and address device helpers */
2756 ++ /* these are not thread-safe, so use the mutex */
2757 ++ struct mutex mutex;
2758 + struct completion addr_dev;
2759 + int slot_id;
2760 + /* For USB 3.0 LPM enable/disable. */
2761 +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2762 +index 9031750e7404..ffd739e31bfc 100644
2763 +--- a/drivers/usb/serial/cp210x.c
2764 ++++ b/drivers/usb/serial/cp210x.c
2765 +@@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
2766 + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
2767 + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
2768 + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
2769 ++ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
2770 + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
2771 + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
2772 + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
2773 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2774 +index 8eb68a31cab6..4c8b3b82103d 100644
2775 +--- a/drivers/usb/serial/ftdi_sio.c
2776 ++++ b/drivers/usb/serial/ftdi_sio.c
2777 +@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
2778 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
2779 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
2780 + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
2781 ++ { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
2782 + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
2783 + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
2784 + { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
2785 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2786 +index 4e4f46f3c89c..792e054126de 100644
2787 +--- a/drivers/usb/serial/ftdi_sio_ids.h
2788 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
2789 +@@ -155,6 +155,7 @@
2790 + #define XSENS_AWINDA_STATION_PID 0x0101
2791 + #define XSENS_AWINDA_DONGLE_PID 0x0102
2792 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
2793 ++#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
2794 + #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
2795 +
2796 + /* Xsens devices using FTDI VID */
2797 +diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
2798 +index e894eb278d83..eba1b7ac7294 100644
2799 +--- a/drivers/virtio/virtio_pci_common.c
2800 ++++ b/drivers/virtio/virtio_pci_common.c
2801 +@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
2802 + if (cpu == -1)
2803 + irq_set_affinity_hint(irq, NULL);
2804 + else {
2805 ++ cpumask_clear(mask);
2806 + cpumask_set_cpu(cpu, mask);
2807 + irq_set_affinity_hint(irq, mask);
2808 + }
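
The virtio fix clears the affinity mask before setting the requested CPU. The cpumask appears to be kept per MSI-X vector and reused across calls, so without the clear each vp_set_vq_affinity() call ORed one more CPU into the stale mask instead of moving the interrupt. Reduced to a bitmask:

    #include <stdio.h>

    int main(void)
    {
        unsigned long mask = 0;

        mask |= 1UL << 1;                    /* set_vq_affinity(vq, 1) */
        mask |= 1UL << 3;                    /* set_vq_affinity(vq, 3) */
        printf("stale mask: 0x%lx\n", mask); /* 0xa: CPUs 1 and 3 */

        mask = 0;                            /* the fix: clear first */
        mask |= 1UL << 3;
        printf("fixed mask: 0x%lx\n", mask); /* 0x8: CPU 3 only */
        return 0;
    }
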
2809 +diff --git a/fs/aio.c b/fs/aio.c
2810 +index a793f7023755..a1736e98c278 100644
2811 +--- a/fs/aio.c
2812 ++++ b/fs/aio.c
2813 +@@ -77,6 +77,11 @@ struct kioctx_cpu {
2814 + unsigned reqs_available;
2815 + };
2816 +
2817 ++struct ctx_rq_wait {
2818 ++ struct completion comp;
2819 ++ atomic_t count;
2820 ++};
2821 ++
2822 + struct kioctx {
2823 + struct percpu_ref users;
2824 + atomic_t dead;
2825 +@@ -115,7 +120,7 @@ struct kioctx {
2826 + /*
2827 + * signals when all in-flight requests are done
2828 + */
2829 +- struct completion *requests_done;
2830 ++ struct ctx_rq_wait *rq_wait;
2831 +
2832 + struct {
2833 + /*
2834 +@@ -539,8 +544,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
2835 + struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
2836 +
2837 + /* At this point we know that there are no any in-flight requests */
2838 +- if (ctx->requests_done)
2839 +- complete(ctx->requests_done);
2840 ++ if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
2841 ++ complete(&ctx->rq_wait->comp);
2842 +
2843 + INIT_WORK(&ctx->free_work, free_ioctx);
2844 + schedule_work(&ctx->free_work);
2845 +@@ -751,7 +756,7 @@ err:
2846 + * the rapid destruction of the kioctx.
2847 + */
2848 + static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
2849 +- struct completion *requests_done)
2850 ++ struct ctx_rq_wait *wait)
2851 + {
2852 + struct kioctx_table *table;
2853 +
2854 +@@ -781,7 +786,7 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
2855 + if (ctx->mmap_size)
2856 + vm_munmap(ctx->mmap_base, ctx->mmap_size);
2857 +
2858 +- ctx->requests_done = requests_done;
2859 ++ ctx->rq_wait = wait;
2860 + percpu_ref_kill(&ctx->users);
2861 + return 0;
2862 + }
2863 +@@ -813,18 +818,24 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
2864 + void exit_aio(struct mm_struct *mm)
2865 + {
2866 + struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
2867 +- int i;
2868 ++ struct ctx_rq_wait wait;
2869 ++ int i, skipped;
2870 +
2871 + if (!table)
2872 + return;
2873 +
2874 ++ atomic_set(&wait.count, table->nr);
2875 ++ init_completion(&wait.comp);
2876 ++
2877 ++ skipped = 0;
2878 + for (i = 0; i < table->nr; ++i) {
2879 + struct kioctx *ctx = table->table[i];
2880 +- struct completion requests_done =
2881 +- COMPLETION_INITIALIZER_ONSTACK(requests_done);
2882 +
2883 +- if (!ctx)
2884 ++ if (!ctx) {
2885 ++ skipped++;
2886 + continue;
2887 ++ }
2888 ++
2889 + /*
2890 + * We don't need to bother with munmap() here - exit_mmap(mm)
2891 + * is coming and it'll unmap everything. And we simply can't,
2892 +@@ -833,10 +844,12 @@ void exit_aio(struct mm_struct *mm)
2893 + * that it needs to unmap the area, just set it to 0.
2894 + */
2895 + ctx->mmap_size = 0;
2896 +- kill_ioctx(mm, ctx, &requests_done);
2897 ++ kill_ioctx(mm, ctx, &wait);
2898 ++ }
2899 +
2900 ++ if (!atomic_sub_and_test(skipped, &wait.count)) {
2901 + /* Wait until all IO for the context are done. */
2902 +- wait_for_completion(&requests_done);
2903 ++ wait_for_completion(&wait.comp);
2904 + }
2905 +
2906 + RCU_INIT_POINTER(mm->ioctx_table, NULL);
2907 +@@ -1321,15 +1334,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
2908 + {
2909 + struct kioctx *ioctx = lookup_ioctx(ctx);
2910 + if (likely(NULL != ioctx)) {
2911 +- struct completion requests_done =
2912 +- COMPLETION_INITIALIZER_ONSTACK(requests_done);
2913 ++ struct ctx_rq_wait wait;
2914 + int ret;
2915 +
2916 ++ init_completion(&wait.comp);
2917 ++ atomic_set(&wait.count, 1);
2918 ++
2919 + /* Pass requests_done to kill_ioctx() where it can be set
2920 + * in a thread-safe way. If we try to set it here then we have
2921 + * a race condition if two io_destroy() called simultaneously.
2922 + */
2923 +- ret = kill_ioctx(current->mm, ioctx, &requests_done);
2924 ++ ret = kill_ioctx(current->mm, ioctx, &wait);
2925 + percpu_ref_put(&ioctx->users);
2926 +
2927 + /* Wait until all IO for the context are done. Otherwise kernel
2928 +@@ -1337,7 +1352,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
2929 + * is destroyed.
2930 + */
2931 + if (!ret)
2932 +- wait_for_completion(&requests_done);
2933 ++ wait_for_completion(&wait.comp);
2934 +
2935 + return ret;
2936 + }
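
The aio rework replaces one on-stack completion per kioctx, which exit_aio() waited on inside the loop, serializing the teardown of every context, with a single shared ctx_rq_wait: each context's final free_ioctx_reqs() decrements the counter and only the last one completes, NULL table slots are subtracted as `skipped`, and io_destroy() reuses the same structure with a count of one. A userspace analog of the countdown-and-signal pattern (C11 atomics plus a POSIX semaphore):

    #include <stdatomic.h>
    #include <semaphore.h>

    struct rq_wait {
        sem_t comp;
        atomic_int count;
    };

    /* Called once per finished context; only the last caller signals,
     * mirroring atomic_dec_and_test() + complete() in the patch. */
    static void ctx_done(struct rq_wait *w)
    {
        if (atomic_fetch_sub(&w->count, 1) == 1)
            sem_post(&w->comp);
    }

    /* The waiter does sem_wait(&w->comp) once, after queueing all contexts. */
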
2937 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2938 +index 8b33da6ec3dd..63be2a96ed6a 100644
2939 +--- a/fs/btrfs/extent-tree.c
2940 ++++ b/fs/btrfs/extent-tree.c
2941 +@@ -8535,6 +8535,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
2942 + trans = btrfs_join_transaction(root);
2943 + if (IS_ERR(trans))
2944 + return PTR_ERR(trans);
2945 ++ /*
2946 ++ * if we are changing raid levels, try to allocate a corresponding
2947 ++ * block group with the new raid level.
2948 ++ */
2949 ++ alloc_flags = update_block_group_flags(root, cache->flags);
2950 ++ if (alloc_flags != cache->flags) {
2951 ++ ret = do_chunk_alloc(trans, root, alloc_flags,
2952 ++ CHUNK_ALLOC_FORCE);
2953 ++ /*
2954 ++ * ENOSPC is allowed here, we may have enough space
2955 ++ * already allocated at the new raid level to
2956 ++ * carry on
2957 ++ */
2958 ++ if (ret == -ENOSPC)
2959 ++ ret = 0;
2960 ++ if (ret < 0)
2961 ++ goto out;
2962 ++ }
2963 +
2964 + ret = set_block_group_ro(cache, 0);
2965 + if (!ret)
2966 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2967 +index d688cfe5d496..782f3bc4651d 100644
2968 +--- a/fs/btrfs/extent_io.c
2969 ++++ b/fs/btrfs/extent_io.c
2970 +@@ -4514,8 +4514,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2971 + }
2972 + ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2973 + em_len, flags);
2974 +- if (ret)
2975 ++ if (ret) {
2976 ++ if (ret == 1)
2977 ++ ret = 0;
2978 + goto out_free;
2979 ++ }
2980 + }
2981 + out_free:
2982 + free_extent_map(em);
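
[Illustrative sketch, not part of the patch.] For context on the extent_fiemap() hunk: fiemap_fill_next_extent() returns 1 when the caller's extent buffer is full, meaning "stop iterating", not "fail", and the fix maps that sentinel back to 0 before jumping to cleanup. A toy version of the convention (fill_next() and walk_extents() are invented names):

#include <stdio.h>

static int fill_next(int *filled, int cap, int val)
{
        if (*filled >= cap)
                return 1;               /* buffer full: stop iterating */
        printf("extent %d\n", val);
        (*filled)++;
        return 0;
}

static int walk_extents(int cap)
{
        int filled = 0, ret = 0, i;

        for (i = 0; i < 8; i++) {
                ret = fill_next(&filled, cap, i);
                if (ret) {
                        if (ret == 1)   /* sentinel, map back to success */
                                ret = 0;
                        break;
                }
        }
        return ret;                     /* negative would be a real error */
}

int main(void)
{
        return walk_extents(3);
}
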
2983 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2984 +index 2b4c5423672d..64e8fb639f72 100644
2985 +--- a/fs/btrfs/ioctl.c
2986 ++++ b/fs/btrfs/ioctl.c
2987 +@@ -3206,6 +3206,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
2988 + key.offset = off;
2989 +
2990 + while (1) {
2991 ++ u64 next_key_min_offset = key.offset + 1;
2992 ++
2993 + /*
2994 + * note the key will change type as we walk through the
2995 + * tree.
2996 +@@ -3286,7 +3288,7 @@ process_slot:
2997 + } else if (key.offset >= off + len) {
2998 + break;
2999 + }
3000 +-
3001 ++ next_key_min_offset = key.offset + datal;
3002 + size = btrfs_item_size_nr(leaf, slot);
3003 + read_extent_buffer(leaf, buf,
3004 + btrfs_item_ptr_offset(leaf, slot),
3005 +@@ -3501,7 +3503,7 @@ process_slot:
3006 + break;
3007 + }
3008 + btrfs_release_path(path);
3009 +- key.offset++;
3010 ++ key.offset = next_key_min_offset;
3011 + }
3012 + ret = 0;
3013 +
3014 +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3015 +index d6033f540cc7..571de5a08fe7 100644
3016 +--- a/fs/btrfs/send.c
3017 ++++ b/fs/btrfs/send.c
3018 +@@ -5852,19 +5852,20 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
3019 + ret = PTR_ERR(clone_root);
3020 + goto out;
3021 + }
3022 +- clone_sources_to_rollback = i + 1;
3023 + spin_lock(&clone_root->root_item_lock);
3024 +- clone_root->send_in_progress++;
3025 +- if (!btrfs_root_readonly(clone_root)) {
3026 ++ if (!btrfs_root_readonly(clone_root) ||
3027 ++ btrfs_root_dead(clone_root)) {
3028 + spin_unlock(&clone_root->root_item_lock);
3029 + srcu_read_unlock(&fs_info->subvol_srcu, index);
3030 + ret = -EPERM;
3031 + goto out;
3032 + }
3033 ++ clone_root->send_in_progress++;
3034 + spin_unlock(&clone_root->root_item_lock);
3035 + srcu_read_unlock(&fs_info->subvol_srcu, index);
3036 +
3037 + sctx->clone_roots[i].root = clone_root;
3038 ++ clone_sources_to_rollback = i + 1;
3039 + }
3040 + vfree(clone_sources_tmp);
3041 + clone_sources_tmp = NULL;
3042 +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3043 +index 05fef198ff94..e477ed67a49a 100644
3044 +--- a/fs/btrfs/super.c
3045 ++++ b/fs/btrfs/super.c
3046 +@@ -901,6 +901,15 @@ find_root:
3047 + if (IS_ERR(new_root))
3048 + return ERR_CAST(new_root);
3049 +
3050 ++ if (!(sb->s_flags & MS_RDONLY)) {
3051 ++ int ret;
3052 ++ down_read(&fs_info->cleanup_work_sem);
3053 ++ ret = btrfs_orphan_cleanup(new_root);
3054 ++ up_read(&fs_info->cleanup_work_sem);
3055 ++ if (ret)
3056 ++ return ERR_PTR(ret);
3057 ++ }
3058 ++
3059 + dir_id = btrfs_root_dirid(&new_root->root_item);
3060 + setup_root:
3061 + location.objectid = dir_id;
3062 +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
3063 +index aff923ae8c4b..d87d8eced064 100644
3064 +--- a/include/linux/backing-dev.h
3065 ++++ b/include/linux/backing-dev.h
3066 +@@ -116,7 +116,6 @@ __printf(3, 4)
3067 + int bdi_register(struct backing_dev_info *bdi, struct device *parent,
3068 + const char *fmt, ...);
3069 + int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
3070 +-void bdi_unregister(struct backing_dev_info *bdi);
3071 + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
3072 + void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
3073 + enum wb_reason reason);
3074 +diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
3075 +index 5976bdecf58b..9fe865ccc3f3 100644
3076 +--- a/include/net/inet_connection_sock.h
3077 ++++ b/include/net/inet_connection_sock.h
3078 +@@ -98,7 +98,8 @@ struct inet_connection_sock {
3079 + const struct tcp_congestion_ops *icsk_ca_ops;
3080 + const struct inet_connection_sock_af_ops *icsk_af_ops;
3081 + unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
3082 +- __u8 icsk_ca_state:7,
3083 ++ __u8 icsk_ca_state:6,
3084 ++ icsk_ca_setsockopt:1,
3085 + icsk_ca_dst_locked:1;
3086 + __u8 icsk_retransmits;
3087 + __u8 icsk_pending;
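
[Illustrative sketch, not part of the patch.] On the inet_connection_sock.h hunk: the congestion-control state only needs to hold values up to TCP_CA_Loss (4), so shrinking the field from seven bits to six frees a bit for the new icsk_ca_setsockopt flag without growing the structure. A compilable sketch of the same layout (struct icsk_bits is an invented stand-in):

#include <stdint.h>
#include <stdio.h>

struct icsk_bits {
        uint8_t ca_state:6,             /* was :7; TCP_CA_Loss == 4 still fits */
                ca_setsockopt:1,        /* the new flag, from the freed bit */
                ca_dst_locked:1;
};

int main(void)
{
        struct icsk_bits b = { .ca_state = 4, .ca_setsockopt = 1 };

        /* 6 + 1 + 1 = 8 bits: still a single byte */
        printf("sizeof=%zu state=%u setsockopt=%u\n",
               sizeof(b), b.ca_state, b.ca_setsockopt);
        return 0;
}
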
3088 +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
3089 +index 856f01cb51dd..230775f5952a 100644
3090 +--- a/include/net/sctp/sctp.h
3091 ++++ b/include/net/sctp/sctp.h
3092 +@@ -571,11 +571,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
3093 + /* Map v4 address to v4-mapped v6 address */
3094 + static inline void sctp_v4_map_v6(union sctp_addr *addr)
3095 + {
3096 ++ __be16 port;
3097 ++
3098 ++ port = addr->v4.sin_port;
3099 ++ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
3100 ++ addr->v6.sin6_port = port;
3101 + addr->v6.sin6_family = AF_INET6;
3102 + addr->v6.sin6_flowinfo = 0;
3103 + addr->v6.sin6_scope_id = 0;
3104 +- addr->v6.sin6_port = addr->v4.sin_port;
3105 +- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
3106 + addr->v6.sin6_addr.s6_addr32[0] = 0;
3107 + addr->v6.sin6_addr.s6_addr32[1] = 0;
3108 + addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
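
[Illustrative sketch, not part of the patch.] On the sctp_v4_map_v6() reordering: union sctp_addr overlays the v4 and v6 sockaddr views, and sin6_flowinfo shares storage with sin_addr, so the old write order zeroed part of the input before reading it. The fix copies the overlapping v4 fields into locals first. A user-space sketch of the same hazard (union any_addr and map_v4_to_v6() are invented; the offset overlap matches common libc sockaddr layouts):

#define _DEFAULT_SOURCE                 /* for s6_addr32 on glibc */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union any_addr {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
};

static void map_v4_to_v6(union any_addr *addr)
{
        in_port_t port = addr->v4.sin_port;             /* save before writing */
        uint32_t s_addr = addr->v4.sin_addr.s_addr;     /* overlaps sin6_flowinfo */

        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_flowinfo = 0;     /* would clobber sin_addr if unread */
        addr->v6.sin6_scope_id = 0;
        addr->v6.sin6_port = port;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
        addr->v6.sin6_addr.s6_addr32[1] = 0;
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
        addr->v6.sin6_addr.s6_addr32[3] = s_addr;
}

int main(void)
{
        union any_addr a;
        char buf[INET6_ADDRSTRLEN];

        memset(&a, 0, sizeof(a));
        a.v4.sin_family = AF_INET;
        a.v4.sin_port = htons(9);
        inet_pton(AF_INET, "192.0.2.1", &a.v4.sin_addr);

        map_v4_to_v6(&a);
        inet_ntop(AF_INET6, &a.v6.sin6_addr, buf, sizeof(buf));
        printf("%s port %u\n", buf, ntohs(a.v6.sin6_port));
        return 0;
}
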
3109 +diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
3110 +index 5a14ead59696..885d3a380451 100644
3111 +--- a/include/trace/events/writeback.h
3112 ++++ b/include/trace/events/writeback.h
3113 +@@ -233,7 +233,6 @@ DEFINE_EVENT(writeback_class, name, \
3114 + DEFINE_WRITEBACK_EVENT(writeback_nowork);
3115 + DEFINE_WRITEBACK_EVENT(writeback_wake_background);
3116 + DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
3117 +-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
3118 +
3119 + DECLARE_EVENT_CLASS(wbc_class,
3120 + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
3121 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3122 +index 241213be507c..486d00c408b0 100644
3123 +--- a/kernel/sched/fair.c
3124 ++++ b/kernel/sched/fair.c
3125 +@@ -2166,7 +2166,7 @@ void task_numa_work(struct callback_head *work)
3126 + }
3127 + for (; vma; vma = vma->vm_next) {
3128 + if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
3129 +- is_vm_hugetlb_page(vma)) {
3130 ++ is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
3131 + continue;
3132 + }
3133 +
3134 +diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
3135 +index 13d945c0d03f..1b28df2d9104 100644
3136 +--- a/kernel/trace/ring_buffer_benchmark.c
3137 ++++ b/kernel/trace/ring_buffer_benchmark.c
3138 +@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
3139 +
3140 + if (producer_fifo >= 0) {
3141 + struct sched_param param = {
3142 +- .sched_priority = consumer_fifo
3143 ++ .sched_priority = producer_fifo
3144 + };
3145 + sched_setscheduler(producer, SCHED_FIFO, &param);
3146 + } else
3147 +diff --git a/mm/backing-dev.c b/mm/backing-dev.c
3148 +index 6dc4580df2af..000e7b3b9896 100644
3149 +--- a/mm/backing-dev.c
3150 ++++ b/mm/backing-dev.c
3151 +@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
3152 + flush_delayed_work(&bdi->wb.dwork);
3153 + }
3154 +
3155 +-/*
3156 +- * Called when the device behind @bdi has been removed or ejected.
3157 +- *
3158 +- * We can't really do much here except for reducing the dirty ratio at
3159 +- * the moment. In the future we should be able to set a flag so that
3160 +- * the filesystem can handle errors at mark_inode_dirty time instead
3161 +- * of only at writeback time.
3162 +- */
3163 +-void bdi_unregister(struct backing_dev_info *bdi)
3164 +-{
3165 +- if (WARN_ON_ONCE(!bdi->dev))
3166 +- return;
3167 +-
3168 +- bdi_set_min_ratio(bdi, 0);
3169 +-}
3170 +-EXPORT_SYMBOL(bdi_unregister);
3171 +-
3172 + static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
3173 + {
3174 + memset(wb, 0, sizeof(*wb));
3175 +@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
3176 + int i;
3177 +
3178 + bdi_wb_shutdown(bdi);
3179 ++ bdi_set_min_ratio(bdi, 0);
3180 +
3181 + WARN_ON(!list_empty(&bdi->work_list));
3182 + WARN_ON(delayed_work_pending(&bdi->wb.dwork));
3183 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
3184 +index 65842d688b7c..93caba791cde 100644
3185 +--- a/mm/memory_hotplug.c
3186 ++++ b/mm/memory_hotplug.c
3187 +@@ -1978,8 +1978,10 @@ void try_offline_node(int nid)
3188 + * wait_table may be allocated from boot memory,
3189 + * here only free if it's allocated by vmalloc.
3190 + */
3191 +- if (is_vmalloc_addr(zone->wait_table))
3192 ++ if (is_vmalloc_addr(zone->wait_table)) {
3193 + vfree(zone->wait_table);
3194 ++ zone->wait_table = NULL;
3195 ++ }
3196 + }
3197 + }
3198 + EXPORT_SYMBOL(try_offline_node);
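
[Illustrative sketch, not part of the patch.] On the try_offline_node() hunk: vfree() alone leaves zone->wait_table dangling, so a later offline cycle would free it again; clearing the pointer closes that hole. The free-then-NULL idiom in miniature (this struct zone is a cut-down stand-in):

#include <stdio.h>
#include <stdlib.h>

struct zone {
        unsigned long *wait_table;
};

static void offline_zone(struct zone *z)
{
        if (z->wait_table) {
                free(z->wait_table);
                z->wait_table = NULL;   /* the line the patch adds, in spirit */
        }
}

int main(void)
{
        struct zone z = { .wait_table = calloc(16, sizeof(unsigned long)) };

        offline_zone(&z);
        offline_zone(&z);               /* harmless now; double free before */
        puts("ok");
        return 0;
}
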
3199 +diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
3200 +index e0670d7054f9..659fb96672e4 100644
3201 +--- a/net/bridge/br_fdb.c
3202 ++++ b/net/bridge/br_fdb.c
3203 +@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
3204 + int err = 0;
3205 +
3206 + if (ndm->ndm_flags & NTF_USE) {
3207 ++ local_bh_disable();
3208 + rcu_read_lock();
3209 + br_fdb_update(p->br, p, addr, vid, true);
3210 + rcu_read_unlock();
3211 ++ local_bh_enable();
3212 + } else {
3213 + spin_lock_bh(&p->br->hash_lock);
3214 + err = fdb_add_entry(p, addr, ndm->ndm_state,
3215 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3216 +index c465876c7861..b0aee78dba41 100644
3217 +--- a/net/bridge/br_multicast.c
3218 ++++ b/net/bridge/br_multicast.c
3219 +@@ -1071,7 +1071,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
3220 +
3221 + err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
3222 + vid);
3223 +- if (!err)
3224 ++ if (err)
3225 + break;
3226 + }
3227 +
3228 +@@ -1821,7 +1821,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
3229 + if (query->startup_sent < br->multicast_startup_query_count)
3230 + query->startup_sent++;
3231 +
3232 +- RCU_INIT_POINTER(querier, NULL);
3233 ++ RCU_INIT_POINTER(querier->port, NULL);
3234 + br_multicast_send_query(br, NULL, query);
3235 + spin_unlock(&br->multicast_lock);
3236 + }
3237 +diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
3238 +index a6e2da0bc718..982101c12258 100644
3239 +--- a/net/caif/caif_socket.c
3240 ++++ b/net/caif/caif_socket.c
3241 +@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
3242 + release_sock(sk);
3243 + timeo = schedule_timeout(timeo);
3244 + lock_sock(sk);
3245 ++
3246 ++ if (sock_flag(sk, SOCK_DEAD))
3247 ++ break;
3248 ++
3249 + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
3250 + }
3251 +
3252 +@@ -374,6 +378,10 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3253 + struct sk_buff *skb;
3254 +
3255 + lock_sock(sk);
3256 ++ if (sock_flag(sk, SOCK_DEAD)) {
3257 ++ err = -ECONNRESET;
3258 ++ goto unlock;
3259 ++ }
3260 + skb = skb_dequeue(&sk->sk_receive_queue);
3261 + caif_check_flow_release(sk);
3262 +
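
[Illustrative sketch, not part of the patch.] On the caif_socket.c hunks (the same change lands in af_unix.c further down): a blocked reader woken after the socket died must notice, because everything it checked before sleeping is stale once it retakes the lock. The patch therefore tests SOCK_DEAD both before dequeueing and after each wakeup. A pthreads sketch of that revalidation loop (struct fake_sock and friends are invented):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
        pthread_mutex_t lock;
        pthread_cond_t wq;
        int dead;
        int data;
};

static int stream_recv(struct fake_sock *sk)
{
        int err = 0;

        pthread_mutex_lock(&sk->lock);
        while (!sk->data) {
                if (sk->dead) {         /* retested on entry and after wakeup */
                        err = -ECONNRESET;
                        goto unlock;
                }
                pthread_cond_wait(&sk->wq, &sk->lock);
        }
        sk->data--;
unlock:
        pthread_mutex_unlock(&sk->lock);
        return err;
}

static void *killer(void *arg)
{
        struct fake_sock *sk = arg;

        pthread_mutex_lock(&sk->lock);
        sk->dead = 1;                   /* peer went away; no data coming */
        pthread_cond_broadcast(&sk->wq);
        pthread_mutex_unlock(&sk->lock);
        return NULL;
}

int main(void)
{
        struct fake_sock sk = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
        };
        pthread_t t;

        pthread_create(&t, NULL, killer, &sk);
        printf("recv -> %d\n", stream_recv(&sk));       /* -ECONNRESET */
        pthread_join(t, NULL);
        return 0;
}
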
3263 +diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
3264 +index a1ef53c04415..b1f2d1f44d37 100644
3265 +--- a/net/ceph/crush/mapper.c
3266 ++++ b/net/ceph/crush/mapper.c
3267 +@@ -290,6 +290,7 @@ static int is_out(const struct crush_map *map,
3268 + * @type: the type of item to choose
3269 + * @out: pointer to output vector
3270 + * @outpos: our position in that vector
3271 ++ * @out_size: size of the out vector
3272 + * @tries: number of attempts to make
3273 + * @recurse_tries: number of attempts to have recursive chooseleaf make
3274 + * @local_retries: localized retries
3275 +@@ -304,6 +305,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3276 + const __u32 *weight, int weight_max,
3277 + int x, int numrep, int type,
3278 + int *out, int outpos,
3279 ++ int out_size,
3280 + unsigned int tries,
3281 + unsigned int recurse_tries,
3282 + unsigned int local_retries,
3283 +@@ -322,6 +324,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3284 + int item = 0;
3285 + int itemtype;
3286 + int collide, reject;
3287 ++ int count = out_size;
3288 +
3289 + dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
3290 + recurse_to_leaf ? "_LEAF" : "",
3291 +@@ -329,7 +332,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3292 + tries, recurse_tries, local_retries, local_fallback_retries,
3293 + parent_r);
3294 +
3295 +- for (rep = outpos; rep < numrep; rep++) {
3296 ++ for (rep = outpos; rep < numrep && count > 0 ; rep++) {
3297 + /* keep trying until we get a non-out, non-colliding item */
3298 + ftotal = 0;
3299 + skip_rep = 0;
3300 +@@ -403,7 +406,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3301 + map->buckets[-1-item],
3302 + weight, weight_max,
3303 + x, outpos+1, 0,
3304 +- out2, outpos,
3305 ++ out2, outpos, count,
3306 + recurse_tries, 0,
3307 + local_retries,
3308 + local_fallback_retries,
3309 +@@ -463,6 +466,7 @@ reject:
3310 + dprintk("CHOOSE got %d\n", item);
3311 + out[outpos] = item;
3312 + outpos++;
3313 ++ count--;
3314 + }
3315 +
3316 + dprintk("CHOOSE returns %d\n", outpos);
3317 +@@ -654,6 +658,7 @@ int crush_do_rule(const struct crush_map *map,
3318 + __u32 step;
3319 + int i, j;
3320 + int numrep;
3321 ++ int out_size;
3322 + /*
3323 + * the original choose_total_tries value was off by one (it
3324 + * counted "retries" and not "tries"). add one.
3325 +@@ -761,6 +766,7 @@ int crush_do_rule(const struct crush_map *map,
3326 + x, numrep,
3327 + curstep->arg2,
3328 + o+osize, j,
3329 ++ result_max-osize,
3330 + choose_tries,
3331 + recurse_tries,
3332 + choose_local_retries,
3333 +@@ -770,11 +776,13 @@ int crush_do_rule(const struct crush_map *map,
3334 + c+osize,
3335 + 0);
3336 + } else {
3337 ++ out_size = ((numrep < (result_max-osize)) ?
3338 ++ numrep : (result_max-osize));
3339 + crush_choose_indep(
3340 + map,
3341 + map->buckets[-1-w[i]],
3342 + weight, weight_max,
3343 +- x, numrep, numrep,
3344 ++ x, out_size, numrep,
3345 + curstep->arg2,
3346 + o+osize, j,
3347 + choose_tries,
3348 +@@ -783,7 +791,7 @@ int crush_do_rule(const struct crush_map *map,
3349 + recurse_to_leaf,
3350 + c+osize,
3351 + 0);
3352 +- osize += numrep;
3353 ++ osize += out_size;
3354 + }
3355 + }
3356 +
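
[Illustrative sketch, not part of the patch.] On the crush mapper hunks: crush_choose_firstn() used to trust numrep and could emit past the caller's output vector; the new out_size/count plumbing stops the selection loop once the remaining capacity runs out. A toy selection loop bounded by both the replica count and the output capacity (this choose_firstn() is a simplification, not the real algorithm):

#include <stdio.h>

static int choose_firstn(const int *pool, int pool_len,
                         int numrep, int *out, int out_size)
{
        int outpos = 0, count = out_size, i;

        for (i = 0; i < pool_len && outpos < numrep && count > 0; i++) {
                if (pool[i] < 0)        /* stand-in for out/collision checks */
                        continue;
                out[outpos++] = pool[i];
                count--;
        }
        return outpos;
}

int main(void)
{
        int pool[] = { 3, -1, 7, 9, 11, 13 };
        int out[2];                     /* result_max smaller than numrep */
        int n = choose_firstn(pool, 6, 5, out, 2);

        printf("chose %d items\n", n);  /* 2: bounded by out_size, not numrep */
        return 0;
}
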
3357 +diff --git a/net/core/dev.c b/net/core/dev.c
3358 +index 22a53acdb5bb..e977e15c2ac0 100644
3359 +--- a/net/core/dev.c
3360 ++++ b/net/core/dev.c
3361 +@@ -5170,7 +5170,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
3362 + if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
3363 + return -EBUSY;
3364 +
3365 +- if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
3366 ++ if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
3367 + return -EEXIST;
3368 +
3369 + if (master && netdev_master_upper_dev_get(dev))
3370 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3371 +index 7ebed55b5f7d..a2b90e1fc115 100644
3372 +--- a/net/core/rtnetlink.c
3373 ++++ b/net/core/rtnetlink.c
3374 +@@ -2337,6 +2337,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3375 + {
3376 + struct sk_buff *skb;
3377 +
3378 ++ if (dev->reg_state != NETREG_REGISTERED)
3379 ++ return;
3380 ++
3381 + skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
3382 + if (skb)
3383 + rtmsg_ifinfo_send(skb, dev, flags);
3384 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3385 +index 20fc0202cbbe..e262a087050b 100644
3386 +--- a/net/ipv4/route.c
3387 ++++ b/net/ipv4/route.c
3388 +@@ -903,6 +903,10 @@ static int ip_error(struct sk_buff *skb)
3389 + bool send;
3390 + int code;
3391 +
3392 ++ /* IP on this device is disabled. */
3393 ++ if (!in_dev)
3394 ++ goto out;
3395 ++
3396 + net = dev_net(rt->dst.dev);
3397 + if (!IN_DEV_FORWARD(in_dev)) {
3398 + switch (rt->dst.error) {
3399 +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
3400 +index 62856e185a93..9d2fbd88df93 100644
3401 +--- a/net/ipv4/tcp_cong.c
3402 ++++ b/net/ipv4/tcp_cong.c
3403 +@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
3404 +
3405 + tcp_cleanup_congestion_control(sk);
3406 + icsk->icsk_ca_ops = ca;
3407 ++ icsk->icsk_ca_setsockopt = 1;
3408 +
3409 + if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
3410 + icsk->icsk_ca_ops->init(sk);
3411 +@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
3412 + rcu_read_lock();
3413 + ca = __tcp_ca_find_autoload(name);
3414 + /* No change asking for existing value */
3415 +- if (ca == icsk->icsk_ca_ops)
3416 ++ if (ca == icsk->icsk_ca_ops) {
3417 ++ icsk->icsk_ca_setsockopt = 1;
3418 + goto out;
3419 ++ }
3420 + if (!ca)
3421 + err = -ENOENT;
3422 + else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
3423 +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
3424 +index dd11ac7798c6..50277af92485 100644
3425 +--- a/net/ipv4/tcp_minisocks.c
3426 ++++ b/net/ipv4/tcp_minisocks.c
3427 +@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
3428 + tw->tw_v6_daddr = sk->sk_v6_daddr;
3429 + tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
3430 + tw->tw_tclass = np->tclass;
3431 +- tw->tw_flowlabel = np->flow_label >> 12;
3432 ++ tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
3433 + tw->tw_ipv6only = sk->sk_ipv6only;
3434 + }
3435 + #endif
3436 +@@ -437,7 +437,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
3437 + rcu_read_unlock();
3438 + }
3439 +
3440 +- if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
3441 ++ /* If no valid choice made yet, assign current system default ca. */
3442 ++ if (!ca_got_dst &&
3443 ++ (!icsk->icsk_ca_setsockopt ||
3444 ++ !try_module_get(icsk->icsk_ca_ops->owner)))
3445 + tcp_assign_congestion_control(sk);
3446 +
3447 + tcp_set_ca_state(sk, TCP_CA_Open);
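
[Illustrative sketch, not part of the patch.] On the tcp_minisocks.c flow-label line (mirrored by the cpu_to_be32() in tcp_ipv6.c below): np->flow_label is a big-endian word whose label bits must be masked in network order and then converted; the old >> 12 only happened to behave on big-endian machines. A small demonstration (the mask mirrors IPV6_FLOWLABEL_MASK; the values are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define FLOWLABEL_MASK htonl(0x000FFFFFU)       /* like IPV6_FLOWLABEL_MASK */

int main(void)
{
        /* flowinfo as it sits in the header: label 0xabcde in the low bits */
        uint32_t flowinfo_be = htonl(0x012abcdeU);

        /* fixed code: mask in network order, then convert */
        uint32_t label = ntohl(flowinfo_be & FLOWLABEL_MASK);

        /* pre-patch code: shifts the big-endian word as if host-order */
        uint32_t wrong = flowinfo_be >> 12;

        printf("label = 0x%05x\n", (unsigned)label);    /* 0xabcde */
        printf("wrong = 0x%08x on little-endian hosts\n", (unsigned)wrong);
        return 0;
}
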
3448 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3449 +index 97ef1f8b7be8..51f17454bd7b 100644
3450 +--- a/net/ipv4/udp.c
3451 ++++ b/net/ipv4/udp.c
3452 +@@ -90,6 +90,7 @@
3453 + #include <linux/socket.h>
3454 + #include <linux/sockios.h>
3455 + #include <linux/igmp.h>
3456 ++#include <linux/inetdevice.h>
3457 + #include <linux/in.h>
3458 + #include <linux/errno.h>
3459 + #include <linux/timer.h>
3460 +@@ -1348,10 +1349,8 @@ csum_copy_err:
3461 + }
3462 + unlock_sock_fast(sk, slow);
3463 +
3464 +- if (noblock)
3465 +- return -EAGAIN;
3466 +-
3467 +- /* starting over for a new packet */
3468 ++ /* starting over for a new packet, but check if we need to yield */
3469 ++ cond_resched();
3470 + msg->msg_flags &= ~MSG_TRUNC;
3471 + goto try_again;
3472 + }
3473 +@@ -1968,6 +1967,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
3474 + struct sock *sk;
3475 + struct dst_entry *dst;
3476 + int dif = skb->dev->ifindex;
3477 ++ int ours;
3478 +
3479 + /* validate the packet */
3480 + if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
3481 +@@ -1977,14 +1977,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
3482 + uh = udp_hdr(skb);
3483 +
3484 + if (skb->pkt_type == PACKET_BROADCAST ||
3485 +- skb->pkt_type == PACKET_MULTICAST)
3486 ++ skb->pkt_type == PACKET_MULTICAST) {
3487 ++ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
3488 ++
3489 ++ if (!in_dev)
3490 ++ return;
3491 ++
3492 ++ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
3493 ++ iph->protocol);
3494 ++ if (!ours)
3495 ++ return;
3496 + sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
3497 + uh->source, iph->saddr, dif);
3498 +- else if (skb->pkt_type == PACKET_HOST)
3499 ++ } else if (skb->pkt_type == PACKET_HOST) {
3500 + sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
3501 + uh->source, iph->saddr, dif);
3502 +- else
3503 ++ } else {
3504 + return;
3505 ++ }
3506 +
3507 + if (!sk)
3508 + return;
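
[Illustrative sketch, not part of the patch.] On the udp.c csum_copy_err change (repeated for IPv6 below): rather than returning -EAGAIN to non-blocking callers, the receive path now loops for the next packet but yields via cond_resched(), so a stream of corrupt packets cannot monopolize the CPU. A user-space analogue with sched_yield() standing in for cond_resched() (try_recv() is invented):

#include <sched.h>
#include <stdio.h>

static int try_recv(int attempt)
{
        return attempt < 3 ? -1 : 42;   /* first packets "fail checksum" */
}

int main(void)
{
        int attempt = 0, val;

try_again:
        val = try_recv(attempt++);
        if (val < 0) {
                /* starting over for a new packet, but yield first */
                sched_yield();
                goto try_again;
        }
        printf("got %d after %d attempts\n", val, attempt);
        return 0;
}
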
3509 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3510 +index 1f5e62229aaa..5ca3bc880fef 100644
3511 +--- a/net/ipv6/tcp_ipv6.c
3512 ++++ b/net/ipv6/tcp_ipv6.c
3513 +@@ -975,7 +975,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
3514 + tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
3515 + tcp_time_stamp + tcptw->tw_ts_offset,
3516 + tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
3517 +- tw->tw_tclass, (tw->tw_flowlabel << 12));
3518 ++ tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
3519 +
3520 + inet_twsk_put(tw);
3521 + }
3522 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3523 +index d048d46779fc..1c9512aba77e 100644
3524 +--- a/net/ipv6/udp.c
3525 ++++ b/net/ipv6/udp.c
3526 +@@ -528,10 +528,8 @@ csum_copy_err:
3527 + }
3528 + unlock_sock_fast(sk, slow);
3529 +
3530 +- if (noblock)
3531 +- return -EAGAIN;
3532 +-
3533 +- /* starting over for a new packet */
3534 ++ /* starting over for a new packet, but check if we need to yield */
3535 ++ cond_resched();
3536 + msg->msg_flags &= ~MSG_TRUNC;
3537 + goto try_again;
3538 + }
3539 +@@ -734,7 +732,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
3540 + (inet->inet_dport && inet->inet_dport != rmt_port) ||
3541 + (!ipv6_addr_any(&sk->sk_v6_daddr) &&
3542 + !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
3543 +- (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
3544 ++ (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
3545 ++ (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
3546 ++ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
3547 + return false;
3548 + if (!inet6_mc_check(sk, loc_addr, rmt_addr))
3549 + return false;
3550 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3551 +index d1d7a8166f46..0e9c28dc86b7 100644
3552 +--- a/net/netlink/af_netlink.c
3553 ++++ b/net/netlink/af_netlink.c
3554 +@@ -1052,7 +1052,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
3555 + struct netlink_table *table = &nl_table[sk->sk_protocol];
3556 + int err;
3557 +
3558 +- lock_sock(sk);
3559 ++ mutex_lock(&table->hash.mutex);
3560 +
3561 + err = -EBUSY;
3562 + if (nlk_sk(sk)->portid)
3563 +@@ -1069,11 +1069,12 @@ static int netlink_insert(struct sock *sk, u32 portid)
3564 + err = 0;
3565 + if (!__netlink_insert(table, sk)) {
3566 + err = -EADDRINUSE;
3567 ++ nlk_sk(sk)->portid = 0;
3568 + sock_put(sk);
3569 + }
3570 +
3571 + err:
3572 +- release_sock(sk);
3573 ++ mutex_unlock(&table->hash.mutex);
3574 + return err;
3575 + }
3576 +
3577 +@@ -1082,10 +1083,12 @@ static void netlink_remove(struct sock *sk)
3578 + struct netlink_table *table;
3579 +
3580 + table = &nl_table[sk->sk_protocol];
3581 ++ mutex_lock(&table->hash.mutex);
3582 + if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
3583 + WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
3584 + __sock_put(sk);
3585 + }
3586 ++ mutex_unlock(&table->hash.mutex);
3587 +
3588 + netlink_table_grab();
3589 + if (nlk_sk(sk)->subscriptions) {
3590 +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
3591 +index baef987fe2c0..d3328a19f5b2 100644
3592 +--- a/net/sched/cls_api.c
3593 ++++ b/net/sched/cls_api.c
3594 +@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
3595 + struct tcf_proto_ops *t;
3596 + int rc = -ENOENT;
3597 +
3598 ++ /* Wait for outstanding call_rcu()s, if any, from a
3599 ++ * tcf_proto_ops's destroy() handler.
3600 ++ */
3601 ++ rcu_barrier();
3602 ++
3603 + write_lock(&cls_mod_lock);
3604 + list_for_each_entry(t, &tcf_proto_base, head) {
3605 + if (t == ops) {
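
[Illustrative sketch, not part of the patch.] On the cls_api.c hunk: classifier destroy paths free objects via call_rcu(), so the ops must outlive any callbacks still queued, and rcu_barrier() waits for them before the unregister proceeds. A minimal model of that drain-before-teardown ordering using an explicit deferred-work queue (call_deferred() and barrier_deferred() are illustrative stand-ins, not RCU):

#include <stdio.h>

#define MAX_DEFERRED 8

typedef void (*cb_t)(void *);

static struct { cb_t fn; void *arg; } queue[MAX_DEFERRED];
static int pending;

static void call_deferred(cb_t fn, void *arg)   /* models call_rcu() */
{
        queue[pending].fn = fn;
        queue[pending].arg = arg;
        pending++;
}

static void barrier_deferred(void)              /* models rcu_barrier() */
{
        int i;

        for (i = 0; i < pending; i++)
                queue[i].fn(queue[i].arg);
        pending = 0;
}

static void destroy_filter(void *arg)
{
        printf("freeing filter %s\n", (char *)arg);
}

int main(void)
{
        call_deferred(destroy_filter, "u32");

        /* unregister path: drain callbacks first, then drop the ops */
        barrier_deferred();
        puts("ops safely unregistered");
        return 0;
}
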
3606 +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
3607 +index 243b7d169d61..d9c2ee6d2959 100644
3608 +--- a/net/sched/sch_api.c
3609 ++++ b/net/sched/sch_api.c
3610 +@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
3611 + if (dev->flags & IFF_UP)
3612 + dev_deactivate(dev);
3613 +
3614 +- if (new && new->ops->attach) {
3615 +- new->ops->attach(new);
3616 +- num_q = 0;
3617 +- }
3618 ++ if (new && new->ops->attach)
3619 ++ goto skip;
3620 +
3621 + for (i = 0; i < num_q; i++) {
3622 + struct netdev_queue *dev_queue = dev_ingress_queue(dev);
3623 +@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
3624 + qdisc_destroy(old);
3625 + }
3626 +
3627 ++skip:
3628 + if (!ingress) {
3629 + notify_and_destroy(net, skb, n, classid,
3630 + dev->qdisc, new);
3631 + if (new && !new->ops->attach)
3632 + atomic_inc(&new->refcnt);
3633 + dev->qdisc = new ? : &noop_qdisc;
3634 ++
3635 ++ if (new && new->ops->attach)
3636 ++ new->ops->attach(new);
3637 + } else {
3638 + notify_and_destroy(net, skb, n, classid, old, new);
3639 + }
3640 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3641 +index 526b6edab018..146881f068e2 100644
3642 +--- a/net/unix/af_unix.c
3643 ++++ b/net/unix/af_unix.c
3644 +@@ -1887,6 +1887,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
3645 + unix_state_unlock(sk);
3646 + timeo = freezable_schedule_timeout(timeo);
3647 + unix_state_lock(sk);
3648 ++
3649 ++ if (sock_flag(sk, SOCK_DEAD))
3650 ++ break;
3651 ++
3652 + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
3653 + }
3654 +
3655 +@@ -1947,6 +1951,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3656 + struct sk_buff *skb, *last;
3657 +
3658 + unix_state_lock(sk);
3659 ++ if (sock_flag(sk, SOCK_DEAD)) {
3660 ++ err = -ECONNRESET;
3661 ++ goto unlock;
3662 ++ }
3663 + last = skb = skb_peek(&sk->sk_receive_queue);
3664 + again:
3665 + if (skb == NULL) {
3666 +diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
3667 +index 5b24d39d7903..318026617b57 100644
3668 +--- a/net/wireless/wext-compat.c
3669 ++++ b/net/wireless/wext-compat.c
3670 +@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
3671 + memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
3672 + wdev_unlock(wdev);
3673 +
3674 ++ memset(&sinfo, 0, sizeof(sinfo));
3675 ++
3676 + if (rdev_get_station(rdev, dev, bssid, &sinfo))
3677 + return NULL;
3678 +
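
[Illustrative sketch, not part of the patch.] On the wext-compat.c hunk: drivers fill struct station_info only for the fields they flag, so an un-zeroed stack copy leaks garbage through the untouched members, and the added memset() guarantees a clean baseline. The pattern in miniature (this struct station_info is a pruned stand-in):

#include <stdio.h>
#include <string.h>

struct station_info {
        unsigned int filled;            /* bitmask of valid fields */
        int signal;
        unsigned int tx_bytes;
};

#define STATION_INFO_SIGNAL 0x1

static void get_station(struct station_info *sinfo)
{
        /* the driver reports signal only; tx_bytes is left untouched */
        sinfo->filled |= STATION_INFO_SIGNAL;
        sinfo->signal = -42;
}

int main(void)
{
        struct station_info sinfo;

        memset(&sinfo, 0, sizeof(sinfo));       /* the line the patch adds */
        get_station(&sinfo);
        printf("signal=%d tx_bytes=%u\n", sinfo.signal, sinfo.tx_bytes);
        return 0;
}
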
3679 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3680 +index 93c78c3c4b95..a556d63564e6 100644
3681 +--- a/sound/pci/hda/patch_realtek.c
3682 ++++ b/sound/pci/hda/patch_realtek.c
3683 +@@ -2167,6 +2167,7 @@ static const struct hda_fixup alc882_fixups[] = {
3684 + static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3685 + SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
3686 + SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3687 ++ SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3688 + SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
3689 + SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3690 + SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
3691 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3692 +index 3e2ef61c627b..8b7e391dd0b8 100644
3693 +--- a/sound/usb/mixer.c
3694 ++++ b/sound/usb/mixer.c
3695 +@@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
3696 + case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
3697 + case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
3698 + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
3699 ++ case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
3700 + case USB_ID(0x046d, 0x0991):
3701 + /* Most audio usb devices lie about volume resolution.
3702 + * Most Logitech webcams have res = 384.
3703 +@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
3704 + unitid);
3705 + return -EINVAL;
3706 + }
3707 +- /* no bmControls field (e.g. Maya44) -> ignore */
3708 +- if (desc->bLength <= 10 + input_pins) {
3709 +- usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
3710 +- unitid);
3711 +- return 0;
3712 +- }
3713 +
3714 + num_ins = 0;
3715 + ich = 0;
3716 +@@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
3717 + err = parse_audio_unit(state, desc->baSourceID[pin]);
3718 + if (err < 0)
3719 + continue;
3720 ++ /* no bmControls field (e.g. Maya44) -> ignore */
3721 ++ if (desc->bLength <= 10 + input_pins)
3722 ++ continue;
3723 + err = check_input_term(state, desc->baSourceID[pin], &iterm);
3724 + if (err < 0)
3725 + return err;
3726 +diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
3727 +index b703cb3cda19..e5000da9e9d7 100644
3728 +--- a/sound/usb/mixer_maps.c
3729 ++++ b/sound/usb/mixer_maps.c
3730 +@@ -437,6 +437,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
3731 + .map = ebox44_map,
3732 + },
3733 + {
3734 ++ /* MAYA44 USB+ */
3735 ++ .id = USB_ID(0x2573, 0x0008),
3736 ++ .map = maya44_map,
3737 ++ },
3738 ++ {
3739 + /* KEF X300A */
3740 + .id = USB_ID(0x27ac, 0x1000),
3741 + .map = scms_usb3318_map,
3742 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3743 +index e21ec5abcc3a..2a408c60114b 100644
3744 +--- a/sound/usb/quirks.c
3745 ++++ b/sound/usb/quirks.c
3746 +@@ -1120,6 +1120,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3747 + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
3748 + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
3749 + case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
3750 ++ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
3751 + return true;
3752 + }
3753 + return false;
3754 +@@ -1266,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
3755 + if (fp->altsetting == 2)
3756 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3757 + break;
3758 +- /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
3759 +- case USB_ID(0x20b1, 0x2009):
3760 ++
3761 ++ case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
3762 ++ case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
3763 + if (fp->altsetting == 3)
3764 + return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3765 + break;